# \note PNG_LIBRARY and PNG_INCLUDE_PATH are promoted from external/libpng/CMakeLists.txt
endif ()
+# glslang
+add_subdirectory(external/glslang)
+
+# spirv-tools
+add_subdirectory(external/spirv-tools)
+
include_directories(${PNG_INCLUDE_PATH})
message(STATUS "DEQP_TARGET_NAME = ${DEQP_TARGET_NAME}")
framework/randomshaders
framework/egl
framework/egl/wrapper
+ external/vulkancts/framework/vulkan
)
if (DE_OS_IS_ANDROID OR DE_OS_IS_IOS)
endmacro (add_data_file)
add_subdirectory(framework)
+add_subdirectory(external/vulkancts/framework/vulkan)
if (DE_COMPILER_IS_MSC)
add_compile_options(/bigobj) # Required by glsBuiltinPrecisionTests.cpp
endif ()
add_subdirectory(modules)
+add_subdirectory(external/vulkancts/modules/vulkan)
# Single-binary targets
if (DE_OS_IS_ANDROID)
NativeLib(21, "arm64-v8a", 'android-arm64'), # ARM64 v8a ABI
]
-ANDROID_JAVA_API = "android-13"
+ANDROID_JAVA_API = "android-22"
NATIVE_LIB_NAME = "libdeqp.so"
def selectNDKPath ():
--- /dev/null
+<style type="text/css">
+
+code,div.listingblock {
+ max-width: 68em;
+}
+
+p {
+ max-width: 50em;
+}
+
+table {
+ max-width: 50em;
+}
+
+table.tableblock {
+ border-width: 1px;
+}
+
+h2 {
+ max-width: 35em;
+}
+
+</style>
--- /dev/null
+// asciidoc -b html5 -d book -f apitests.conf apitests.adoc
+
+:toc:
+:numbered:
+:docinfo:
+:revnumber: 4
+
+Vulkan API Test Plan
+====================
+
+NOTE: Document currently targets API revision 0.138.0
+
+This document currently outlines the Vulkan API testing plan. The document splits the API into features, and for each the important testing objectives are described. The technical implementation is not currently planned or documented here, except in select cases.
+
+In the future this document will likely evolve into a description of various tests and test coverage.
+
+Test framework
+--------------
+
+The test framework will provide tests access to the Vulkan platform interface. In addition, a library of generic utilities will be provided.
+
+Test case base class
+~~~~~~~~~~~~~~~~~~~~
+
+Vulkan test cases will use a slightly different interface from the traditional +tcu::TestCase+ to facilitate the following:
+
+ * Ability to generate shaders in high-level language, and pre-compile them without running the tests
+ * Cleaner separation between test case parameters and execution instance
+
+[source,cpp]
+----
+class TestCase : public tcu::TestCase
+{
+public:
+ TestCase (tcu::TestContext& testCtx, const std::string& name, const std::string& description);
+ TestCase (tcu::TestContext& testCtx, tcu::TestNodeType type, const std::string& name, const std::string& description);
+ virtual ~TestCase (void) {}
+
+ virtual void initPrograms (vk::ProgramCollection<glu::ProgramSources>& programCollection) const;
+ virtual TestInstance* createInstance (Context& context) const = 0;
+
+ IterateResult iterate (void) { DE_ASSERT(false); return STOP; } // Deprecated in this module
+};
+
+class TestInstance
+{
+public:
+ TestInstance (Context& context) : m_context(context) {}
+ virtual ~TestInstance (void) {}
+
+ virtual tcu::TestStatus iterate (void) = 0;
+
+protected:
+ Context& m_context;
+};
+----
+
+In addition, for simple tests, a utility to wrap a function as a test case is provided:
+
+[source,cpp]
+----
+tcu::TestStatus createSamplerTest (Context& context)
+{
+ TestLog& log = context.getTestContext().getLog();
+ const DefaultDevice device (context.getPlatformInterface(), context.getTestContext().getCommandLine());
+ const VkDevice vkDevice = device.getDevice();
+ const DeviceInterface& vk = device.getInterface();
+
+ {
+ const struct VkSamplerCreateInfo samplerInfo =
+ {
+ VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO, // VkStructureType sType;
+ DE_NULL, // const void* pNext;
+ VK_TEX_FILTER_NEAREST, // VkTexFilter magFilter;
+ VK_TEX_FILTER_NEAREST, // VkTexFilter minFilter;
+ VK_TEX_MIPMAP_MODE_BASE, // VkTexMipmapMode mipMode;
+ VK_TEX_ADDRESS_CLAMP, // VkTexAddress addressU;
+ VK_TEX_ADDRESS_CLAMP, // VkTexAddress addressV;
+ VK_TEX_ADDRESS_CLAMP, // VkTexAddress addressW;
+ 0.0f, // float mipLodBias;
+ 0u, // deUint32 maxAnisotropy;
+ VK_COMPARE_OP_ALWAYS, // VkCompareOp compareOp;
+ 0.0f, // float minLod;
+ 0.0f, // float maxLod;
+ VK_BORDER_COLOR_TRANSPARENT_BLACK, // VkBorderColor borderColor;
+ };
+
+ Move<VkSamplerT> tmpSampler = createSampler(vk, vkDevice, &samplerInfo);
+ }
+
+ return tcu::TestStatus::pass("Creating sampler succeeded");
+}
+
+tcu::TestCaseGroup* createTests (tcu::TestContext& testCtx)
+{
+ de::MovePtr<tcu::TestCaseGroup> apiTests (new tcu::TestCaseGroup(testCtx, "api", "API Tests"));
+
+ addFunctionCase(apiTests.get(), "create_sampler", "", createSamplerTest);
+
+ return apiTests.release();
+}
+----
+
++vkt::Context+, which is passed to +vkt::TestInstance+, will provide access to the Vulkan platform interface and a default device instance. Most test cases should use the default device instance:
+
+ * Creating device can take up to tens of milliseconds
+ * --deqp-vk-device-id=N command line option can be used to change device
+ * Framework can force validation layers (--deqp-vk-layers=validation,...)
+
+Other considerations:
+
+ * Rather than using default header, deqp uses custom header & interface wrappers
+ ** See +vk::PlatformInterface+ and +vk::DeviceInterface+
+ ** Enables optional run-time dependency to Vulkan driver (required for Android, useful in general)
+ ** Various logging & other analysis facilities can be layered on top of that interface
+ * Expose validation state to tests to be able to test validation
+ * Extensions are opt-in, some tests will require certain extensions to work
+ ** --deqp-vk-extensions? enable all by default?
+ ** Probably good to be able to override extensions as well (verify that tests report correct results without extensions)
+
+Common utilities
+~~~~~~~~~~~~~~~~
+
+Test case independent Vulkan utilities will be provided in +vk+ namespace, and can be found under +framework/vulkan+. These include:
+
+ * +Unique<T>+ and +Move<T>+ wrappers for Vulkan API objects
+ * Creating all types of work with configurable parameters:
+ ** Workload "size" (not really comparable between types)
+ ** Consume & produce memory contents
+ *** Simple checksumming / other verification against reference data typically fine
+
+.TODO
+ * Document important utilities (vkRef.hpp for example).
+ * Document Vulkan platform port.
+
+Object management
+-----------------
+
+Object management tests verify that the driver is able to create and destroy objects of all types. The tests don't attempt to use the objects (unless necessary for testing object construction) as that is covered by feature-specific tests. For all object types the object management tests cover:
+
+ * Creating objects with a relevant set of parameters
+ ** Not exhaustive; guided by what might actually make the driver take a different path
+ * Allocating multiple objects of same type
+ ** Reasonable limit depends on object type
+ * Creating objects from multiple threads concurrently (where possible)
+ * Freeing objects from multiple threads
+
+NOTE: tests for various +vkCreate*()+ functions are documented in feature-specific sections.
+
+Multithreaded scaling
+---------------------
+
+The Vulkan API is free-threaded and suggests that many operations (such as constructing command buffers) will scale with the number of app threads. Tests are needed to prove that such scalability actually exists, and that there are no locks in important functionality preventing it.
+
+NOTE: Khronos CTS has not traditionally included any performance testing, and the tests may not be part of conformance criteria. The tests may however be useful for IHVs for driver optimization, and could be enforced by platform-specific conformance tests, such as Android CTS.
+
+Destructor functions
+~~~~~~~~~~~~~~~~~~~~
+
+[source,c]
+----
+VkResult VKAPI vkDestroyInstance(
+ VkInstance instance);
+
+VkResult VKAPI vkDestroyDevice(
+ VkDevice device);
+
+VkResult VKAPI vkDestroyFence(
+ VkDevice device,
+ VkFence fence);
+
+VkResult VKAPI vkDestroySemaphore(
+ VkDevice device,
+ VkSemaphore semaphore);
+
+VkResult VKAPI vkDestroyEvent(
+ VkDevice device,
+ VkEvent event);
+
+VkResult VKAPI vkDestroyQueryPool(
+ VkDevice device,
+ VkQueryPool queryPool);
+
+VkResult VKAPI vkDestroyBuffer(
+ VkDevice device,
+ VkBuffer buffer);
+
+VkResult VKAPI vkDestroyBufferView(
+ VkDevice device,
+ VkBufferView bufferView);
+
+VkResult VKAPI vkDestroyImage(
+ VkDevice device,
+ VkImage image);
+
+VkResult VKAPI vkDestroyImageView(
+ VkDevice device,
+ VkImageView imageView);
+
+VkResult VKAPI vkDestroyAttachmentView(
+ VkDevice device,
+ VkAttachmentView attachmentView);
+
+VkResult VKAPI vkDestroyShaderModule(
+ VkDevice device,
+ VkShaderModule shaderModule);
+
+VkResult VKAPI vkDestroyShader(
+ VkDevice device,
+ VkShader shader);
+
+VkResult VKAPI vkDestroyPipelineCache(
+ VkDevice device,
+ VkPipelineCache pipelineCache);
+
+VkResult VKAPI vkDestroyPipeline(
+ VkDevice device,
+ VkPipeline pipeline);
+
+VkResult VKAPI vkDestroyPipelineLayout(
+ VkDevice device,
+ VkPipelineLayout pipelineLayout);
+
+VkResult VKAPI vkDestroySampler(
+ VkDevice device,
+ VkSampler sampler);
+
+VkResult VKAPI vkDestroyDescriptorSetLayout(
+ VkDevice device,
+ VkDescriptorSetLayout descriptorSetLayout);
+
+VkResult VKAPI vkDestroyDescriptorPool(
+ VkDevice device,
+ VkDescriptorPool descriptorPool);
+
+VkResult VKAPI vkDestroyDynamicViewportState(
+ VkDevice device,
+ VkDynamicViewportState dynamicViewportState);
+
+VkResult VKAPI vkDestroyDynamicRasterState(
+ VkDevice device,
+ VkDynamicRasterState dynamicRasterState);
+
+VkResult VKAPI vkDestroyDynamicColorBlendState(
+ VkDevice device,
+ VkDynamicColorBlendState dynamicColorBlendState);
+
+VkResult VKAPI vkDestroyDynamicDepthStencilState(
+ VkDevice device,
+ VkDynamicDepthStencilState dynamicDepthStencilState);
+
+VkResult VKAPI vkDestroyFramebuffer(
+ VkDevice device,
+ VkFramebuffer framebuffer);
+
+VkResult VKAPI vkDestroyRenderPass(
+ VkDevice device,
+ VkRenderPass renderPass);
+
+VkResult VKAPI vkDestroyCommandPool(
+ VkDevice device,
+ VkCmdPool cmdPool);
+
+VkResult VKAPI vkDestroyCommandBuffer(
+ VkDevice device,
+ VkCmdBuffer commandBuffer);
+----
+
+API Queries
+-----------
+
+Objective of API query tests is to validate that various +vkGet*+ functions return correct values. Generic checks that apply to all query types are:
+
+ * Returned value size is equal to, or a multiple of, the relevant struct size
+ * Query doesn't write outside the provided pointer
+ * Query values (where expected) don't change between subsequent queries
+ * Concurrent queries from multiple threads work
+
+Platform queries
+~~~~~~~~~~~~~~~~
+
+Platform query tests will validate that all queries work as expected and return sensible values.
+
+ * Sensible device properties
+ ** May have some Android-specific requirements
+ *** TBD queue 0 must be universal queue (all command types supported)
+ * All required functions present
+ ** Both platform (physicalDevice = 0) and device-specific
+ ** Culled based on enabled extension list?
+
+[source,c]
+----
+// Physical devices
+
+VkResult VKAPI vkEnumeratePhysicalDevices(
+ VkInstance instance,
+ uint32_t* pPhysicalDeviceCount,
+ VkPhysicalDevice* pPhysicalDevices);
+
+VkResult VKAPI vkGetPhysicalDeviceFeatures(
+ VkPhysicalDevice physicalDevice,
+ VkPhysicalDeviceFeatures* pFeatures);
+
+// Properties & limits
+
+VkResult VKAPI vkGetPhysicalDeviceLimits(
+ VkPhysicalDevice physicalDevice,
+ VkPhysicalDeviceLimits* pLimits);
+
+typedef struct {
+ uint32_t apiVersion;
+ uint32_t driverVersion;
+ uint32_t vendorId;
+ uint32_t deviceId;
+ VkPhysicalDeviceType deviceType;
+ char deviceName[VK_MAX_PHYSICAL_DEVICE_NAME];
+ uint8_t pipelineCacheUUID[VK_UUID_LENGTH];
+} VkPhysicalDeviceProperties;
+
+VkResult VKAPI vkGetPhysicalDeviceProperties(
+ VkPhysicalDevice physicalDevice,
+ VkPhysicalDeviceProperties* pProperties);
+
+// Queue properties
+
+VkResult VKAPI vkGetPhysicalDeviceQueueCount(
+ VkPhysicalDevice physicalDevice,
+ uint32_t* pCount);
+
+typedef enum {
+ VK_QUEUE_GRAPHICS_BIT = 0x00000001,
+ VK_QUEUE_COMPUTE_BIT = 0x00000002,
+ VK_QUEUE_DMA_BIT = 0x00000004,
+ VK_QUEUE_SPARSE_MEMMGR_BIT = 0x00000008,
+ VK_QUEUE_EXTENDED_BIT = 0x40000000,
+} VkQueueFlagBits;
+typedef VkFlags VkQueueFlags;
+
+typedef struct {
+ VkQueueFlags queueFlags;
+ uint32_t queueCount;
+ VkBool32 supportsTimestamps;
+} VkPhysicalDeviceQueueProperties;
+
+VkResult VKAPI vkGetPhysicalDeviceQueueProperties(
+ VkPhysicalDevice physicalDevice,
+ uint32_t count,
+ VkPhysicalDeviceQueueProperties* pQueueProperties);
+
+// Memory properties
+
+typedef enum {
+ VK_MEMORY_PROPERTY_DEVICE_ONLY = 0,
+ VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT = 0x00000001,
+ VK_MEMORY_PROPERTY_HOST_NON_COHERENT_BIT = 0x00000002,
+ VK_MEMORY_PROPERTY_HOST_UNCACHED_BIT = 0x00000004,
+ VK_MEMORY_PROPERTY_HOST_WRITE_COMBINED_BIT = 0x00000008,
+ VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT = 0x00000010,
+} VkMemoryPropertyFlagBits;
+typedef VkFlags VkMemoryPropertyFlags;
+
+typedef enum {
+ VK_MEMORY_HEAP_HOST_LOCAL = 0x00000001,
+} VkMemoryHeapFlagBits;
+typedef VkFlags VkMemoryHeapFlags;
+
+typedef struct {
+ VkMemoryPropertyFlags propertyFlags;
+ uint32_t heapIndex;
+} VkMemoryType;
+
+typedef struct {
+ VkDeviceSize size;
+ VkMemoryHeapFlags flags;
+} VkMemoryHeap;
+
+typedef struct {
+ uint32_t memoryTypeCount;
+ VkMemoryType memoryTypes[VK_MAX_MEMORY_TYPES];
+ uint32_t memoryHeapCount;
+ VkMemoryHeap memoryHeaps[VK_MAX_MEMORY_HEAPS];
+} VkPhysicalDeviceMemoryProperties;
+
+VkResult VKAPI vkGetPhysicalDeviceMemoryProperties(
+ VkPhysicalDevice physicalDevice,
+ VkPhysicalDeviceMemoryProperties* pMemoryProperties);
+
+// Proc address queries
+
+PFN_vkVoidFunction VKAPI vkGetInstanceProcAddr(
+ VkInstance instance,
+ const char* pName);
+
+PFN_vkVoidFunction VKAPI vkGetDeviceProcAddr(
+ VkDevice device,
+ const char* pName);
+
+// Extension queries
+
+typedef struct {
+ char extName[VK_MAX_EXTENSION_NAME];
+ uint32_t specVersion;
+} VkExtensionProperties;
+
+VkResult VKAPI vkGetGlobalExtensionProperties(
+ const char* pLayerName,
+ uint32_t* pCount,
+ VkExtensionProperties* pProperties);
+
+VkResult VKAPI vkGetPhysicalDeviceExtensionProperties(
+ VkPhysicalDevice physicalDevice,
+ const char* pLayerName,
+ uint32_t* pCount,
+ VkExtensionProperties* pProperties);
+
+// Layer queries
+
+typedef struct {
+ char layerName[VK_MAX_EXTENSION_NAME];
+ uint32_t specVersion;
+ uint32_t implVersion;
+ const char* description[VK_MAX_DESCRIPTION];
+} VkLayerProperties;
+
+VkResult VKAPI vkGetGlobalLayerProperties(
+ uint32_t* pCount,
+ VkLayerProperties* pProperties);
+
+VkResult VKAPI vkGetPhysicalDeviceLayerProperties(
+ VkPhysicalDevice physicalDevice,
+ uint32_t* pCount,
+ VkLayerProperties* pProperties);
+----
+
+Device queries
+~~~~~~~~~~~~~~
+
+[source,c]
+----
+VkResult VKAPI vkGetDeviceQueue(
+ VkDevice device,
+ uint32_t queueFamilyIndex,
+ uint32_t queueIndex,
+ VkQueue* pQueue);
+
+VkResult VKAPI vkGetDeviceMemoryCommitment(
+ VkDevice device,
+ VkDeviceMemory memory,
+ VkDeviceSize* pCommittedMemoryInBytes);
+----
+
+Object queries
+~~~~~~~~~~~~~~
+
+ * Memory requirements: verify that for buffers the returned size is at least the size of the buffer
+
+[source,c]
+----
+typedef struct {
+ VkDeviceSize size;
+ VkDeviceSize alignment;
+ uint32_t memoryTypeBits;
+} VkMemoryRequirements;
+
+VkResult VKAPI vkGetBufferMemoryRequirements(
+ VkDevice device,
+ VkBuffer buffer,
+ VkMemoryRequirements* pMemoryRequirements);
+
+VkResult VKAPI vkGetImageMemoryRequirements(
+ VkDevice device,
+ VkImage image,
+ VkMemoryRequirements* pMemoryRequirements);
+----
+
+Format & image capabilities
+~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+[source,c]
+----
+typedef enum {
+ VK_FORMAT_FEATURE_SAMPLED_IMAGE_BIT = 0x00000001,
+ VK_FORMAT_FEATURE_STORAGE_IMAGE_BIT = 0x00000002,
+ VK_FORMAT_FEATURE_STORAGE_IMAGE_ATOMIC_BIT = 0x00000004,
+ VK_FORMAT_FEATURE_UNIFORM_TEXEL_BUFFER_BIT = 0x00000008,
+ VK_FORMAT_FEATURE_STORAGE_TEXEL_BUFFER_BIT = 0x00000010,
+ VK_FORMAT_FEATURE_STORAGE_TEXEL_BUFFER_ATOMIC_BIT = 0x00000020,
+ VK_FORMAT_FEATURE_VERTEX_BUFFER_BIT = 0x00000040,
+ VK_FORMAT_FEATURE_COLOR_ATTACHMENT_BIT = 0x00000080,
+ VK_FORMAT_FEATURE_COLOR_ATTACHMENT_BLEND_BIT = 0x00000100,
+ VK_FORMAT_FEATURE_DEPTH_STENCIL_ATTACHMENT_BIT = 0x00000200,
+ VK_FORMAT_FEATURE_CONVERSION_BIT = 0x00000400,
+} VkFormatFeatureFlagBits;
+typedef VkFlags VkFormatFeatureFlags;
+
+typedef struct {
+ VkFormatFeatureFlags linearTilingFeatures;
+ VkFormatFeatureFlags optimalTilingFeatures;
+} VkFormatProperties;
+
+VkResult VKAPI vkGetPhysicalDeviceFormatProperties(
+ VkPhysicalDevice physicalDevice,
+ VkFormat format,
+ VkFormatProperties* pFormatProperties);
+
+typedef struct {
+ uint64_t maxResourceSize;
+ uint32_t maxSamples;
+} VkImageFormatProperties;
+
+VkResult VKAPI vkGetPhysicalDeviceImageFormatProperties(
+ VkPhysicalDevice physicalDevice,
+ VkFormat format,
+ VkImageType type,
+ VkImageTiling tiling,
+ VkImageUsageFlags usage,
+ VkImageFormatProperties* pImageFormatProperties);
+----
+
+Memory management
+-----------------
+
+Memory management tests cover memory allocation, sub-allocation, access, and CPU and GPU cache control. Testing some areas such as cache control will require stress-testing memory accesses from CPU and various pipeline stages.
+
+Memory allocation
+~~~~~~~~~~~~~~~~~
+
+[source,c]
+----
+typedef struct {
+ VkStructureType sType;
+ const void* pNext;
+ VkDeviceSize allocationSize;
+ uint32_t memoryTypeIndex;
+} VkMemoryAllocInfo;
+
+VkResult VKAPI vkAllocMemory(
+ VkDevice device,
+ const VkMemoryAllocInfo* pAllocInfo,
+ VkDeviceMemory* pMem);
+
+VkResult VKAPI vkFreeMemory(
+ VkDevice device,
+ VkDeviceMemory mem);
+----
+
+ * Test combination of:
+ ** Various allocation sizes
+ ** All heaps
+ * Allocations that exceed total available memory size (expected to fail)
+ * Concurrent allocation and free from multiple threads
+ * Memory leak tests (may not work on platforms that overcommit)
+ ** Allocate memory until fails, free all and repeat
+ ** Total allocated memory size should remain stable over iterations
+ ** Allocate and free in random order
+
+.Spec issues
+
+What are the alignment guarantees for the returned memory allocation? Will it satisfy alignment requirements for all object types? If not, app needs to know the alignment, or alignment parameter needs to be added to +VkMemoryAllocInfo+.
+
+Minimum allocation size? If 1, presumably implementation has to round it up to next page size at least? Is there a query for that? What happens when accessing the added padding?
+
+Mapping memory and CPU access
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+[source,c]
+----
+VkResult VKAPI vkMapMemory(
+ VkDevice device,
+ VkDeviceMemory mem,
+ VkDeviceSize offset,
+ VkDeviceSize size,
+ VkMemoryMapFlags flags,
+ void** ppData);
+
+VkResult VKAPI vkUnmapMemory(
+ VkDevice device,
+ VkDeviceMemory mem);
+----
+
+ * Verify that mapping of all host-visible allocations succeed and accessing memory works
+ * Verify mapping of sub-ranges
+ * Access still works after un-mapping and re-mapping memory
+ * Attaching or detaching memory allocation from buffer/image doesn't affect mapped memory access or contents
+ ** Images: test with various formats, mip-levels etc.
+
+.Spec issues
+ * Man pages say vkMapMemory is thread-safe, but to what extent?
+ ** Mapping different VkDeviceMemory allocs concurrently?
+ ** Mapping different sub-ranges of same VkDeviceMemory?
+ ** Mapping overlapping sub-ranges of same VkDeviceMemory?
+ * Okay to re-map same or overlapping range? What pointers should be returned in that case?
+ * Can re-mapping same block return different virtual address?
+ * Alignment of returned CPU pointer?
+ ** Access using SIMD instructions can benefit from alignment
+
+CPU cache control
+~~~~~~~~~~~~~~~~~
+
+[source,c]
+----
+typedef struct {
+ VkStructureType sType;
+ const void* pNext;
+ VkDeviceMemory mem;
+ VkDeviceSize offset;
+ VkDeviceSize size;
+} VkMappedMemoryRange;
+
+VkResult VKAPI vkFlushMappedMemoryRanges(
+ VkDevice device,
+ uint32_t memRangeCount,
+ const VkMappedMemoryRange* pMemRanges);
+
+VkResult VKAPI vkInvalidateMappedMemoryRanges(
+ VkDevice device,
+ uint32_t memRangeCount,
+ const VkMappedMemoryRange* pMemRanges);
+----
+
+ * TODO Semantics discussed at https://cvs.khronos.org/bugzilla/show_bug.cgi?id=13690
+ ** Invalidate relevant for HOST_NON_COHERENT_BIT, flushes CPU read caches
+ ** Flush flushes CPU write caches?
+ * Test behavior with all possible mem alloc types & various sizes
+ * Corner-cases:
+ ** Empty list
+ ** Empty ranges
+ ** Same range specified multiple times
+ ** Partial overlap between ranges
+
+.Spec issues
+ * Thread-safety? Okay to flush different ranges concurrently?
+
+GPU cache control
+~~~~~~~~~~~~~~~~~
+
+Validate that GPU caches are invalidated where instructed. This includes visibility of memory writes made by both CPU and GPU to both CPU and GPU pipeline stages.
+
+[source,c]
+----
+typedef enum {
+ VK_MEMORY_OUTPUT_HOST_WRITE_BIT = 0x00000001,
+ VK_MEMORY_OUTPUT_SHADER_WRITE_BIT = 0x00000002,
+ VK_MEMORY_OUTPUT_COLOR_ATTACHMENT_BIT = 0x00000004,
+ VK_MEMORY_OUTPUT_DEPTH_STENCIL_ATTACHMENT_BIT = 0x00000008,
+ VK_MEMORY_OUTPUT_TRANSFER_BIT = 0x00000010,
+} VkMemoryOutputFlagBits;
+typedef VkFlags VkMemoryOutputFlags;
+
+typedef enum {
+ VK_MEMORY_INPUT_HOST_READ_BIT = 0x00000001,
+ VK_MEMORY_INPUT_INDIRECT_COMMAND_BIT = 0x00000002,
+ VK_MEMORY_INPUT_INDEX_FETCH_BIT = 0x00000004,
+ VK_MEMORY_INPUT_VERTEX_ATTRIBUTE_FETCH_BIT = 0x00000008,
+ VK_MEMORY_INPUT_UNIFORM_READ_BIT = 0x00000010,
+ VK_MEMORY_INPUT_SHADER_READ_BIT = 0x00000020,
+ VK_MEMORY_INPUT_COLOR_ATTACHMENT_BIT = 0x00000040,
+ VK_MEMORY_INPUT_DEPTH_STENCIL_ATTACHMENT_BIT = 0x00000080,
+ VK_MEMORY_INPUT_INPUT_ATTACHMENT_BIT = 0x00000100,
+ VK_MEMORY_INPUT_TRANSFER_BIT = 0x00000200,
+} VkMemoryInputFlagBits;
+typedef VkFlags VkMemoryInputFlags;
+
+typedef enum {
+ VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT = 0x00000001,
+ VK_PIPELINE_STAGE_DRAW_INDIRECT_BIT = 0x00000002,
+ VK_PIPELINE_STAGE_VERTEX_INPUT_BIT = 0x00000004,
+ VK_PIPELINE_STAGE_VERTEX_SHADER_BIT = 0x00000008,
+ VK_PIPELINE_STAGE_TESS_CONTROL_SHADER_BIT = 0x00000010,
+ VK_PIPELINE_STAGE_TESS_EVALUATION_SHADER_BIT = 0x00000020,
+ VK_PIPELINE_STAGE_GEOMETRY_SHADER_BIT = 0x00000040,
+ VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT = 0x00000080,
+ VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT = 0x00000100,
+ VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT = 0x00000200,
+ VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT = 0x00000400,
+ VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT = 0x00000800,
+ VK_PIPELINE_STAGE_TRANSFER_BIT = 0x00001000,
+ VK_PIPELINE_STAGE_TRANSITION_BIT = 0x00002000,
+ VK_PIPELINE_STAGE_HOST_BIT = 0x00004000,
+ VK_PIPELINE_STAGE_ALL_GRAPHICS = 0x000007FF,
+ VK_PIPELINE_STAGE_ALL_GPU_COMMANDS = 0x00003FFF,
+} VkPipelineStageFlagBits;
+typedef VkFlags VkPipelineStageFlags;
+
+typedef struct {
+ VkStructureType sType;
+ const void* pNext;
+ VkMemoryOutputFlags outputMask;
+ VkMemoryInputFlags inputMask;
+ uint32_t srcQueueFamilyIndex;
+ uint32_t destQueueFamilyIndex;
+ VkBuffer buffer;
+ VkDeviceSize offset;
+ VkDeviceSize size;
+} VkBufferMemoryBarrier;
+
+typedef struct {
+ VkStructureType sType;
+ const void* pNext;
+ VkMemoryOutputFlags outputMask;
+ VkMemoryInputFlags inputMask;
+ VkImageLayout oldLayout;
+ VkImageLayout newLayout;
+ uint32_t srcQueueFamilyIndex;
+ uint32_t destQueueFamilyIndex;
+ VkImage image;
+ VkImageSubresourceRange subresourceRange;
+} VkImageMemoryBarrier;
+
+typedef struct {
+ VkStructureType sType;
+ const void* pNext;
+ VkMemoryOutputFlags outputMask;
+ VkMemoryInputFlags inputMask;
+} VkMemoryBarrier;
+
+void VKAPI vkCmdPipelineBarrier(
+ VkCmdBuffer cmdBuffer,
+ VkPipelineStageFlags srcStageMask,
+ VkPipelineStageFlags destStageMask,
+ VkBool32 byRegion,
+ uint32_t memBarrierCount,
+ const void* const* ppMemBarriers);
+
+// \note vkCmdWaitEvents includes memory barriers as well
+----
+
+ * Image layout transitions may need special care
+
+Binding memory to objects
+~~~~~~~~~~~~~~~~~~~~~~~~~
+
+[source,c]
+----
+VkResult VKAPI vkBindBufferMemory(
+ VkDevice device,
+ VkBuffer buffer,
+ VkDeviceMemory mem,
+ VkDeviceSize memOffset);
+
+VkResult VKAPI vkBindImageMemory(
+ VkDevice device,
+ VkImage image,
+ VkDeviceMemory mem,
+ VkDeviceSize memOffset);
+----
+
+ * Buffers and images only
+ * Straightforward mapping where allocation size matches object size and memOffset = 0
+ * Sub-allocation of larger allocations
+ * Re-binding object to different memory allocation
+ * Binding multiple objects to same or partially overlapping memory ranges
+ ** Aliasing writable resources? Access granularity?
+ * Binding various (supported) types of memory allocations
+
+.Spec issues
+ * When binding multiple objects to same memory, will data in memory be visible for all objects?
+ ** Reinterpretation rules?
+ * Memory contents after re-binding memory to a different object?
+
+Sparse resources
+----------------
+
+Sparse memory resources are treated as a separate feature from basic memory management. Details are still TBD.
+
+[source,c]
+----
+typedef enum {
+ VK_SPARSE_MEMORY_BIND_REPLICATE_64KIB_BLOCK_BIT = 0x00000001,
+} VkSparseMemoryBindFlagBits;
+typedef VkFlags VkSparseMemoryBindFlags;
+
+typedef struct {
+ VkDeviceSize offset;
+ VkDeviceSize memOffset;
+ VkDeviceMemory mem;
+ VkSparseMemoryBindFlags flags;
+} VkSparseMemoryBindInfo;
+
+VkResult VKAPI vkQueueBindSparseBufferMemory(
+ VkQueue queue,
+ VkBuffer buffer,
+ uint32_t numBindings,
+ const VkSparseMemoryBindInfo* pBindInfo);
+
+VkResult VKAPI vkQueueBindSparseImageOpaqueMemory(
+ VkQueue queue,
+ VkImage image,
+ uint32_t numBindings,
+ const VkSparseMemoryBindInfo* pBindInfo);
+
+// Non-opaque sparse images
+
+typedef enum {
+ VK_SPARSE_IMAGE_FMT_SINGLE_MIPTAIL_BIT = 0x00000001,
+ VK_SPARSE_IMAGE_FMT_ALIGNED_MIP_SIZE_BIT = 0x00000002,
+ VK_SPARSE_IMAGE_FMT_NONSTD_BLOCK_SIZE_BIT = 0x00000004,
+} VkSparseImageFormatFlagBits;
+typedef VkFlags VkSparseImageFormatFlags;
+
+typedef struct {
+ VkImageAspect aspect;
+ VkExtent3D imageGranularity;
+ VkSparseImageFormatFlags flags;
+} VkSparseImageFormatProperties;
+
+VkResult VKAPI vkGetPhysicalDeviceSparseImageFormatProperties(
+ VkPhysicalDevice physicalDevice,
+ VkFormat format,
+ VkImageType type,
+ uint32_t samples,
+ VkImageUsageFlags usage,
+ VkImageTiling tiling,
+ uint32_t* pNumProperties,
+ VkSparseImageFormatProperties* pProperties);
+
+typedef struct {
+ VkSparseImageFormatProperties formatProps;
+ uint32_t imageMipTailStartLOD;
+ VkDeviceSize imageMipTailSize;
+ VkDeviceSize imageMipTailOffset;
+ VkDeviceSize imageMipTailStride;
+} VkSparseImageMemoryRequirements;
+
+VkResult VKAPI vkGetImageSparseMemoryRequirements(
+ VkDevice device,
+ VkImage image,
+ uint32_t* pNumRequirements,
+ VkSparseImageMemoryRequirements* pSparseMemoryRequirements);
+
+typedef struct {
+ VkImageSubresource subresource;
+ VkOffset3D offset;
+ VkExtent3D extent;
+ VkDeviceSize memOffset;
+ VkDeviceMemory mem;
+ VkSparseMemoryBindFlags flags;
+} VkSparseImageMemoryBindInfo;
+
+VkResult VKAPI vkQueueBindSparseImageMemory(
+ VkQueue queue,
+ VkImage image,
+ uint32_t numBindings,
+ const VkSparseImageMemoryBindInfo* pBindInfo);
+----
+
+Binding model
+-------------
+
+The objective of the binding model tests is to verify:
+
+ * All valid descriptor sets can be created
+ * Accessing resources from shaders using various layouts
+ * Descriptor updates
+ * Descriptor set chaining
+ * Descriptor set limits
+
+As a necessary side effect, the tests will provide coverage for allocating and accessing all types of resources from all shader stages.
+
+Descriptor set functions
+~~~~~~~~~~~~~~~~~~~~~~~~
+
+[source,c]
+----
+// DescriptorSetLayout
+
+typedef struct {
+ VkDescriptorType descriptorType;
+ uint32_t arraySize;
+ VkShaderStageFlags stageFlags;
+ const VkSampler* pImmutableSamplers;
+} VkDescriptorSetLayoutBinding;
+
+typedef struct {
+ VkStructureType sType;
+ const void* pNext;
+ uint32_t count;
+ const VkDescriptorSetLayoutBinding* pBinding;
+} VkDescriptorSetLayoutCreateInfo;
+
+VkResult VKAPI vkCreateDescriptorSetLayout(
+ VkDevice device,
+ const VkDescriptorSetLayoutCreateInfo* pCreateInfo,
+ VkDescriptorSetLayout* pSetLayout);
+
+// DescriptorPool
+
+typedef struct {
+ VkDescriptorType type;
+ uint32_t count;
+} VkDescriptorTypeCount;
+
+typedef struct {
+ VkStructureType sType;
+ const void* pNext;
+ uint32_t count;
+ const VkDescriptorTypeCount* pTypeCount;
+} VkDescriptorPoolCreateInfo;
+
+VkResult VKAPI vkCreateDescriptorPool(
+ VkDevice device,
+ VkDescriptorPoolUsage poolUsage,
+ uint32_t maxSets,
+ const VkDescriptorPoolCreateInfo* pCreateInfo,
+ VkDescriptorPool* pDescriptorPool);
+
+VkResult VKAPI vkResetDescriptorPool(
+ VkDevice device,
+ VkDescriptorPool descriptorPool);
+
+// DescriptorSet
+
+typedef struct {
+ VkBufferView bufferView;
+ VkSampler sampler;
+ VkImageView imageView;
+ VkAttachmentView attachmentView;
+ VkImageLayout imageLayout;
+} VkDescriptorInfo;
+
+VkResult VKAPI vkAllocDescriptorSets(
+ VkDevice device,
+ VkDescriptorPool descriptorPool,
+ VkDescriptorSetUsage setUsage,
+ uint32_t count,
+ const VkDescriptorSetLayout* pSetLayouts,
+ VkDescriptorSet* pDescriptorSets,
+ uint32_t* pCount);
+
+typedef struct {
+ VkStructureType sType;
+ const void* pNext;
+ VkDescriptorSet destSet;
+ uint32_t destBinding;
+ uint32_t destArrayElement;
+ uint32_t count;
+ VkDescriptorType descriptorType;
+ const VkDescriptorInfo* pDescriptors;
+} VkWriteDescriptorSet;
+
+typedef struct {
+ VkStructureType sType;
+ const void* pNext;
+ VkDescriptorSet srcSet;
+ uint32_t srcBinding;
+ uint32_t srcArrayElement;
+ VkDescriptorSet destSet;
+ uint32_t destBinding;
+ uint32_t destArrayElement;
+ uint32_t count;
+} VkCopyDescriptorSet;
+
+VkResult VKAPI vkUpdateDescriptorSets(
+ VkDevice device,
+ uint32_t writeCount,
+ const VkWriteDescriptorSet* pDescriptorWrites,
+ uint32_t copyCount,
+ const VkCopyDescriptorSet* pDescriptorCopies);
+----
+
+Pipeline layout functions
+~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Pipeline layouts will be covered mostly by tests that use various layouts, but in addition some corner-case tests are needed:
+
+ * Creating empty layouts for shaders that don't use any resources
+ ** For example: vertex data generated with +gl_VertexID+ only
+
+[source,c]
+----
+typedef struct {
+ VkShaderStageFlags stageFlags;
+ uint32_t start;
+ uint32_t length;
+} VkPushConstantRange;
+
+typedef struct {
+ VkStructureType sType;
+ const void* pNext;
+ uint32_t descriptorSetCount;
+ const VkDescriptorSetLayout* pSetLayouts;
+ uint32_t pushConstantRangeCount;
+ const VkPushConstantRange* pPushConstantRanges;
+} VkPipelineLayoutCreateInfo;
+
+VkResult VKAPI vkCreatePipelineLayout(
+ VkDevice device,
+ const VkPipelineLayoutCreateInfo* pCreateInfo,
+ VkPipelineLayout* pPipelineLayout);
+----
+
+Multipass
+---------
+
+Multipass tests will verify:
+
+ * Various possible multipass data flow configurations
+ ** Target formats, number of targets, load, store, resolve, dependencies, ...
+ ** Exhaustive tests for selected dimensions
+ ** Randomized tests
+ * Interaction with other features
+ ** Blending
+ ** Tessellation, geometry shaders (esp. massive geometry expansion)
+ ** Barriers that may cause tiler flushes
+ ** Queries
+ * Large passes that may require tiler flushes
+
+[source,c]
+----
+// Framebuffer
+
+typedef struct {
+ VkAttachmentView view;
+ VkImageLayout layout;
+} VkAttachmentBindInfo;
+
+typedef struct {
+ VkStructureType sType;
+ const void* pNext;
+ VkRenderPass renderPass;
+ uint32_t attachmentCount;
+ const VkAttachmentBindInfo* pAttachments;
+ uint32_t width;
+ uint32_t height;
+ uint32_t layers;
+} VkFramebufferCreateInfo;
+
+VkResult VKAPI vkCreateFramebuffer(
+ VkDevice device,
+ const VkFramebufferCreateInfo* pCreateInfo,
+ VkFramebuffer* pFramebuffer);
+
+// RenderPass
+
+typedef struct {
+ VkStructureType sType;
+ const void* pNext;
+ VkFormat format;
+ uint32_t samples;
+ VkAttachmentLoadOp loadOp;
+ VkAttachmentStoreOp storeOp;
+ VkAttachmentLoadOp stencilLoadOp;
+ VkAttachmentStoreOp stencilStoreOp;
+ VkImageLayout initialLayout;
+ VkImageLayout finalLayout;
+} VkAttachmentDescription;
+
+typedef struct {
+ uint32_t attachment;
+ VkImageLayout layout;
+} VkAttachmentReference;
+
+typedef struct {
+ VkStructureType sType;
+ const void* pNext;
+ VkPipelineBindPoint pipelineBindPoint;
+ VkSubpassDescriptionFlags flags;
+ uint32_t inputCount;
+ const VkAttachmentReference* inputAttachments;
+ uint32_t colorCount;
+ const VkAttachmentReference* colorAttachments;
+ const VkAttachmentReference* resolveAttachments;
+ VkAttachmentReference depthStencilAttachment;
+ uint32_t preserveCount;
+ const VkAttachmentReference* preserveAttachments;
+} VkSubpassDescription;
+
+typedef struct {
+ VkStructureType sType;
+ const void* pNext;
+ uint32_t srcSubpass;
+ uint32_t destSubpass;
+ VkPipelineStageFlags srcStageMask;
+ VkPipelineStageFlags destStageMask;
+ VkMemoryOutputFlags outputMask;
+ VkMemoryInputFlags inputMask;
+ VkBool32 byRegion;
+} VkSubpassDependency;
+
+typedef struct {
+ VkStructureType sType;
+ const void* pNext;
+ uint32_t attachmentCount;
+ const VkAttachmentDescription* pAttachments;
+ uint32_t subpassCount;
+ const VkSubpassDescription* pSubpasses;
+ uint32_t dependencyCount;
+ const VkSubpassDependency* pDependencies;
+} VkRenderPassCreateInfo;
+
+VkResult VKAPI vkCreateRenderPass(
+ VkDevice device,
+ const VkRenderPassCreateInfo* pCreateInfo,
+ VkRenderPass* pRenderPass);
+
+VkResult VKAPI vkGetRenderAreaGranularity(
+ VkDevice device,
+ VkRenderPass renderPass,
+ VkExtent2D* pGranularity);
+
+typedef struct {
+ VkStructureType sType;
+ const void* pNext;
+ VkRenderPass renderPass;
+ VkFramebuffer framebuffer;
+ VkRect2D renderArea;
+ uint32_t attachmentCount;
+ const VkClearValue* pAttachmentClearValues;
+} VkRenderPassBeginInfo;
+
+typedef enum {
+ VK_RENDER_PASS_CONTENTS_INLINE = 0,
+ VK_RENDER_PASS_CONTENTS_SECONDARY_CMD_BUFFERS = 1,
+ VK_RENDER_PASS_CONTENTS_BEGIN_RANGE = VK_RENDER_PASS_CONTENTS_INLINE,
+ VK_RENDER_PASS_CONTENTS_END_RANGE = VK_RENDER_PASS_CONTENTS_SECONDARY_CMD_BUFFERS,
+ VK_RENDER_PASS_CONTENTS_NUM = (VK_RENDER_PASS_CONTENTS_SECONDARY_CMD_BUFFERS - VK_RENDER_PASS_CONTENTS_INLINE + 1),
+ VK_RENDER_PASS_CONTENTS_MAX_ENUM = 0x7FFFFFFF
+} VkRenderPassContents;
+
+void VKAPI vkCmdBeginRenderPass(
+ VkCmdBuffer cmdBuffer,
+ const VkRenderPassBeginInfo* pRenderPassBegin,
+ VkRenderPassContents contents);
+
+void VKAPI vkCmdNextSubpass(
+ VkCmdBuffer cmdBuffer,
+ VkRenderPassContents contents);
+
+void VKAPI vkCmdEndRenderPass(
+ VkCmdBuffer cmdBuffer);
+----
+
+Device initialization
+---------------------
+
+Device initialization tests verify that all reported devices can be created, with various possible configurations.
+
+[source,c]
+----
+typedef struct {
+ VkStructureType sType;
+ const void* pNext;
+ const char* pAppName;
+ uint32_t appVersion;
+ const char* pEngineName;
+ uint32_t engineVersion;
+ uint32_t apiVersion;
+} VkApplicationInfo;
+
+typedef void* (VKAPI *PFN_vkAllocFunction)(
+ void* pUserData,
+ size_t size,
+ size_t alignment,
+ VkSystemAllocType allocType);
+
+typedef void (VKAPI *PFN_vkFreeFunction)(
+ void* pUserData,
+ void* pMem);
+
+typedef struct {
+ void* pUserData;
+ PFN_vkAllocFunction pfnAlloc;
+ PFN_vkFreeFunction pfnFree;
+} VkAllocCallbacks;
+
+typedef struct {
+ VkStructureType sType;
+ const void* pNext;
+ const VkApplicationInfo* pAppInfo;
+ const VkAllocCallbacks* pAllocCb;
+ uint32_t layerCount;
+ const char*const* ppEnabledLayerNames;
+ uint32_t extensionCount;
+ const char*const* ppEnabledExtensionNames;
+} VkInstanceCreateInfo;
+
+VkResult VKAPI vkCreateInstance(
+ const VkInstanceCreateInfo* pCreateInfo,
+ VkInstance* pInstance);
+----
+
+ - +VkApplicationInfo+ parameters
+ * Arbitrary +pAppName+ / +pEngineName+ (spaces, utf-8, ...)
+ * +pAppName+ / +pEngineName+ = NULL?
+ * +appVersion+ / +engineVersion+ for 0, ~0, couple of values
+ * Valid +apiVersion+
+ * Invalid +apiVersion+ (expected to fail?)
+ - +VkAllocCallbacks+
+ * Want to be able to run all tests with and without callbacks?
+ ** See discussion about default device in framework section
+ * Custom allocators that provide guardbands and check them at free
+ * Override malloc / free and verify that driver doesn't call if callbacks provided
+ ** As part of object mgmt tests
+ * Must be inherited to all devices created from instance
+ - +VkInstanceCreateInfo+
+ * Empty extension list
+ * Unsupported extensions (expect VK_UNSUPPORTED)
+ * Various combinations of supported extensions
+ ** Any dependencies between extensions (enabling Y requires enabling X)?
+
+.Spec issues
+ * Only VkPhysicalDevice is passed to vkCreateDevice, ICD-specific magic needed for passing callbacks down to VkDevice instance
+
+[source,c]
+----
+typedef struct {
+ VkBool32 robustBufferAccess;
+ VkBool32 fullDrawIndexUint32;
+ VkBool32 imageCubeArray;
+ VkBool32 independentBlend;
+ VkBool32 geometryShader;
+ VkBool32 tessellationShader;
+ VkBool32 sampleRateShading;
+ VkBool32 dualSourceBlend;
+ VkBool32 logicOp;
+ VkBool32 instancedDrawIndirect;
+ VkBool32 depthClip;
+ VkBool32 depthBiasClamp;
+ VkBool32 fillModeNonSolid;
+ VkBool32 depthBounds;
+ VkBool32 wideLines;
+ VkBool32 largePoints;
+ VkBool32 textureCompressionETC2;
+ VkBool32 textureCompressionASTC_LDR;
+ VkBool32 textureCompressionBC;
+ VkBool32 pipelineStatisticsQuery;
+ VkBool32 vertexSideEffects;
+ VkBool32 tessellationSideEffects;
+ VkBool32 geometrySideEffects;
+ VkBool32 fragmentSideEffects;
+ VkBool32 shaderTessellationPointSize;
+ VkBool32 shaderGeometryPointSize;
+ VkBool32 shaderTextureGatherExtended;
+ VkBool32 shaderStorageImageExtendedFormats;
+ VkBool32 shaderStorageImageMultisample;
+ VkBool32 shaderStorageBufferArrayConstantIndexing;
+ VkBool32 shaderStorageImageArrayConstantIndexing;
+ VkBool32 shaderUniformBufferArrayDynamicIndexing;
+ VkBool32 shaderSampledImageArrayDynamicIndexing;
+ VkBool32 shaderStorageBufferArrayDynamicIndexing;
+ VkBool32 shaderStorageImageArrayDynamicIndexing;
+ VkBool32 shaderClipDistance;
+ VkBool32 shaderCullDistance;
+ VkBool32 shaderFloat64;
+ VkBool32 shaderInt64;
+ VkBool32 shaderFloat16;
+ VkBool32 shaderInt16;
+ VkBool32 shaderResourceResidency;
+ VkBool32 shaderResourceMinLOD;
+ VkBool32 sparse;
+ VkBool32 sparseResidencyBuffer;
+ VkBool32 sparseResidencyImage2D;
+ VkBool32 sparseResidencyImage3D;
+ VkBool32 sparseResidency2Samples;
+ VkBool32 sparseResidency4Samples;
+ VkBool32 sparseResidency8Samples;
+ VkBool32 sparseResidency16Samples;
+ VkBool32 sparseResidencyStandard2DBlockShape;
+ VkBool32 sparseResidencyStandard2DMSBlockShape;
+ VkBool32 sparseResidencyStandard3DBlockShape;
+ VkBool32 sparseResidencyAlignedMipSize;
+ VkBool32 sparseResidencyNonResident;
+ VkBool32 sparseResidencyNonResidentStrict;
+ VkBool32 sparseResidencyAliased;
+} VkPhysicalDeviceFeatures;
+
+typedef struct {
+ uint32_t queueFamilyIndex;
+ uint32_t queueCount;
+} VkDeviceQueueCreateInfo;
+
+typedef enum {
+ VK_DEVICE_CREATE_VALIDATION_BIT = 0x00000001,
+} VkDeviceCreateFlagBits;
+typedef VkFlags VkDeviceCreateFlags;
+
+typedef struct {
+ VkStructureType sType;
+ const void* pNext;
+ uint32_t queueRecordCount;
+ const VkDeviceQueueCreateInfo* pRequestedQueues;
+ uint32_t layerCount;
+ const char*const* ppEnabledLayerNames;
+ uint32_t extensionCount;
+ const char*const* ppEnabledExtensionNames;
+ const VkPhysicalDeviceFeatures* pEnabledFeatures;
+ VkDeviceCreateFlags flags;
+} VkDeviceCreateInfo;
+
+VkResult VKAPI vkCreateDevice(
+ VkPhysicalDevice physicalDevice,
+ const VkDeviceCreateInfo* pCreateInfo,
+ VkDevice* pDevice);
+----
+
+ * Creating multiple devices from single physical device
+ * Different queue configurations
+ ** Combinations of supported node indexes
+ ** Use of all queues simultaneously for various operations
+ ** Various queue counts
+ * Various extension combinations
+ * Flags
+ ** Enabling validation (see spec issues)
+ ** VK_DEVICE_CREATE_MULTI_DEVICE_IQ_MATCH_BIT not relevant for Android
+
+.Spec issues
+ * Can the same queue node index be used multiple times in the +pRequestedQueues+ list?
+ * VK_DEVICE_CREATE_VALIDATION_BIT vs. layers
+
+Queue functions
+---------------
+
+Queue functions (one currently) will have a lot of incidental coverage from other tests, so only targeted corner-case tests are needed:
+
+ * +cmdBufferCount+ = 0
+ * Submitting empty VkCmdBuffer
+
+[source,c]
+----
+VkResult VKAPI vkQueueSubmit(
+ VkQueue queue,
+ uint32_t cmdBufferCount,
+ const VkCmdBuffer* pCmdBuffers,
+ VkFence fence);
+----
+
+.Spec issues
+ * Can +fence+ be +NULL+ if app doesn't need it?
+
+Synchronization
+---------------
+
+Synchronization tests will verify that all execution ordering primitives provided by the API will function as expected. Testing scheduling and synchronization robustness will require generating non-trivial workloads and possibly randomization to reveal potential issues.
+
+[source,c]
+----
+VkResult VKAPI vkQueueWaitIdle(
+ VkQueue queue);
+
+VkResult VKAPI vkDeviceWaitIdle(
+ VkDevice device);
+----
+
+ * Verify that all sync objects signaled after *WaitIdle() returns
+ ** Fences (vkGetFenceStatus)
+ ** Events (vkEventGetStatus)
+ ** No way to query semaphore status?
+ * Threads blocking at vkWaitForFences() must be resumed
+ * Various amounts of work queued (from nothing to large command buffers)
+ * vkDeviceWaitIdle() concurrently with commands that submit more work
+ * all types of work
+
+Fences
+~~~~~~
+
+[source,c]
+----
+typedef enum {
+ VK_FENCE_CREATE_SIGNALED_BIT = 0x00000001,
+} VkFenceCreateFlagBits;
+typedef VkFlags VkFenceCreateFlags;
+
+typedef struct {
+ VkStructureType sType;
+ const void* pNext;
+ VkFenceCreateFlags flags;
+} VkFenceCreateInfo;
+
+VkResult VKAPI vkCreateFence(
+ VkDevice device,
+ const VkFenceCreateInfo* pCreateInfo,
+ VkFence* pFence);
+
+VkResult VKAPI vkResetFences(
+ VkDevice device,
+ uint32_t fenceCount,
+ const VkFence* pFences);
+
+VkResult VKAPI vkGetFenceStatus(
+ VkDevice device,
+ VkFence fence);
+
+VkResult VKAPI vkWaitForFences(
+ VkDevice device,
+ uint32_t fenceCount,
+ const VkFence* pFences,
+ VkBool32 waitAll,
+ uint64_t timeout);
+----
+
+ * Basic waiting on fences
+ ** All types of commands
+ ** Waiting on a different thread than the thread that submitted the work
+ * Reusing fences (vkResetFences)
+ * Waiting on a fence / querying status of a fence before it has been submitted to be signaled
+ * Waiting on a fence / querying status of a fence that has just been created with CREATE_SIGNALED_BIT
+ ** Reuse in different queue
+ ** Different queues
+
+.Spec issues
+ * Using same fence in multiple vkQueueSubmit calls without waiting/resetting in between
+ ** Completion of first cmdbuf will reset fence and others won't do anything?
+ * Waiting on same fence from multiple threads?
+
+Semaphores
+~~~~~~~~~~
+
+[source,c]
+----
+typedef VkFlags VkSemaphoreCreateFlags;
+
+typedef struct {
+ VkStructureType sType;
+ const void* pNext;
+ VkSemaphoreCreateFlags flags;
+} VkSemaphoreCreateInfo;
+
+VkResult VKAPI vkCreateSemaphore(
+ VkDevice device,
+ const VkSemaphoreCreateInfo* pCreateInfo,
+ VkSemaphore* pSemaphore);
+
+VkResult VKAPI vkQueueSignalSemaphore(
+ VkQueue queue,
+ VkSemaphore semaphore);
+
+VkResult VKAPI vkQueueWaitSemaphore(
+ VkQueue queue,
+ VkSemaphore semaphore);
+----
+
+ * All types of commands waiting & signaling semaphore
+ * Cross-queue semaphores
+ * Queuing wait on initially signaled semaphore
+ * Queuing wait immediately after queuing signaling
+ * vkQueueWaitIdle & vkDeviceWaitIdle waiting on semaphore
+ * Multiple queues waiting on same semaphore
+
+NOTE: Semaphores might change; counting is causing problems for some IHVs.
+
+Events
+~~~~~~
+
+[source,c]
+----
+typedef VkFlags VkEventCreateFlags;
+
+typedef struct {
+ VkStructureType sType;
+ const void* pNext;
+ VkEventCreateFlags flags;
+} VkEventCreateInfo;
+
+VkResult VKAPI vkCreateEvent(
+ VkDevice device,
+ const VkEventCreateInfo* pCreateInfo,
+ VkEvent* pEvent);
+
+VkResult VKAPI vkGetEventStatus(
+ VkDevice device,
+ VkEvent event);
+
+VkResult VKAPI vkSetEvent(
+ VkDevice device,
+ VkEvent event);
+
+VkResult VKAPI vkResetEvent(
+ VkDevice device,
+ VkEvent event);
+
+void VKAPI vkCmdSetEvent(
+ VkCmdBuffer cmdBuffer,
+ VkEvent event,
+ VkPipelineStageFlags stageMask);
+
+void VKAPI vkCmdResetEvent(
+ VkCmdBuffer cmdBuffer,
+ VkEvent event,
+ VkPipelineStageFlags stageMask);
+
+void VKAPI vkCmdWaitEvents(
+ VkCmdBuffer cmdBuffer,
+ uint32_t eventCount,
+ const VkEvent* pEvents,
+ VkPipelineStageFlags srcStageMask,
+ VkPipelineStageFlags destStageMask,
+ uint32_t memBarrierCount,
+ const void* const* ppMemBarriers);
+----
+
+ * All types of work waiting on all types of events
+ ** Including signaling from CPU side (vkSetEvent)
+ ** Memory barrier
+ * Polling event status (vkGetEventStatus)
+ * Memory barriers (see also GPU cache control)
+ * Corner-cases:
+ ** Re-setting event before it has been signaled
+ ** Polling status of event concurrently with signaling it or re-setting it from another thread
+ ** Multiple commands (maybe multiple queues as well) setting same event
+ *** Presumably first set will take effect, rest have no effect before event is re-set
+
+Pipeline queries
+----------------
+
+Pipeline query test details TBD. These are of lower priority initially.
+
+NOTE: Currently contains only exact occlusion query as mandatory. Might be problematic for some, and may change?
+
+[source,c]
+----
+typedef enum {
+ VK_QUERY_TYPE_OCCLUSION = 0,
+ VK_QUERY_TYPE_PIPELINE_STATISTICS = 1,
+ VK_QUERY_TYPE_BEGIN_RANGE = VK_QUERY_TYPE_OCCLUSION,
+ VK_QUERY_TYPE_END_RANGE = VK_QUERY_TYPE_PIPELINE_STATISTICS,
+ VK_QUERY_TYPE_NUM = (VK_QUERY_TYPE_PIPELINE_STATISTICS - VK_QUERY_TYPE_OCCLUSION + 1),
+ VK_QUERY_TYPE_MAX_ENUM = 0x7FFFFFFF
+} VkQueryType;
+
+typedef enum {
+ VK_QUERY_PIPELINE_STATISTIC_IA_VERTICES_BIT = 0x00000001,
+ VK_QUERY_PIPELINE_STATISTIC_IA_PRIMITIVES_BIT = 0x00000002,
+ VK_QUERY_PIPELINE_STATISTIC_VS_INVOCATIONS_BIT = 0x00000004,
+ VK_QUERY_PIPELINE_STATISTIC_GS_INVOCATIONS_BIT = 0x00000008,
+ VK_QUERY_PIPELINE_STATISTIC_GS_PRIMITIVES_BIT = 0x00000010,
+ VK_QUERY_PIPELINE_STATISTIC_C_INVOCATIONS_BIT = 0x00000020,
+ VK_QUERY_PIPELINE_STATISTIC_C_PRIMITIVES_BIT = 0x00000040,
+ VK_QUERY_PIPELINE_STATISTIC_FS_INVOCATIONS_BIT = 0x00000080,
+ VK_QUERY_PIPELINE_STATISTIC_TCS_PATCHES_BIT = 0x00000100,
+ VK_QUERY_PIPELINE_STATISTIC_TES_INVOCATIONS_BIT = 0x00000200,
+ VK_QUERY_PIPELINE_STATISTIC_CS_INVOCATIONS_BIT = 0x00000400,
+} VkQueryPipelineStatisticFlagBits;
+typedef VkFlags VkQueryPipelineStatisticFlags;
+
+typedef enum {
+ VK_QUERY_RESULT_DEFAULT = 0,
+ VK_QUERY_RESULT_64_BIT = 0x00000001,
+ VK_QUERY_RESULT_WAIT_BIT = 0x00000002,
+ VK_QUERY_RESULT_WITH_AVAILABILITY_BIT = 0x00000004,
+ VK_QUERY_RESULT_PARTIAL_BIT = 0x00000008,
+} VkQueryResultFlagBits;
+typedef VkFlags VkQueryResultFlags;
+
+typedef struct {
+ VkStructureType sType;
+ const void* pNext;
+ VkQueryType queryType;
+ uint32_t slots;
+ VkQueryPipelineStatisticFlags pipelineStatistics;
+} VkQueryPoolCreateInfo;
+
+VkResult VKAPI vkCreateQueryPool(
+ VkDevice device,
+ const VkQueryPoolCreateInfo* pCreateInfo,
+ VkQueryPool* pQueryPool);
+
+VkResult VKAPI vkGetQueryPoolResults(
+ VkDevice device,
+ VkQueryPool queryPool,
+ uint32_t startQuery,
+ uint32_t queryCount,
+ size_t* pDataSize,
+ void* pData,
+ VkQueryResultFlags flags);
+
+void VKAPI vkCmdBeginQuery(
+ VkCmdBuffer cmdBuffer,
+ VkQueryPool queryPool,
+ uint32_t slot,
+ VkQueryControlFlags flags);
+
+void VKAPI vkCmdEndQuery(
+ VkCmdBuffer cmdBuffer,
+ VkQueryPool queryPool,
+ uint32_t slot);
+
+void VKAPI vkCmdResetQueryPool(
+ VkCmdBuffer cmdBuffer,
+ VkQueryPool queryPool,
+ uint32_t startQuery,
+ uint32_t queryCount);
+
+void VKAPI vkCmdCopyQueryPoolResults(
+ VkCmdBuffer cmdBuffer,
+ VkQueryPool queryPool,
+ uint32_t startQuery,
+ uint32_t queryCount,
+ VkBuffer destBuffer,
+ VkDeviceSize destOffset,
+ VkDeviceSize destStride,
+ VkQueryResultFlags flags);
+----
+
+Buffers
+-------
+
+Buffers will have a lot of coverage from memory management and access tests. Targeted buffer tests need to verify that various corner-cases and more exotic configurations work as expected.
+
+.Spec issues
+ * Does +VK_BUFFER_USAGE_GENERAL+ allow buffer to be used in any situation?
+ * All combinations of usage flags are valid?
+
+[source,c]
+----
+typedef enum {
+ VK_BUFFER_USAGE_GENERAL = 0,
+ VK_BUFFER_USAGE_TRANSFER_SOURCE_BIT = 0x00000001,
+ VK_BUFFER_USAGE_TRANSFER_DESTINATION_BIT = 0x00000002,
+ VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT = 0x00000004,
+ VK_BUFFER_USAGE_STORAGE_TEXEL_BUFFER_BIT = 0x00000008,
+ VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT = 0x00000010,
+ VK_BUFFER_USAGE_STORAGE_BUFFER_BIT = 0x00000020,
+ VK_BUFFER_USAGE_INDEX_BUFFER_BIT = 0x00000040,
+ VK_BUFFER_USAGE_VERTEX_BUFFER_BIT = 0x00000080,
+ VK_BUFFER_USAGE_INDIRECT_BUFFER_BIT = 0x00000100,
+} VkBufferUsageFlagBits;
+typedef VkFlags VkBufferUsageFlags;
+
+typedef enum {
+ VK_BUFFER_CREATE_SPARSE_BIT = 0x00000001,
+ VK_BUFFER_CREATE_SPARSE_RESIDENCY_BIT = 0x00000002,
+ VK_BUFFER_CREATE_SPARSE_ALIASED_BIT = 0x00000004,
+} VkBufferCreateFlagBits;
+typedef VkFlags VkBufferCreateFlags;
+
+typedef struct {
+ VkStructureType sType;
+ const void* pNext;
+ VkDeviceSize size;
+ VkBufferUsageFlags usage;
+ VkBufferCreateFlags flags;
+ VkSharingMode sharingMode;
+ uint32_t queueFamilyCount;
+ const uint32_t* pQueueFamilyIndices;
+} VkBufferCreateInfo;
+
+VkResult VKAPI vkCreateBuffer(
+ VkDevice device,
+ const VkBufferCreateInfo* pCreateInfo,
+ VkBuffer* pBuffer);
+----
+
+ * All (valid and supported) combinations of create and usage flags work
+ * Buffers of various sizes can be created and they report sensible memory requirements
+ * Sparse buffers: very large (limit TBD) buffers can be created
+
+[source,c]
+----
+typedef enum {
+ VK_BUFFER_VIEW_TYPE_RAW = 0,
+ VK_BUFFER_VIEW_TYPE_FORMATTED = 1,
+ VK_BUFFER_VIEW_TYPE_BEGIN_RANGE = VK_BUFFER_VIEW_TYPE_RAW,
+ VK_BUFFER_VIEW_TYPE_END_RANGE = VK_BUFFER_VIEW_TYPE_FORMATTED,
+ VK_BUFFER_VIEW_TYPE_NUM = (VK_BUFFER_VIEW_TYPE_FORMATTED - VK_BUFFER_VIEW_TYPE_RAW + 1),
+ VK_BUFFER_VIEW_TYPE_MAX_ENUM = 0x7FFFFFFF
+} VkBufferViewType;
+
+typedef struct {
+ VkStructureType sType;
+ const void* pNext;
+ VkBuffer buffer;
+ VkBufferViewType viewType;
+ VkFormat format;
+ VkDeviceSize offset;
+ VkDeviceSize range;
+} VkBufferViewCreateInfo;
+
+VkResult VKAPI vkCreateBufferView(
+ VkDevice device,
+ const VkBufferViewCreateInfo* pCreateInfo,
+ VkBufferView* pView);
+----
+
+ * Buffer views of all (valid) types and formats can be created from all (compatible) buffers
+ * Various view sizes
+ ** Complete buffer
+ ** Partial buffer
+ * View can be created before and after attaching memory to buffer
+ * Changing memory binding makes memory contents visible in already created views
+ ** Concurrently changing memory binding and creating views
+
+.Spec issues
+ * Alignment or size requirements for buffer views?
+
+Images
+------
+
+Like buffers, images will have significant coverage from other test groups that focus on various ways to access image data. Additional coverage not provided by those tests will be included in this feature group.
+
+Image functions
+~~~~~~~~~~~~~~~
+
+.Spec issues
+ * +VK_IMAGE_USAGE_GENERAL+?
+
+[source,c]
+----
+typedef enum {
+ VK_IMAGE_TYPE_1D = 0,
+ VK_IMAGE_TYPE_2D = 1,
+ VK_IMAGE_TYPE_3D = 2,
+ VK_IMAGE_TYPE_BEGIN_RANGE = VK_IMAGE_TYPE_1D,
+ VK_IMAGE_TYPE_END_RANGE = VK_IMAGE_TYPE_3D,
+ VK_IMAGE_TYPE_NUM = (VK_IMAGE_TYPE_3D - VK_IMAGE_TYPE_1D + 1),
+ VK_IMAGE_TYPE_MAX_ENUM = 0x7FFFFFFF
+} VkImageType;
+
+typedef enum {
+ VK_IMAGE_TILING_LINEAR = 0,
+ VK_IMAGE_TILING_OPTIMAL = 1,
+ VK_IMAGE_TILING_BEGIN_RANGE = VK_IMAGE_TILING_LINEAR,
+ VK_IMAGE_TILING_END_RANGE = VK_IMAGE_TILING_OPTIMAL,
+ VK_IMAGE_TILING_NUM = (VK_IMAGE_TILING_OPTIMAL - VK_IMAGE_TILING_LINEAR + 1),
+ VK_IMAGE_TILING_MAX_ENUM = 0x7FFFFFFF
+} VkImageTiling;
+
+typedef enum {
+ VK_IMAGE_USAGE_GENERAL = 0,
+ VK_IMAGE_USAGE_TRANSFER_SOURCE_BIT = 0x00000001,
+ VK_IMAGE_USAGE_TRANSFER_DESTINATION_BIT = 0x00000002,
+ VK_IMAGE_USAGE_SAMPLED_BIT = 0x00000004,
+ VK_IMAGE_USAGE_STORAGE_BIT = 0x00000008,
+ VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT = 0x00000010,
+ VK_IMAGE_USAGE_DEPTH_STENCIL_BIT = 0x00000020,
+ VK_IMAGE_USAGE_TRANSIENT_ATTACHMENT_BIT = 0x00000040,
+ VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT = 0x00000080,
+} VkImageUsageFlagBits;
+typedef VkFlags VkImageUsageFlags;
+
+typedef enum {
+ VK_IMAGE_CREATE_SPARSE_BIT = 0x00000001,
+ VK_IMAGE_CREATE_SPARSE_RESIDENCY_BIT = 0x00000002,
+ VK_IMAGE_CREATE_SPARSE_ALIASED_BIT = 0x00000004,
+ VK_IMAGE_CREATE_INVARIANT_DATA_BIT = 0x00000008,
+ VK_IMAGE_CREATE_MUTABLE_FORMAT_BIT = 0x00000010,
+ VK_IMAGE_CREATE_CUBE_COMPATIBLE_BIT = 0x00000020,
+} VkImageCreateFlagBits;
+typedef VkFlags VkImageCreateFlags;
+
+typedef struct {
+ VkStructureType sType;
+ const void* pNext;
+ VkImageType imageType;
+ VkFormat format;
+ VkExtent3D extent;
+ uint32_t mipLevels;
+ uint32_t arraySize;
+ uint32_t samples;
+ VkImageTiling tiling;
+ VkImageUsageFlags usage;
+ VkImageCreateFlags flags;
+ VkSharingMode sharingMode;
+ uint32_t queueFamilyCount;
+ const uint32_t* pQueueFamilyIndices;
+} VkImageCreateInfo;
+
+VkResult VKAPI vkCreateImage(
+ VkDevice device,
+ const VkImageCreateInfo* pCreateInfo,
+ VkImage* pImage);
+
+VkResult VKAPI vkGetImageSubresourceLayout(
+ VkDevice device,
+ VkImage image,
+ const VkImageSubresource* pSubresource,
+ VkSubresourceLayout* pLayout);
+----
+
+ * All valid and supported combinations of image parameters
+ ** Sampling verification with nearest only (other modes will be covered separately)
+ * Various image sizes
+ * Linear-layout images & writing data from CPU
+ * Copying data between identical opaque-layout images on CPU?
+
+Image view functions
+~~~~~~~~~~~~~~~~~~~~
+
+.Spec issues
+ * What are format compatibility rules?
+ * Can color/depth/stencil attachments write to an image which has a different format?
+ ** Can I create DS view of RGBA texture and write to only one component by creating VkDepthStencilView for example?
+ * Image view granularity
+ ** All sub-rects allowed? In all use cases (RTs for example)?
+ * Memory access granularity
+ ** Writing concurrently to different areas of same memory backed by same/different image or view
+
+[source,c]
+----
+typedef struct {
+ VkChannelSwizzle r;
+ VkChannelSwizzle g;
+ VkChannelSwizzle b;
+ VkChannelSwizzle a;
+} VkChannelMapping;
+
+typedef struct {
+ VkImageAspect aspect;
+ uint32_t baseMipLevel;
+ uint32_t mipLevels;
+ uint32_t baseArraySlice;
+ uint32_t arraySize;
+} VkImageSubresourceRange;
+
+typedef struct {
+ VkStructureType sType;
+ const void* pNext;
+ VkImage image;
+ VkImageViewType viewType;
+ VkFormat format;
+ VkChannelMapping channels;
+ VkImageSubresourceRange subresourceRange;
+} VkImageViewCreateInfo;
+
+VkResult VKAPI vkCreateImageView(
+ VkDevice device,
+ const VkImageViewCreateInfo* pCreateInfo,
+ VkImageView* pView);
+----
+
+ * Image views of all (valid) types and formats can be created from all (compatible) images
+ * Channel swizzles
+ * Depth- and stencil-mode
+ * Different formats
+ * Various view sizes
+ ** Complete image
+ ** Partial image (mip- or array slice)
+ * View can be created before and after attaching memory to image
+ * Changing memory binding makes memory contents visible in already created views
+ ** Concurrently changing memory binding and creating views
+
+[source,c]
+----
+typedef enum {
+ VK_ATTACHMENT_VIEW_CREATE_READ_ONLY_DEPTH_BIT = 0x00000001,
+ VK_ATTACHMENT_VIEW_CREATE_READ_ONLY_STENCIL_BIT = 0x00000002,
+} VkAttachmentViewCreateFlagBits;
+typedef VkFlags VkAttachmentViewCreateFlags;
+
+typedef struct {
+ VkStructureType sType;
+ const void* pNext;
+ VkImage image;
+ VkFormat format;
+ uint32_t mipLevel;
+ uint32_t baseArraySlice;
+ uint32_t arraySize;
+ VkAttachmentViewCreateFlags flags;
+} VkAttachmentViewCreateInfo;
+
+VkResult VKAPI vkCreateAttachmentView(
+ VkDevice device,
+ const VkAttachmentViewCreateInfo* pCreateInfo,
+ VkAttachmentView* pView);
+----
+
+ * Writing to color/depth/stencil attachments in various view configurations
+ ** Multipass tests will contain some coverage for this
+ ** Image layout
+ ** View size
+ ** Image mip- or array sub-range
+ * +msaaResolveImage+
+ ** TODO What is exactly this?
+
+Shaders
+-------
+
+Shader API tests will verify that shader loading functions behave as expected. Verifying that various SPIR-V constructs are accepted and executed correctly however is not an objective; that will be covered more extensively by a separate SPIR-V test set.
+
+[source,c]
+----
+typedef VkFlags VkShaderModuleCreateFlags;
+typedef VkFlags VkShaderCreateFlags;
+
+typedef struct {
+ VkStructureType sType;
+ const void* pNext;
+ size_t codeSize;
+ const void* pCode;
+ VkShaderModuleCreateFlags flags;
+} VkShaderModuleCreateInfo;
+
+VkResult VKAPI vkCreateShaderModule(
+ VkDevice device,
+ const VkShaderModuleCreateInfo* pCreateInfo,
+ VkShaderModule* pShaderModule);
+
+typedef struct {
+ VkStructureType sType;
+ const void* pNext;
+ VkShaderModule module;
+ const char* pName;
+ VkShaderCreateFlags flags;
+} VkShaderCreateInfo;
+
+VkResult VKAPI vkCreateShader(
+ VkDevice device,
+ const VkShaderCreateInfo* pCreateInfo,
+ VkShader* pShader);
+----
+
+Pipelines
+---------
+
+Construction
+~~~~~~~~~~~~
+
+Pipeline tests will create various pipelines and verify that rendering results appear to match (resulting HW pipeline is correct). Neither fixed-function unit corner-cases nor accuracy are verified. It is not possible to exhaustively test all pipeline configurations so tests have to test some areas in isolation and extend coverage with randomized tests.
+
+[source,c]
+----
+typedef enum {
+ VK_PIPELINE_CREATE_DISABLE_OPTIMIZATION_BIT = 0x00000001,
+ VK_PIPELINE_CREATE_ALLOW_DERIVATIVES_BIT = 0x00000002,
+ VK_PIPELINE_CREATE_DERIVATIVE_BIT = 0x00000004,
+} VkPipelineCreateFlagBits;
+typedef VkFlags VkPipelineCreateFlags;
+
+typedef struct {
+ uint32_t constantId;
+ size_t size;
+ uint32_t offset;
+} VkSpecializationMapEntry;
+
+typedef struct {
+ uint32_t mapEntryCount;
+ const VkSpecializationMapEntry* pMap;
+ const size_t dataSize;
+ const void* pData;
+} VkSpecializationInfo;
+
+typedef struct {
+ VkStructureType sType;
+ const void* pNext;
+ VkShaderStage stage;
+ VkShader shader;
+ const VkSpecializationInfo* pSpecializationInfo;
+} VkPipelineShaderStageCreateInfo;
+
+typedef struct {
+ uint32_t binding;
+ uint32_t strideInBytes;
+ VkVertexInputStepRate stepRate;
+} VkVertexInputBindingDescription;
+
+typedef struct {
+ uint32_t location;
+ uint32_t binding;
+ VkFormat format;
+ uint32_t offsetInBytes;
+} VkVertexInputAttributeDescription;
+
+typedef struct {
+ VkStructureType sType;
+ const void* pNext;
+ uint32_t bindingCount;
+ const VkVertexInputBindingDescription* pVertexBindingDescriptions;
+ uint32_t attributeCount;
+ const VkVertexInputAttributeDescription* pVertexAttributeDescriptions;
+} VkPipelineVertexInputStateCreateInfo;
+
+typedef struct {
+ VkStructureType sType;
+ const void* pNext;
+ VkPrimitiveTopology topology;
+ VkBool32 primitiveRestartEnable;
+} VkPipelineInputAssemblyStateCreateInfo;
+
+typedef struct {
+ VkStructureType sType;
+ const void* pNext;
+ uint32_t patchControlPoints;
+} VkPipelineTessellationStateCreateInfo;
+
+typedef struct {
+ VkStructureType sType;
+ const void* pNext;
+ uint32_t viewportCount;
+} VkPipelineViewportStateCreateInfo;
+
+typedef struct {
+ VkStructureType sType;
+ const void* pNext;
+ VkBool32 depthClipEnable;
+ VkBool32 rasterizerDiscardEnable;
+ VkFillMode fillMode;
+ VkCullMode cullMode;
+ VkFrontFace frontFace;
+} VkPipelineRasterStateCreateInfo;
+
+typedef struct {
+ VkStructureType sType;
+ const void* pNext;
+ uint32_t rasterSamples;
+ VkBool32 sampleShadingEnable;
+ float minSampleShading;
+ VkSampleMask sampleMask;
+} VkPipelineMultisampleStateCreateInfo;
+
+typedef struct {
+ VkStencilOp stencilFailOp;
+ VkStencilOp stencilPassOp;
+ VkStencilOp stencilDepthFailOp;
+ VkCompareOp stencilCompareOp;
+} VkStencilOpState;
+
+typedef struct {
+ VkStructureType sType;
+ const void* pNext;
+ VkBool32 depthTestEnable;
+ VkBool32 depthWriteEnable;
+ VkCompareOp depthCompareOp;
+ VkBool32 depthBoundsEnable;
+ VkBool32 stencilTestEnable;
+ VkStencilOpState front;
+ VkStencilOpState back;
+} VkPipelineDepthStencilStateCreateInfo;
+
+typedef struct {
+ VkBool32 blendEnable;
+ VkBlend srcBlendColor;
+ VkBlend destBlendColor;
+ VkBlendOp blendOpColor;
+ VkBlend srcBlendAlpha;
+ VkBlend destBlendAlpha;
+ VkBlendOp blendOpAlpha;
+ VkChannelFlags channelWriteMask;
+} VkPipelineColorBlendAttachmentState;
+
+typedef struct {
+ VkStructureType sType;
+ const void* pNext;
+ VkBool32 alphaToCoverageEnable;
+ VkBool32 logicOpEnable;
+ VkLogicOp logicOp;
+ uint32_t attachmentCount;
+ const VkPipelineColorBlendAttachmentState* pAttachments;
+} VkPipelineColorBlendStateCreateInfo;
+
+typedef struct {
+ VkStructureType sType;
+ const void* pNext;
+ uint32_t stageCount;
+ const VkPipelineShaderStageCreateInfo* pStages;
+ const VkPipelineVertexInputStateCreateInfo* pVertexInputState;
+ const VkPipelineInputAssemblyStateCreateInfo* pInputAssemblyState;
+ const VkPipelineTessellationStateCreateInfo* pTessellationState;
+ const VkPipelineViewportStateCreateInfo* pViewportState;
+ const VkPipelineRasterStateCreateInfo* pRasterState;
+ const VkPipelineMultisampleStateCreateInfo* pMultisampleState;
+ const VkPipelineDepthStencilStateCreateInfo* pDepthStencilState;
+ const VkPipelineColorBlendStateCreateInfo* pColorBlendState;
+ VkPipelineCreateFlags flags;
+ VkPipelineLayout layout;
+ VkRenderPass renderPass;
+ uint32_t subpass;
+ VkPipeline basePipelineHandle;
+ int32_t basePipelineIndex;
+} VkGraphicsPipelineCreateInfo;
+
+VkResult VKAPI vkCreateGraphicsPipelines(
+ VkDevice device,
+ VkPipelineCache pipelineCache,
+ uint32_t count,
+ const VkGraphicsPipelineCreateInfo* pCreateInfos,
+ VkPipeline* pPipelines);
+
+typedef struct {
+ VkStructureType sType;
+ const void* pNext;
+ VkPipelineShaderStageCreateInfo cs;
+ VkPipelineCreateFlags flags;
+ VkPipelineLayout layout;
+ VkPipeline basePipelineHandle;
+ int32_t basePipelineIndex;
+} VkComputePipelineCreateInfo;
+
+VkResult VKAPI vkCreateComputePipelines(
+ VkDevice device,
+ VkPipelineCache pipelineCache,
+ uint32_t count,
+ const VkComputePipelineCreateInfo* pCreateInfos,
+ VkPipeline* pPipelines);
+----
+
+Pipeline caches
+^^^^^^^^^^^^^^^
+
+Extend pipeline tests to use pipeline caches, and test that pipelines created from a pre-populated cache still produce identical results to pipelines created with an empty cache.
+
+Verify that maximum cache size is not exceeded.
+
+[source,c]
+----
+typedef struct {
+ VkStructureType sType;
+ const void* pNext;
+ size_t initialSize;
+ const void* initialData;
+ size_t maxSize;
+} VkPipelineCacheCreateInfo;
+
+VkResult VKAPI vkCreatePipelineCache(
+ VkDevice device,
+ const VkPipelineCacheCreateInfo* pCreateInfo,
+ VkPipelineCache* pPipelineCache);
+
+size_t VKAPI vkGetPipelineCacheSize(
+ VkDevice device,
+ VkPipelineCache pipelineCache);
+
+VkResult VKAPI vkGetPipelineCacheData(
+ VkDevice device,
+ VkPipelineCache pipelineCache,
+ void* pData);
+
+VkResult VKAPI vkMergePipelineCaches(
+ VkDevice device,
+ VkPipelineCache destCache,
+ uint32_t srcCacheCount,
+ const VkPipelineCache* pSrcCaches);
+----
+
+Pipeline state
+~~~~~~~~~~~~~~
+
+Pipeline tests, as they need to verify rendering results, will provide a lot of coverage for pipeline state manipulation. In addition some corner-case tests are needed:
+
+ * Re-setting pipeline state bits before use
+ * Carrying / manipulating only part of state over draw calls
+ * Submitting command buffers that have only pipeline state manipulation calls (should be no-op)
+
+.Spec issues
+ * Does vkCmdBindPipeline invalidate other state bits?
+
+[source,c]
+----
+void VKAPI vkCmdBindPipeline(
+ VkCmdBuffer cmdBuffer,
+ VkPipelineBindPoint pipelineBindPoint,
+ VkPipeline pipeline);
+
+void VKAPI vkCmdBindDescriptorSets(
+ VkCmdBuffer cmdBuffer,
+ VkPipelineBindPoint pipelineBindPoint,
+ VkPipelineLayout layout,
+ uint32_t firstSet,
+ uint32_t setCount,
+ const VkDescriptorSet* pDescriptorSets,
+ uint32_t dynamicOffsetCount,
+ const uint32_t* pDynamicOffsets);
+
+void VKAPI vkCmdBindIndexBuffer(
+ VkCmdBuffer cmdBuffer,
+ VkBuffer buffer,
+ VkDeviceSize offset,
+ VkIndexType indexType);
+
+void VKAPI vkCmdBindVertexBuffers(
+ VkCmdBuffer cmdBuffer,
+ uint32_t startBinding,
+ uint32_t bindingCount,
+ const VkBuffer* pBuffers,
+ const VkDeviceSize* pOffsets);
+----
+
+Samplers
+--------
+
+Sampler tests verify that sampler parameters are mapped to correct HW state. That will be verified by sampling various textures in certain configurations (as listed below). More exhaustive texture filtering verification will be done separately.
+
+ * All valid sampler state configurations
+ * Selected texture formats (RGBA8, FP16, integer textures)
+ * All texture types
+ * Mip-mapping with explicit and implicit LOD
+
+[source,c]
+----
+typedef enum {
+ VK_TEX_FILTER_NEAREST = 0,
+ VK_TEX_FILTER_LINEAR = 1,
+ VK_TEX_FILTER_BEGIN_RANGE = VK_TEX_FILTER_NEAREST,
+ VK_TEX_FILTER_END_RANGE = VK_TEX_FILTER_LINEAR,
+ VK_TEX_FILTER_NUM = (VK_TEX_FILTER_LINEAR - VK_TEX_FILTER_NEAREST + 1),
+ VK_TEX_FILTER_MAX_ENUM = 0x7FFFFFFF
+} VkTexFilter;
+
+typedef enum {
+ VK_TEX_MIPMAP_MODE_BASE = 0,
+ VK_TEX_MIPMAP_MODE_NEAREST = 1,
+ VK_TEX_MIPMAP_MODE_LINEAR = 2,
+ VK_TEX_MIPMAP_MODE_BEGIN_RANGE = VK_TEX_MIPMAP_MODE_BASE,
+ VK_TEX_MIPMAP_MODE_END_RANGE = VK_TEX_MIPMAP_MODE_LINEAR,
+ VK_TEX_MIPMAP_MODE_NUM = (VK_TEX_MIPMAP_MODE_LINEAR - VK_TEX_MIPMAP_MODE_BASE + 1),
+ VK_TEX_MIPMAP_MODE_MAX_ENUM = 0x7FFFFFFF
+} VkTexMipmapMode;
+
+typedef enum {
+ VK_TEX_ADDRESS_WRAP = 0,
+ VK_TEX_ADDRESS_MIRROR = 1,
+ VK_TEX_ADDRESS_CLAMP = 2,
+ VK_TEX_ADDRESS_MIRROR_ONCE = 3,
+ VK_TEX_ADDRESS_CLAMP_BORDER = 4,
+ VK_TEX_ADDRESS_BEGIN_RANGE = VK_TEX_ADDRESS_WRAP,
+ VK_TEX_ADDRESS_END_RANGE = VK_TEX_ADDRESS_CLAMP_BORDER,
+ VK_TEX_ADDRESS_NUM = (VK_TEX_ADDRESS_CLAMP_BORDER - VK_TEX_ADDRESS_WRAP + 1),
+ VK_TEX_ADDRESS_MAX_ENUM = 0x7FFFFFFF
+} VkTexAddress;
+
+typedef enum {
+ VK_BORDER_COLOR_FLOAT_TRANSPARENT_BLACK = 0,
+ VK_BORDER_COLOR_INT_TRANSPARENT_BLACK = 1,
+ VK_BORDER_COLOR_FLOAT_OPAQUE_BLACK = 2,
+ VK_BORDER_COLOR_INT_OPAQUE_BLACK = 3,
+ VK_BORDER_COLOR_FLOAT_OPAQUE_WHITE = 4,
+ VK_BORDER_COLOR_INT_OPAQUE_WHITE = 5,
+ VK_BORDER_COLOR_BEGIN_RANGE = VK_BORDER_COLOR_FLOAT_TRANSPARENT_BLACK,
+ VK_BORDER_COLOR_END_RANGE = VK_BORDER_COLOR_INT_OPAQUE_WHITE,
+ VK_BORDER_COLOR_NUM = (VK_BORDER_COLOR_INT_OPAQUE_WHITE - VK_BORDER_COLOR_FLOAT_TRANSPARENT_BLACK + 1),
+ VK_BORDER_COLOR_MAX_ENUM = 0x7FFFFFFF
+} VkBorderColor;
+
+typedef struct {
+ VkStructureType sType;
+ const void* pNext;
+ VkTexFilter magFilter;
+ VkTexFilter minFilter;
+ VkTexMipmapMode mipMode;
+ VkTexAddress addressU;
+ VkTexAddress addressV;
+ VkTexAddress addressW;
+ float mipLodBias;
+ float maxAnisotropy;
+ VkBool32 compareEnable;
+ VkCompareOp compareOp;
+ float minLod;
+ float maxLod;
+ VkBorderColor borderColor;
+} VkSamplerCreateInfo;
+
+VkResult VKAPI vkCreateSampler(
+ VkDevice device,
+ const VkSamplerCreateInfo* pCreateInfo,
+ VkSampler* pSampler);
+----
+
+Dynamic state objects
+---------------------
+
+Pipeline tests will include coverage for most dynamic state object usage as some pipeline configurations need corresponding dynamic state objects. In addition there are a couple of corner-cases worth exploring separately:
+
+ * Re-setting dynamic state bindings one or more times before first use
+ * Dynamic state object binding persistence over pipeline changes
+ * Large amounts of unique dynamic state objects in a command buffer, pass, or multipass
+
+[source,c]
+----
+// Viewport
+
+typedef struct {
+ float originX;
+ float originY;
+ float width;
+ float height;
+ float minDepth;
+ float maxDepth;
+} VkViewport;
+
+typedef struct {
+ VkStructureType sType;
+ const void* pNext;
+ uint32_t viewportAndScissorCount;
+ const VkViewport* pViewports;
+ const VkRect2D* pScissors;
+} VkDynamicViewportStateCreateInfo;
+
+VkResult VKAPI vkCreateDynamicViewportState(
+ VkDevice device,
+ const VkDynamicViewportStateCreateInfo* pCreateInfo,
+ VkDynamicViewportState* pState);
+
+void VKAPI vkCmdBindDynamicViewportState(
+ VkCmdBuffer cmdBuffer,
+ VkDynamicViewportState dynamicViewportState);
+
+// Raster
+
+typedef struct {
+ VkStructureType sType;
+ const void* pNext;
+ float depthBias;
+ float depthBiasClamp;
+ float slopeScaledDepthBias;
+ float lineWidth;
+} VkDynamicRasterStateCreateInfo;
+
+VkResult VKAPI vkCreateDynamicRasterState(
+ VkDevice device,
+ const VkDynamicRasterStateCreateInfo* pCreateInfo,
+ VkDynamicRasterState* pState);
+
+void VKAPI vkCmdBindDynamicRasterState(
+ VkCmdBuffer cmdBuffer,
+ VkDynamicRasterState dynamicRasterState);
+
+// Color blend
+
+typedef struct {
+ VkStructureType sType;
+ const void* pNext;
+ float blendConst[4];
+} VkDynamicColorBlendStateCreateInfo;
+
+VkResult VKAPI vkCreateDynamicColorBlendState(
+ VkDevice device,
+ const VkDynamicColorBlendStateCreateInfo* pCreateInfo,
+ VkDynamicColorBlendState* pState);
+
+void VKAPI vkCmdBindDynamicColorBlendState(
+ VkCmdBuffer cmdBuffer,
+ VkDynamicColorBlendState dynamicColorBlendState);
+
+// Depth & stencil
+
+typedef struct {
+ VkStructureType sType;
+ const void* pNext;
+ float minDepthBounds;
+ float maxDepthBounds;
+ uint32_t stencilReadMask;
+ uint32_t stencilWriteMask;
+ uint32_t stencilFrontRef;
+ uint32_t stencilBackRef;
+} VkDynamicDepthStencilStateCreateInfo;
+
+VkResult VKAPI vkCreateDynamicDepthStencilState(
+ VkDevice device,
+ const VkDynamicDepthStencilStateCreateInfo* pCreateInfo,
+ VkDynamicDepthStencilState* pState);
+
+void VKAPI vkCmdBindDynamicDepthStencilState(
+ VkCmdBuffer cmdBuffer,
+ VkDynamicDepthStencilState dynamicDepthStencilState);
+----
+
+Command buffers
+---------------
+
+Tests for various rendering features will provide significant coverage for command buffer recording. Additional coverage will be needed for:
+
+ * Re-setting command buffers
+ * Very small (empty) and large command buffers
+ * Various optimize flags combined with various command buffer sizes and contents
+ ** Forcing optimize flags in other tests might be useful for finding cases that may break
+
+[source,c]
+----
+typedef enum {
+ VK_CMD_BUFFER_LEVEL_PRIMARY = 0,
+ VK_CMD_BUFFER_LEVEL_SECONDARY = 1,
+ VK_CMD_BUFFER_LEVEL_BEGIN_RANGE = VK_CMD_BUFFER_LEVEL_PRIMARY,
+ VK_CMD_BUFFER_LEVEL_END_RANGE = VK_CMD_BUFFER_LEVEL_SECONDARY,
+ VK_CMD_BUFFER_LEVEL_NUM = (VK_CMD_BUFFER_LEVEL_SECONDARY - VK_CMD_BUFFER_LEVEL_PRIMARY + 1),
+ VK_CMD_BUFFER_LEVEL_MAX_ENUM = 0x7FFFFFFF
+} VkCmdBufferLevel;
+
+typedef VkFlags VkCmdBufferCreateFlags;
+
+typedef struct {
+ VkStructureType sType;
+ const void* pNext;
+ VkCmdPool cmdPool;
+ VkCmdBufferLevel level;
+ VkCmdBufferCreateFlags flags;
+} VkCmdBufferCreateInfo;
+
+VkResult VKAPI vkCreateCommandBuffer(
+ VkDevice device,
+ const VkCmdBufferCreateInfo* pCreateInfo,
+ VkCmdBuffer* pCmdBuffer);
+
+typedef struct {
+ VkStructureType sType;
+ const void* pNext;
+ VkCmdBufferOptimizeFlags flags;
+ VkRenderPass renderPass;
+ VkFramebuffer framebuffer;
+} VkCmdBufferBeginInfo;
+
+typedef enum {
+ VK_CMD_BUFFER_OPTIMIZE_SMALL_BATCH_BIT = 0x00000001,
+ VK_CMD_BUFFER_OPTIMIZE_PIPELINE_SWITCH_BIT = 0x00000002,
+ VK_CMD_BUFFER_OPTIMIZE_ONE_TIME_SUBMIT_BIT = 0x00000004,
+ VK_CMD_BUFFER_OPTIMIZE_DESCRIPTOR_SET_SWITCH_BIT = 0x00000008,
+ VK_CMD_BUFFER_OPTIMIZE_NO_SIMULTANEOUS_USE_BIT = 0x00000010,
+} VkCmdBufferOptimizeFlagBits;
+typedef VkFlags VkCmdBufferOptimizeFlags;
+
+VkResult VKAPI vkBeginCommandBuffer(
+ VkCmdBuffer cmdBuffer,
+ const VkCmdBufferBeginInfo* pBeginInfo);
+
+VkResult VKAPI vkEndCommandBuffer(
+ VkCmdBuffer cmdBuffer);
+
+typedef enum {
+ VK_CMD_BUFFER_RESET_RELEASE_RESOURCES = 0x00000001,
+} VkCmdBufferResetFlagBits;
+typedef VkFlags VkCmdBufferResetFlags;
+
+VkResult VKAPI vkResetCommandBuffer(
+ VkCmdBuffer cmdBuffer,
+ VkCmdBufferResetFlags flags);
+----
+
+Command pools
+~~~~~~~~~~~~~
+
+TODO
+
+[source,c]
+----
+typedef enum {
+ VK_CMD_POOL_CREATE_TRANSIENT_BIT = 0x00000001,
+ VK_CMD_POOL_CREATE_RESET_COMMAND_BUFFER_BIT = 0x00000002,
+} VkCmdPoolCreateFlagBits;
+typedef VkFlags VkCmdPoolCreateFlags;
+
+typedef struct {
+ VkStructureType sType;
+ const void* pNext;
+ uint32_t queueFamilyIndex;
+ VkCmdPoolCreateFlags flags;
+} VkCmdPoolCreateInfo;
+
+VkResult VKAPI vkCreateCommandPool(
+ VkDevice device,
+ const VkCmdPoolCreateInfo* pCreateInfo,
+ VkCmdPool* pCmdPool);
+
+typedef enum {
+ VK_CMD_POOL_RESET_RELEASE_RESOURCES = 0x00000001,
+} VkCmdPoolResetFlagBits;
+typedef VkFlags VkCmdPoolResetFlags;
+
+VkResult VKAPI vkResetCommandPool(
+ VkDevice device,
+ VkCmdPool cmdPool,
+ VkCmdPoolResetFlags flags);
+----
+
+2-level command buffers
+~~~~~~~~~~~~~~~~~~~~~~~
+
+TODO
+
+[source,c]
+----
+void VKAPI vkCmdExecuteCommands(
+ VkCmdBuffer cmdBuffer,
+ uint32_t cmdBuffersCount,
+ const VkCmdBuffer* pCmdBuffers);
+----
+
+Draw commands
+-------------
+
+Draw command tests verify that all draw parameters are respected (including vertex input state) and various draw call sizes work correctly. The tests won't however validate that all side effects of shader invocations happen as intended (covered by feature-specific tests) nor that primitive rasterization is fully correct (will be covered by separate targeted tests).
+
+[source,c]
+----
+void VKAPI vkCmdDraw(
+ VkCmdBuffer cmdBuffer,
+ uint32_t firstVertex,
+ uint32_t vertexCount,
+ uint32_t firstInstance,
+ uint32_t instanceCount);
+
+void VKAPI vkCmdDrawIndexed(
+ VkCmdBuffer cmdBuffer,
+ uint32_t firstIndex,
+ uint32_t indexCount,
+ int32_t vertexOffset,
+ uint32_t firstInstance,
+ uint32_t instanceCount);
+
+void VKAPI vkCmdDrawIndirect(
+ VkCmdBuffer cmdBuffer,
+ VkBuffer buffer,
+ VkDeviceSize offset,
+ uint32_t count,
+ uint32_t stride);
+
+void VKAPI vkCmdDrawIndexedIndirect(
+ VkCmdBuffer cmdBuffer,
+ VkBuffer buffer,
+ VkDeviceSize offset,
+ uint32_t count,
+ uint32_t stride);
+----
+
+Compute
+-------
+
+Like draw tests, compute dispatch tests will validate that call parameters have desired effects. In addition compute tests need to verify that various dispatch parameters (number of work groups, invocation IDs) are passed correctly to the shader invocations.
+
+NOTE: Assuming that compute-specific shader features, such as shared memory access, are covered by SPIR-V tests.
+
+[source,c]
+----
+void VKAPI vkCmdDispatch(
+ VkCmdBuffer cmdBuffer,
+ uint32_t x,
+ uint32_t y,
+ uint32_t z);
+
+void VKAPI vkCmdDispatchIndirect(
+ VkCmdBuffer cmdBuffer,
+ VkBuffer buffer,
+ VkDeviceSize offset);
+----
+
+Copies and blits
+----------------
+
+Buffer copies
+~~~~~~~~~~~~~
+
+Buffer copy tests need to validate that copies and updates happen as expected for both simple and more complex cases:
+
+ * Whole-buffer, partial copies
+ * Small (1 byte) to very large copies and updates
+ * Copies between objects backed by same memory
+
+NOTE: GPU cache control tests need to verify copy source and destination visibility as well.
+
+.Spec issues
+ * Overlapping copies?
+ ** Simple overlap (same buffer)
+ ** Backed by same memory object
+
+[source,c]
+----
+typedef struct {
+ VkDeviceSize srcOffset;
+ VkDeviceSize destOffset;
+ VkDeviceSize copySize;
+} VkBufferCopy;
+
+void VKAPI vkCmdCopyBuffer(
+ VkCmdBuffer cmdBuffer,
+ VkBuffer srcBuffer,
+ VkBuffer destBuffer,
+ uint32_t regionCount,
+ const VkBufferCopy* pRegions);
+
+void VKAPI vkCmdUpdateBuffer(
+ VkCmdBuffer cmdBuffer,
+ VkBuffer destBuffer,
+ VkDeviceSize destOffset,
+ VkDeviceSize dataSize,
+ const uint32_t* pData);
+
+void VKAPI vkCmdFillBuffer(
+ VkCmdBuffer cmdBuffer,
+ VkBuffer destBuffer,
+ VkDeviceSize destOffset,
+ VkDeviceSize fillSize,
+ uint32_t data);
+----
+
+Image copies
+~~~~~~~~~~~~
+
+.Spec issues
+ * What kind of copies are allowed? Blits?
+ * Copy is simply reinterpretation of data?
+ * Does blit unpack & pack data like in GL?
+ ** sRGB conversions
+
+[source,c]
+----
+typedef struct {
+ VkImageSubresource srcSubresource;
+ VkOffset3D srcOffset;
+ VkImageSubresource destSubresource;
+ VkOffset3D destOffset;
+ VkExtent3D extent;
+} VkImageCopy;
+
+typedef struct {
+ VkImageSubresource srcSubresource;
+ VkOffset3D srcOffset;
+ VkExtent3D srcExtent;
+ VkImageSubresource destSubresource;
+ VkOffset3D destOffset;
+ VkExtent3D destExtent;
+} VkImageBlit;
+
+void VKAPI vkCmdCopyImage(
+ VkCmdBuffer cmdBuffer,
+ VkImage srcImage,
+ VkImageLayout srcImageLayout,
+ VkImage destImage,
+ VkImageLayout destImageLayout,
+ uint32_t regionCount,
+ const VkImageCopy* pRegions);
+
+void VKAPI vkCmdBlitImage(
+ VkCmdBuffer cmdBuffer,
+ VkImage srcImage,
+ VkImageLayout srcImageLayout,
+ VkImage destImage,
+ VkImageLayout destImageLayout,
+ uint32_t regionCount,
+ const VkImageBlit* pRegions,
+ VkTexFilter filter);
+----
+
+Copies between buffers and images
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+[source,c]
+----
+typedef struct {
+ VkDeviceSize bufferOffset;
+ uint32_t bufferRowLength;
+ uint32_t bufferImageHeight;
+ VkImageSubresource imageSubresource;
+ VkOffset3D imageOffset;
+ VkExtent3D imageExtent;
+} VkBufferImageCopy;
+
+void VKAPI vkCmdCopyBufferToImage(
+ VkCmdBuffer cmdBuffer,
+ VkBuffer srcBuffer,
+ VkImage destImage,
+ VkImageLayout destImageLayout,
+ uint32_t regionCount,
+ const VkBufferImageCopy* pRegions);
+
+void VKAPI vkCmdCopyImageToBuffer(
+ VkCmdBuffer cmdBuffer,
+ VkImage srcImage,
+ VkImageLayout srcImageLayout,
+ VkBuffer destBuffer,
+ uint32_t regionCount,
+ const VkBufferImageCopy* pRegions);
+----
+
+Clearing images
+~~~~~~~~~~~~~~~
+
+[source,c]
+----
+typedef union {
+ float f32[4];
+ int32_t s32[4];
+ uint32_t u32[4];
+} VkClearColorValue;
+
+typedef struct {
+ float depth;
+ uint32_t stencil;
+} VkClearDepthStencilValue;
+
+typedef union {
+ VkClearColorValue color;
+ VkClearDepthStencilValue ds;
+} VkClearValue;
+
+void VKAPI vkCmdClearColorImage(
+ VkCmdBuffer cmdBuffer,
+ VkImage image,
+ VkImageLayout imageLayout,
+ const VkClearColorValue* pColor,
+ uint32_t rangeCount,
+ const VkImageSubresourceRange* pRanges);
+
+void VKAPI vkCmdClearDepthStencilImage(
+ VkCmdBuffer cmdBuffer,
+ VkImage image,
+ VkImageLayout imageLayout,
+ float depth,
+ uint32_t stencil,
+ uint32_t rangeCount,
+ const VkImageSubresourceRange* pRanges);
+
+void VKAPI vkCmdClearColorAttachment(
+ VkCmdBuffer cmdBuffer,
+ uint32_t colorAttachment,
+ VkImageLayout imageLayout,
+ const VkClearColorValue* pColor,
+ uint32_t rectCount,
+ const VkRect3D* pRects);
+
+void VKAPI vkCmdClearDepthStencilAttachment(
+ VkCmdBuffer cmdBuffer,
+ VkImageAspectFlags imageAspectMask,
+ VkImageLayout imageLayout,
+ float depth,
+ uint32_t stencil,
+ uint32_t rectCount,
+ const VkRect3D* pRects);
+----
+
+Multisample resolve
+~~~~~~~~~~~~~~~~~~~
+
+[source,c]
+----
+typedef struct {
+ VkImageSubresource srcSubresource;
+ VkOffset3D srcOffset;
+ VkImageSubresource destSubresource;
+ VkOffset3D destOffset;
+ VkExtent3D extent;
+} VkImageResolve;
+
+void VKAPI vkCmdResolveImage(
+ VkCmdBuffer cmdBuffer,
+ VkImage srcImage,
+ VkImageLayout srcImageLayout,
+ VkImage destImage,
+ VkImageLayout destImageLayout,
+ uint32_t regionCount,
+ const VkImageResolve* pRegions);
+----
+
+Push constants
+--------------
+
+TODO
+
+[source,c]
+----
+void VKAPI vkCmdPushConstants(
+ VkCmdBuffer cmdBuffer,
+ VkPipelineLayout layout,
+ VkShaderStageFlags stageFlags,
+ uint32_t start,
+ uint32_t length,
+ const void* values);
+----
+
+GPU timestamps
+--------------
+
+[source,c]
+----
+typedef enum {
+ VK_TIMESTAMP_TYPE_TOP = 0,
+ VK_TIMESTAMP_TYPE_BOTTOM = 1,
+ VK_TIMESTAMP_TYPE_BEGIN_RANGE = VK_TIMESTAMP_TYPE_TOP,
+ VK_TIMESTAMP_TYPE_END_RANGE = VK_TIMESTAMP_TYPE_BOTTOM,
+ VK_TIMESTAMP_TYPE_NUM = (VK_TIMESTAMP_TYPE_BOTTOM - VK_TIMESTAMP_TYPE_TOP + 1),
+ VK_TIMESTAMP_TYPE_MAX_ENUM = 0x7FFFFFFF
+} VkTimestampType;
+
+void VKAPI vkCmdWriteTimestamp(
+ VkCmdBuffer cmdBuffer,
+ VkTimestampType timestampType,
+ VkBuffer destBuffer,
+ VkDeviceSize destOffset);
+----
+
+ * All timestamp types
+ * Various commands before and after timestamps
+ * Command buffers that only record timestamps
+ * Sanity check (to the extent possible) for timestamps
+ ** TOP >= BOTTOM
+
+.Spec issues
+ * How many bytes is a timestamp? Do we need to support both 32-bit and 64-bit?
+ * destOffset probably needs to be aligned?
+ * TOP vs. BOTTOM not well specified
+
+Validation layer tests
+----------------------
+
+Validation layer tests exercise all relevant invalid API usage patterns and verify that correct return values and error messages are generated. In addition validation tests would try to load invalid SPIR-V binaries and verify that all generic SPIR-V, and Vulkan SPIR-V environment rules are checked.
+
+Android doesn't plan to ship the validation layer as part of the system image, so validation tests are not required by Android CTS and thus are of very low priority currently.
--- /dev/null
+[attributes]\r
+newline=\n\r
+\r
+[replacements]\r
+\+\/-=±\r
"a18233c99e1dc59a256180e6871d9305a42e91b3f98799b3ceb98e87e9ec5e31",
"libpng",
postExtract = postExtractLibpng),
+ GitRepo(
+ "git@gitlab.khronos.org:spirv/spirv-tools.git",
+ "eac78e60d02c1315ae2f56866fb291dd6c606107",
+ "spirv-tools"),
+ GitRepo(
+ "https://github.com/KhronosGroup/glslang",
+ "ee21fc9081de8b54f5d1199325598c71eead1ef6",
+ "glslang"),
]
def parseArgs ():
--- /dev/null
+# cmake file for glslang
+
+if (NOT DE_DEFS)
+ message(FATAL_ERROR "Include Defs.cmake")
+endif ()
+
+if (EXISTS "${CMAKE_CURRENT_SOURCE_DIR}/src/SPIRV/GlslangToSpv.cpp")
+ set(DEFAULT_GLSLANG_SRC_PATH ${CMAKE_CURRENT_SOURCE_DIR}/src)
+else ()
+ set(DEFAULT_GLSLANG_SRC_PATH "../glslang")
+endif ()
+
+set(GLSLANG_SRC_PATH ${DEFAULT_GLSLANG_SRC_PATH} CACHE STRING "Path to glslang source tree")
+
+if (IS_ABSOLUTE ${GLSLANG_SRC_PATH})
+ set(GLSLANG_ABS_PATH ${GLSLANG_SRC_PATH})
+else ()
+ set(GLSLANG_ABS_PATH "${CMAKE_SOURCE_DIR}/${GLSLANG_SRC_PATH}")
+endif ()
+
+find_package(BISON)
+
+# \todo [2015-06-24 pyry] Full C++11 support on Android requires using CLang + libc++
+if (NOT BISON_FOUND AND DE_OS_IS_WIN32 AND EXISTS ${GLSLANG_ABS_PATH}/tools/bison.exe)
+ message(STATUS "Using pre-built bison executable")
+ set(BISON_EXECUTABLE ${GLSLANG_ABS_PATH}/tools/bison.exe)
+ set(BISON_FOUND ON)
+endif ()
+
+if (BISON_FOUND AND EXISTS ${GLSLANG_ABS_PATH}/glslang/GenericCodeGen/CodeGen.cpp AND NOT DE_OS_IS_ANDROID)
+ message(STATUS "glslang found; building with DEQP_SUPPORT_GLSLANG")
+
+ include_directories(
+ .
+ ${GLSLANG_ABS_PATH}
+ ${GLSLANG_ABS_PATH}/glslang
+ ${GLSLANG_ABS_PATH}/glslang/Include
+ ${GLSLANG_ABS_PATH}/glslang/Public
+ ${GLSLANG_ABS_PATH}/glslang/MachineIndependent
+ ${GLSLANG_ABS_PATH}/glslang/GenericCodeGen
+ ${GLSLANG_ABS_PATH}/OGLCompilersDLL
+ ${GLSLANG_ABS_PATH}/SPIRV
+ ${CMAKE_CURRENT_BINARY_DIR}
+ )
+
+ set(GLSLANG_SRCS
+ ${GLSLANG_ABS_PATH}/glslang/MachineIndependent/Constant.cpp
+ ${GLSLANG_ABS_PATH}/glslang/MachineIndependent/InfoSink.cpp
+ ${GLSLANG_ABS_PATH}/glslang/MachineIndependent/Initialize.cpp
+ ${GLSLANG_ABS_PATH}/glslang/MachineIndependent/IntermTraverse.cpp
+ ${GLSLANG_ABS_PATH}/glslang/MachineIndependent/Intermediate.cpp
+ ${GLSLANG_ABS_PATH}/glslang/MachineIndependent/ParseHelper.cpp
+ ${GLSLANG_ABS_PATH}/glslang/MachineIndependent/PoolAlloc.cpp
+ ${GLSLANG_ABS_PATH}/glslang/MachineIndependent/RemoveTree.cpp
+ ${GLSLANG_ABS_PATH}/glslang/MachineIndependent/Scan.cpp
+ ${GLSLANG_ABS_PATH}/glslang/MachineIndependent/ShaderLang.cpp
+ ${GLSLANG_ABS_PATH}/glslang/MachineIndependent/SymbolTable.cpp
+ ${GLSLANG_ABS_PATH}/glslang/MachineIndependent/Versions.cpp
+ ${GLSLANG_ABS_PATH}/glslang/MachineIndependent/intermOut.cpp
+ ${GLSLANG_ABS_PATH}/glslang/MachineIndependent/limits.cpp
+ ${GLSLANG_ABS_PATH}/glslang/MachineIndependent/linkValidate.cpp
+ ${GLSLANG_ABS_PATH}/glslang/MachineIndependent/parseConst.cpp
+ ${GLSLANG_ABS_PATH}/glslang/MachineIndependent/reflection.cpp
+ ${GLSLANG_ABS_PATH}/glslang/MachineIndependent/preprocessor/Pp.cpp
+ ${GLSLANG_ABS_PATH}/glslang/MachineIndependent/preprocessor/PpAtom.cpp
+ ${GLSLANG_ABS_PATH}/glslang/MachineIndependent/preprocessor/PpContext.cpp
+ ${GLSLANG_ABS_PATH}/glslang/MachineIndependent/preprocessor/PpMemory.cpp
+ ${GLSLANG_ABS_PATH}/glslang/MachineIndependent/preprocessor/PpScanner.cpp
+ ${GLSLANG_ABS_PATH}/glslang/MachineIndependent/preprocessor/PpSymbols.cpp
+ ${GLSLANG_ABS_PATH}/glslang/MachineIndependent/preprocessor/PpTokens.cpp
+ ${GLSLANG_ABS_PATH}/glslang/GenericCodeGen/CodeGen.cpp
+ ${GLSLANG_ABS_PATH}/glslang/GenericCodeGen/Link.cpp
+ ${GLSLANG_ABS_PATH}/OGLCompilersDLL/InitializeDll.cpp
+
+ ${GLSLANG_ABS_PATH}/SPIRV/GlslangToSpv.cpp
+ ${GLSLANG_ABS_PATH}/SPIRV/SpvBuilder.cpp
+ ${GLSLANG_ABS_PATH}/SPIRV/SPVRemapper.cpp
+ ${GLSLANG_ABS_PATH}/SPIRV/doc.cpp
+ ${GLSLANG_ABS_PATH}/SPIRV/disassemble.cpp
+
+ ${CMAKE_CURRENT_BINARY_DIR}/glslang_tab.cpp
+ ${CMAKE_CURRENT_BINARY_DIR}/glslang_tab.cpp.h
+
+ osinclude.h
+ osinclude.cpp
+ )
+
+ set(CMAKE_C_FLAGS ${DE_3RD_PARTY_C_FLAGS})
+ set(CMAKE_CXX_FLAGS ${DE_3RD_PARTY_CXX_FLAGS})
+
+ if (DE_COMPILER_IS_GCC OR DE_COMPILER_IS_CLANG)
+ set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++11")
+ endif ()
+
+ add_custom_command(OUTPUT ${CMAKE_CURRENT_BINARY_DIR}/glslang_tab.cpp ${CMAKE_CURRENT_BINARY_DIR}/glslang_tab.cpp.h
+ COMMAND ${BISON_EXECUTABLE} --defines=${CMAKE_CURRENT_BINARY_DIR}/glslang_tab.cpp.h -t MachineIndependent/glslang.y -o ${CMAKE_CURRENT_BINARY_DIR}/glslang_tab.cpp
+ MAIN_DEPENDENCY ${GLSLANG_ABS_PATH}/glslang/MachineIndependent/glslang.y
+ WORKING_DIRECTORY ${GLSLANG_ABS_PATH}/glslang)
+
+ add_library(glslang STATIC ${GLSLANG_SRCS})
+ target_link_libraries(glslang dethread ${ZLIB_LIBRARY})
+
+ set(GLSLANG_INCLUDE_PATH ${GLSLANG_ABS_PATH} PARENT_SCOPE)
+ set(GLSLANG_LIBRARY glslang PARENT_SCOPE)
+ set(DEQP_HAVE_GLSLANG ON PARENT_SCOPE)
+
+else ()
+ message(STATUS "glslang not found; GLSL to SPIR-V compilation not available")
+
+ set(DEQP_HAVE_GLSLANG OFF PARENT_SCOPE)
+
+endif ()
--- /dev/null
+/*-------------------------------------------------------------------------
+ * dEQP glslang integration
+ * ------------------------
+ *
+ * Copyright 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ *//*!
+ * \file
+ * \brief glslang OS interface.
+ *//*--------------------------------------------------------------------*/
+
+#include "osinclude.h"
+
+namespace glslang
+{
+
+// Thread-local
+
+OS_TLSIndex OS_AllocTLSIndex (void)
+{
+ return deThreadLocal_create();
+}
+
+bool OS_SetTLSValue (OS_TLSIndex nIndex, void* lpvValue)
+{
+ deThreadLocal_set(nIndex, lpvValue);
+ return true;
+}
+
+bool OS_FreeTLSIndex (OS_TLSIndex nIndex)
+{
+ deThreadLocal_destroy(nIndex);
+ return true;
+}
+
+void* OS_GetTLSValue (OS_TLSIndex nIndex)
+{
+ return deThreadLocal_get(nIndex);
+}
+
+// Global lock - not used
+
+void InitGlobalLock (void)
+{
+}
+
+void GetGlobalLock (void)
+{
+}
+
+void ReleaseGlobalLock (void)
+{
+}
+
+// Threading
+
+DE_STATIC_ASSERT(sizeof(void*) >= sizeof(deThread));
+
+void* OS_CreateThread (TThreadEntrypoint entry)
+{
+ return (void*)(deUintptr)deThread_create(entry, DE_NULL, DE_NULL);
+}
+
+void OS_WaitForAllThreads (void* threads, int numThreads)
+{
+ for (int ndx = 0; ndx < numThreads; ndx++)
+ {
+ const deThread thread = (deThread)(deUintptr)((void**)threads)[ndx];
+ deThread_join(thread);
+ deThread_destroy(thread);
+ }
+}
+
+void OS_Sleep (int milliseconds)
+{
+ deSleep(milliseconds);
+}
+
+void OS_DumpMemoryCounters (void)
+{
+ // Not used
+}
+
+} // glslang
--- /dev/null
+#ifndef _OSINCLUDE_H
+#define _OSINCLUDE_H
+/*-------------------------------------------------------------------------
+ * dEQP glslang integration
+ * ------------------------
+ *
+ * Copyright 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ *//*!
+ * \file
+ * \brief glslang OS interface.
+ *//*--------------------------------------------------------------------*/
+
+#include "deDefs.hpp"
+#include "deThreadLocal.h"
+#include "deThread.h"
+
+namespace glslang
+{
+
+// Thread-local
+
+typedef deThreadLocal OS_TLSIndex;
+
+#define OS_INVALID_TLS_INDEX DE_NULL
+
+OS_TLSIndex OS_AllocTLSIndex (void);
+bool OS_SetTLSValue (OS_TLSIndex nIndex, void* lpvValue);
+bool OS_FreeTLSIndex (OS_TLSIndex nIndex);
+
+void* OS_GetTLSValue (OS_TLSIndex nIndex);
+
+// Global lock?
+
+void InitGlobalLock (void);
+void GetGlobalLock (void);
+void ReleaseGlobalLock (void);
+
+// Threading
+
+typedef deThreadFunc TThreadEntrypoint;
+
+void* OS_CreateThread (TThreadEntrypoint);
+void OS_WaitForAllThreads (void* threads, int numThreads);
+
+void OS_Sleep (int milliseconds);
+
+void OS_DumpMemoryCounters (void);
+
+} // glslang
+
+#endif /* _OSINCLUDE_H */
--- /dev/null
+# cmake file for spirv-tools
+
+if (NOT DE_DEFS)
+ message(FATAL_ERROR "Include Defs.cmake")
+endif ()
+
+if (EXISTS "${CMAKE_CURRENT_SOURCE_DIR}/src/include/libspirv/libspirv.h")
+ set(DEFAULT_SPIRV_TOOLS_SRC_PATH ${CMAKE_CURRENT_SOURCE_DIR}/src)
+else ()
+ set(DEFAULT_SPIRV_TOOLS_SRC_PATH "../spirv-tools")
+endif ()
+
+set(SPIRV_TOOLS_SRC_PATH ${DEFAULT_SPIRV_TOOLS_SRC_PATH} CACHE STRING "Path to spirv-tools source tree")
+if (IS_ABSOLUTE ${SPIRV_TOOLS_SRC_PATH})
+ set(SPIRV_TOOLS_ABS_PATH ${SPIRV_TOOLS_SRC_PATH})
+else ()
+ set(SPIRV_TOOLS_ABS_PATH "${CMAKE_SOURCE_DIR}/${SPIRV_TOOLS_SRC_PATH}")
+endif ()
+
+if (EXISTS ${SPIRV_TOOLS_ABS_PATH}/source/opcode.cpp)
+ message(STATUS "spirv-tools found; building with DEQP_HAVE_SPIRV_TOOLS")
+ set(CMAKE_C_FLAGS ${DE_3RD_PARTY_C_FLAGS})
+ set(CMAKE_CXX_FLAGS ${DE_3RD_PARTY_CXX_FLAGS})
+
+ set(DEQP_HAVE_SPIRV_TOOLS ON PARENT_SCOPE)
+ set(SPIRV_SKIP_EXECUTABLES ON)
+ add_subdirectory(${SPIRV_TOOLS_ABS_PATH} spirv-tools)
+else ()
+ message(STATUS "spirv-tools not found; SPIR-V assembly not available")
+ set(DEQP_HAVE_SPIRV_TOOLS OFF PARENT_SCOPE)
+endif ()
--- /dev/null
+
+Copyright (c) 2015 Google Inc.
+
+Permission is hereby granted, free of charge, to any person obtaining a
+copy of this software and/or associated documentation files (the
+"Materials"), to deal in the Materials without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sublicense, and/or sell copies of the Materials, and to
+permit persons to whom the Materials are furnished to do so, subject to
+the following conditions:
+
+The above copyright notice(s) and this permission notice shall be
+included in all copies or substantial portions of the Materials.
+
+The Materials are Confidential Information as defined by the
+Khronos Membership Agreement until designated non-confidential by
+Khronos, at which point this condition clause shall be removed.
+
+THE MATERIALS ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+MATERIALS OR THE USE OR OTHER DEALINGS IN THE MATERIALS.
--- /dev/null
+Vulkan CTS README
+=================
+
+This document describes how to build and run Vulkan Conformance Test suite.
+
+Vulkan CTS is built on dEQP framework. General dEQP documentation is available
+at http://source.android.com/devices/graphics/testing.html
+
+
+Requirements
+------------
+
+Common:
+ * Git (for checking out sources)
+ * Python 2.7.x (all recent versions in 2.x should work, 3.x is not supported)
+ * CMake 2.8 or newer
+
+Win32:
+ * Visual Studio 2013 (glslang uses several C++11 features)
+
+Linux:
+ * Standard toolchain (make, gcc/clang)
+
+
+Building
+--------
+
+To build dEQP, you first need to download sources for zlib, libpng, glslang,
+and spirv-tools.
+
+To download sources, run:
+
+$ python external/fetch_sources.py
+
+You may need to re-run fetch_sources.py to update to the latest glslang and
+spirv-tools revisions occasionally.
+
+NOTE: glslang integration is not yet available on Android due to a toolchain
+bug, so pre-compiled SPIR-V binaries must be used. See instructions below.
+
+
+Running
+-------
+
+Win32:
+
+> cd builddir/external/vulkancts/modules/vulkan
+> Debug/deqp-vk.exe
+
+Linux:
+
+$ cd builddir/external/vulkancts/modules/vulkan
+$ ./deqp-vk
+
+Android:
+
+Using Cherry is recommended. Alternatively you can follow instructions at
+http://source.android.com/devices/graphics/run-tests.html
+
+
+Pre-compiling SPIR-V binaries
+-----------------------------
+
+For distribution, and platforms that don't support GLSL to SPIR-V compilation,
+SPIR-V binaries must be pre-built with the following command:
+
+$ python external/vulkancts/build_spirv_binaries.py
+
+Binaries will be written to external/vulkancts/data/vulkan/prebuilt/.
+
+Test modules (or in case of Android, the APK) must be re-built after building
+SPIR-V programs in order for the binaries to be available.
+
+
+Vulkan platform port
+--------------------
+
+Vulkan support from Platform implementation requires providing
+getVulkanPlatform() method in tcu::Platform class implementation.
+
+See framework/common/tcuPlatform.hpp and examples in
+framework/platform/win32/tcuWin32Platform.cpp and
+framework/platform/android/tcuAndroidPlatform.cpp.
+
+
+Null (dummy) driver
+-------------------
+
+For testing and development purposes it might be useful to be able to run
+tests on a dummy Vulkan implementation. One such implementation is provided in
+vkNullDriver.cpp. To use that, implement vk::Platform::createLibrary() with
+vk::createNullDriver().
+
+
+Cherry GUI
+----------
+
+Vulkan test module can be used with Cherry (GUI for test execution and
+analysis). Cherry is available at
+https://android.googlesource.com/platform/external/cherry. Please follow
+instructions in README to get started.
+
+To enable support for Vulkan tests, dEQP-VK module must be added to list of
+test packages.
+
+In cherry/testrunner.go, add the following line to the testPackageDescriptors list
+(line 608 in NewTestRunner function):
+
+{"dEQP-VK", "deqp-vk", "../external/vulkancts/modules/vulkan", dataDir + "dEQP-VK-cases.xml"},
+
+Before first launch, and every time test hierarchy has been modified, test
+case list must be refreshed by running:
+
+$ python scripts/build_caselists.py path/to/cherry/data
+
+Cherry must be restarted for the case list update to take effect.
--- /dev/null
+# -*- coding: utf-8 -*-
+
+#-------------------------------------------------------------------------
+# Vulkan CTS
+# ----------
+#
+# Copyright (c) 2015 Google Inc.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and/or associated documentation files (the
+# "Materials"), to deal in the Materials without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish,
+# distribute, sublicense, and/or sell copies of the Materials, and to
+# permit persons to whom the Materials are furnished to do so, subject to
+# the following conditions:
+#
+# The above copyright notice(s) and this permission notice shall be
+# included in all copies or substantial portions of the Materials.
+#
+# The Materials are Confidential Information as defined by the
+# Khronos Membership Agreement until designated non-confidential by
+# Khronos, at which point this condition clause shall be removed.
+#
+# THE MATERIALS ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+# MATERIALS OR THE USE OR OTHER DEALINGS IN THE MATERIALS.
+#
+#-------------------------------------------------------------------------
+
+import os
+import sys
+import string
+import argparse
+import tempfile
+import shutil
+import fnmatch
+
+sys.path.append(os.path.join(os.path.dirname(__file__), "..", "..", "scripts"))
+
+from build.common import *
+from build.config import *
+from build.build import *
+
+class Module:
+	# Descriptor for one test module: display name (e.g. "dEQP-VK"),
+	# build directory relative to the CMake "modules" directory, and the
+	# name of the produced binary.
+	def __init__ (self, name, dirName, binName):
+		self.name = name
+		self.dirName = dirName
+		self.binName = binName
+# Vulkan test module descriptor and the default build / destination
+# locations used by this script. DEQP_DIR comes from build.common.
+VULKAN_MODULE = Module("dEQP-VK", "../external/vulkancts/modules/vulkan", "deqp-vk")
+DEFAULT_BUILD_DIR = os.path.join(tempfile.gettempdir(), "spirv-binaries", "{targetName}-{buildType}")
+DEFAULT_TARGET = "null"
+DEFAULT_DST_DIR = os.path.join(DEQP_DIR, "external", "vulkancts", "data", "vulkan", "prebuilt")
+
+def getBuildConfig (buildPathPtrn, targetName, buildType):
+	# Create a BuildConfig for the given dEQP target and build type.
+	# buildPathPtrn may contain {targetName} and {buildType} placeholders
+	# that are substituted into the build directory path.
+	buildPath = buildPathPtrn.format(
+		targetName = targetName,
+		buildType = buildType)
+
+	return BuildConfig(buildPath, buildType, ["-DDEQP_TARGET=%s" % targetName])
+
+def cleanDstDir (dstPath):
+ binFiles = [f for f in os.listdir(dstPath) if os.path.isfile(os.path.join(dstPath, f)) and fnmatch.fnmatch(f, "*.spirv")]
+
+ for binFile in binFiles:
+ print "Removing %s" % os.path.join(dstPath, binFile)
+ os.remove(os.path.join(dstPath, binFile))
+
+def execBuildPrograms (buildCfg, generator, module, mode, dstPath):
+	# Run the vk-build-programs binary from the module's build directory.
+	# 'mode' is forwarded unchanged (expected values: "build" or "verify");
+	# binaries are written to, or verified against, dstPath.
+	workDir = os.path.join(buildCfg.getBuildDir(), "modules", module.dirName)
+
+	pushWorkingDir(workDir)
+
+	try:
+		binPath = generator.getBinaryPath(buildCfg.getBuildType(), os.path.join(".", "vk-build-programs"))
+		execute([binPath, "--mode", mode, "--dst-path", dstPath])
+	finally:
+		# Always restore the working directory, even if the build fails.
+		popWorkingDir()
+
+def parseArgs ():
+	# Parse command line options for the SPIR-V binary build script.
+	# Defaults are shown in --help via ArgumentDefaultsHelpFormatter.
+	parser = argparse.ArgumentParser(description = "Build SPIR-V programs",
+									 formatter_class=argparse.ArgumentDefaultsHelpFormatter)
+	parser.add_argument("-b",
+						"--build-dir",
+						dest="buildDir",
+						default=DEFAULT_BUILD_DIR,
+						help="Temporary build directory")
+	parser.add_argument("-t",
+						"--build-type",
+						dest="buildType",
+						default="Debug",
+						help="Build type")
+	parser.add_argument("-c",
+						"--deqp-target",
+						dest="targetName",
+						default=DEFAULT_TARGET,
+						help="dEQP build target")
+	# \note mode is not validated here; it is passed through to
+	#       vk-build-programs, which interprets "build" and "verify".
+	parser.add_argument("--mode",
+						dest="mode",
+						default="build",
+						help="Build mode (build or verify)")
+	parser.add_argument("-d",
+						"--dst-path",
+						dest="dstPath",
+						default=DEFAULT_DST_DIR,
+						help="Destination path")
+	return parser.parse_args()
+
+if __name__ == "__main__":
+	args = parseArgs()
+
+	generator = ANY_GENERATOR
+	buildCfg = getBuildConfig(args.buildDir, args.targetName, args.buildType)
+	module = VULKAN_MODULE
+
+	# Configure and build only the vk-build-programs target.
+	build(buildCfg, generator, ["vk-build-programs"])
+
+	# In "build" mode the destination directory is wiped of old .spirv
+	# files (or created if missing) before new binaries are written.
+	# In other modes (e.g. "verify") the directory is left untouched.
+	if args.mode == "build":
+		if os.path.exists(args.dstPath):
+			cleanDstDir(args.dstPath)
+		else:
+			os.makedirs(args.dstPath)
+
+	execBuildPrograms(buildCfg, generator, module, args.mode, args.dstPath)
--- /dev/null
+# vk - Vulkan utilities
+
+# Sources for the static vkutil library, shared by the Vulkan framework
+# and the test modules.
+set(VKUTIL_SRCS
+	vkApiVersion.cpp
+	vkApiVersion.hpp
+	vkBuilderUtil.cpp
+	vkBuilderUtil.hpp
+	vkDefs.cpp
+	vkDefs.hpp
+	vkRef.cpp
+	vkRef.hpp
+	vkRefUtil.cpp
+	vkRefUtil.hpp
+	vkPlatform.cpp
+	vkPlatform.hpp
+	vkPrograms.cpp
+	vkPrograms.hpp
+	vkStrUtil.cpp
+	vkStrUtil.hpp
+	vkQueryUtil.cpp
+	vkQueryUtil.hpp
+	vkMemUtil.cpp
+	vkMemUtil.hpp
+	vkDeviceUtil.cpp
+	vkDeviceUtil.hpp
+	vkGlslToSpirV.cpp
+	vkGlslToSpirV.hpp
+	vkSpirVAsm.hpp
+	vkSpirVAsm.cpp
+	vkSpirVProgram.hpp
+	vkSpirVProgram.cpp
+	vkBinaryRegistry.cpp
+	vkBinaryRegistry.hpp
+	vkNullDriver.cpp
+	vkNullDriver.hpp
+	vkImageUtil.cpp
+	vkImageUtil.hpp
+	)
+
+set(VKUTIL_LIBS
+	glutil
+	tcutil
+	)
+
+# Optional GLSL -> SPIR-V support via glslang.
+# \note GLSLANG_INCLUDE_PATH / GLSLANG_LIBRARY and DEQP_HAVE_GLSLANG are
+#       expected to be set by the glslang integration in the top-level build.
+if (DEQP_HAVE_GLSLANG)
+	include_directories(${GLSLANG_INCLUDE_PATH})
+	add_definitions(-DDEQP_HAVE_GLSLANG=1)
+
+	# \note Code interfacing with glslang needs to include third-party headers
+	#       that cause all sorts of warnings to appear.
+	if (DE_COMPILER_IS_GCC OR DE_COMPILER_IS_CLANG)
+		set_source_files_properties(
+			FILES vkGlslToSpirV.cpp
+			PROPERTIES COMPILE_FLAGS "${DE_3RD_PARTY_CXX_FLAGS} -std=c++11")
+	endif ()
+
+	set(VKUTIL_LIBS ${VKUTIL_LIBS} ${GLSLANG_LIBRARY})
+endif ()
+
+# Optional SPIR-V assembler support via spirv-tools.
+# \note spirv-tools_SOURCE_DIR is defined by add_subdirectory(external/spirv-tools).
+if(DEQP_HAVE_SPIRV_TOOLS)
+	include_directories(${spirv-tools_SOURCE_DIR}/include)
+	include_directories(${spirv-tools_SOURCE_DIR}/external/include)
+
+	add_definitions(-DDEQP_HAVE_SPIRV_TOOLS=1)
+	set(VKUTIL_LIBS ${VKUTIL_LIBS} SPIRV-TOOLS)
+endif()
+
+add_library(vkutil STATIC ${VKUTIL_SRCS})
+target_link_libraries(vkutil ${VKUTIL_LIBS})
--- /dev/null
+/*-------------------------------------------------------------------------
+ * Vulkan CTS Framework
+ * --------------------
+ *
+ * Copyright (c) 2015 Google Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and/or associated documentation files (the
+ * "Materials"), to deal in the Materials without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Materials, and to
+ * permit persons to whom the Materials are furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice(s) and this permission notice shall be
+ * included in all copies or substantial portions of the Materials.
+ *
+ * The Materials are Confidential Information as defined by the
+ * Khronos Membership Agreement until designated non-confidential by
+ * Khronos, at which point this condition clause shall be removed.
+ *
+ * THE MATERIALS ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * MATERIALS OR THE USE OR OTHER DEALINGS IN THE MATERIALS.
+ *
+ *//*!
+ * \file
+ * \brief Vulkan api version.
+ *//*--------------------------------------------------------------------*/
+
+#include "vkApiVersion.hpp"
+
+namespace vk
+{
+
+//! Split a packed Vulkan API version into its components.
+//! Layout (per the masks below): bits 31..22 major, 21..12 minor, 11..0 patch.
+ApiVersion unpackVersion (deUint32 version)
+{
+	return ApiVersion((version & 0xFFC00000) >> 22,
+					  (version & 0x003FF000) >> 12,
+					  version & 0x00000FFF);
+}
+
+//! Pack an ApiVersion into a single deUint32 (inverse of unpackVersion).
+deUint32 pack (const ApiVersion& version)
+{
+	// Each component must fit its bitfield: 10 bits major/minor, 12 bits patch.
+	DE_ASSERT((version.majorNum & ~0x3FF) == 0);
+	DE_ASSERT((version.minorNum & ~0x3FF) == 0);
+	DE_ASSERT((version.patchNum & ~0xFFF) == 0);
+
+	return (version.majorNum << 22) | (version.minorNum << 12) | version.patchNum;
+}
+
+} // vk
--- /dev/null
+#ifndef _VKAPIVERSION_HPP
+#define _VKAPIVERSION_HPP
+/*-------------------------------------------------------------------------
+ * Vulkan CTS Framework
+ * --------------------
+ *
+ * Copyright (c) 2015 Google Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and/or associated documentation files (the
+ * "Materials"), to deal in the Materials without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Materials, and to
+ * permit persons to whom the Materials are furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice(s) and this permission notice shall be
+ * included in all copies or substantial portions of the Materials.
+ *
+ * The Materials are Confidential Information as defined by the
+ * Khronos Membership Agreement until designated non-confidential by
+ * Khronos, at which point this condition clause shall be removed.
+ *
+ * THE MATERIALS ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * MATERIALS OR THE USE OR OTHER DEALINGS IN THE MATERIALS.
+ *
+ *//*!
+ * \file
+ * \brief Vulkan api version.
+ *//*--------------------------------------------------------------------*/
+
+#include "vkDefs.hpp"
+
+#include <ostream>
+
+namespace vk
+{
+
+//! Unpacked Vulkan API version (major.minor.patch).
+struct ApiVersion
+{
+	deUint32	majorNum;	//!< Major version number.
+	deUint32	minorNum;	//!< Minor version number.
+	deUint32	patchNum;	//!< Patch version number.
+
+	ApiVersion (deUint32	majorNum_,
+				deUint32	minorNum_,
+				deUint32	patchNum_)
+		: majorNum	(majorNum_)
+		, minorNum	(minorNum_)
+		, patchNum	(patchNum_)
+	{
+	}
+};
+
+//! Split a packed version value into components (see vkApiVersion.cpp for layout).
+ApiVersion		unpackVersion	(deUint32 version);
+//! Pack components into a single version value; inverse of unpackVersion().
+deUint32		pack			(const ApiVersion& version);
+
+//! Print version in "major.minor.patch" form.
+inline std::ostream& operator<< (std::ostream& s, const ApiVersion& version)
+{
+	return s << version.majorNum << "." << version.minorNum << "." << version.patchNum;
+}
+
+} // vk
--- /dev/null
+/* WARNING: This is auto-generated file. Do not modify, since changes will
+ * be lost! Modify the generating script instead.
+ */
+enum { VK_API_VERSION = VK_MAKE_VERSION(0, 138, 2) };
+enum { VK_MAX_PHYSICAL_DEVICE_NAME = 256 };
+enum { VK_MAX_EXTENSION_NAME = 256 };
+enum { VK_UUID_LENGTH = 16 };
+enum { VK_MAX_MEMORY_TYPES = 32 };
+enum { VK_MAX_MEMORY_HEAPS = 16 };
+enum { VK_MAX_DESCRIPTION = 256 };
+enum { VK_FALSE = 0 };
+enum { VK_TRUE = 1 };
+enum { VK_ATTACHMENT_UNUSED = (~0u) };
+
+VK_DEFINE_HANDLE (VkInstance, HANDLE_TYPE_INSTANCE);
+VK_DEFINE_HANDLE (VkPhysicalDevice, HANDLE_TYPE_PHYSICAL_DEVICE);
+VK_DEFINE_HANDLE (VkDevice, HANDLE_TYPE_DEVICE);
+VK_DEFINE_HANDLE (VkQueue, HANDLE_TYPE_QUEUE);
+VK_DEFINE_HANDLE (VkCmdBuffer, HANDLE_TYPE_CMD_BUFFER);
+VK_DEFINE_NONDISP_HANDLE (VkFence, HANDLE_TYPE_FENCE);
+VK_DEFINE_NONDISP_HANDLE (VkDeviceMemory, HANDLE_TYPE_DEVICE_MEMORY);
+VK_DEFINE_NONDISP_HANDLE (VkBuffer, HANDLE_TYPE_BUFFER);
+VK_DEFINE_NONDISP_HANDLE (VkImage, HANDLE_TYPE_IMAGE);
+VK_DEFINE_NONDISP_HANDLE (VkSemaphore, HANDLE_TYPE_SEMAPHORE);
+VK_DEFINE_NONDISP_HANDLE (VkEvent, HANDLE_TYPE_EVENT);
+VK_DEFINE_NONDISP_HANDLE (VkQueryPool, HANDLE_TYPE_QUERY_POOL);
+VK_DEFINE_NONDISP_HANDLE (VkBufferView, HANDLE_TYPE_BUFFER_VIEW);
+VK_DEFINE_NONDISP_HANDLE (VkImageView, HANDLE_TYPE_IMAGE_VIEW);
+VK_DEFINE_NONDISP_HANDLE (VkAttachmentView, HANDLE_TYPE_ATTACHMENT_VIEW);
+VK_DEFINE_NONDISP_HANDLE (VkShaderModule, HANDLE_TYPE_SHADER_MODULE);
+VK_DEFINE_NONDISP_HANDLE (VkShader, HANDLE_TYPE_SHADER);
+VK_DEFINE_NONDISP_HANDLE (VkPipelineCache, HANDLE_TYPE_PIPELINE_CACHE);
+VK_DEFINE_NONDISP_HANDLE (VkPipelineLayout, HANDLE_TYPE_PIPELINE_LAYOUT);
+VK_DEFINE_NONDISP_HANDLE (VkRenderPass, HANDLE_TYPE_RENDER_PASS);
+VK_DEFINE_NONDISP_HANDLE (VkPipeline, HANDLE_TYPE_PIPELINE);
+VK_DEFINE_NONDISP_HANDLE (VkDescriptorSetLayout, HANDLE_TYPE_DESCRIPTOR_SET_LAYOUT);
+VK_DEFINE_NONDISP_HANDLE (VkSampler, HANDLE_TYPE_SAMPLER);
+VK_DEFINE_NONDISP_HANDLE (VkDescriptorPool, HANDLE_TYPE_DESCRIPTOR_POOL);
+VK_DEFINE_NONDISP_HANDLE (VkDescriptorSet, HANDLE_TYPE_DESCRIPTOR_SET);
+VK_DEFINE_NONDISP_HANDLE (VkDynamicViewportState, HANDLE_TYPE_DYNAMIC_VIEWPORT_STATE);
+VK_DEFINE_NONDISP_HANDLE (VkDynamicRasterState, HANDLE_TYPE_DYNAMIC_RASTER_STATE);
+VK_DEFINE_NONDISP_HANDLE (VkDynamicColorBlendState, HANDLE_TYPE_DYNAMIC_COLOR_BLEND_STATE);
+VK_DEFINE_NONDISP_HANDLE (VkDynamicDepthStencilState, HANDLE_TYPE_DYNAMIC_DEPTH_STENCIL_STATE);
+VK_DEFINE_NONDISP_HANDLE (VkFramebuffer, HANDLE_TYPE_FRAMEBUFFER);
+VK_DEFINE_NONDISP_HANDLE (VkCmdPool, HANDLE_TYPE_CMD_POOL);
+
+enum VkResult
+{
+ VK_SUCCESS = 0,
+ VK_UNSUPPORTED = 1,
+ VK_NOT_READY = 2,
+ VK_TIMEOUT = 3,
+ VK_EVENT_SET = 4,
+ VK_EVENT_RESET = 5,
+ VK_INCOMPLETE = 6,
+ VK_ERROR_UNKNOWN = -1,
+ VK_ERROR_UNAVAILABLE = -2,
+ VK_ERROR_INITIALIZATION_FAILED = -3,
+ VK_ERROR_OUT_OF_HOST_MEMORY = -4,
+ VK_ERROR_OUT_OF_DEVICE_MEMORY = -5,
+ VK_ERROR_DEVICE_ALREADY_CREATED = -6,
+ VK_ERROR_DEVICE_LOST = -7,
+ VK_ERROR_INVALID_POINTER = -8,
+ VK_ERROR_INVALID_VALUE = -9,
+ VK_ERROR_INVALID_HANDLE = -10,
+ VK_ERROR_INVALID_ORDINAL = -11,
+ VK_ERROR_INVALID_MEMORY_SIZE = -12,
+ VK_ERROR_INVALID_EXTENSION = -13,
+ VK_ERROR_INVALID_FLAGS = -14,
+ VK_ERROR_INVALID_ALIGNMENT = -15,
+ VK_ERROR_INVALID_FORMAT = -16,
+ VK_ERROR_INVALID_IMAGE = -17,
+ VK_ERROR_INVALID_DESCRIPTOR_SET_DATA = -18,
+ VK_ERROR_INVALID_QUEUE_TYPE = -19,
+ VK_ERROR_UNSUPPORTED_SHADER_IL_VERSION = -20,
+ VK_ERROR_BAD_SHADER_CODE = -21,
+ VK_ERROR_BAD_PIPELINE_DATA = -22,
+ VK_ERROR_NOT_MAPPABLE = -23,
+ VK_ERROR_MEMORY_MAP_FAILED = -24,
+ VK_ERROR_MEMORY_UNMAP_FAILED = -25,
+ VK_ERROR_INCOMPATIBLE_DEVICE = -26,
+ VK_ERROR_INCOMPATIBLE_DRIVER = -27,
+ VK_ERROR_INCOMPLETE_COMMAND_BUFFER = -28,
+ VK_ERROR_BUILDING_COMMAND_BUFFER = -29,
+ VK_ERROR_MEMORY_NOT_BOUND = -30,
+ VK_ERROR_INCOMPATIBLE_QUEUE = -31,
+ VK_ERROR_INVALID_LAYER = -32,
+};
+
+enum VkStructureType
+{
+ VK_STRUCTURE_TYPE_APPLICATION_INFO = 0,
+ VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO = 1,
+ VK_STRUCTURE_TYPE_MEMORY_ALLOC_INFO = 2,
+ VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO = 3,
+ VK_STRUCTURE_TYPE_ATTACHMENT_VIEW_CREATE_INFO = 4,
+ VK_STRUCTURE_TYPE_SHADER_MODULE_CREATE_INFO = 5,
+ VK_STRUCTURE_TYPE_SHADER_CREATE_INFO = 6,
+ VK_STRUCTURE_TYPE_COMPUTE_PIPELINE_CREATE_INFO = 7,
+ VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO = 8,
+ VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO = 9,
+ VK_STRUCTURE_TYPE_DYNAMIC_VIEWPORT_STATE_CREATE_INFO = 10,
+ VK_STRUCTURE_TYPE_DYNAMIC_RASTER_STATE_CREATE_INFO = 11,
+ VK_STRUCTURE_TYPE_DYNAMIC_COLOR_BLEND_STATE_CREATE_INFO = 12,
+ VK_STRUCTURE_TYPE_DYNAMIC_DEPTH_STENCIL_STATE_CREATE_INFO = 13,
+ VK_STRUCTURE_TYPE_CMD_BUFFER_CREATE_INFO = 14,
+ VK_STRUCTURE_TYPE_EVENT_CREATE_INFO = 15,
+ VK_STRUCTURE_TYPE_FENCE_CREATE_INFO = 16,
+ VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO = 17,
+ VK_STRUCTURE_TYPE_QUERY_POOL_CREATE_INFO = 18,
+ VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO = 19,
+ VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO = 20,
+ VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO = 21,
+ VK_STRUCTURE_TYPE_PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO = 22,
+ VK_STRUCTURE_TYPE_PIPELINE_TESSELLATION_STATE_CREATE_INFO = 23,
+ VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_STATE_CREATE_INFO = 24,
+ VK_STRUCTURE_TYPE_PIPELINE_RASTER_STATE_CREATE_INFO = 25,
+ VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO = 26,
+ VK_STRUCTURE_TYPE_PIPELINE_COLOR_BLEND_STATE_CREATE_INFO = 27,
+ VK_STRUCTURE_TYPE_PIPELINE_DEPTH_STENCIL_STATE_CREATE_INFO = 28,
+ VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO = 29,
+ VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO = 30,
+ VK_STRUCTURE_TYPE_BUFFER_VIEW_CREATE_INFO = 31,
+ VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO = 32,
+ VK_STRUCTURE_TYPE_CMD_BUFFER_BEGIN_INFO = 33,
+ VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO = 34,
+ VK_STRUCTURE_TYPE_MEMORY_BARRIER = 35,
+ VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER = 36,
+ VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER = 37,
+ VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO = 38,
+ VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET = 39,
+ VK_STRUCTURE_TYPE_COPY_DESCRIPTOR_SET = 40,
+ VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO = 41,
+ VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO = 42,
+ VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE = 43,
+ VK_STRUCTURE_TYPE_PIPELINE_CACHE_CREATE_INFO = 44,
+ VK_STRUCTURE_TYPE_ATTACHMENT_DESCRIPTION = 45,
+ VK_STRUCTURE_TYPE_SUBPASS_DESCRIPTION = 46,
+ VK_STRUCTURE_TYPE_SUBPASS_DEPENDENCY = 47,
+ VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO = 48,
+ VK_STRUCTURE_TYPE_CMD_POOL_CREATE_INFO = 49,
+
+ VK_STRUCTURE_TYPE_LAST
+};
+
+enum VkSystemAllocType
+{
+ VK_SYSTEM_ALLOC_TYPE_API_OBJECT = 0,
+ VK_SYSTEM_ALLOC_TYPE_INTERNAL = 1,
+ VK_SYSTEM_ALLOC_TYPE_INTERNAL_TEMP = 2,
+ VK_SYSTEM_ALLOC_TYPE_INTERNAL_SHADER = 3,
+ VK_SYSTEM_ALLOC_TYPE_DEBUG = 4,
+
+ VK_SYSTEM_ALLOC_TYPE_LAST
+};
+
+enum VkFormat
+{
+ VK_FORMAT_UNDEFINED = 0,
+ VK_FORMAT_R4G4_UNORM = 1,
+ VK_FORMAT_R4G4_USCALED = 2,
+ VK_FORMAT_R4G4B4A4_UNORM = 3,
+ VK_FORMAT_R4G4B4A4_USCALED = 4,
+ VK_FORMAT_R5G6B5_UNORM = 5,
+ VK_FORMAT_R5G6B5_USCALED = 6,
+ VK_FORMAT_R5G5B5A1_UNORM = 7,
+ VK_FORMAT_R5G5B5A1_USCALED = 8,
+ VK_FORMAT_R8_UNORM = 9,
+ VK_FORMAT_R8_SNORM = 10,
+ VK_FORMAT_R8_USCALED = 11,
+ VK_FORMAT_R8_SSCALED = 12,
+ VK_FORMAT_R8_UINT = 13,
+ VK_FORMAT_R8_SINT = 14,
+ VK_FORMAT_R8_SRGB = 15,
+ VK_FORMAT_R8G8_UNORM = 16,
+ VK_FORMAT_R8G8_SNORM = 17,
+ VK_FORMAT_R8G8_USCALED = 18,
+ VK_FORMAT_R8G8_SSCALED = 19,
+ VK_FORMAT_R8G8_UINT = 20,
+ VK_FORMAT_R8G8_SINT = 21,
+ VK_FORMAT_R8G8_SRGB = 22,
+ VK_FORMAT_R8G8B8_UNORM = 23,
+ VK_FORMAT_R8G8B8_SNORM = 24,
+ VK_FORMAT_R8G8B8_USCALED = 25,
+ VK_FORMAT_R8G8B8_SSCALED = 26,
+ VK_FORMAT_R8G8B8_UINT = 27,
+ VK_FORMAT_R8G8B8_SINT = 28,
+ VK_FORMAT_R8G8B8_SRGB = 29,
+ VK_FORMAT_R8G8B8A8_UNORM = 30,
+ VK_FORMAT_R8G8B8A8_SNORM = 31,
+ VK_FORMAT_R8G8B8A8_USCALED = 32,
+ VK_FORMAT_R8G8B8A8_SSCALED = 33,
+ VK_FORMAT_R8G8B8A8_UINT = 34,
+ VK_FORMAT_R8G8B8A8_SINT = 35,
+ VK_FORMAT_R8G8B8A8_SRGB = 36,
+ VK_FORMAT_R10G10B10A2_UNORM = 37,
+ VK_FORMAT_R10G10B10A2_SNORM = 38,
+ VK_FORMAT_R10G10B10A2_USCALED = 39,
+ VK_FORMAT_R10G10B10A2_SSCALED = 40,
+ VK_FORMAT_R10G10B10A2_UINT = 41,
+ VK_FORMAT_R10G10B10A2_SINT = 42,
+ VK_FORMAT_R16_UNORM = 43,
+ VK_FORMAT_R16_SNORM = 44,
+ VK_FORMAT_R16_USCALED = 45,
+ VK_FORMAT_R16_SSCALED = 46,
+ VK_FORMAT_R16_UINT = 47,
+ VK_FORMAT_R16_SINT = 48,
+ VK_FORMAT_R16_SFLOAT = 49,
+ VK_FORMAT_R16G16_UNORM = 50,
+ VK_FORMAT_R16G16_SNORM = 51,
+ VK_FORMAT_R16G16_USCALED = 52,
+ VK_FORMAT_R16G16_SSCALED = 53,
+ VK_FORMAT_R16G16_UINT = 54,
+ VK_FORMAT_R16G16_SINT = 55,
+ VK_FORMAT_R16G16_SFLOAT = 56,
+ VK_FORMAT_R16G16B16_UNORM = 57,
+ VK_FORMAT_R16G16B16_SNORM = 58,
+ VK_FORMAT_R16G16B16_USCALED = 59,
+ VK_FORMAT_R16G16B16_SSCALED = 60,
+ VK_FORMAT_R16G16B16_UINT = 61,
+ VK_FORMAT_R16G16B16_SINT = 62,
+ VK_FORMAT_R16G16B16_SFLOAT = 63,
+ VK_FORMAT_R16G16B16A16_UNORM = 64,
+ VK_FORMAT_R16G16B16A16_SNORM = 65,
+ VK_FORMAT_R16G16B16A16_USCALED = 66,
+ VK_FORMAT_R16G16B16A16_SSCALED = 67,
+ VK_FORMAT_R16G16B16A16_UINT = 68,
+ VK_FORMAT_R16G16B16A16_SINT = 69,
+ VK_FORMAT_R16G16B16A16_SFLOAT = 70,
+ VK_FORMAT_R32_UINT = 71,
+ VK_FORMAT_R32_SINT = 72,
+ VK_FORMAT_R32_SFLOAT = 73,
+ VK_FORMAT_R32G32_UINT = 74,
+ VK_FORMAT_R32G32_SINT = 75,
+ VK_FORMAT_R32G32_SFLOAT = 76,
+ VK_FORMAT_R32G32B32_UINT = 77,
+ VK_FORMAT_R32G32B32_SINT = 78,
+ VK_FORMAT_R32G32B32_SFLOAT = 79,
+ VK_FORMAT_R32G32B32A32_UINT = 80,
+ VK_FORMAT_R32G32B32A32_SINT = 81,
+ VK_FORMAT_R32G32B32A32_SFLOAT = 82,
+ VK_FORMAT_R64_SFLOAT = 83,
+ VK_FORMAT_R64G64_SFLOAT = 84,
+ VK_FORMAT_R64G64B64_SFLOAT = 85,
+ VK_FORMAT_R64G64B64A64_SFLOAT = 86,
+ VK_FORMAT_R11G11B10_UFLOAT = 87,
+ VK_FORMAT_R9G9B9E5_UFLOAT = 88,
+ VK_FORMAT_D16_UNORM = 89,
+ VK_FORMAT_D24_UNORM = 90,
+ VK_FORMAT_D32_SFLOAT = 91,
+ VK_FORMAT_S8_UINT = 92,
+ VK_FORMAT_D16_UNORM_S8_UINT = 93,
+ VK_FORMAT_D24_UNORM_S8_UINT = 94,
+ VK_FORMAT_D32_SFLOAT_S8_UINT = 95,
+ VK_FORMAT_BC1_RGB_UNORM = 96,
+ VK_FORMAT_BC1_RGB_SRGB = 97,
+ VK_FORMAT_BC1_RGBA_UNORM = 98,
+ VK_FORMAT_BC1_RGBA_SRGB = 99,
+ VK_FORMAT_BC2_UNORM = 100,
+ VK_FORMAT_BC2_SRGB = 101,
+ VK_FORMAT_BC3_UNORM = 102,
+ VK_FORMAT_BC3_SRGB = 103,
+ VK_FORMAT_BC4_UNORM = 104,
+ VK_FORMAT_BC4_SNORM = 105,
+ VK_FORMAT_BC5_UNORM = 106,
+ VK_FORMAT_BC5_SNORM = 107,
+ VK_FORMAT_BC6H_UFLOAT = 108,
+ VK_FORMAT_BC6H_SFLOAT = 109,
+ VK_FORMAT_BC7_UNORM = 110,
+ VK_FORMAT_BC7_SRGB = 111,
+ VK_FORMAT_ETC2_R8G8B8_UNORM = 112,
+ VK_FORMAT_ETC2_R8G8B8_SRGB = 113,
+ VK_FORMAT_ETC2_R8G8B8A1_UNORM = 114,
+ VK_FORMAT_ETC2_R8G8B8A1_SRGB = 115,
+ VK_FORMAT_ETC2_R8G8B8A8_UNORM = 116,
+ VK_FORMAT_ETC2_R8G8B8A8_SRGB = 117,
+ VK_FORMAT_EAC_R11_UNORM = 118,
+ VK_FORMAT_EAC_R11_SNORM = 119,
+ VK_FORMAT_EAC_R11G11_UNORM = 120,
+ VK_FORMAT_EAC_R11G11_SNORM = 121,
+ VK_FORMAT_ASTC_4x4_UNORM = 122,
+ VK_FORMAT_ASTC_4x4_SRGB = 123,
+ VK_FORMAT_ASTC_5x4_UNORM = 124,
+ VK_FORMAT_ASTC_5x4_SRGB = 125,
+ VK_FORMAT_ASTC_5x5_UNORM = 126,
+ VK_FORMAT_ASTC_5x5_SRGB = 127,
+ VK_FORMAT_ASTC_6x5_UNORM = 128,
+ VK_FORMAT_ASTC_6x5_SRGB = 129,
+ VK_FORMAT_ASTC_6x6_UNORM = 130,
+ VK_FORMAT_ASTC_6x6_SRGB = 131,
+ VK_FORMAT_ASTC_8x5_UNORM = 132,
+ VK_FORMAT_ASTC_8x5_SRGB = 133,
+ VK_FORMAT_ASTC_8x6_UNORM = 134,
+ VK_FORMAT_ASTC_8x6_SRGB = 135,
+ VK_FORMAT_ASTC_8x8_UNORM = 136,
+ VK_FORMAT_ASTC_8x8_SRGB = 137,
+ VK_FORMAT_ASTC_10x5_UNORM = 138,
+ VK_FORMAT_ASTC_10x5_SRGB = 139,
+ VK_FORMAT_ASTC_10x6_UNORM = 140,
+ VK_FORMAT_ASTC_10x6_SRGB = 141,
+ VK_FORMAT_ASTC_10x8_UNORM = 142,
+ VK_FORMAT_ASTC_10x8_SRGB = 143,
+ VK_FORMAT_ASTC_10x10_UNORM = 144,
+ VK_FORMAT_ASTC_10x10_SRGB = 145,
+ VK_FORMAT_ASTC_12x10_UNORM = 146,
+ VK_FORMAT_ASTC_12x10_SRGB = 147,
+ VK_FORMAT_ASTC_12x12_UNORM = 148,
+ VK_FORMAT_ASTC_12x12_SRGB = 149,
+ VK_FORMAT_B4G4R4A4_UNORM = 150,
+ VK_FORMAT_B5G5R5A1_UNORM = 151,
+ VK_FORMAT_B5G6R5_UNORM = 152,
+ VK_FORMAT_B5G6R5_USCALED = 153,
+ VK_FORMAT_B8G8R8_UNORM = 154,
+ VK_FORMAT_B8G8R8_SNORM = 155,
+ VK_FORMAT_B8G8R8_USCALED = 156,
+ VK_FORMAT_B8G8R8_SSCALED = 157,
+ VK_FORMAT_B8G8R8_UINT = 158,
+ VK_FORMAT_B8G8R8_SINT = 159,
+ VK_FORMAT_B8G8R8_SRGB = 160,
+ VK_FORMAT_B8G8R8A8_UNORM = 161,
+ VK_FORMAT_B8G8R8A8_SNORM = 162,
+ VK_FORMAT_B8G8R8A8_USCALED = 163,
+ VK_FORMAT_B8G8R8A8_SSCALED = 164,
+ VK_FORMAT_B8G8R8A8_UINT = 165,
+ VK_FORMAT_B8G8R8A8_SINT = 166,
+ VK_FORMAT_B8G8R8A8_SRGB = 167,
+ VK_FORMAT_B10G10R10A2_UNORM = 168,
+ VK_FORMAT_B10G10R10A2_SNORM = 169,
+ VK_FORMAT_B10G10R10A2_USCALED = 170,
+ VK_FORMAT_B10G10R10A2_SSCALED = 171,
+ VK_FORMAT_B10G10R10A2_UINT = 172,
+ VK_FORMAT_B10G10R10A2_SINT = 173,
+
+ VK_FORMAT_LAST
+};
+
+enum VkImageType
+{
+ VK_IMAGE_TYPE_1D = 0,
+ VK_IMAGE_TYPE_2D = 1,
+ VK_IMAGE_TYPE_3D = 2,
+
+ VK_IMAGE_TYPE_LAST
+};
+
+enum VkImageTiling
+{
+ VK_IMAGE_TILING_LINEAR = 0,
+ VK_IMAGE_TILING_OPTIMAL = 1,
+
+ VK_IMAGE_TILING_LAST
+};
+
+enum VkPhysicalDeviceType
+{
+ VK_PHYSICAL_DEVICE_TYPE_OTHER = 0,
+ VK_PHYSICAL_DEVICE_TYPE_INTEGRATED_GPU = 1,
+ VK_PHYSICAL_DEVICE_TYPE_DISCRETE_GPU = 2,
+ VK_PHYSICAL_DEVICE_TYPE_VIRTUAL_GPU = 3,
+ VK_PHYSICAL_DEVICE_TYPE_CPU = 4,
+
+ VK_PHYSICAL_DEVICE_TYPE_LAST
+};
+
+enum VkImageAspect
+{
+ VK_IMAGE_ASPECT_COLOR = 0,
+ VK_IMAGE_ASPECT_DEPTH = 1,
+ VK_IMAGE_ASPECT_STENCIL = 2,
+ VK_IMAGE_ASPECT_METADATA = 3,
+
+ VK_IMAGE_ASPECT_LAST
+};
+
+enum VkQueryType
+{
+ VK_QUERY_TYPE_OCCLUSION = 0,
+ VK_QUERY_TYPE_PIPELINE_STATISTICS = 1,
+
+ VK_QUERY_TYPE_LAST
+};
+
+enum VkSharingMode
+{
+ VK_SHARING_MODE_EXCLUSIVE = 0,
+ VK_SHARING_MODE_CONCURRENT = 1,
+
+ VK_SHARING_MODE_LAST
+};
+
+enum VkBufferViewType
+{
+ VK_BUFFER_VIEW_TYPE_RAW = 0,
+ VK_BUFFER_VIEW_TYPE_FORMATTED = 1,
+
+ VK_BUFFER_VIEW_TYPE_LAST
+};
+
+enum VkImageViewType
+{
+ VK_IMAGE_VIEW_TYPE_1D = 0,
+ VK_IMAGE_VIEW_TYPE_2D = 1,
+ VK_IMAGE_VIEW_TYPE_3D = 2,
+ VK_IMAGE_VIEW_TYPE_CUBE = 3,
+ VK_IMAGE_VIEW_TYPE_1D_ARRAY = 4,
+ VK_IMAGE_VIEW_TYPE_2D_ARRAY = 5,
+ VK_IMAGE_VIEW_TYPE_CUBE_ARRAY = 6,
+
+ VK_IMAGE_VIEW_TYPE_LAST
+};
+
+enum VkChannelSwizzle
+{
+ VK_CHANNEL_SWIZZLE_ZERO = 0,
+ VK_CHANNEL_SWIZZLE_ONE = 1,
+ VK_CHANNEL_SWIZZLE_R = 2,
+ VK_CHANNEL_SWIZZLE_G = 3,
+ VK_CHANNEL_SWIZZLE_B = 4,
+ VK_CHANNEL_SWIZZLE_A = 5,
+
+ VK_CHANNEL_SWIZZLE_LAST
+};
+
+enum VkShaderStage
+{
+ VK_SHADER_STAGE_VERTEX = 0,
+ VK_SHADER_STAGE_TESS_CONTROL = 1,
+ VK_SHADER_STAGE_TESS_EVALUATION = 2,
+ VK_SHADER_STAGE_GEOMETRY = 3,
+ VK_SHADER_STAGE_FRAGMENT = 4,
+ VK_SHADER_STAGE_COMPUTE = 5,
+
+ VK_SHADER_STAGE_LAST
+};
+
+enum VkVertexInputStepRate
+{
+ VK_VERTEX_INPUT_STEP_RATE_VERTEX = 0,
+ VK_VERTEX_INPUT_STEP_RATE_INSTANCE = 1,
+
+ VK_VERTEX_INPUT_STEP_RATE_LAST
+};
+
+enum VkPrimitiveTopology
+{
+ VK_PRIMITIVE_TOPOLOGY_POINT_LIST = 0,
+ VK_PRIMITIVE_TOPOLOGY_LINE_LIST = 1,
+ VK_PRIMITIVE_TOPOLOGY_LINE_STRIP = 2,
+ VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST = 3,
+ VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP = 4,
+ VK_PRIMITIVE_TOPOLOGY_TRIANGLE_FAN = 5,
+ VK_PRIMITIVE_TOPOLOGY_LINE_LIST_ADJ = 6,
+ VK_PRIMITIVE_TOPOLOGY_LINE_STRIP_ADJ = 7,
+ VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST_ADJ = 8,
+ VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP_ADJ = 9,
+ VK_PRIMITIVE_TOPOLOGY_PATCH = 10,
+
+ VK_PRIMITIVE_TOPOLOGY_LAST
+};
+
+enum VkFillMode
+{
+ VK_FILL_MODE_POINTS = 0,
+ VK_FILL_MODE_WIREFRAME = 1,
+ VK_FILL_MODE_SOLID = 2,
+
+ VK_FILL_MODE_LAST
+};
+
+enum VkCullMode
+{
+ VK_CULL_MODE_NONE = 0,
+ VK_CULL_MODE_FRONT = 1,
+ VK_CULL_MODE_BACK = 2,
+ VK_CULL_MODE_FRONT_AND_BACK = 3,
+
+ VK_CULL_MODE_LAST
+};
+
+enum VkFrontFace
+{
+ VK_FRONT_FACE_CCW = 0,
+ VK_FRONT_FACE_CW = 1,
+
+ VK_FRONT_FACE_LAST
+};
+
+enum VkCompareOp
+{
+ VK_COMPARE_OP_NEVER = 0,
+ VK_COMPARE_OP_LESS = 1,
+ VK_COMPARE_OP_EQUAL = 2,
+ VK_COMPARE_OP_LESS_EQUAL = 3,
+ VK_COMPARE_OP_GREATER = 4,
+ VK_COMPARE_OP_NOT_EQUAL = 5,
+ VK_COMPARE_OP_GREATER_EQUAL = 6,
+ VK_COMPARE_OP_ALWAYS = 7,
+
+ VK_COMPARE_OP_LAST
+};
+
+enum VkStencilOp
+{
+ VK_STENCIL_OP_KEEP = 0,
+ VK_STENCIL_OP_ZERO = 1,
+ VK_STENCIL_OP_REPLACE = 2,
+ VK_STENCIL_OP_INC_CLAMP = 3,
+ VK_STENCIL_OP_DEC_CLAMP = 4,
+ VK_STENCIL_OP_INVERT = 5,
+ VK_STENCIL_OP_INC_WRAP = 6,
+ VK_STENCIL_OP_DEC_WRAP = 7,
+
+ VK_STENCIL_OP_LAST
+};
+
+enum VkLogicOp
+{
+ VK_LOGIC_OP_CLEAR = 0,
+ VK_LOGIC_OP_AND = 1,
+ VK_LOGIC_OP_AND_REVERSE = 2,
+ VK_LOGIC_OP_COPY = 3,
+ VK_LOGIC_OP_AND_INVERTED = 4,
+ VK_LOGIC_OP_NOOP = 5,
+ VK_LOGIC_OP_XOR = 6,
+ VK_LOGIC_OP_OR = 7,
+ VK_LOGIC_OP_NOR = 8,
+ VK_LOGIC_OP_EQUIV = 9,
+ VK_LOGIC_OP_INVERT = 10,
+ VK_LOGIC_OP_OR_REVERSE = 11,
+ VK_LOGIC_OP_COPY_INVERTED = 12,
+ VK_LOGIC_OP_OR_INVERTED = 13,
+ VK_LOGIC_OP_NAND = 14,
+ VK_LOGIC_OP_SET = 15,
+
+ VK_LOGIC_OP_LAST
+};
+
+enum VkBlend
+{
+ VK_BLEND_ZERO = 0,
+ VK_BLEND_ONE = 1,
+ VK_BLEND_SRC_COLOR = 2,
+ VK_BLEND_ONE_MINUS_SRC_COLOR = 3,
+ VK_BLEND_DEST_COLOR = 4,
+ VK_BLEND_ONE_MINUS_DEST_COLOR = 5,
+ VK_BLEND_SRC_ALPHA = 6,
+ VK_BLEND_ONE_MINUS_SRC_ALPHA = 7,
+ VK_BLEND_DEST_ALPHA = 8,
+ VK_BLEND_ONE_MINUS_DEST_ALPHA = 9,
+ VK_BLEND_CONSTANT_COLOR = 10,
+ VK_BLEND_ONE_MINUS_CONSTANT_COLOR = 11,
+ VK_BLEND_CONSTANT_ALPHA = 12,
+ VK_BLEND_ONE_MINUS_CONSTANT_ALPHA = 13,
+ VK_BLEND_SRC_ALPHA_SATURATE = 14,
+ VK_BLEND_SRC1_COLOR = 15,
+ VK_BLEND_ONE_MINUS_SRC1_COLOR = 16,
+ VK_BLEND_SRC1_ALPHA = 17,
+ VK_BLEND_ONE_MINUS_SRC1_ALPHA = 18,
+
+ VK_BLEND_LAST
+};
+
+enum VkBlendOp
+{
+ VK_BLEND_OP_ADD = 0,
+ VK_BLEND_OP_SUBTRACT = 1,
+ VK_BLEND_OP_REVERSE_SUBTRACT = 2,
+ VK_BLEND_OP_MIN = 3,
+ VK_BLEND_OP_MAX = 4,
+
+ VK_BLEND_OP_LAST
+};
+
+enum VkTexFilter
+{
+ VK_TEX_FILTER_NEAREST = 0,
+ VK_TEX_FILTER_LINEAR = 1,
+
+ VK_TEX_FILTER_LAST
+};
+
+enum VkTexMipmapMode
+{
+ VK_TEX_MIPMAP_MODE_BASE = 0,
+ VK_TEX_MIPMAP_MODE_NEAREST = 1,
+ VK_TEX_MIPMAP_MODE_LINEAR = 2,
+
+ VK_TEX_MIPMAP_MODE_LAST
+};
+
+enum VkTexAddress
+{
+ VK_TEX_ADDRESS_WRAP = 0,
+ VK_TEX_ADDRESS_MIRROR = 1,
+ VK_TEX_ADDRESS_CLAMP = 2,
+ VK_TEX_ADDRESS_MIRROR_ONCE = 3,
+ VK_TEX_ADDRESS_CLAMP_BORDER = 4,
+
+ VK_TEX_ADDRESS_LAST
+};
+
+enum VkBorderColor
+{
+ VK_BORDER_COLOR_FLOAT_TRANSPARENT_BLACK = 0,
+ VK_BORDER_COLOR_INT_TRANSPARENT_BLACK = 1,
+ VK_BORDER_COLOR_FLOAT_OPAQUE_BLACK = 2,
+ VK_BORDER_COLOR_INT_OPAQUE_BLACK = 3,
+ VK_BORDER_COLOR_FLOAT_OPAQUE_WHITE = 4,
+ VK_BORDER_COLOR_INT_OPAQUE_WHITE = 5,
+
+ VK_BORDER_COLOR_LAST
+};
+
+enum VkDescriptorType
+{
+ VK_DESCRIPTOR_TYPE_SAMPLER = 0,
+ VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER = 1,
+ VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE = 2,
+ VK_DESCRIPTOR_TYPE_STORAGE_IMAGE = 3,
+ VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER = 4,
+ VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER = 5,
+ VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER = 6,
+ VK_DESCRIPTOR_TYPE_STORAGE_BUFFER = 7,
+ VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC = 8,
+ VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC = 9,
+ VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT = 10,
+
+ VK_DESCRIPTOR_TYPE_LAST
+};
+
+enum VkDescriptorPoolUsage
+{
+ VK_DESCRIPTOR_POOL_USAGE_ONE_SHOT = 0,
+ VK_DESCRIPTOR_POOL_USAGE_DYNAMIC = 1,
+
+ VK_DESCRIPTOR_POOL_USAGE_LAST
+};
+
+enum VkDescriptorSetUsage
+{
+ VK_DESCRIPTOR_SET_USAGE_ONE_SHOT = 0,
+ VK_DESCRIPTOR_SET_USAGE_STATIC = 1,
+
+ VK_DESCRIPTOR_SET_USAGE_LAST
+};
+
+enum VkImageLayout
+{
+ VK_IMAGE_LAYOUT_UNDEFINED = 0,
+ VK_IMAGE_LAYOUT_GENERAL = 1,
+ VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL = 2,
+ VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL = 3,
+ VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL = 4,
+ VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL = 5,
+ VK_IMAGE_LAYOUT_TRANSFER_SOURCE_OPTIMAL = 6,
+ VK_IMAGE_LAYOUT_TRANSFER_DESTINATION_OPTIMAL = 7,
+
+ VK_IMAGE_LAYOUT_LAST
+};
+
+enum VkAttachmentLoadOp
+{
+ VK_ATTACHMENT_LOAD_OP_LOAD = 0,
+ VK_ATTACHMENT_LOAD_OP_CLEAR = 1,
+ VK_ATTACHMENT_LOAD_OP_DONT_CARE = 2,
+
+ VK_ATTACHMENT_LOAD_OP_LAST
+};
+
+enum VkAttachmentStoreOp
+{
+ VK_ATTACHMENT_STORE_OP_STORE = 0,
+ VK_ATTACHMENT_STORE_OP_DONT_CARE = 1,
+
+ VK_ATTACHMENT_STORE_OP_LAST
+};
+
+enum VkPipelineBindPoint
+{
+ VK_PIPELINE_BIND_POINT_COMPUTE = 0,
+ VK_PIPELINE_BIND_POINT_GRAPHICS = 1,
+
+ VK_PIPELINE_BIND_POINT_LAST
+};
+
+enum VkCmdBufferLevel
+{
+ VK_CMD_BUFFER_LEVEL_PRIMARY = 0,
+ VK_CMD_BUFFER_LEVEL_SECONDARY = 1,
+
+ VK_CMD_BUFFER_LEVEL_LAST
+};
+
+enum VkIndexType
+{
+ VK_INDEX_TYPE_UINT16 = 0,
+ VK_INDEX_TYPE_UINT32 = 1,
+
+ VK_INDEX_TYPE_LAST
+};
+
+// Plain enumerations: each ends with a VK_*_LAST sentinel giving the number
+// of defined values (useful for iterating all values in tests).
+enum VkTimestampType
+{
+	VK_TIMESTAMP_TYPE_TOP = 0,
+	VK_TIMESTAMP_TYPE_BOTTOM = 1,
+
+	VK_TIMESTAMP_TYPE_LAST
+};
+
+enum VkRenderPassContents
+{
+	VK_RENDER_PASS_CONTENTS_INLINE = 0,
+	VK_RENDER_PASS_CONTENTS_SECONDARY_CMD_BUFFERS = 1,
+
+	VK_RENDER_PASS_CONTENTS_LAST
+};
+
+// Bitmask types: each VkXxxFlagBits enum defines the individual bit values and
+// is paired with a deUint32 typedef VkXxxFlags that holds OR'd combinations.
+enum VkFormatFeatureFlagBits
+{
+	VK_FORMAT_FEATURE_SAMPLED_IMAGE_BIT = 0x00000001,
+	VK_FORMAT_FEATURE_STORAGE_IMAGE_BIT = 0x00000002,
+	VK_FORMAT_FEATURE_STORAGE_IMAGE_ATOMIC_BIT = 0x00000004,
+	VK_FORMAT_FEATURE_UNIFORM_TEXEL_BUFFER_BIT = 0x00000008,
+	VK_FORMAT_FEATURE_STORAGE_TEXEL_BUFFER_BIT = 0x00000010,
+	VK_FORMAT_FEATURE_STORAGE_TEXEL_BUFFER_ATOMIC_BIT = 0x00000020,
+	VK_FORMAT_FEATURE_VERTEX_BUFFER_BIT = 0x00000040,
+	VK_FORMAT_FEATURE_COLOR_ATTACHMENT_BIT = 0x00000080,
+	VK_FORMAT_FEATURE_COLOR_ATTACHMENT_BLEND_BIT = 0x00000100,
+	VK_FORMAT_FEATURE_DEPTH_STENCIL_ATTACHMENT_BIT = 0x00000200,
+	VK_FORMAT_FEATURE_CONVERSION_BIT = 0x00000400,
+};
+typedef deUint32 VkFormatFeatureFlags;
+
+enum VkImageUsageFlagBits
+{
+	VK_IMAGE_USAGE_GENERAL = 0,
+	VK_IMAGE_USAGE_TRANSFER_SOURCE_BIT = 0x00000001,
+	VK_IMAGE_USAGE_TRANSFER_DESTINATION_BIT = 0x00000002,
+	VK_IMAGE_USAGE_SAMPLED_BIT = 0x00000004,
+	VK_IMAGE_USAGE_STORAGE_BIT = 0x00000008,
+	VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT = 0x00000010,
+	VK_IMAGE_USAGE_DEPTH_STENCIL_BIT = 0x00000020,
+	VK_IMAGE_USAGE_TRANSIENT_ATTACHMENT_BIT = 0x00000040,
+	VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT = 0x00000080,
+};
+typedef deUint32 VkImageUsageFlags;
+
+enum VkQueueFlagBits
+{
+	VK_QUEUE_GRAPHICS_BIT = 0x00000001,
+	VK_QUEUE_COMPUTE_BIT = 0x00000002,
+	VK_QUEUE_DMA_BIT = 0x00000004,
+	VK_QUEUE_SPARSE_MEMMGR_BIT = 0x00000008,
+	VK_QUEUE_EXTENDED_BIT = 0x40000000,
+};
+typedef deUint32 VkQueueFlags;
+
+enum VkMemoryPropertyFlagBits
+{
+	VK_MEMORY_PROPERTY_DEVICE_ONLY = 0,
+	VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT = 0x00000001,
+	VK_MEMORY_PROPERTY_HOST_NON_COHERENT_BIT = 0x00000002,
+	VK_MEMORY_PROPERTY_HOST_UNCACHED_BIT = 0x00000004,
+	VK_MEMORY_PROPERTY_HOST_WRITE_COMBINED_BIT = 0x00000008,
+	VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT = 0x00000010,
+};
+typedef deUint32 VkMemoryPropertyFlags;
+
+enum VkMemoryHeapFlagBits
+{
+	VK_MEMORY_HEAP_HOST_LOCAL = 0x00000001,
+};
+typedef deUint32 VkMemoryHeapFlags;
+
+enum VkDeviceCreateFlagBits
+{
+	VK_DEVICE_CREATE_VALIDATION_BIT = 0x00000001,
+};
+typedef deUint32 VkDeviceCreateFlags;
+
+enum VkSparseImageFormatFlagBits
+{
+	VK_SPARSE_IMAGE_FMT_SINGLE_MIPTAIL_BIT = 0x00000001,
+	VK_SPARSE_IMAGE_FMT_ALIGNED_MIP_SIZE_BIT = 0x00000002,
+	VK_SPARSE_IMAGE_FMT_NONSTD_BLOCK_SIZE_BIT = 0x00000004,
+};
+typedef deUint32 VkSparseImageFormatFlags;
+
+enum VkSparseMemoryBindFlagBits
+{
+	VK_SPARSE_MEMORY_BIND_REPLICATE_64KIB_BLOCK_BIT = 0x00000001,
+};
+typedef deUint32 VkSparseMemoryBindFlags;
+
+enum VkFenceCreateFlagBits
+{
+	VK_FENCE_CREATE_SIGNALED_BIT = 0x00000001,
+};
+typedef deUint32 VkFenceCreateFlags;
+
+enum VkQueryPipelineStatisticFlagBits
+{
+	VK_QUERY_PIPELINE_STATISTIC_IA_VERTICES_BIT = 0x00000001,
+	VK_QUERY_PIPELINE_STATISTIC_IA_PRIMITIVES_BIT = 0x00000002,
+	VK_QUERY_PIPELINE_STATISTIC_VS_INVOCATIONS_BIT = 0x00000004,
+	VK_QUERY_PIPELINE_STATISTIC_GS_INVOCATIONS_BIT = 0x00000008,
+	VK_QUERY_PIPELINE_STATISTIC_GS_PRIMITIVES_BIT = 0x00000010,
+	VK_QUERY_PIPELINE_STATISTIC_C_INVOCATIONS_BIT = 0x00000020,
+	VK_QUERY_PIPELINE_STATISTIC_C_PRIMITIVES_BIT = 0x00000040,
+	VK_QUERY_PIPELINE_STATISTIC_FS_INVOCATIONS_BIT = 0x00000080,
+	VK_QUERY_PIPELINE_STATISTIC_TCS_PATCHES_BIT = 0x00000100,
+	VK_QUERY_PIPELINE_STATISTIC_TES_INVOCATIONS_BIT = 0x00000200,
+	VK_QUERY_PIPELINE_STATISTIC_CS_INVOCATIONS_BIT = 0x00000400,
+};
+typedef deUint32 VkQueryPipelineStatisticFlags;
+
+enum VkQueryResultFlagBits
+{
+	VK_QUERY_RESULT_DEFAULT = 0,
+	VK_QUERY_RESULT_64_BIT = 0x00000001,
+	VK_QUERY_RESULT_WAIT_BIT = 0x00000002,
+	VK_QUERY_RESULT_WITH_AVAILABILITY_BIT = 0x00000004,
+	VK_QUERY_RESULT_PARTIAL_BIT = 0x00000008,
+};
+typedef deUint32 VkQueryResultFlags;
+
+enum VkBufferUsageFlagBits
+{
+	VK_BUFFER_USAGE_GENERAL = 0,
+	VK_BUFFER_USAGE_TRANSFER_SOURCE_BIT = 0x00000001,
+	VK_BUFFER_USAGE_TRANSFER_DESTINATION_BIT = 0x00000002,
+	VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT = 0x00000004,
+	VK_BUFFER_USAGE_STORAGE_TEXEL_BUFFER_BIT = 0x00000008,
+	VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT = 0x00000010,
+	VK_BUFFER_USAGE_STORAGE_BUFFER_BIT = 0x00000020,
+	VK_BUFFER_USAGE_INDEX_BUFFER_BIT = 0x00000040,
+	VK_BUFFER_USAGE_VERTEX_BUFFER_BIT = 0x00000080,
+	VK_BUFFER_USAGE_INDIRECT_BUFFER_BIT = 0x00000100,
+};
+typedef deUint32 VkBufferUsageFlags;
+
+enum VkBufferCreateFlagBits
+{
+	VK_BUFFER_CREATE_SPARSE_BIT = 0x00000001,
+	VK_BUFFER_CREATE_SPARSE_RESIDENCY_BIT = 0x00000002,
+	VK_BUFFER_CREATE_SPARSE_ALIASED_BIT = 0x00000004,
+};
+typedef deUint32 VkBufferCreateFlags;
+
+enum VkImageCreateFlagBits
+{
+	VK_IMAGE_CREATE_SPARSE_BIT = 0x00000001,
+	VK_IMAGE_CREATE_SPARSE_RESIDENCY_BIT = 0x00000002,
+	VK_IMAGE_CREATE_SPARSE_ALIASED_BIT = 0x00000004,
+	VK_IMAGE_CREATE_INVARIANT_DATA_BIT = 0x00000008,
+	VK_IMAGE_CREATE_MUTABLE_FORMAT_BIT = 0x00000010,
+	VK_IMAGE_CREATE_CUBE_COMPATIBLE_BIT = 0x00000020,
+};
+typedef deUint32 VkImageCreateFlags;
+
+enum VkAttachmentViewCreateFlagBits
+{
+	VK_ATTACHMENT_VIEW_CREATE_READ_ONLY_DEPTH_BIT = 0x00000001,
+	VK_ATTACHMENT_VIEW_CREATE_READ_ONLY_STENCIL_BIT = 0x00000002,
+};
+typedef deUint32 VkAttachmentViewCreateFlags;
+
+enum VkChannelFlagBits
+{
+	VK_CHANNEL_R_BIT = 0x00000001,
+	VK_CHANNEL_G_BIT = 0x00000002,
+	VK_CHANNEL_B_BIT = 0x00000004,
+	VK_CHANNEL_A_BIT = 0x00000008,
+};
+typedef deUint32 VkChannelFlags;
+
+enum VkPipelineCreateFlagBits
+{
+	VK_PIPELINE_CREATE_DISABLE_OPTIMIZATION_BIT = 0x00000001,
+	VK_PIPELINE_CREATE_ALLOW_DERIVATIVES_BIT = 0x00000002,
+	VK_PIPELINE_CREATE_DERIVATIVE_BIT = 0x00000004,
+};
+typedef deUint32 VkPipelineCreateFlags;
+
+enum VkShaderStageFlagBits
+{
+	VK_SHADER_STAGE_VERTEX_BIT = 0x00000001,
+	VK_SHADER_STAGE_TESS_CONTROL_BIT = 0x00000002,
+	VK_SHADER_STAGE_TESS_EVALUATION_BIT = 0x00000004,
+	VK_SHADER_STAGE_GEOMETRY_BIT = 0x00000008,
+	VK_SHADER_STAGE_FRAGMENT_BIT = 0x00000010,
+	VK_SHADER_STAGE_COMPUTE_BIT = 0x00000020,
+	VK_SHADER_STAGE_ALL = 0x7FFFFFFF,
+};
+typedef deUint32 VkShaderStageFlags;
+
+enum VkSubpassDescriptionFlagBits
+{
+	VK_SUBPASS_DESCRIPTION_NO_OVERDRAW_BIT = 0x00000001,
+};
+typedef deUint32 VkSubpassDescriptionFlags;
+
+enum VkPipelineStageFlagBits
+{
+	VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT = 0x00000001,
+	VK_PIPELINE_STAGE_DRAW_INDIRECT_BIT = 0x00000002,
+	VK_PIPELINE_STAGE_VERTEX_INPUT_BIT = 0x00000004,
+	VK_PIPELINE_STAGE_VERTEX_SHADER_BIT = 0x00000008,
+	VK_PIPELINE_STAGE_TESS_CONTROL_SHADER_BIT = 0x00000010,
+	VK_PIPELINE_STAGE_TESS_EVALUATION_SHADER_BIT = 0x00000020,
+	VK_PIPELINE_STAGE_GEOMETRY_SHADER_BIT = 0x00000040,
+	VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT = 0x00000080,
+	VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT = 0x00000100,
+	VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT = 0x00000200,
+	VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT = 0x00000400,
+	VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT = 0x00000800,
+	VK_PIPELINE_STAGE_TRANSFER_BIT = 0x00001000,
+	VK_PIPELINE_STAGE_TRANSITION_BIT = 0x00002000,
+	VK_PIPELINE_STAGE_HOST_BIT = 0x00004000,
+	// Convenience masks combining subsets of the single-stage bits above.
+	VK_PIPELINE_STAGE_ALL_GRAPHICS = 0x000007FF,
+	VK_PIPELINE_STAGE_ALL_GPU_COMMANDS = 0x00003FFF,
+};
+typedef deUint32 VkPipelineStageFlags;
+
+enum VkMemoryOutputFlagBits
+{
+	VK_MEMORY_OUTPUT_HOST_WRITE_BIT = 0x00000001,
+	VK_MEMORY_OUTPUT_SHADER_WRITE_BIT = 0x00000002,
+	VK_MEMORY_OUTPUT_COLOR_ATTACHMENT_BIT = 0x00000004,
+	VK_MEMORY_OUTPUT_DEPTH_STENCIL_ATTACHMENT_BIT = 0x00000008,
+	VK_MEMORY_OUTPUT_TRANSFER_BIT = 0x00000010,
+};
+typedef deUint32 VkMemoryOutputFlags;
+
+enum VkMemoryInputFlagBits
+{
+	VK_MEMORY_INPUT_HOST_READ_BIT = 0x00000001,
+	VK_MEMORY_INPUT_INDIRECT_COMMAND_BIT = 0x00000002,
+	VK_MEMORY_INPUT_INDEX_FETCH_BIT = 0x00000004,
+	VK_MEMORY_INPUT_VERTEX_ATTRIBUTE_FETCH_BIT = 0x00000008,
+	VK_MEMORY_INPUT_UNIFORM_READ_BIT = 0x00000010,
+	VK_MEMORY_INPUT_SHADER_READ_BIT = 0x00000020,
+	VK_MEMORY_INPUT_COLOR_ATTACHMENT_BIT = 0x00000040,
+	VK_MEMORY_INPUT_DEPTH_STENCIL_ATTACHMENT_BIT = 0x00000080,
+	VK_MEMORY_INPUT_INPUT_ATTACHMENT_BIT = 0x00000100,
+	VK_MEMORY_INPUT_TRANSFER_BIT = 0x00000200,
+};
+typedef deUint32 VkMemoryInputFlags;
+
+enum VkCmdPoolCreateFlagBits
+{
+	VK_CMD_POOL_CREATE_TRANSIENT_BIT = 0x00000001,
+	VK_CMD_POOL_CREATE_RESET_COMMAND_BUFFER_BIT = 0x00000002,
+};
+typedef deUint32 VkCmdPoolCreateFlags;
+
+enum VkCmdPoolResetFlagBits
+{
+	VK_CMD_POOL_RESET_RELEASE_RESOURCES = 0x00000001,
+};
+typedef deUint32 VkCmdPoolResetFlags;
+
+enum VkCmdBufferOptimizeFlagBits
+{
+	VK_CMD_BUFFER_OPTIMIZE_SMALL_BATCH_BIT = 0x00000001,
+	VK_CMD_BUFFER_OPTIMIZE_PIPELINE_SWITCH_BIT = 0x00000002,
+	VK_CMD_BUFFER_OPTIMIZE_ONE_TIME_SUBMIT_BIT = 0x00000004,
+	VK_CMD_BUFFER_OPTIMIZE_DESCRIPTOR_SET_SWITCH_BIT = 0x00000008,
+	VK_CMD_BUFFER_OPTIMIZE_NO_SIMULTANEOUS_USE_BIT = 0x00000010,
+};
+typedef deUint32 VkCmdBufferOptimizeFlags;
+
+enum VkCmdBufferResetFlagBits
+{
+	VK_CMD_BUFFER_RESET_RELEASE_RESOURCES = 0x00000001,
+};
+typedef deUint32 VkCmdBufferResetFlags;
+
+enum VkImageAspectFlagBits
+{
+	VK_IMAGE_ASPECT_COLOR_BIT = 0x00000001,
+	VK_IMAGE_ASPECT_DEPTH_BIT = 0x00000002,
+	VK_IMAGE_ASPECT_STENCIL_BIT = 0x00000004,
+	VK_IMAGE_ASPECT_METADATA_BIT = 0x00000008,
+};
+typedef deUint32 VkImageAspectFlags;
+
+enum VkQueryControlFlagBits
+{
+	VK_QUERY_CONTROL_CONSERVATIVE_BIT = 0x00000001,
+};
+typedef deUint32 VkQueryControlFlags;
+
+
--- /dev/null
+/*-------------------------------------------------------------------------
+ * Vulkan CTS Framework
+ * --------------------
+ *
+ * Copyright (c) 2015 Google Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and/or associated documentation files (the
+ * "Materials"), to deal in the Materials without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Materials, and to
+ * permit persons to whom the Materials are furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice(s) and this permission notice shall be
+ * included in all copies or substantial portions of the Materials.
+ *
+ * The Materials are Confidential Information as defined by the
+ * Khronos Membership Agreement until designated non-confidential by
+ * Khronos, at which point this condition clause shall be removed.
+ *
+ * THE MATERIALS ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * MATERIALS OR THE USE OR OTHER DEALINGS IN THE MATERIALS.
+ *
+ *//*!
+ * \file
+ * \brief Program binary registry.
+ *//*--------------------------------------------------------------------*/
+
+#include "vkBinaryRegistry.hpp"
+#include "tcuResource.hpp"
+#include "deFilePath.hpp"
+#include "deStringUtil.hpp"
+
+#include <fstream>
+#include <sstream>
+
+namespace vk
+{
+
+using std::string;
+using std::vector;
+
+//! Maps a program identifier to an archive-relative path: the dot-separated
+//! test case path becomes a directory hierarchy and the program name becomes
+//! the file name, e.g. ("a.b.c", "prog") -> "a/b/c/prog.spv".
+static string getProgramPath (const ProgramIdentifier& id)
+{
+	const vector<string> casePathComps = de::splitString(id.testCasePath, '.');
+	std::ostringstream path;
+
+	// Each case path component becomes one directory level.
+	for (size_t compNdx = 0; compNdx < casePathComps.size(); compNdx++)
+		path << casePathComps[compNdx] << '/';
+
+	path << id.programName << ".spv";
+
+	return path.str();
+}
+
+// BinaryRegistryWriter
+
+//! \param dstPath Filesystem directory under which program binaries are stored.
+BinaryRegistryWriter::BinaryRegistryWriter (const std::string& dstPath)
+	: m_dstPath(dstPath)
+{
+}
+
+BinaryRegistryWriter::~BinaryRegistryWriter (void)
+{
+}
+
+//! Writes the given program binary to <dstPath>/<program path>, creating any
+//! missing intermediate directories first.
+//! \throws tcu::Exception if the destination file can not be opened.
+void BinaryRegistryWriter::storeProgram (const ProgramIdentifier& id, const ProgramBinary& binary)
+{
+	const de::FilePath fullPath = de::FilePath::join(m_dstPath, getProgramPath(id));
+
+	// Ensure the per-case directory hierarchy exists before opening the file.
+	if (!de::FilePath(fullPath.getDirName()).exists())
+		de::createDirectoryAndParents(fullPath.getDirName().c_str());
+
+	{
+		// Binary mode: the SPIR-V payload must be written byte-exact.
+		std::ofstream out (fullPath.getPath(), std::ios_base::binary);
+
+		if (!out.is_open() || !out.good())
+			throw tcu::Exception("Failed to open " + string(fullPath.getPath()));
+
+		out.write((const char*)binary.getBinary(), binary.getSize());
+		out.close();
+	}
+}
+
+// BinaryRegistryReader
+
+//! \param archive Archive the binaries are read from (reference stored, must outlive reader).
+//! \param srcPath Path prefix inside the archive under which binaries live.
+BinaryRegistryReader::BinaryRegistryReader (const tcu::Archive& archive, const std::string& srcPath)
+	: m_archive	(archive)
+	, m_srcPath	(srcPath)
+{
+}
+
+BinaryRegistryReader::~BinaryRegistryReader (void)
+{
+}
+
+//! Loads a stored program binary from the archive.
+//! \return Newly allocated ProgramBinary (caller takes ownership), always
+//!         tagged as PROGRAM_FORMAT_SPIRV.
+//! \throws ProgramNotFoundException if the archive has no resource for the id.
+ProgramBinary* BinaryRegistryReader::loadProgram (const ProgramIdentifier& id) const
+{
+	const string fullPath = de::FilePath::join(m_srcPath, getProgramPath(id)).getPath();
+
+	try
+	{
+		de::UniquePtr<tcu::Resource>	progRes		(m_archive.getResource(fullPath.c_str()));
+		const int						progSize	= progRes->getSize();
+		vector<deUint8>					bytes		(progSize);
+
+		// Guard against zero-sized resources; &bytes[0] below requires a non-empty vector.
+		TCU_CHECK_INTERNAL(!bytes.empty());
+
+		progRes->read(&bytes[0], progSize);
+
+		return new ProgramBinary(vk::PROGRAM_FORMAT_SPIRV, bytes.size(), &bytes[0]);
+	}
+	catch (const tcu::ResourceError&)
+	{
+		// Translate a generic resource failure into the registry-specific error.
+		throw ProgramNotFoundException(id);
+	}
+}
+
+
+} // vk
--- /dev/null
+#ifndef _VKBINARYREGISTRY_HPP
+#define _VKBINARYREGISTRY_HPP
+/*-------------------------------------------------------------------------
+ * Vulkan CTS Framework
+ * --------------------
+ *
+ * Copyright (c) 2015 Google Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and/or associated documentation files (the
+ * "Materials"), to deal in the Materials without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Materials, and to
+ * permit persons to whom the Materials are furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice(s) and this permission notice shall be
+ * included in all copies or substantial portions of the Materials.
+ *
+ * The Materials are Confidential Information as defined by the
+ * Khronos Membership Agreement until designated non-confidential by
+ * Khronos, at which point this condition clause shall be removed.
+ *
+ * THE MATERIALS ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * MATERIALS OR THE USE OR OTHER DEALINGS IN THE MATERIALS.
+ *
+ *//*!
+ * \file
+ * \brief Program binary registry.
+ *//*--------------------------------------------------------------------*/
+
+#include "vkDefs.hpp"
+#include "vkPrograms.hpp"
+
+namespace tcu
+{
+class Archive;
+}
+
+namespace vk
+{
+
+//! Uniquely identifies a program binary in the registry: the dot-separated
+//! test case path plus the per-case program name.
+struct ProgramIdentifier
+{
+	std::string		testCasePath;	//!< Full test case path, components separated by '.'
+	std::string		programName;	//!< Program name, unique within the test case
+
+	ProgramIdentifier (const std::string& testCasePath_, const std::string& programName_)
+		: testCasePath	(testCasePath_)
+		, programName	(programName_)
+	{
+	}
+};
+
+//! Thrown by BinaryRegistryReader::loadProgram() when no binary exists for
+//! the requested identifier.
+class ProgramNotFoundException : public tcu::ResourceError
+{
+public:
+	ProgramNotFoundException (const ProgramIdentifier& id)
+		: tcu::ResourceError("Program " + id.testCasePath + " / '" + id.programName + "' not found")
+	{
+	}
+};
+
+//! Reads pre-built program binaries from a tcu::Archive under a path prefix.
+class BinaryRegistryReader
+{
+public:
+						BinaryRegistryReader	(const tcu::Archive& archive, const std::string& srcPath);
+						~BinaryRegistryReader	(void);
+
+	//! Returns a caller-owned binary; throws ProgramNotFoundException if missing.
+	ProgramBinary*		loadProgram				(const ProgramIdentifier& id) const;
+
+private:
+	const tcu::Archive&	m_archive;	//!< Not owned; must outlive this reader.
+	const std::string	m_srcPath;
+};
+
+//! Stores program binaries to a filesystem directory tree mirroring the
+//! test case hierarchy.
+class BinaryRegistryWriter
+{
+public:
+						BinaryRegistryWriter	(const std::string& dstPath);
+						~BinaryRegistryWriter	(void);
+
+	void				storeProgram			(const ProgramIdentifier& id, const ProgramBinary& binary);
+
+private:
+	const std::string	m_dstPath;	//!< Destination root directory.
+};
+
+} // vk
+
+#endif // _VKBINARYREGISTRY_HPP
--- /dev/null
+/*-------------------------------------------------------------------------
+ * Vulkan CTS Framework
+ * --------------------
+ *
+ * Copyright (c) 2015 Google Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and/or associated documentation files (the
+ * "Materials"), to deal in the Materials without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Materials, and to
+ * permit persons to whom the Materials are furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice(s) and this permission notice shall be
+ * included in all copies or substantial portions of the Materials.
+ *
+ * The Materials are Confidential Information as defined by the
+ * Khronos Membership Agreement until designated non-confidential by
+ * Khronos, at which point this condition clause shall be removed.
+ *
+ * THE MATERIALS ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * MATERIALS OR THE USE OR OTHER DEALINGS IN THE MATERIALS.
+ *
+ *//*!
+ * \file
+ * \brief Vulkan object builder utilities.
+ *//*--------------------------------------------------------------------*/
+
+#include "vkBuilderUtil.hpp"
+
+#include "vkRefUtil.hpp"
+
+namespace vk
+{
+
+// DescriptorSetLayoutBuilder
+
+//! Starts with an empty binding list; populate with addBinding() and friends.
+DescriptorSetLayoutBuilder::DescriptorSetLayoutBuilder (void)
+{
+}
+
+//! Appends one descriptor binding entry to the layout being built.
+//! Returns *this to allow call chaining.
+//! \note pImmutableSamplers is stored as-is; the pointed-to samplers must
+//!       stay alive until build() is called.
+DescriptorSetLayoutBuilder& DescriptorSetLayoutBuilder::addBinding (VkDescriptorType	descriptorType,
+																	deUint32			arraySize,
+																	VkShaderStageFlags	stageFlags,
+																	const VkSampler*	pImmutableSamplers)
+{
+	const VkDescriptorSetLayoutBinding binding =
+	{
+		descriptorType,		//!< descriptorType
+		arraySize,			//!< arraySize
+		stageFlags,			//!< stageFlags
+		pImmutableSamplers,	//!< pImmutableSamplers
+	};
+	m_bindings.push_back(binding);
+	return *this;
+}
+
+//! Creates a VkDescriptorSetLayout from the accumulated bindings.
+//! An empty builder produces a layout with zero bindings (null pBinding).
+Move<VkDescriptorSetLayout> DescriptorSetLayoutBuilder::build (const DeviceInterface& vk, VkDevice device) const
+{
+	// &vec[0] is undefined for an empty vector, hence the DE_NULL fallback.
+	const VkDescriptorSetLayoutBinding* const	bindingPtr	= (m_bindings.empty()) ? (DE_NULL) : (&m_bindings[0]);
+	const VkDescriptorSetLayoutCreateInfo		createInfo	=
+	{
+		VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO,
+		DE_NULL,
+		(deUint32)m_bindings.size(),	//!< count
+		bindingPtr,						//!< pBinding
+	};
+
+	return createDescriptorSetLayout(vk, device, &createInfo);
+}
+
+// DescriptorPoolBuilder
+
+//! Starts with an empty per-type descriptor count list.
+DescriptorPoolBuilder::DescriptorPoolBuilder (void)
+{
+}
+
+//! Requests space for numDescriptors descriptors of the given type.
+//! Repeated calls with the same type accumulate into a single entry;
+//! a zero count is a no-op. Returns *this for chaining.
+DescriptorPoolBuilder& DescriptorPoolBuilder::addType (VkDescriptorType type, deUint32 numDescriptors)
+{
+	if (numDescriptors == 0u)
+	{
+		// nothing to do
+		return *this;
+	}
+	else
+	{
+		// Linear scan is fine: the number of distinct descriptor types is small.
+		for (size_t ndx = 0; ndx < m_counts.size(); ++ndx)
+		{
+			if (m_counts[ndx].type == type)
+			{
+				// augment existing requirement
+				m_counts[ndx].count += numDescriptors;
+				return *this;
+			}
+		}
+
+		{
+			// new requirement
+			const VkDescriptorTypeCount typeCount =
+			{
+				type,			//!< type
+				numDescriptors,	//!< count
+			};
+
+			m_counts.push_back(typeCount);
+			return *this;
+		}
+	}
+}
+
+//! Creates a VkDescriptorPool sized by the accumulated per-type counts.
+//! \param poolUsage Passed straight through to createDescriptorPool.
+//! \param maxSets   Maximum number of descriptor sets allocatable from the pool.
+Move<VkDescriptorPool> DescriptorPoolBuilder::build (const DeviceInterface& vk, VkDevice device, VkDescriptorPoolUsage poolUsage, deUint32 maxSets) const
+{
+	// &vec[0] is undefined for an empty vector, hence the DE_NULL fallback.
+	const VkDescriptorTypeCount* const	typeCountPtr	= (m_counts.empty()) ? (DE_NULL) : (&m_counts[0]);
+	const VkDescriptorPoolCreateInfo	createInfo		=
+	{
+		VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO,
+		DE_NULL,
+		(deUint32)m_counts.size(),		//!< count
+		typeCountPtr,					//!< pTypeCount
+	};
+
+	return createDescriptorPool(vk, device, poolUsage, maxSets, &createInfo);
+}
+
+// DescriptorSetUpdateBuilder
+
+//! Starts with empty write and copy lists; submit them with update().
+DescriptorSetUpdateBuilder::DescriptorSetUpdateBuilder (void)
+{
+}
+
+//! Queues a descriptor write; nothing is sent to the device until update().
+//! \note pDescriptors is stored as-is and must remain valid until update()
+//!       is called. Returns *this for chaining.
+DescriptorSetUpdateBuilder& DescriptorSetUpdateBuilder::write (VkDescriptorSet			destSet,
+															   deUint32					destBinding,
+															   deUint32					destArrayElement,
+															   deUint32					count,
+															   VkDescriptorType			descriptorType,
+															   const VkDescriptorInfo*	pDescriptors)
+{
+	const VkWriteDescriptorSet writeParams =
+	{
+		VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,
+		DE_NULL,
+		destSet,			//!< destSet
+		destBinding,		//!< destBinding
+		destArrayElement,	//!< destArrayElement
+		count,				//!< count
+		descriptorType,		//!< descriptorType
+		pDescriptors,		//!< pDescriptors
+	};
+	m_writes.push_back(writeParams);
+	return *this;
+}
+
+//! Queues a descriptor-to-descriptor copy; executed by update().
+//! Returns *this for chaining.
+DescriptorSetUpdateBuilder& DescriptorSetUpdateBuilder::copy (VkDescriptorSet	srcSet,
+															  deUint32			srcBinding,
+															  deUint32			srcArrayElement,
+															  VkDescriptorSet	destSet,
+															  deUint32			destBinding,
+															  deUint32			destArrayElement,
+															  deUint32			count)
+{
+	const VkCopyDescriptorSet copyParams =
+	{
+		VK_STRUCTURE_TYPE_COPY_DESCRIPTOR_SET,
+		DE_NULL,
+		srcSet,				//!< srcSet
+		srcBinding,			//!< srcBinding
+		srcArrayElement,	//!< srcArrayElement
+		destSet,			//!< destSet
+		destBinding,		//!< destBinding
+		destArrayElement,	//!< destArrayElement
+		count,				//!< count
+	};
+	m_copies.push_back(copyParams);
+	return *this;
+}
+
+//! Submits all queued writes and copies in a single updateDescriptorSets call.
+//! The queued lists are not cleared, so the builder can be re-submitted.
+void DescriptorSetUpdateBuilder::update (const DeviceInterface& vk, VkDevice device) const
+{
+	// &vec[0] is undefined for empty vectors, hence the DE_NULL fallbacks.
+	const VkWriteDescriptorSet* const	writePtr	= (m_writes.empty()) ? (DE_NULL) : (&m_writes[0]);
+	const VkCopyDescriptorSet* const	copyPtr		= (m_copies.empty()) ? (DE_NULL) : (&m_copies[0]);
+
+	// NOTE(review): relies on updateDescriptorSets returning VkResult in this
+	// API revision; revisit if/when the entry point becomes void.
+	VK_CHECK(vk.updateDescriptorSets(device, (deUint32)m_writes.size(), writePtr, (deUint32)m_copies.size(), copyPtr));
+}
+
+} // vk
--- /dev/null
+#ifndef _VKBUILDERUTIL_HPP
+#define _VKBUILDERUTIL_HPP
+/*-------------------------------------------------------------------------
+ * Vulkan CTS Framework
+ * --------------------
+ *
+ * Copyright (c) 2015 Google Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and/or associated documentation files (the
+ * "Materials"), to deal in the Materials without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Materials, and to
+ * permit persons to whom the Materials are furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice(s) and this permission notice shall be
+ * included in all copies or substantial portions of the Materials.
+ *
+ * The Materials are Confidential Information as defined by the
+ * Khronos Membership Agreement until designated non-confidential by
+ * Khronos, at which point this condition clause shall be removed.
+ *
+ * THE MATERIALS ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * MATERIALS OR THE USE OR OTHER DEALINGS IN THE MATERIALS.
+ *
+ *//*!
+ * \file
+ * \brief Vulkan object builder utilities.
+ *//*--------------------------------------------------------------------*/
+
+#include "vkDefs.hpp"
+#include "vkRef.hpp"
+
+#include <vector>
+
+namespace vk
+{
+
+//! Fluent builder for VkDescriptorSetLayout objects: chain addBinding()
+//! calls (or the helpers below) and finish with build(). Non-copyable.
+class DescriptorSetLayoutBuilder
+{
+public:
+												DescriptorSetLayoutBuilder	(void);
+
+	DescriptorSetLayoutBuilder&					addBinding					(VkDescriptorType	descriptorType,
+																			 deUint32			arraySize,
+																			 VkShaderStageFlags	stageFlags,
+																			 const VkSampler*	pImmutableSamplers);
+
+	Move<VkDescriptorSetLayout>					build						(const DeviceInterface& vk, VkDevice device) const;
+
+	// helpers
+
+	//! Single descriptor, no immutable sampler.
+	inline DescriptorSetLayoutBuilder&			addSingleBinding			(VkDescriptorType	descriptorType,
+																			 VkShaderStageFlags	stageFlags)
+	{
+		return addBinding(descriptorType, 1u, stageFlags, (VkSampler*)DE_NULL);
+	}
+	//! Descriptor array, no immutable samplers.
+	inline DescriptorSetLayoutBuilder&			addArrayBinding				(VkDescriptorType	descriptorType,
+																			 deUint32			arraySize,
+																			 VkShaderStageFlags	stageFlags)
+	{
+		return addBinding(descriptorType, arraySize, stageFlags, (VkSampler*)DE_NULL);
+	}
+	//! Single descriptor with an immutable sampler.
+	inline DescriptorSetLayoutBuilder&			addSingleSamplerBinding		(VkDescriptorType	descriptorType,
+																			 VkShaderStageFlags	stageFlags,
+																			 const VkSampler*	immutableSampler)	//!< \note: Using pointer to sampler to clarify that handle is not
+																													//!<        copied and argument lifetime is expected to cover build()
+																													//!<        call.
+	{
+		return addBinding(descriptorType, 1u, stageFlags, immutableSampler);
+	}
+	//! Descriptor array with immutable samplers (one per element).
+	inline DescriptorSetLayoutBuilder&			addArraySamplerBinding		(VkDescriptorType	descriptorType,
+																			 deUint32			arraySize,
+																			 VkShaderStageFlags	stageFlags,
+																			 const VkSampler*	pImmutableSamplers)
+	{
+		return addBinding(descriptorType, arraySize, stageFlags, pImmutableSamplers);
+	}
+
+private:
+												DescriptorSetLayoutBuilder	(const DescriptorSetLayoutBuilder&); // delete
+	DescriptorSetLayoutBuilder&					operator=					(const DescriptorSetLayoutBuilder&); // delete
+
+	std::vector<VkDescriptorSetLayoutBinding>	m_bindings;
+};
+
+//! Fluent builder for VkDescriptorPool objects: chain addType() calls
+//! (duplicate types accumulate) and finish with build(). Non-copyable.
+class DescriptorPoolBuilder
+{
+public:
+										DescriptorPoolBuilder	(void);
+
+	DescriptorPoolBuilder&				addType					(VkDescriptorType type, deUint32 numDescriptors = 1u);
+	Move<VkDescriptorPool>				build					(const DeviceInterface& vk, VkDevice device, VkDescriptorPoolUsage poolUsage, deUint32 maxSets) const;
+
+private:
+										DescriptorPoolBuilder	(const DescriptorPoolBuilder&); // delete
+	DescriptorPoolBuilder&				operator=				(const DescriptorPoolBuilder&); // delete
+
+	std::vector<VkDescriptorTypeCount>	m_counts;
+};
+
+//! Fluent builder that batches descriptor set writes and copies and submits
+//! them all with a single update() call. Non-copyable.
+class DescriptorSetUpdateBuilder
+{
+public:
+	//! Immutable (binding, arrayElement) pair addressing a descriptor slot;
+	//! construct via the named factory methods.
+	class Location
+	{
+	public:
+		static inline Location	binding				(deUint32 binding_)
+		{
+			return Location(binding_, 0u);
+		}
+		static inline Location	bindingArrayElement	(deUint32 binding_, deUint32 arrayElement)
+		{
+			return Location(binding_, arrayElement);
+		}
+
+	private:
+		// \note private to force use of factory methods that have more descriptive names
+		inline					Location			(deUint32 binding_, deUint32 arrayElement)
+			: m_binding			(binding_)
+			, m_arrayElement	(arrayElement)
+		{
+		}
+
+		friend class DescriptorSetUpdateBuilder;
+
+		const deUint32			m_binding;
+		const deUint32			m_arrayElement;
+	};
+
+									DescriptorSetUpdateBuilder	(void);
+
+	DescriptorSetUpdateBuilder&		write						(VkDescriptorSet			destSet,
+																 deUint32					destBinding,
+																 deUint32					destArrayElement,
+																 deUint32					count,
+																 VkDescriptorType			descriptorType,
+																 const VkDescriptorInfo*	pDescriptors);
+
+	DescriptorSetUpdateBuilder&		copy						(VkDescriptorSet	srcSet,
+																 deUint32			srcBinding,
+																 deUint32			srcArrayElement,
+																 VkDescriptorSet	destSet,
+																 deUint32			destBinding,
+																 deUint32			destArrayElement,
+																 deUint32			count);
+
+	void							update						(const DeviceInterface& vk, VkDevice device) const;
+
+	// helpers
+
+	//! Write one descriptor at the given location.
+	inline DescriptorSetUpdateBuilder&	writeSingle				(VkDescriptorSet			destSet,
+																 const Location&			destLocation,
+																 VkDescriptorType			descriptorType,
+																 const VkDescriptorInfo*	descriptor)
+	{
+		return write(destSet, destLocation.m_binding, destLocation.m_arrayElement, 1u, descriptorType, descriptor);
+	}
+
+	//! Write numDescriptors consecutive descriptors starting at the given location.
+	inline DescriptorSetUpdateBuilder&	writeArray				(VkDescriptorSet			destSet,
+																 const Location&			destLocation,
+																 VkDescriptorType			descriptorType,
+																 deUint32					numDescriptors,
+																 const VkDescriptorInfo*	descriptors)
+	{
+		return write(destSet, destLocation.m_binding, destLocation.m_arrayElement, numDescriptors, descriptorType, descriptors);
+	}
+
+	//! Copy one descriptor between locations.
+	inline DescriptorSetUpdateBuilder&	copySingle				(VkDescriptorSet	srcSet,
+																 const Location&	srcLocation,
+																 VkDescriptorSet	destSet,
+																 const Location&	destLocation)
+	{
+		return copy(srcSet, srcLocation.m_binding, srcLocation.m_arrayElement, destSet, destLocation.m_binding, destLocation.m_arrayElement, 1u);
+	}
+
+	//! Copy count consecutive descriptors between locations.
+	inline DescriptorSetUpdateBuilder&	copyArray				(VkDescriptorSet	srcSet,
+																 const Location&	srcLocation,
+																 VkDescriptorSet	destSet,
+																 const Location&	destLocation,
+																 deUint32			count)
+	{
+		return copy(srcSet, srcLocation.m_binding, srcLocation.m_arrayElement, destSet, destLocation.m_binding, destLocation.m_arrayElement, count);
+	}
+
+private:
+									DescriptorSetUpdateBuilder	(const DescriptorSetUpdateBuilder&); // delete
+	DescriptorSetUpdateBuilder&		operator=					(const DescriptorSetUpdateBuilder&); // delete
+
+	std::vector<VkWriteDescriptorSet>	m_writes;
+	std::vector<VkCopyDescriptorSet>	m_copies;
+};
+
+} // vk
+
+#endif // _VKBUILDERUTIL_HPP
--- /dev/null
+/* WARNING: This is auto-generated file. Do not modify, since changes will
+ * be lost! Modify the generating script instead.
+ */
+virtual VkResult destroyDevice (VkDevice device) const;
+virtual VkResult getGlobalExtensionProperties (const char* pLayerName, deUint32* pCount, VkExtensionProperties* pProperties) const;
+virtual VkResult getPhysicalDeviceExtensionProperties (VkPhysicalDevice physicalDevice, const char* pLayerName, deUint32* pCount, VkExtensionProperties* pProperties) const;
+virtual VkResult getGlobalLayerProperties (deUint32* pCount, VkLayerProperties* pProperties) const;
+virtual VkResult getPhysicalDeviceLayerProperties (VkPhysicalDevice physicalDevice, deUint32* pCount, VkLayerProperties* pProperties) const;
+virtual VkResult getDeviceQueue (VkDevice device, deUint32 queueFamilyIndex, deUint32 queueIndex, VkQueue* pQueue) const;
+virtual VkResult queueSubmit (VkQueue queue, deUint32 cmdBufferCount, const VkCmdBuffer* pCmdBuffers, VkFence fence) const;
+virtual VkResult queueWaitIdle (VkQueue queue) const;
+virtual VkResult deviceWaitIdle (VkDevice device) const;
+virtual VkResult allocMemory (VkDevice device, const VkMemoryAllocInfo* pAllocInfo, VkDeviceMemory* pMem) const;
+virtual VkResult freeMemory (VkDevice device, VkDeviceMemory mem) const;
+virtual VkResult mapMemory (VkDevice device, VkDeviceMemory mem, VkDeviceSize offset, VkDeviceSize size, VkMemoryMapFlags flags, void** ppData) const;
+virtual VkResult unmapMemory (VkDevice device, VkDeviceMemory mem) const;
+virtual VkResult flushMappedMemoryRanges (VkDevice device, deUint32 memRangeCount, const VkMappedMemoryRange* pMemRanges) const;
+virtual VkResult invalidateMappedMemoryRanges (VkDevice device, deUint32 memRangeCount, const VkMappedMemoryRange* pMemRanges) const;
+virtual VkResult getDeviceMemoryCommitment (VkDevice device, VkDeviceMemory memory, VkDeviceSize* pCommittedMemoryInBytes) const;
+virtual VkResult bindBufferMemory (VkDevice device, VkBuffer buffer, VkDeviceMemory mem, VkDeviceSize memOffset) const;
+virtual VkResult bindImageMemory (VkDevice device, VkImage image, VkDeviceMemory mem, VkDeviceSize memOffset) const;
+virtual VkResult getBufferMemoryRequirements (VkDevice device, VkBuffer buffer, VkMemoryRequirements* pMemoryRequirements) const;
+virtual VkResult getImageMemoryRequirements (VkDevice device, VkImage image, VkMemoryRequirements* pMemoryRequirements) const;
+virtual VkResult getImageSparseMemoryRequirements (VkDevice device, VkImage image, deUint32* pNumRequirements, VkSparseImageMemoryRequirements* pSparseMemoryRequirements) const;
+virtual VkResult getPhysicalDeviceSparseImageFormatProperties (VkPhysicalDevice physicalDevice, VkFormat format, VkImageType type, deUint32 samples, VkImageUsageFlags usage, VkImageTiling tiling, deUint32* pNumProperties, VkSparseImageFormatProperties* pProperties) const;
+virtual VkResult queueBindSparseBufferMemory (VkQueue queue, VkBuffer buffer, deUint32 numBindings, const VkSparseMemoryBindInfo* pBindInfo) const;
+virtual VkResult queueBindSparseImageOpaqueMemory (VkQueue queue, VkImage image, deUint32 numBindings, const VkSparseMemoryBindInfo* pBindInfo) const;
+virtual VkResult queueBindSparseImageMemory (VkQueue queue, VkImage image, deUint32 numBindings, const VkSparseImageMemoryBindInfo* pBindInfo) const;
+virtual VkResult createFence (VkDevice device, const VkFenceCreateInfo* pCreateInfo, VkFence* pFence) const;
+virtual VkResult destroyFence (VkDevice device, VkFence fence) const;
+virtual VkResult resetFences (VkDevice device, deUint32 fenceCount, const VkFence* pFences) const;
+virtual VkResult getFenceStatus (VkDevice device, VkFence fence) const;
+virtual VkResult waitForFences (VkDevice device, deUint32 fenceCount, const VkFence* pFences, VkBool32 waitAll, deUint64 timeout) const;
+virtual VkResult createSemaphore (VkDevice device, const VkSemaphoreCreateInfo* pCreateInfo, VkSemaphore* pSemaphore) const;
+virtual VkResult destroySemaphore (VkDevice device, VkSemaphore semaphore) const;
+virtual VkResult queueSignalSemaphore (VkQueue queue, VkSemaphore semaphore) const;
+virtual VkResult queueWaitSemaphore (VkQueue queue, VkSemaphore semaphore) const;
+virtual VkResult createEvent (VkDevice device, const VkEventCreateInfo* pCreateInfo, VkEvent* pEvent) const;
+virtual VkResult destroyEvent (VkDevice device, VkEvent event) const;
+virtual VkResult getEventStatus (VkDevice device, VkEvent event) const;
+virtual VkResult setEvent (VkDevice device, VkEvent event) const;
+virtual VkResult resetEvent (VkDevice device, VkEvent event) const;
+virtual VkResult createQueryPool (VkDevice device, const VkQueryPoolCreateInfo* pCreateInfo, VkQueryPool* pQueryPool) const;
+virtual VkResult destroyQueryPool (VkDevice device, VkQueryPool queryPool) const;
+virtual VkResult getQueryPoolResults (VkDevice device, VkQueryPool queryPool, deUint32 startQuery, deUint32 queryCount, deUintptr* pDataSize, void* pData, VkQueryResultFlags flags) const;
+virtual VkResult createBuffer (VkDevice device, const VkBufferCreateInfo* pCreateInfo, VkBuffer* pBuffer) const;
+virtual VkResult destroyBuffer (VkDevice device, VkBuffer buffer) const;
+virtual VkResult createBufferView (VkDevice device, const VkBufferViewCreateInfo* pCreateInfo, VkBufferView* pView) const;
+virtual VkResult destroyBufferView (VkDevice device, VkBufferView bufferView) const;
+virtual VkResult createImage (VkDevice device, const VkImageCreateInfo* pCreateInfo, VkImage* pImage) const;
+virtual VkResult destroyImage (VkDevice device, VkImage image) const;
+virtual VkResult getImageSubresourceLayout (VkDevice device, VkImage image, const VkImageSubresource* pSubresource, VkSubresourceLayout* pLayout) const;
+virtual VkResult createImageView (VkDevice device, const VkImageViewCreateInfo* pCreateInfo, VkImageView* pView) const;
+virtual VkResult destroyImageView (VkDevice device, VkImageView imageView) const;
+virtual VkResult createAttachmentView (VkDevice device, const VkAttachmentViewCreateInfo* pCreateInfo, VkAttachmentView* pView) const;
+virtual VkResult destroyAttachmentView (VkDevice device, VkAttachmentView attachmentView) const;
+virtual VkResult createShaderModule (VkDevice device, const VkShaderModuleCreateInfo* pCreateInfo, VkShaderModule* pShaderModule) const;
+virtual VkResult destroyShaderModule (VkDevice device, VkShaderModule shaderModule) const;
+virtual VkResult createShader (VkDevice device, const VkShaderCreateInfo* pCreateInfo, VkShader* pShader) const;
+virtual VkResult destroyShader (VkDevice device, VkShader shader) const;
+virtual VkResult createPipelineCache (VkDevice device, const VkPipelineCacheCreateInfo* pCreateInfo, VkPipelineCache* pPipelineCache) const;
+virtual VkResult destroyPipelineCache (VkDevice device, VkPipelineCache pipelineCache) const;
+virtual deUintptr getPipelineCacheSize (VkDevice device, VkPipelineCache pipelineCache) const;
+virtual VkResult getPipelineCacheData (VkDevice device, VkPipelineCache pipelineCache, void* pData) const;
+virtual VkResult mergePipelineCaches (VkDevice device, VkPipelineCache destCache, deUint32 srcCacheCount, const VkPipelineCache* pSrcCaches) const;
+virtual VkResult createGraphicsPipelines (VkDevice device, VkPipelineCache pipelineCache, deUint32 count, const VkGraphicsPipelineCreateInfo* pCreateInfos, VkPipeline* pPipelines) const;
+virtual VkResult createComputePipelines (VkDevice device, VkPipelineCache pipelineCache, deUint32 count, const VkComputePipelineCreateInfo* pCreateInfos, VkPipeline* pPipelines) const;
+virtual VkResult destroyPipeline (VkDevice device, VkPipeline pipeline) const;
+virtual VkResult createPipelineLayout (VkDevice device, const VkPipelineLayoutCreateInfo* pCreateInfo, VkPipelineLayout* pPipelineLayout) const;
+virtual VkResult destroyPipelineLayout (VkDevice device, VkPipelineLayout pipelineLayout) const;
+virtual VkResult createSampler (VkDevice device, const VkSamplerCreateInfo* pCreateInfo, VkSampler* pSampler) const;
+virtual VkResult destroySampler (VkDevice device, VkSampler sampler) const;
+virtual VkResult createDescriptorSetLayout (VkDevice device, const VkDescriptorSetLayoutCreateInfo* pCreateInfo, VkDescriptorSetLayout* pSetLayout) const;
+virtual VkResult destroyDescriptorSetLayout (VkDevice device, VkDescriptorSetLayout descriptorSetLayout) const;
+virtual VkResult createDescriptorPool (VkDevice device, VkDescriptorPoolUsage poolUsage, deUint32 maxSets, const VkDescriptorPoolCreateInfo* pCreateInfo, VkDescriptorPool* pDescriptorPool) const;
+virtual VkResult destroyDescriptorPool (VkDevice device, VkDescriptorPool descriptorPool) const;
+virtual VkResult resetDescriptorPool (VkDevice device, VkDescriptorPool descriptorPool) const;
+virtual VkResult allocDescriptorSets (VkDevice device, VkDescriptorPool descriptorPool, VkDescriptorSetUsage setUsage, deUint32 count, const VkDescriptorSetLayout* pSetLayouts, VkDescriptorSet* pDescriptorSets, deUint32* pCount) const;
+virtual VkResult freeDescriptorSets (VkDevice device, VkDescriptorPool descriptorPool, deUint32 count, const VkDescriptorSet* pDescriptorSets) const;
+virtual VkResult updateDescriptorSets (VkDevice device, deUint32 writeCount, const VkWriteDescriptorSet* pDescriptorWrites, deUint32 copyCount, const VkCopyDescriptorSet* pDescriptorCopies) const;
+virtual VkResult createDynamicViewportState (VkDevice device, const VkDynamicViewportStateCreateInfo* pCreateInfo, VkDynamicViewportState* pState) const;
+virtual VkResult destroyDynamicViewportState (VkDevice device, VkDynamicViewportState dynamicViewportState) const;
+virtual VkResult createDynamicRasterState (VkDevice device, const VkDynamicRasterStateCreateInfo* pCreateInfo, VkDynamicRasterState* pState) const;
+virtual VkResult destroyDynamicRasterState (VkDevice device, VkDynamicRasterState dynamicRasterState) const;
+virtual VkResult createDynamicColorBlendState (VkDevice device, const VkDynamicColorBlendStateCreateInfo* pCreateInfo, VkDynamicColorBlendState* pState) const;
+virtual VkResult destroyDynamicColorBlendState (VkDevice device, VkDynamicColorBlendState dynamicColorBlendState) const;
+virtual VkResult createDynamicDepthStencilState (VkDevice device, const VkDynamicDepthStencilStateCreateInfo* pCreateInfo, VkDynamicDepthStencilState* pState) const;
+virtual VkResult destroyDynamicDepthStencilState (VkDevice device, VkDynamicDepthStencilState dynamicDepthStencilState) const;
+virtual VkResult createFramebuffer (VkDevice device, const VkFramebufferCreateInfo* pCreateInfo, VkFramebuffer* pFramebuffer) const;
+virtual VkResult destroyFramebuffer (VkDevice device, VkFramebuffer framebuffer) const;
+virtual VkResult createRenderPass (VkDevice device, const VkRenderPassCreateInfo* pCreateInfo, VkRenderPass* pRenderPass) const;
+virtual VkResult destroyRenderPass (VkDevice device, VkRenderPass renderPass) const;
+virtual VkResult getRenderAreaGranularity (VkDevice device, VkRenderPass renderPass, VkExtent2D* pGranularity) const;
+virtual VkResult createCommandPool (VkDevice device, const VkCmdPoolCreateInfo* pCreateInfo, VkCmdPool* pCmdPool) const;
+virtual VkResult destroyCommandPool (VkDevice device, VkCmdPool cmdPool) const;
+virtual VkResult resetCommandPool (VkDevice device, VkCmdPool cmdPool, VkCmdPoolResetFlags flags) const;
+virtual VkResult createCommandBuffer (VkDevice device, const VkCmdBufferCreateInfo* pCreateInfo, VkCmdBuffer* pCmdBuffer) const;
+virtual VkResult destroyCommandBuffer (VkDevice device, VkCmdBuffer commandBuffer) const;
+virtual VkResult beginCommandBuffer (VkCmdBuffer cmdBuffer, const VkCmdBufferBeginInfo* pBeginInfo) const;
+virtual VkResult endCommandBuffer (VkCmdBuffer cmdBuffer) const;
+virtual VkResult resetCommandBuffer (VkCmdBuffer cmdBuffer, VkCmdBufferResetFlags flags) const;
+virtual void cmdBindPipeline (VkCmdBuffer cmdBuffer, VkPipelineBindPoint pipelineBindPoint, VkPipeline pipeline) const;
+virtual void cmdBindDynamicViewportState (VkCmdBuffer cmdBuffer, VkDynamicViewportState dynamicViewportState) const;
+virtual void cmdBindDynamicRasterState (VkCmdBuffer cmdBuffer, VkDynamicRasterState dynamicRasterState) const;
+virtual void cmdBindDynamicColorBlendState (VkCmdBuffer cmdBuffer, VkDynamicColorBlendState dynamicColorBlendState) const;
+virtual void cmdBindDynamicDepthStencilState (VkCmdBuffer cmdBuffer, VkDynamicDepthStencilState dynamicDepthStencilState) const;
+virtual void cmdBindDescriptorSets (VkCmdBuffer cmdBuffer, VkPipelineBindPoint pipelineBindPoint, VkPipelineLayout layout, deUint32 firstSet, deUint32 setCount, const VkDescriptorSet* pDescriptorSets, deUint32 dynamicOffsetCount, const deUint32* pDynamicOffsets) const;
+virtual void cmdBindIndexBuffer (VkCmdBuffer cmdBuffer, VkBuffer buffer, VkDeviceSize offset, VkIndexType indexType) const;
+virtual void cmdBindVertexBuffers (VkCmdBuffer cmdBuffer, deUint32 startBinding, deUint32 bindingCount, const VkBuffer* pBuffers, const VkDeviceSize* pOffsets) const;
+virtual void cmdDraw (VkCmdBuffer cmdBuffer, deUint32 firstVertex, deUint32 vertexCount, deUint32 firstInstance, deUint32 instanceCount) const;
+virtual void cmdDrawIndexed (VkCmdBuffer cmdBuffer, deUint32 firstIndex, deUint32 indexCount, deInt32 vertexOffset, deUint32 firstInstance, deUint32 instanceCount) const;
+virtual void cmdDrawIndirect (VkCmdBuffer cmdBuffer, VkBuffer buffer, VkDeviceSize offset, deUint32 count, deUint32 stride) const;
+virtual void cmdDrawIndexedIndirect (VkCmdBuffer cmdBuffer, VkBuffer buffer, VkDeviceSize offset, deUint32 count, deUint32 stride) const;
+virtual void cmdDispatch (VkCmdBuffer cmdBuffer, deUint32 x, deUint32 y, deUint32 z) const;
+virtual void cmdDispatchIndirect (VkCmdBuffer cmdBuffer, VkBuffer buffer, VkDeviceSize offset) const;
+virtual void cmdCopyBuffer (VkCmdBuffer cmdBuffer, VkBuffer srcBuffer, VkBuffer destBuffer, deUint32 regionCount, const VkBufferCopy* pRegions) const;
+virtual void cmdCopyImage (VkCmdBuffer cmdBuffer, VkImage srcImage, VkImageLayout srcImageLayout, VkImage destImage, VkImageLayout destImageLayout, deUint32 regionCount, const VkImageCopy* pRegions) const;
+virtual void cmdBlitImage (VkCmdBuffer cmdBuffer, VkImage srcImage, VkImageLayout srcImageLayout, VkImage destImage, VkImageLayout destImageLayout, deUint32 regionCount, const VkImageBlit* pRegions, VkTexFilter filter) const;
+virtual void cmdCopyBufferToImage (VkCmdBuffer cmdBuffer, VkBuffer srcBuffer, VkImage destImage, VkImageLayout destImageLayout, deUint32 regionCount, const VkBufferImageCopy* pRegions) const;
+virtual void cmdCopyImageToBuffer (VkCmdBuffer cmdBuffer, VkImage srcImage, VkImageLayout srcImageLayout, VkBuffer destBuffer, deUint32 regionCount, const VkBufferImageCopy* pRegions) const;
+virtual void cmdUpdateBuffer (VkCmdBuffer cmdBuffer, VkBuffer destBuffer, VkDeviceSize destOffset, VkDeviceSize dataSize, const deUint32* pData) const;
+virtual void cmdFillBuffer (VkCmdBuffer cmdBuffer, VkBuffer destBuffer, VkDeviceSize destOffset, VkDeviceSize fillSize, deUint32 data) const;
+virtual void cmdClearColorImage (VkCmdBuffer cmdBuffer, VkImage image, VkImageLayout imageLayout, const VkClearColorValue* pColor, deUint32 rangeCount, const VkImageSubresourceRange* pRanges) const;
+virtual void cmdClearDepthStencilImage (VkCmdBuffer cmdBuffer, VkImage image, VkImageLayout imageLayout, float depth, deUint32 stencil, deUint32 rangeCount, const VkImageSubresourceRange* pRanges) const;
+virtual void cmdClearColorAttachment (VkCmdBuffer cmdBuffer, deUint32 colorAttachment, VkImageLayout imageLayout, const VkClearColorValue* pColor, deUint32 rectCount, const VkRect3D* pRects) const;
+virtual void cmdClearDepthStencilAttachment (VkCmdBuffer cmdBuffer, VkImageAspectFlags imageAspectMask, VkImageLayout imageLayout, float depth, deUint32 stencil, deUint32 rectCount, const VkRect3D* pRects) const;
+virtual void cmdResolveImage (VkCmdBuffer cmdBuffer, VkImage srcImage, VkImageLayout srcImageLayout, VkImage destImage, VkImageLayout destImageLayout, deUint32 regionCount, const VkImageResolve* pRegions) const;
+virtual void cmdSetEvent (VkCmdBuffer cmdBuffer, VkEvent event, VkPipelineStageFlags stageMask) const;
+virtual void cmdResetEvent (VkCmdBuffer cmdBuffer, VkEvent event, VkPipelineStageFlags stageMask) const;
+virtual void cmdWaitEvents (VkCmdBuffer cmdBuffer, deUint32 eventCount, const VkEvent* pEvents, VkPipelineStageFlags srcStageMask, VkPipelineStageFlags destStageMask, deUint32 memBarrierCount, const void* const* ppMemBarriers) const;
+virtual void cmdPipelineBarrier (VkCmdBuffer cmdBuffer, VkPipelineStageFlags srcStageMask, VkPipelineStageFlags destStageMask, VkBool32 byRegion, deUint32 memBarrierCount, const void* const* ppMemBarriers) const;
+virtual void cmdBeginQuery (VkCmdBuffer cmdBuffer, VkQueryPool queryPool, deUint32 slot, VkQueryControlFlags flags) const;
+virtual void cmdEndQuery (VkCmdBuffer cmdBuffer, VkQueryPool queryPool, deUint32 slot) const;
+virtual void cmdResetQueryPool (VkCmdBuffer cmdBuffer, VkQueryPool queryPool, deUint32 startQuery, deUint32 queryCount) const;
+virtual void cmdWriteTimestamp (VkCmdBuffer cmdBuffer, VkTimestampType timestampType, VkBuffer destBuffer, VkDeviceSize destOffset) const;
+virtual void cmdCopyQueryPoolResults (VkCmdBuffer cmdBuffer, VkQueryPool queryPool, deUint32 startQuery, deUint32 queryCount, VkBuffer destBuffer, VkDeviceSize destOffset, VkDeviceSize destStride, VkQueryResultFlags flags) const;
+virtual void cmdPushConstants (VkCmdBuffer cmdBuffer, VkPipelineLayout layout, VkShaderStageFlags stageFlags, deUint32 start, deUint32 length, const void* values) const;
+virtual void cmdBeginRenderPass (VkCmdBuffer cmdBuffer, const VkRenderPassBeginInfo* pRenderPassBegin, VkRenderPassContents contents) const;
+virtual void cmdNextSubpass (VkCmdBuffer cmdBuffer, VkRenderPassContents contents) const;
+virtual void cmdEndRenderPass (VkCmdBuffer cmdBuffer) const;
+virtual void cmdExecuteCommands (VkCmdBuffer cmdBuffer, deUint32 cmdBuffersCount, const VkCmdBuffer* pCmdBuffers) const;
--- /dev/null
+/* WARNING: This is auto-generated file. Do not modify, since changes will
+ * be lost! Modify the generating script instead.
+ */
+virtual VkResult destroyInstance (VkInstance instance) const;
+virtual VkResult enumeratePhysicalDevices (VkInstance instance, deUint32* pPhysicalDeviceCount, VkPhysicalDevice* pPhysicalDevices) const;
+virtual VkResult getPhysicalDeviceFeatures (VkPhysicalDevice physicalDevice, VkPhysicalDeviceFeatures* pFeatures) const;
+virtual VkResult getPhysicalDeviceFormatProperties (VkPhysicalDevice physicalDevice, VkFormat format, VkFormatProperties* pFormatProperties) const;
+virtual VkResult getPhysicalDeviceImageFormatProperties (VkPhysicalDevice physicalDevice, VkFormat format, VkImageType type, VkImageTiling tiling, VkImageUsageFlags usage, VkImageFormatProperties* pImageFormatProperties) const;
+virtual VkResult getPhysicalDeviceLimits (VkPhysicalDevice physicalDevice, VkPhysicalDeviceLimits* pLimits) const;
+virtual VkResult getPhysicalDeviceProperties (VkPhysicalDevice physicalDevice, VkPhysicalDeviceProperties* pProperties) const;
+virtual VkResult getPhysicalDeviceQueueCount (VkPhysicalDevice physicalDevice, deUint32* pCount) const;
+virtual VkResult getPhysicalDeviceQueueProperties (VkPhysicalDevice physicalDevice, deUint32 count, VkPhysicalDeviceQueueProperties* pQueueProperties) const;
+virtual VkResult getPhysicalDeviceMemoryProperties (VkPhysicalDevice physicalDevice, VkPhysicalDeviceMemoryProperties* pMemoryProperties) const;
+virtual PFN_vkVoidFunction getDeviceProcAddr (VkDevice device, const char* pName) const;
+virtual VkResult createDevice (VkPhysicalDevice physicalDevice, const VkDeviceCreateInfo* pCreateInfo, VkDevice* pDevice) const;
--- /dev/null
+/* WARNING: This is auto-generated file. Do not modify, since changes will
+ * be lost! Modify the generating script instead.
+ */
+virtual VkResult createInstance (const VkInstanceCreateInfo* pCreateInfo, VkInstance* pInstance) const;
+virtual PFN_vkVoidFunction getInstanceProcAddr (VkInstance instance, const char* pName) const;
--- /dev/null
+/*-------------------------------------------------------------------------
+ * Vulkan CTS Framework
+ * --------------------
+ *
+ * Copyright (c) 2015 Google Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and/or associated documentation files (the
+ * "Materials"), to deal in the Materials without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Materials, and to
+ * permit persons to whom the Materials are furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice(s) and this permission notice shall be
+ * included in all copies or substantial portions of the Materials.
+ *
+ * The Materials are Confidential Information as defined by the
+ * Khronos Membership Agreement until designated non-confidential by
+ * Khronos, at which point this condition clause shall be removed.
+ *
+ * THE MATERIALS ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * MATERIALS OR THE USE OR OTHER DEALINGS IN THE MATERIALS.
+ *
+ *//*!
+ * \file
+ * \brief Vulkan utilities.
+ *//*--------------------------------------------------------------------*/
+
+#include "vkDefs.hpp"
+#include "vkStrUtil.hpp"
+
+#include <sstream>
+
+DE_STATIC_ASSERT(sizeof(vk::VkImageType) == sizeof(deUint32));
+DE_STATIC_ASSERT(sizeof(vk::VkResult) == sizeof(deUint32));
+DE_STATIC_ASSERT(sizeof(vk::VkDevice) == sizeof(void*));
+DE_STATIC_ASSERT(sizeof(vk::VkBuffer) == sizeof(deUint64));
+
+namespace vk
+{
+
+static bool isOutOfMemoryError (VkResult result)
+{
+ return result == VK_ERROR_OUT_OF_DEVICE_MEMORY ||
+ result == VK_ERROR_OUT_OF_HOST_MEMORY;
+}
+
+Error::Error (VkResult error, const char* message, const char* expr, const char* file, int line)
+ : tcu::TestError (message, expr, file, line)
+ , m_error (error)
+{
+}
+
+Error::Error (VkResult error, const std::string& message)
+ : tcu::TestError (message)
+ , m_error (error)
+{
+}
+
+Error::~Error (void) throw()
+{
+}
+
+OutOfMemoryError::OutOfMemoryError (VkResult error, const char* message, const char* expr, const char* file, int line)
+ : tcu::ResourceError(message, expr, file, line)
+ , m_error (error)
+{
+ DE_ASSERT(isOutOfMemoryError(error));
+}
+
+OutOfMemoryError::OutOfMemoryError (VkResult error, const std::string& message)
+ : tcu::ResourceError(message)
+ , m_error (error)
+{
+ DE_ASSERT(isOutOfMemoryError(error));
+}
+
+OutOfMemoryError::~OutOfMemoryError (void) throw()
+{
+}
+
+void checkResult (VkResult result, const char* msg, const char* file, int line)
+{
+ if (result != VK_SUCCESS)
+ {
+ std::ostringstream msgStr;
+ if (msg)
+ msgStr << msg << ": ";
+
+ msgStr << getResultStr(result);
+
+ if (isOutOfMemoryError(result))
+ throw OutOfMemoryError(result, msgStr.str().c_str(), DE_NULL, file, line);
+ else if (result == VK_UNSUPPORTED)
+ throw tcu::NotSupportedError(msgStr.str().c_str(), DE_NULL, file, line);
+ else
+ throw Error(result, msgStr.str().c_str(), DE_NULL, file, line);
+ }
+}
+
+VkClearValue clearValueColorF32 (float r, float g, float b, float a)
+{
+ VkClearValue v;
+ v.color.f32[0] = r;
+ v.color.f32[1] = g;
+ v.color.f32[2] = b;
+ v.color.f32[3] = a;
+ return v;
+}
+
+} // vk
--- /dev/null
+#ifndef _VKDEFS_HPP
+#define _VKDEFS_HPP
+/*-------------------------------------------------------------------------
+ * Vulkan CTS Framework
+ * --------------------
+ *
+ * Copyright (c) 2015 Google Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and/or associated documentation files (the
+ * "Materials"), to deal in the Materials without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Materials, and to
+ * permit persons to whom the Materials are furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice(s) and this permission notice shall be
+ * included in all copies or substantial portions of the Materials.
+ *
+ * The Materials are Confidential Information as defined by the
+ * Khronos Membership Agreement until designated non-confidential by
+ * Khronos, at which point this condition clause shall be removed.
+ *
+ * THE MATERIALS ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * MATERIALS OR THE USE OR OTHER DEALINGS IN THE MATERIALS.
+ *
+ *//*!
+ * \file
+ * \brief Vulkan utilities.
+ *//*--------------------------------------------------------------------*/
+
+#include "tcuDefs.hpp"
+
+#if (DE_OS == DE_OS_ANDROID)
+# include <sys/cdefs.h>
+# if !defined(__NDK_FPABI__)
+# define __NDK_FPABI__
+# endif
+# define VK_APICALL __NDK_FPABI__
+#else
+# define VK_APICALL
+#endif
+
+#if (DE_OS == DE_OS_WIN32) && (_MSC_VER >= 800) || defined(_STDCALL_SUPPORTED)
+# define VK_APIENTRY __stdcall
+#else
+# define VK_APIENTRY
+#endif
+
+#define VK_DEFINE_HANDLE(NAME, TYPE) typedef struct NAME##_s* NAME
+#define VK_DEFINE_NONDISP_HANDLE(NAME, TYPE) typedef Handle<TYPE> NAME
+
+#define VK_MAKE_VERSION(MAJOR, MINOR, PATCH) ((MAJOR << 22) | (MINOR << 12) | PATCH)
+#define VK_BIT(NUM) (1<<NUM)
+
+#define VK_CHECK(EXPR) vk::checkResult((EXPR), #EXPR, __FILE__, __LINE__)
+#define VK_CHECK_MSG(EXPR, MSG) vk::checkResult((EXPR), MSG, __FILE__, __LINE__)
+
+/*--------------------------------------------------------------------*//*!
+ * \brief Vulkan utilities
+ *//*--------------------------------------------------------------------*/
+namespace vk
+{
+
+typedef deUint64 VkDeviceSize;
+typedef deUint32 VkSampleMask;
+typedef deUint32 VkBool32;
+
+typedef deUint32 VkShaderCreateFlags; // Reserved
+typedef deUint32 VkEventCreateFlags; // Reserved
+typedef deUint32 VkCmdBufferCreateFlags; // Reserved
+typedef deUint32 VkSemaphoreCreateFlags; // Reserved
+typedef deUint32 VkShaderModuleCreateFlags; // Reserved
+typedef deUint32 VkMemoryMapFlags; // \todo [2015-05-08 pyry] Reserved? Not documented
+
+// enum HandleType { HANDLE_TYPE_INSTANCE, ... };
+#include "vkHandleType.inl"
+
+template<HandleType Type>
+class Handle
+{
+public:
+ Handle (void) {} // \note Left uninitialized on purpose
+ Handle (deUint64 internal) : m_internal(internal) {}
+
+ Handle& operator= (deUint64 internal) { m_internal = internal; return *this; }
+
+ bool operator== (const Handle<Type>& other) const { return this->m_internal == other.m_internal; }
+ bool operator!= (const Handle<Type>& other) const { return this->m_internal != other.m_internal; }
+
+ bool operator! (void) const { return !m_internal; }
+
+ deUint64 getInternal (void) const { return m_internal; }
+
+ enum { HANDLE_TYPE = Type };
+
+private:
+ deUint64 m_internal;
+};
+
+#include "vkBasicTypes.inl"
+
+enum { VK_QUEUE_FAMILY_IGNORED = 0xffffffff };
+enum { VK_NO_ATTACHMENT = 0xffffffff };
+
+typedef VK_APICALL void (VK_APIENTRY* PFN_vkVoidFunction) (void);
+
+typedef VK_APICALL void* (VK_APIENTRY* PFN_vkAllocFunction) (void* pUserData, deUintptr size, deUintptr alignment, VkSystemAllocType allocType);
+typedef VK_APICALL void (VK_APIENTRY* PFN_vkFreeFunction) (void* pUserData, void* pMem);
+
+#include "vkStructTypes.inl"
+
+extern "C"
+{
+#include "vkFunctionPointerTypes.inl"
+}
+
+class PlatformInterface
+{
+public:
+#include "vkVirtualPlatformInterface.inl"
+
+protected:
+ PlatformInterface (void) {}
+
+private:
+ PlatformInterface (const PlatformInterface&);
+ PlatformInterface& operator= (const PlatformInterface&);
+};
+
+class InstanceInterface
+{
+public:
+#include "vkVirtualInstanceInterface.inl"
+
+protected:
+ InstanceInterface (void) {}
+
+private:
+ InstanceInterface (const InstanceInterface&);
+ InstanceInterface& operator= (const InstanceInterface&);
+};
+
+class DeviceInterface
+{
+public:
+#include "vkVirtualDeviceInterface.inl"
+
+protected:
+ DeviceInterface (void) {}
+
+private:
+ DeviceInterface (const DeviceInterface&);
+ DeviceInterface& operator= (const DeviceInterface&);
+};
+
+class Error : public tcu::TestError
+{
+public:
+ Error (VkResult error, const char* message, const char* expr, const char* file, int line);
+ Error (VkResult error, const std::string& message);
+ virtual ~Error (void) throw();
+
+ VkResult getError (void) const { return m_error; }
+
+private:
+ const VkResult m_error;
+};
+
+class OutOfMemoryError : public tcu::ResourceError
+{
+public:
+ OutOfMemoryError (VkResult error, const char* message, const char* expr, const char* file, int line);
+ OutOfMemoryError (VkResult error, const std::string& message);
+ virtual ~OutOfMemoryError (void) throw();
+
+ VkResult getError (void) const { return m_error; }
+
+private:
+ const VkResult m_error;
+};
+
+void checkResult (VkResult result, const char* message, const char* file, int line);
+
+// \todo [2015-07-30 jarkko] move to vkStructUtil/TypeUtils
+VkClearValue clearValueColorF32 (float r, float g, float b, float a);
+
+} // vk
+
+#endif // _VKDEFS_HPP
--- /dev/null
+/* WARNING: This is auto-generated file. Do not modify, since changes will
+ * be lost! Modify the generating script instead.
+ */
+
+VkResult DeviceDriver::destroyDevice (VkDevice device) const
+{
+ return m_vk.destroyDevice(device);
+}
+
+VkResult DeviceDriver::getGlobalExtensionProperties (const char* pLayerName, deUint32* pCount, VkExtensionProperties* pProperties) const
+{
+ return m_vk.getGlobalExtensionProperties(pLayerName, pCount, pProperties);
+}
+
+VkResult DeviceDriver::getPhysicalDeviceExtensionProperties (VkPhysicalDevice physicalDevice, const char* pLayerName, deUint32* pCount, VkExtensionProperties* pProperties) const
+{
+ return m_vk.getPhysicalDeviceExtensionProperties(physicalDevice, pLayerName, pCount, pProperties);
+}
+
+VkResult DeviceDriver::getGlobalLayerProperties (deUint32* pCount, VkLayerProperties* pProperties) const
+{
+ return m_vk.getGlobalLayerProperties(pCount, pProperties);
+}
+
+VkResult DeviceDriver::getPhysicalDeviceLayerProperties (VkPhysicalDevice physicalDevice, deUint32* pCount, VkLayerProperties* pProperties) const
+{
+ return m_vk.getPhysicalDeviceLayerProperties(physicalDevice, pCount, pProperties);
+}
+
+VkResult DeviceDriver::getDeviceQueue (VkDevice device, deUint32 queueFamilyIndex, deUint32 queueIndex, VkQueue* pQueue) const
+{
+ return m_vk.getDeviceQueue(device, queueFamilyIndex, queueIndex, pQueue);
+}
+
+VkResult DeviceDriver::queueSubmit (VkQueue queue, deUint32 cmdBufferCount, const VkCmdBuffer* pCmdBuffers, VkFence fence) const
+{
+ return m_vk.queueSubmit(queue, cmdBufferCount, pCmdBuffers, fence);
+}
+
+VkResult DeviceDriver::queueWaitIdle (VkQueue queue) const
+{
+ return m_vk.queueWaitIdle(queue);
+}
+
+VkResult DeviceDriver::deviceWaitIdle (VkDevice device) const
+{
+ return m_vk.deviceWaitIdle(device);
+}
+
+VkResult DeviceDriver::allocMemory (VkDevice device, const VkMemoryAllocInfo* pAllocInfo, VkDeviceMemory* pMem) const
+{
+ return m_vk.allocMemory(device, pAllocInfo, pMem);
+}
+
+VkResult DeviceDriver::freeMemory (VkDevice device, VkDeviceMemory mem) const
+{
+ return m_vk.freeMemory(device, mem);
+}
+
+VkResult DeviceDriver::mapMemory (VkDevice device, VkDeviceMemory mem, VkDeviceSize offset, VkDeviceSize size, VkMemoryMapFlags flags, void** ppData) const
+{
+ return m_vk.mapMemory(device, mem, offset, size, flags, ppData);
+}
+
+VkResult DeviceDriver::unmapMemory (VkDevice device, VkDeviceMemory mem) const
+{
+ return m_vk.unmapMemory(device, mem);
+}
+
+VkResult DeviceDriver::flushMappedMemoryRanges (VkDevice device, deUint32 memRangeCount, const VkMappedMemoryRange* pMemRanges) const
+{
+ return m_vk.flushMappedMemoryRanges(device, memRangeCount, pMemRanges);
+}
+
+VkResult DeviceDriver::invalidateMappedMemoryRanges (VkDevice device, deUint32 memRangeCount, const VkMappedMemoryRange* pMemRanges) const
+{
+ return m_vk.invalidateMappedMemoryRanges(device, memRangeCount, pMemRanges);
+}
+
+VkResult DeviceDriver::getDeviceMemoryCommitment (VkDevice device, VkDeviceMemory memory, VkDeviceSize* pCommittedMemoryInBytes) const
+{
+ return m_vk.getDeviceMemoryCommitment(device, memory, pCommittedMemoryInBytes);
+}
+
+VkResult DeviceDriver::bindBufferMemory (VkDevice device, VkBuffer buffer, VkDeviceMemory mem, VkDeviceSize memOffset) const
+{
+ return m_vk.bindBufferMemory(device, buffer, mem, memOffset);
+}
+
+VkResult DeviceDriver::bindImageMemory (VkDevice device, VkImage image, VkDeviceMemory mem, VkDeviceSize memOffset) const
+{
+ return m_vk.bindImageMemory(device, image, mem, memOffset);
+}
+
+VkResult DeviceDriver::getBufferMemoryRequirements (VkDevice device, VkBuffer buffer, VkMemoryRequirements* pMemoryRequirements) const
+{
+ return m_vk.getBufferMemoryRequirements(device, buffer, pMemoryRequirements);
+}
+
+VkResult DeviceDriver::getImageMemoryRequirements (VkDevice device, VkImage image, VkMemoryRequirements* pMemoryRequirements) const
+{
+ return m_vk.getImageMemoryRequirements(device, image, pMemoryRequirements);
+}
+
+VkResult DeviceDriver::getImageSparseMemoryRequirements (VkDevice device, VkImage image, deUint32* pNumRequirements, VkSparseImageMemoryRequirements* pSparseMemoryRequirements) const
+{
+ return m_vk.getImageSparseMemoryRequirements(device, image, pNumRequirements, pSparseMemoryRequirements);
+}
+
+VkResult DeviceDriver::getPhysicalDeviceSparseImageFormatProperties (VkPhysicalDevice physicalDevice, VkFormat format, VkImageType type, deUint32 samples, VkImageUsageFlags usage, VkImageTiling tiling, deUint32* pNumProperties, VkSparseImageFormatProperties* pProperties) const
+{
+ return m_vk.getPhysicalDeviceSparseImageFormatProperties(physicalDevice, format, type, samples, usage, tiling, pNumProperties, pProperties);
+}
+
+VkResult DeviceDriver::queueBindSparseBufferMemory (VkQueue queue, VkBuffer buffer, deUint32 numBindings, const VkSparseMemoryBindInfo* pBindInfo) const
+{
+ return m_vk.queueBindSparseBufferMemory(queue, buffer, numBindings, pBindInfo);
+}
+
+VkResult DeviceDriver::queueBindSparseImageOpaqueMemory (VkQueue queue, VkImage image, deUint32 numBindings, const VkSparseMemoryBindInfo* pBindInfo) const
+{
+ return m_vk.queueBindSparseImageOpaqueMemory(queue, image, numBindings, pBindInfo);
+}
+
+VkResult DeviceDriver::queueBindSparseImageMemory (VkQueue queue, VkImage image, deUint32 numBindings, const VkSparseImageMemoryBindInfo* pBindInfo) const
+{
+ return m_vk.queueBindSparseImageMemory(queue, image, numBindings, pBindInfo);
+}
+
+VkResult DeviceDriver::createFence (VkDevice device, const VkFenceCreateInfo* pCreateInfo, VkFence* pFence) const
+{
+ return m_vk.createFence(device, pCreateInfo, pFence);
+}
+
+VkResult DeviceDriver::destroyFence (VkDevice device, VkFence fence) const
+{
+ return m_vk.destroyFence(device, fence);
+}
+
+VkResult DeviceDriver::resetFences (VkDevice device, deUint32 fenceCount, const VkFence* pFences) const
+{
+ return m_vk.resetFences(device, fenceCount, pFences);
+}
+
+VkResult DeviceDriver::getFenceStatus (VkDevice device, VkFence fence) const
+{
+ return m_vk.getFenceStatus(device, fence);
+}
+
+VkResult DeviceDriver::waitForFences (VkDevice device, deUint32 fenceCount, const VkFence* pFences, VkBool32 waitAll, deUint64 timeout) const
+{
+ return m_vk.waitForFences(device, fenceCount, pFences, waitAll, timeout);
+}
+
+VkResult DeviceDriver::createSemaphore (VkDevice device, const VkSemaphoreCreateInfo* pCreateInfo, VkSemaphore* pSemaphore) const
+{
+ return m_vk.createSemaphore(device, pCreateInfo, pSemaphore);
+}
+
+VkResult DeviceDriver::destroySemaphore (VkDevice device, VkSemaphore semaphore) const
+{
+ return m_vk.destroySemaphore(device, semaphore);
+}
+
+VkResult DeviceDriver::queueSignalSemaphore (VkQueue queue, VkSemaphore semaphore) const
+{
+ return m_vk.queueSignalSemaphore(queue, semaphore);
+}
+
+VkResult DeviceDriver::queueWaitSemaphore (VkQueue queue, VkSemaphore semaphore) const
+{
+ return m_vk.queueWaitSemaphore(queue, semaphore);
+}
+
+// Event and query-pool entry points; straight pass-through to m_vk.
+VkResult DeviceDriver::createEvent (VkDevice device, const VkEventCreateInfo* pCreateInfo, VkEvent* pEvent) const
+{
+	return m_vk.createEvent(device, pCreateInfo, pEvent);
+}
+
+VkResult DeviceDriver::destroyEvent (VkDevice device, VkEvent event) const
+{
+	return m_vk.destroyEvent(device, event);
+}
+
+VkResult DeviceDriver::getEventStatus (VkDevice device, VkEvent event) const
+{
+	return m_vk.getEventStatus(device, event);
+}
+
+VkResult DeviceDriver::setEvent (VkDevice device, VkEvent event) const
+{
+	return m_vk.setEvent(device, event);
+}
+
+VkResult DeviceDriver::resetEvent (VkDevice device, VkEvent event) const
+{
+	return m_vk.resetEvent(device, event);
+}
+
+VkResult DeviceDriver::createQueryPool (VkDevice device, const VkQueryPoolCreateInfo* pCreateInfo, VkQueryPool* pQueryPool) const
+{
+	return m_vk.createQueryPool(device, pCreateInfo, pQueryPool);
+}
+
+VkResult DeviceDriver::destroyQueryPool (VkDevice device, VkQueryPool queryPool) const
+{
+	return m_vk.destroyQueryPool(device, queryPool);
+}
+
+VkResult DeviceDriver::getQueryPoolResults (VkDevice device, VkQueryPool queryPool, deUint32 startQuery, deUint32 queryCount, deUintptr* pDataSize, void* pData, VkQueryResultFlags flags) const
+{
+	return m_vk.getQueryPoolResults(device, queryPool, startQuery, queryCount, pDataSize, pData, flags);
+}
+
+// Buffer, image and view entry points; straight pass-through to m_vk.
+VkResult DeviceDriver::createBuffer (VkDevice device, const VkBufferCreateInfo* pCreateInfo, VkBuffer* pBuffer) const
+{
+	return m_vk.createBuffer(device, pCreateInfo, pBuffer);
+}
+
+VkResult DeviceDriver::destroyBuffer (VkDevice device, VkBuffer buffer) const
+{
+	return m_vk.destroyBuffer(device, buffer);
+}
+
+VkResult DeviceDriver::createBufferView (VkDevice device, const VkBufferViewCreateInfo* pCreateInfo, VkBufferView* pView) const
+{
+	return m_vk.createBufferView(device, pCreateInfo, pView);
+}
+
+VkResult DeviceDriver::destroyBufferView (VkDevice device, VkBufferView bufferView) const
+{
+	return m_vk.destroyBufferView(device, bufferView);
+}
+
+VkResult DeviceDriver::createImage (VkDevice device, const VkImageCreateInfo* pCreateInfo, VkImage* pImage) const
+{
+	return m_vk.createImage(device, pCreateInfo, pImage);
+}
+
+VkResult DeviceDriver::destroyImage (VkDevice device, VkImage image) const
+{
+	return m_vk.destroyImage(device, image);
+}
+
+VkResult DeviceDriver::getImageSubresourceLayout (VkDevice device, VkImage image, const VkImageSubresource* pSubresource, VkSubresourceLayout* pLayout) const
+{
+	return m_vk.getImageSubresourceLayout(device, image, pSubresource, pLayout);
+}
+
+VkResult DeviceDriver::createImageView (VkDevice device, const VkImageViewCreateInfo* pCreateInfo, VkImageView* pView) const
+{
+	return m_vk.createImageView(device, pCreateInfo, pView);
+}
+
+VkResult DeviceDriver::destroyImageView (VkDevice device, VkImageView imageView) const
+{
+	return m_vk.destroyImageView(device, imageView);
+}
+
+VkResult DeviceDriver::createAttachmentView (VkDevice device, const VkAttachmentViewCreateInfo* pCreateInfo, VkAttachmentView* pView) const
+{
+	return m_vk.createAttachmentView(device, pCreateInfo, pView);
+}
+
+VkResult DeviceDriver::destroyAttachmentView (VkDevice device, VkAttachmentView attachmentView) const
+{
+	return m_vk.destroyAttachmentView(device, attachmentView);
+}
+
+// Shader, pipeline-cache and pipeline-creation entry points; straight pass-through to m_vk.
+VkResult DeviceDriver::createShaderModule (VkDevice device, const VkShaderModuleCreateInfo* pCreateInfo, VkShaderModule* pShaderModule) const
+{
+	return m_vk.createShaderModule(device, pCreateInfo, pShaderModule);
+}
+
+VkResult DeviceDriver::destroyShaderModule (VkDevice device, VkShaderModule shaderModule) const
+{
+	return m_vk.destroyShaderModule(device, shaderModule);
+}
+
+VkResult DeviceDriver::createShader (VkDevice device, const VkShaderCreateInfo* pCreateInfo, VkShader* pShader) const
+{
+	return m_vk.createShader(device, pCreateInfo, pShader);
+}
+
+VkResult DeviceDriver::destroyShader (VkDevice device, VkShader shader) const
+{
+	return m_vk.destroyShader(device, shader);
+}
+
+VkResult DeviceDriver::createPipelineCache (VkDevice device, const VkPipelineCacheCreateInfo* pCreateInfo, VkPipelineCache* pPipelineCache) const
+{
+	return m_vk.createPipelineCache(device, pCreateInfo, pPipelineCache);
+}
+
+VkResult DeviceDriver::destroyPipelineCache (VkDevice device, VkPipelineCache pipelineCache) const
+{
+	return m_vk.destroyPipelineCache(device, pipelineCache);
+}
+
+// Note: returns a size, not a VkResult — the only non-VkResult getter in this group.
+deUintptr DeviceDriver::getPipelineCacheSize (VkDevice device, VkPipelineCache pipelineCache) const
+{
+	return m_vk.getPipelineCacheSize(device, pipelineCache);
+}
+
+VkResult DeviceDriver::getPipelineCacheData (VkDevice device, VkPipelineCache pipelineCache, void* pData) const
+{
+	return m_vk.getPipelineCacheData(device, pipelineCache, pData);
+}
+
+VkResult DeviceDriver::mergePipelineCaches (VkDevice device, VkPipelineCache destCache, deUint32 srcCacheCount, const VkPipelineCache* pSrcCaches) const
+{
+	return m_vk.mergePipelineCaches(device, destCache, srcCacheCount, pSrcCaches);
+}
+
+VkResult DeviceDriver::createGraphicsPipelines (VkDevice device, VkPipelineCache pipelineCache, deUint32 count, const VkGraphicsPipelineCreateInfo* pCreateInfos, VkPipeline* pPipelines) const
+{
+	return m_vk.createGraphicsPipelines(device, pipelineCache, count, pCreateInfos, pPipelines);
+}
+
+VkResult DeviceDriver::createComputePipelines (VkDevice device, VkPipelineCache pipelineCache, deUint32 count, const VkComputePipelineCreateInfo* pCreateInfos, VkPipeline* pPipelines) const
+{
+	return m_vk.createComputePipelines(device, pipelineCache, count, pCreateInfos, pPipelines);
+}
+
+// Pipeline-layout, sampler and descriptor entry points; straight pass-through to m_vk.
+VkResult DeviceDriver::destroyPipeline (VkDevice device, VkPipeline pipeline) const
+{
+	return m_vk.destroyPipeline(device, pipeline);
+}
+
+VkResult DeviceDriver::createPipelineLayout (VkDevice device, const VkPipelineLayoutCreateInfo* pCreateInfo, VkPipelineLayout* pPipelineLayout) const
+{
+	return m_vk.createPipelineLayout(device, pCreateInfo, pPipelineLayout);
+}
+
+VkResult DeviceDriver::destroyPipelineLayout (VkDevice device, VkPipelineLayout pipelineLayout) const
+{
+	return m_vk.destroyPipelineLayout(device, pipelineLayout);
+}
+
+VkResult DeviceDriver::createSampler (VkDevice device, const VkSamplerCreateInfo* pCreateInfo, VkSampler* pSampler) const
+{
+	return m_vk.createSampler(device, pCreateInfo, pSampler);
+}
+
+VkResult DeviceDriver::destroySampler (VkDevice device, VkSampler sampler) const
+{
+	return m_vk.destroySampler(device, sampler);
+}
+
+VkResult DeviceDriver::createDescriptorSetLayout (VkDevice device, const VkDescriptorSetLayoutCreateInfo* pCreateInfo, VkDescriptorSetLayout* pSetLayout) const
+{
+	return m_vk.createDescriptorSetLayout(device, pCreateInfo, pSetLayout);
+}
+
+VkResult DeviceDriver::destroyDescriptorSetLayout (VkDevice device, VkDescriptorSetLayout descriptorSetLayout) const
+{
+	return m_vk.destroyDescriptorSetLayout(device, descriptorSetLayout);
+}
+
+VkResult DeviceDriver::createDescriptorPool (VkDevice device, VkDescriptorPoolUsage poolUsage, deUint32 maxSets, const VkDescriptorPoolCreateInfo* pCreateInfo, VkDescriptorPool* pDescriptorPool) const
+{
+	return m_vk.createDescriptorPool(device, poolUsage, maxSets, pCreateInfo, pDescriptorPool);
+}
+
+VkResult DeviceDriver::destroyDescriptorPool (VkDevice device, VkDescriptorPool descriptorPool) const
+{
+	return m_vk.destroyDescriptorPool(device, descriptorPool);
+}
+
+VkResult DeviceDriver::resetDescriptorPool (VkDevice device, VkDescriptorPool descriptorPool) const
+{
+	return m_vk.resetDescriptorPool(device, descriptorPool);
+}
+
+VkResult DeviceDriver::allocDescriptorSets (VkDevice device, VkDescriptorPool descriptorPool, VkDescriptorSetUsage setUsage, deUint32 count, const VkDescriptorSetLayout* pSetLayouts, VkDescriptorSet* pDescriptorSets, deUint32* pCount) const
+{
+	return m_vk.allocDescriptorSets(device, descriptorPool, setUsage, count, pSetLayouts, pDescriptorSets, pCount);
+}
+
+VkResult DeviceDriver::freeDescriptorSets (VkDevice device, VkDescriptorPool descriptorPool, deUint32 count, const VkDescriptorSet* pDescriptorSets) const
+{
+	return m_vk.freeDescriptorSets(device, descriptorPool, count, pDescriptorSets);
+}
+
+VkResult DeviceDriver::updateDescriptorSets (VkDevice device, deUint32 writeCount, const VkWriteDescriptorSet* pDescriptorWrites, deUint32 copyCount, const VkCopyDescriptorSet* pDescriptorCopies) const
+{
+	return m_vk.updateDescriptorSets(device, writeCount, pDescriptorWrites, copyCount, pDescriptorCopies);
+}
+
+// Dynamic-state object, framebuffer and render-pass entry points; straight pass-through to m_vk.
+VkResult DeviceDriver::createDynamicViewportState (VkDevice device, const VkDynamicViewportStateCreateInfo* pCreateInfo, VkDynamicViewportState* pState) const
+{
+	return m_vk.createDynamicViewportState(device, pCreateInfo, pState);
+}
+
+VkResult DeviceDriver::destroyDynamicViewportState (VkDevice device, VkDynamicViewportState dynamicViewportState) const
+{
+	return m_vk.destroyDynamicViewportState(device, dynamicViewportState);
+}
+
+VkResult DeviceDriver::createDynamicRasterState (VkDevice device, const VkDynamicRasterStateCreateInfo* pCreateInfo, VkDynamicRasterState* pState) const
+{
+	return m_vk.createDynamicRasterState(device, pCreateInfo, pState);
+}
+
+VkResult DeviceDriver::destroyDynamicRasterState (VkDevice device, VkDynamicRasterState dynamicRasterState) const
+{
+	return m_vk.destroyDynamicRasterState(device, dynamicRasterState);
+}
+
+VkResult DeviceDriver::createDynamicColorBlendState (VkDevice device, const VkDynamicColorBlendStateCreateInfo* pCreateInfo, VkDynamicColorBlendState* pState) const
+{
+	return m_vk.createDynamicColorBlendState(device, pCreateInfo, pState);
+}
+
+VkResult DeviceDriver::destroyDynamicColorBlendState (VkDevice device, VkDynamicColorBlendState dynamicColorBlendState) const
+{
+	return m_vk.destroyDynamicColorBlendState(device, dynamicColorBlendState);
+}
+
+VkResult DeviceDriver::createDynamicDepthStencilState (VkDevice device, const VkDynamicDepthStencilStateCreateInfo* pCreateInfo, VkDynamicDepthStencilState* pState) const
+{
+	return m_vk.createDynamicDepthStencilState(device, pCreateInfo, pState);
+}
+
+VkResult DeviceDriver::destroyDynamicDepthStencilState (VkDevice device, VkDynamicDepthStencilState dynamicDepthStencilState) const
+{
+	return m_vk.destroyDynamicDepthStencilState(device, dynamicDepthStencilState);
+}
+
+VkResult DeviceDriver::createFramebuffer (VkDevice device, const VkFramebufferCreateInfo* pCreateInfo, VkFramebuffer* pFramebuffer) const
+{
+	return m_vk.createFramebuffer(device, pCreateInfo, pFramebuffer);
+}
+
+VkResult DeviceDriver::destroyFramebuffer (VkDevice device, VkFramebuffer framebuffer) const
+{
+	return m_vk.destroyFramebuffer(device, framebuffer);
+}
+
+VkResult DeviceDriver::createRenderPass (VkDevice device, const VkRenderPassCreateInfo* pCreateInfo, VkRenderPass* pRenderPass) const
+{
+	return m_vk.createRenderPass(device, pCreateInfo, pRenderPass);
+}
+
+VkResult DeviceDriver::destroyRenderPass (VkDevice device, VkRenderPass renderPass) const
+{
+	return m_vk.destroyRenderPass(device, renderPass);
+}
+
+VkResult DeviceDriver::getRenderAreaGranularity (VkDevice device, VkRenderPass renderPass, VkExtent2D* pGranularity) const
+{
+	return m_vk.getRenderAreaGranularity(device, renderPass, pGranularity);
+}
+
+// Command-pool and command-buffer lifecycle entry points; straight pass-through to m_vk.
+VkResult DeviceDriver::createCommandPool (VkDevice device, const VkCmdPoolCreateInfo* pCreateInfo, VkCmdPool* pCmdPool) const
+{
+	return m_vk.createCommandPool(device, pCreateInfo, pCmdPool);
+}
+
+VkResult DeviceDriver::destroyCommandPool (VkDevice device, VkCmdPool cmdPool) const
+{
+	return m_vk.destroyCommandPool(device, cmdPool);
+}
+
+VkResult DeviceDriver::resetCommandPool (VkDevice device, VkCmdPool cmdPool, VkCmdPoolResetFlags flags) const
+{
+	return m_vk.resetCommandPool(device, cmdPool, flags);
+}
+
+VkResult DeviceDriver::createCommandBuffer (VkDevice device, const VkCmdBufferCreateInfo* pCreateInfo, VkCmdBuffer* pCmdBuffer) const
+{
+	return m_vk.createCommandBuffer(device, pCreateInfo, pCmdBuffer);
+}
+
+VkResult DeviceDriver::destroyCommandBuffer (VkDevice device, VkCmdBuffer commandBuffer) const
+{
+	return m_vk.destroyCommandBuffer(device, commandBuffer);
+}
+
+VkResult DeviceDriver::beginCommandBuffer (VkCmdBuffer cmdBuffer, const VkCmdBufferBeginInfo* pBeginInfo) const
+{
+	return m_vk.beginCommandBuffer(cmdBuffer, pBeginInfo);
+}
+
+VkResult DeviceDriver::endCommandBuffer (VkCmdBuffer cmdBuffer) const
+{
+	return m_vk.endCommandBuffer(cmdBuffer);
+}
+
+VkResult DeviceDriver::resetCommandBuffer (VkCmdBuffer cmdBuffer, VkCmdBufferResetFlags flags) const
+{
+	return m_vk.resetCommandBuffer(cmdBuffer, flags);
+}
+
+// Command-buffer recording (vkCmd*) entry points. These return void and
+// forward directly to m_vk, same as the object-management wrappers above.
+void DeviceDriver::cmdBindPipeline (VkCmdBuffer cmdBuffer, VkPipelineBindPoint pipelineBindPoint, VkPipeline pipeline) const
+{
+	m_vk.cmdBindPipeline(cmdBuffer, pipelineBindPoint, pipeline);
+}
+
+void DeviceDriver::cmdBindDynamicViewportState (VkCmdBuffer cmdBuffer, VkDynamicViewportState dynamicViewportState) const
+{
+	m_vk.cmdBindDynamicViewportState(cmdBuffer, dynamicViewportState);
+}
+
+void DeviceDriver::cmdBindDynamicRasterState (VkCmdBuffer cmdBuffer, VkDynamicRasterState dynamicRasterState) const
+{
+	m_vk.cmdBindDynamicRasterState(cmdBuffer, dynamicRasterState);
+}
+
+void DeviceDriver::cmdBindDynamicColorBlendState (VkCmdBuffer cmdBuffer, VkDynamicColorBlendState dynamicColorBlendState) const
+{
+	m_vk.cmdBindDynamicColorBlendState(cmdBuffer, dynamicColorBlendState);
+}
+
+void DeviceDriver::cmdBindDynamicDepthStencilState (VkCmdBuffer cmdBuffer, VkDynamicDepthStencilState dynamicDepthStencilState) const
+{
+	m_vk.cmdBindDynamicDepthStencilState(cmdBuffer, dynamicDepthStencilState);
+}
+
+void DeviceDriver::cmdBindDescriptorSets (VkCmdBuffer cmdBuffer, VkPipelineBindPoint pipelineBindPoint, VkPipelineLayout layout, deUint32 firstSet, deUint32 setCount, const VkDescriptorSet* pDescriptorSets, deUint32 dynamicOffsetCount, const deUint32* pDynamicOffsets) const
+{
+	m_vk.cmdBindDescriptorSets(cmdBuffer, pipelineBindPoint, layout, firstSet, setCount, pDescriptorSets, dynamicOffsetCount, pDynamicOffsets);
+}
+
+void DeviceDriver::cmdBindIndexBuffer (VkCmdBuffer cmdBuffer, VkBuffer buffer, VkDeviceSize offset, VkIndexType indexType) const
+{
+	m_vk.cmdBindIndexBuffer(cmdBuffer, buffer, offset, indexType);
+}
+
+void DeviceDriver::cmdBindVertexBuffers (VkCmdBuffer cmdBuffer, deUint32 startBinding, deUint32 bindingCount, const VkBuffer* pBuffers, const VkDeviceSize* pOffsets) const
+{
+	m_vk.cmdBindVertexBuffers(cmdBuffer, startBinding, bindingCount, pBuffers, pOffsets);
+}
+
+void DeviceDriver::cmdDraw (VkCmdBuffer cmdBuffer, deUint32 firstVertex, deUint32 vertexCount, deUint32 firstInstance, deUint32 instanceCount) const
+{
+	m_vk.cmdDraw(cmdBuffer, firstVertex, vertexCount, firstInstance, instanceCount);
+}
+
+void DeviceDriver::cmdDrawIndexed (VkCmdBuffer cmdBuffer, deUint32 firstIndex, deUint32 indexCount, deInt32 vertexOffset, deUint32 firstInstance, deUint32 instanceCount) const
+{
+	m_vk.cmdDrawIndexed(cmdBuffer, firstIndex, indexCount, vertexOffset, firstInstance, instanceCount);
+}
+
+void DeviceDriver::cmdDrawIndirect (VkCmdBuffer cmdBuffer, VkBuffer buffer, VkDeviceSize offset, deUint32 count, deUint32 stride) const
+{
+	m_vk.cmdDrawIndirect(cmdBuffer, buffer, offset, count, stride);
+}
+
+void DeviceDriver::cmdDrawIndexedIndirect (VkCmdBuffer cmdBuffer, VkBuffer buffer, VkDeviceSize offset, deUint32 count, deUint32 stride) const
+{
+	m_vk.cmdDrawIndexedIndirect(cmdBuffer, buffer, offset, count, stride);
+}
+
+void DeviceDriver::cmdDispatch (VkCmdBuffer cmdBuffer, deUint32 x, deUint32 y, deUint32 z) const
+{
+	m_vk.cmdDispatch(cmdBuffer, x, y, z);
+}
+
+void DeviceDriver::cmdDispatchIndirect (VkCmdBuffer cmdBuffer, VkBuffer buffer, VkDeviceSize offset) const
+{
+	m_vk.cmdDispatchIndirect(cmdBuffer, buffer, offset);
+}
+
+void DeviceDriver::cmdCopyBuffer (VkCmdBuffer cmdBuffer, VkBuffer srcBuffer, VkBuffer destBuffer, deUint32 regionCount, const VkBufferCopy* pRegions) const
+{
+	m_vk.cmdCopyBuffer(cmdBuffer, srcBuffer, destBuffer, regionCount, pRegions);
+}
+
+void DeviceDriver::cmdCopyImage (VkCmdBuffer cmdBuffer, VkImage srcImage, VkImageLayout srcImageLayout, VkImage destImage, VkImageLayout destImageLayout, deUint32 regionCount, const VkImageCopy* pRegions) const
+{
+	m_vk.cmdCopyImage(cmdBuffer, srcImage, srcImageLayout, destImage, destImageLayout, regionCount, pRegions);
+}
+
+void DeviceDriver::cmdBlitImage (VkCmdBuffer cmdBuffer, VkImage srcImage, VkImageLayout srcImageLayout, VkImage destImage, VkImageLayout destImageLayout, deUint32 regionCount, const VkImageBlit* pRegions, VkTexFilter filter) const
+{
+	m_vk.cmdBlitImage(cmdBuffer, srcImage, srcImageLayout, destImage, destImageLayout, regionCount, pRegions, filter);
+}
+
+void DeviceDriver::cmdCopyBufferToImage (VkCmdBuffer cmdBuffer, VkBuffer srcBuffer, VkImage destImage, VkImageLayout destImageLayout, deUint32 regionCount, const VkBufferImageCopy* pRegions) const
+{
+	m_vk.cmdCopyBufferToImage(cmdBuffer, srcBuffer, destImage, destImageLayout, regionCount, pRegions);
+}
+
+void DeviceDriver::cmdCopyImageToBuffer (VkCmdBuffer cmdBuffer, VkImage srcImage, VkImageLayout srcImageLayout, VkBuffer destBuffer, deUint32 regionCount, const VkBufferImageCopy* pRegions) const
+{
+	m_vk.cmdCopyImageToBuffer(cmdBuffer, srcImage, srcImageLayout, destBuffer, regionCount, pRegions);
+}
+
+void DeviceDriver::cmdUpdateBuffer (VkCmdBuffer cmdBuffer, VkBuffer destBuffer, VkDeviceSize destOffset, VkDeviceSize dataSize, const deUint32* pData) const
+{
+	m_vk.cmdUpdateBuffer(cmdBuffer, destBuffer, destOffset, dataSize, pData);
+}
+
+void DeviceDriver::cmdFillBuffer (VkCmdBuffer cmdBuffer, VkBuffer destBuffer, VkDeviceSize destOffset, VkDeviceSize fillSize, deUint32 data) const
+{
+	m_vk.cmdFillBuffer(cmdBuffer, destBuffer, destOffset, fillSize, data);
+}
+
+void DeviceDriver::cmdClearColorImage (VkCmdBuffer cmdBuffer, VkImage image, VkImageLayout imageLayout, const VkClearColorValue* pColor, deUint32 rangeCount, const VkImageSubresourceRange* pRanges) const
+{
+	m_vk.cmdClearColorImage(cmdBuffer, image, imageLayout, pColor, rangeCount, pRanges);
+}
+
+void DeviceDriver::cmdClearDepthStencilImage (VkCmdBuffer cmdBuffer, VkImage image, VkImageLayout imageLayout, float depth, deUint32 stencil, deUint32 rangeCount, const VkImageSubresourceRange* pRanges) const
+{
+	m_vk.cmdClearDepthStencilImage(cmdBuffer, image, imageLayout, depth, stencil, rangeCount, pRanges);
+}
+
+void DeviceDriver::cmdClearColorAttachment (VkCmdBuffer cmdBuffer, deUint32 colorAttachment, VkImageLayout imageLayout, const VkClearColorValue* pColor, deUint32 rectCount, const VkRect3D* pRects) const
+{
+	m_vk.cmdClearColorAttachment(cmdBuffer, colorAttachment, imageLayout, pColor, rectCount, pRects);
+}
+
+void DeviceDriver::cmdClearDepthStencilAttachment (VkCmdBuffer cmdBuffer, VkImageAspectFlags imageAspectMask, VkImageLayout imageLayout, float depth, deUint32 stencil, deUint32 rectCount, const VkRect3D* pRects) const
+{
+	m_vk.cmdClearDepthStencilAttachment(cmdBuffer, imageAspectMask, imageLayout, depth, stencil, rectCount, pRects);
+}
+
+void DeviceDriver::cmdResolveImage (VkCmdBuffer cmdBuffer, VkImage srcImage, VkImageLayout srcImageLayout, VkImage destImage, VkImageLayout destImageLayout, deUint32 regionCount, const VkImageResolve* pRegions) const
+{
+	m_vk.cmdResolveImage(cmdBuffer, srcImage, srcImageLayout, destImage, destImageLayout, regionCount, pRegions);
+}
+
+void DeviceDriver::cmdSetEvent (VkCmdBuffer cmdBuffer, VkEvent event, VkPipelineStageFlags stageMask) const
+{
+	m_vk.cmdSetEvent(cmdBuffer, event, stageMask);
+}
+
+void DeviceDriver::cmdResetEvent (VkCmdBuffer cmdBuffer, VkEvent event, VkPipelineStageFlags stageMask) const
+{
+	m_vk.cmdResetEvent(cmdBuffer, event, stageMask);
+}
+
+void DeviceDriver::cmdWaitEvents (VkCmdBuffer cmdBuffer, deUint32 eventCount, const VkEvent* pEvents, VkPipelineStageFlags srcStageMask, VkPipelineStageFlags destStageMask, deUint32 memBarrierCount, const void* const* ppMemBarriers) const
+{
+	m_vk.cmdWaitEvents(cmdBuffer, eventCount, pEvents, srcStageMask, destStageMask, memBarrierCount, ppMemBarriers);
+}
+
+void DeviceDriver::cmdPipelineBarrier (VkCmdBuffer cmdBuffer, VkPipelineStageFlags srcStageMask, VkPipelineStageFlags destStageMask, VkBool32 byRegion, deUint32 memBarrierCount, const void* const* ppMemBarriers) const
+{
+	m_vk.cmdPipelineBarrier(cmdBuffer, srcStageMask, destStageMask, byRegion, memBarrierCount, ppMemBarriers);
+}
+
+void DeviceDriver::cmdBeginQuery (VkCmdBuffer cmdBuffer, VkQueryPool queryPool, deUint32 slot, VkQueryControlFlags flags) const
+{
+	m_vk.cmdBeginQuery(cmdBuffer, queryPool, slot, flags);
+}
+
+void DeviceDriver::cmdEndQuery (VkCmdBuffer cmdBuffer, VkQueryPool queryPool, deUint32 slot) const
+{
+	m_vk.cmdEndQuery(cmdBuffer, queryPool, slot);
+}
+
+void DeviceDriver::cmdResetQueryPool (VkCmdBuffer cmdBuffer, VkQueryPool queryPool, deUint32 startQuery, deUint32 queryCount) const
+{
+	m_vk.cmdResetQueryPool(cmdBuffer, queryPool, startQuery, queryCount);
+}
+
+void DeviceDriver::cmdWriteTimestamp (VkCmdBuffer cmdBuffer, VkTimestampType timestampType, VkBuffer destBuffer, VkDeviceSize destOffset) const
+{
+	m_vk.cmdWriteTimestamp(cmdBuffer, timestampType, destBuffer, destOffset);
+}
+
+void DeviceDriver::cmdCopyQueryPoolResults (VkCmdBuffer cmdBuffer, VkQueryPool queryPool, deUint32 startQuery, deUint32 queryCount, VkBuffer destBuffer, VkDeviceSize destOffset, VkDeviceSize destStride, VkQueryResultFlags flags) const
+{
+	m_vk.cmdCopyQueryPoolResults(cmdBuffer, queryPool, startQuery, queryCount, destBuffer, destOffset, destStride, flags);
+}
+
+void DeviceDriver::cmdPushConstants (VkCmdBuffer cmdBuffer, VkPipelineLayout layout, VkShaderStageFlags stageFlags, deUint32 start, deUint32 length, const void* values) const
+{
+	m_vk.cmdPushConstants(cmdBuffer, layout, stageFlags, start, length, values);
+}
+
+void DeviceDriver::cmdBeginRenderPass (VkCmdBuffer cmdBuffer, const VkRenderPassBeginInfo* pRenderPassBegin, VkRenderPassContents contents) const
+{
+	m_vk.cmdBeginRenderPass(cmdBuffer, pRenderPassBegin, contents);
+}
+
+void DeviceDriver::cmdNextSubpass (VkCmdBuffer cmdBuffer, VkRenderPassContents contents) const
+{
+	m_vk.cmdNextSubpass(cmdBuffer, contents);
+}
+
+void DeviceDriver::cmdEndRenderPass (VkCmdBuffer cmdBuffer) const
+{
+	m_vk.cmdEndRenderPass(cmdBuffer);
+}
+
+void DeviceDriver::cmdExecuteCommands (VkCmdBuffer cmdBuffer, deUint32 cmdBuffersCount, const VkCmdBuffer* pCmdBuffers) const
+{
+	m_vk.cmdExecuteCommands(cmdBuffer, cmdBuffersCount, pCmdBuffers);
+}
--- /dev/null
+/* WARNING: This is auto-generated file. Do not modify, since changes will
+ * be lost! Modify the generating script instead.
+ */
+/* One function pointer per device-level Vulkan entry point; member names
+ * mirror the API names (DeviceDriver methods above call these via m_vk).
+ * NOTE(review): presumably #included into a dispatch-table struct body —
+ * confirm against the generating script. */
+DestroyDeviceFunc									destroyDevice;
+GetGlobalExtensionPropertiesFunc					getGlobalExtensionProperties;
+GetPhysicalDeviceExtensionPropertiesFunc			getPhysicalDeviceExtensionProperties;
+GetGlobalLayerPropertiesFunc						getGlobalLayerProperties;
+GetPhysicalDeviceLayerPropertiesFunc				getPhysicalDeviceLayerProperties;
+GetDeviceQueueFunc									getDeviceQueue;
+QueueSubmitFunc										queueSubmit;
+QueueWaitIdleFunc									queueWaitIdle;
+DeviceWaitIdleFunc									deviceWaitIdle;
+AllocMemoryFunc										allocMemory;
+FreeMemoryFunc										freeMemory;
+MapMemoryFunc										mapMemory;
+UnmapMemoryFunc										unmapMemory;
+FlushMappedMemoryRangesFunc							flushMappedMemoryRanges;
+InvalidateMappedMemoryRangesFunc					invalidateMappedMemoryRanges;
+GetDeviceMemoryCommitmentFunc						getDeviceMemoryCommitment;
+BindBufferMemoryFunc								bindBufferMemory;
+BindImageMemoryFunc									bindImageMemory;
+GetBufferMemoryRequirementsFunc						getBufferMemoryRequirements;
+GetImageMemoryRequirementsFunc						getImageMemoryRequirements;
+GetImageSparseMemoryRequirementsFunc				getImageSparseMemoryRequirements;
+GetPhysicalDeviceSparseImageFormatPropertiesFunc	getPhysicalDeviceSparseImageFormatProperties;
+QueueBindSparseBufferMemoryFunc						queueBindSparseBufferMemory;
+QueueBindSparseImageOpaqueMemoryFunc				queueBindSparseImageOpaqueMemory;
+QueueBindSparseImageMemoryFunc						queueBindSparseImageMemory;
+CreateFenceFunc										createFence;
+DestroyFenceFunc									destroyFence;
+ResetFencesFunc										resetFences;
+GetFenceStatusFunc									getFenceStatus;
+WaitForFencesFunc									waitForFences;
+CreateSemaphoreFunc									createSemaphore;
+DestroySemaphoreFunc								destroySemaphore;
+QueueSignalSemaphoreFunc							queueSignalSemaphore;
+QueueWaitSemaphoreFunc								queueWaitSemaphore;
+CreateEventFunc										createEvent;
+DestroyEventFunc									destroyEvent;
+GetEventStatusFunc									getEventStatus;
+SetEventFunc										setEvent;
+ResetEventFunc										resetEvent;
+CreateQueryPoolFunc									createQueryPool;
+DestroyQueryPoolFunc								destroyQueryPool;
+GetQueryPoolResultsFunc								getQueryPoolResults;
+CreateBufferFunc									createBuffer;
+DestroyBufferFunc									destroyBuffer;
+CreateBufferViewFunc								createBufferView;
+DestroyBufferViewFunc								destroyBufferView;
+CreateImageFunc										createImage;
+DestroyImageFunc									destroyImage;
+GetImageSubresourceLayoutFunc						getImageSubresourceLayout;
+CreateImageViewFunc									createImageView;
+DestroyImageViewFunc								destroyImageView;
+CreateAttachmentViewFunc							createAttachmentView;
+DestroyAttachmentViewFunc							destroyAttachmentView;
+CreateShaderModuleFunc								createShaderModule;
+DestroyShaderModuleFunc								destroyShaderModule;
+CreateShaderFunc									createShader;
+DestroyShaderFunc									destroyShader;
+CreatePipelineCacheFunc								createPipelineCache;
+DestroyPipelineCacheFunc							destroyPipelineCache;
+GetPipelineCacheSizeFunc							getPipelineCacheSize;
+GetPipelineCacheDataFunc							getPipelineCacheData;
+MergePipelineCachesFunc								mergePipelineCaches;
+CreateGraphicsPipelinesFunc							createGraphicsPipelines;
+CreateComputePipelinesFunc							createComputePipelines;
+DestroyPipelineFunc									destroyPipeline;
+CreatePipelineLayoutFunc							createPipelineLayout;
+DestroyPipelineLayoutFunc							destroyPipelineLayout;
+CreateSamplerFunc									createSampler;
+DestroySamplerFunc									destroySampler;
+CreateDescriptorSetLayoutFunc						createDescriptorSetLayout;
+DestroyDescriptorSetLayoutFunc						destroyDescriptorSetLayout;
+CreateDescriptorPoolFunc							createDescriptorPool;
+DestroyDescriptorPoolFunc							destroyDescriptorPool;
+ResetDescriptorPoolFunc								resetDescriptorPool;
+AllocDescriptorSetsFunc								allocDescriptorSets;
+FreeDescriptorSetsFunc								freeDescriptorSets;
+UpdateDescriptorSetsFunc							updateDescriptorSets;
+CreateDynamicViewportStateFunc						createDynamicViewportState;
+DestroyDynamicViewportStateFunc						destroyDynamicViewportState;
+CreateDynamicRasterStateFunc						createDynamicRasterState;
+DestroyDynamicRasterStateFunc						destroyDynamicRasterState;
+CreateDynamicColorBlendStateFunc					createDynamicColorBlendState;
+DestroyDynamicColorBlendStateFunc					destroyDynamicColorBlendState;
+CreateDynamicDepthStencilStateFunc					createDynamicDepthStencilState;
+DestroyDynamicDepthStencilStateFunc					destroyDynamicDepthStencilState;
+CreateFramebufferFunc								createFramebuffer;
+DestroyFramebufferFunc								destroyFramebuffer;
+CreateRenderPassFunc								createRenderPass;
+DestroyRenderPassFunc								destroyRenderPass;
+GetRenderAreaGranularityFunc						getRenderAreaGranularity;
+CreateCommandPoolFunc								createCommandPool;
+DestroyCommandPoolFunc								destroyCommandPool;
+ResetCommandPoolFunc								resetCommandPool;
+CreateCommandBufferFunc								createCommandBuffer;
+DestroyCommandBufferFunc							destroyCommandBuffer;
+BeginCommandBufferFunc								beginCommandBuffer;
+EndCommandBufferFunc								endCommandBuffer;
+ResetCommandBufferFunc								resetCommandBuffer;
+CmdBindPipelineFunc									cmdBindPipeline;
+CmdBindDynamicViewportStateFunc						cmdBindDynamicViewportState;
+CmdBindDynamicRasterStateFunc						cmdBindDynamicRasterState;
+CmdBindDynamicColorBlendStateFunc					cmdBindDynamicColorBlendState;
+CmdBindDynamicDepthStencilStateFunc					cmdBindDynamicDepthStencilState;
+CmdBindDescriptorSetsFunc							cmdBindDescriptorSets;
+CmdBindIndexBufferFunc								cmdBindIndexBuffer;
+CmdBindVertexBuffersFunc							cmdBindVertexBuffers;
+CmdDrawFunc											cmdDraw;
+CmdDrawIndexedFunc									cmdDrawIndexed;
+CmdDrawIndirectFunc									cmdDrawIndirect;
+CmdDrawIndexedIndirectFunc							cmdDrawIndexedIndirect;
+CmdDispatchFunc										cmdDispatch;
+CmdDispatchIndirectFunc								cmdDispatchIndirect;
+CmdCopyBufferFunc									cmdCopyBuffer;
+CmdCopyImageFunc									cmdCopyImage;
+CmdBlitImageFunc									cmdBlitImage;
+CmdCopyBufferToImageFunc							cmdCopyBufferToImage;
+CmdCopyImageToBufferFunc							cmdCopyImageToBuffer;
+CmdUpdateBufferFunc									cmdUpdateBuffer;
+CmdFillBufferFunc									cmdFillBuffer;
+CmdClearColorImageFunc								cmdClearColorImage;
+CmdClearDepthStencilImageFunc						cmdClearDepthStencilImage;
+CmdClearColorAttachmentFunc							cmdClearColorAttachment;
+CmdClearDepthStencilAttachmentFunc					cmdClearDepthStencilAttachment;
+CmdResolveImageFunc									cmdResolveImage;
+CmdSetEventFunc										cmdSetEvent;
+CmdResetEventFunc									cmdResetEvent;
+CmdWaitEventsFunc									cmdWaitEvents;
+CmdPipelineBarrierFunc								cmdPipelineBarrier;
+CmdBeginQueryFunc									cmdBeginQuery;
+CmdEndQueryFunc										cmdEndQuery;
+CmdResetQueryPoolFunc								cmdResetQueryPool;
+CmdWriteTimestampFunc								cmdWriteTimestamp;
+CmdCopyQueryPoolResultsFunc							cmdCopyQueryPoolResults;
+CmdPushConstantsFunc								cmdPushConstants;
+CmdBeginRenderPassFunc								cmdBeginRenderPass;
+CmdNextSubpassFunc									cmdNextSubpass;
+CmdEndRenderPassFunc								cmdEndRenderPass;
+CmdExecuteCommandsFunc								cmdExecuteCommands;
--- /dev/null
+/*-------------------------------------------------------------------------
+ * Vulkan CTS Framework
+ * --------------------
+ *
+ * Copyright (c) 2015 Google Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and/or associated documentation files (the
+ * "Materials"), to deal in the Materials without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Materials, and to
+ * permit persons to whom the Materials are furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice(s) and this permission notice shall be
+ * included in all copies or substantial portions of the Materials.
+ *
+ * The Materials are Confidential Information as defined by the
+ * Khronos Membership Agreement until designated non-confidential by
+ * Khronos, at which point this condition clause shall be removed.
+ *
+ * THE MATERIALS ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * MATERIALS OR THE USE OR OTHER DEALINGS IN THE MATERIALS.
+ *
+ *//*!
+ * \file
+ * \brief Instance and device initialization utilities.
+ *//*--------------------------------------------------------------------*/
+
+#include "vkDeviceUtil.hpp"
+#include "vkQueryUtil.hpp"
+#include "vkRefUtil.hpp"
+
+#include "tcuCommandLine.hpp"
+
+#include "qpInfo.h"
+
+#include <vector>
+
+namespace vk
+{
+
+using std::vector;
+
+// Create a default VkInstance for test use: application and engine names are
+// both "deqp", versions come from qpGetReleaseId(), API version is
+// VK_API_VERSION, and no layers, extensions or allocation callbacks are
+// enabled. Ownership is returned via Move<>.
+Move<VkInstance> createDefaultInstance (const PlatformInterface& vkPlatform)
+{
+	const struct VkApplicationInfo		appInfo			=
+	{
+		VK_STRUCTURE_TYPE_APPLICATION_INFO,		// VkStructureType	sType;
+		DE_NULL,								// const void*		pNext;
+		"deqp",									// const char*		pAppName;
+		qpGetReleaseId(),						// deUint32			appVersion;
+		"deqp",									// const char*		pEngineName;
+		qpGetReleaseId(),						// deUint32			engineVersion;
+		VK_API_VERSION							// deUint32			apiVersion;
+	};
+	const struct VkInstanceCreateInfo	instanceInfo	=
+	{
+		VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO,	// VkStructureType			sType;
+		DE_NULL,								// const void*				pNext;
+		&appInfo,								// const VkApplicationInfo*	pAppInfo;
+		DE_NULL,								// const VkAllocCallbacks*	pAllocCb;
+		0u,										// deUint32					layerCount;
+		DE_NULL,								// const char*const*		ppEnabledLayerNames;
+		0u,										// deUint32					extensionCount;
+		DE_NULL									// const char*const*		ppEnabledExtensionNames;
+	};
+
+	return createInstance(vkPlatform, &instanceInfo);
+}
+
+// Select the physical device identified by the 1-based --deqp-vk-device-id
+// command-line option. Throws NotSupportedError when no devices are
+// enumerated, and InternalError when the id is outside [1, deviceCount].
+VkPhysicalDevice chooseDevice (const InstanceInterface& vkInstance, VkInstance instance, const tcu::CommandLine& cmdLine)
+{
+	const vector<VkPhysicalDevice>	devices	= enumeratePhysicalDevices(vkInstance, instance);
+
+	if (devices.empty())
+		TCU_THROW(NotSupportedError, "No Vulkan devices available");
+
+	if (!de::inBounds(cmdLine.getVKDeviceId(), 1, (int)devices.size()+1))
+		TCU_THROW(InternalError, "Invalid --deqp-vk-device-id");
+
+	// Option is 1-based; vector index is 0-based.
+	return devices[(size_t)(cmdLine.getVKDeviceId()-1)];
+}
+
+} // vk
--- /dev/null
+#ifndef _VKDEVICEUTIL_HPP
+#define _VKDEVICEUTIL_HPP
+/*-------------------------------------------------------------------------
+ * Vulkan CTS Framework
+ * --------------------
+ *
+ * Copyright (c) 2015 Google Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and/or associated documentation files (the
+ * "Materials"), to deal in the Materials without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Materials, and to
+ * permit persons to whom the Materials are furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice(s) and this permission notice shall be
+ * included in all copies or substantial portions of the Materials.
+ *
+ * The Materials are Confidential Information as defined by the
+ * Khronos Membership Agreement until designated non-confidential by
+ * Khronos, at which point this condition clause shall be removed.
+ *
+ * THE MATERIALS ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * MATERIALS OR THE USE OR OTHER DEALINGS IN THE MATERIALS.
+ *
+ *//*!
+ * \file
+ * \brief Instance and device initialization utilities.
+ *//*--------------------------------------------------------------------*/
+
+#include "vkDefs.hpp"
+#include "vkRef.hpp"
+
+namespace tcu
+{
+class CommandLine;
+}
+
+namespace vk
+{
+
+// Create a VkInstance with default "deqp" application info and no
+// enabled layers or extensions.
+Move<VkInstance> createDefaultInstance (const PlatformInterface& vkPlatform);
+// Select the physical device identified by the 1-based --deqp-vk-device-id
+// command line option; throws if no devices exist or the id is out of range.
+VkPhysicalDevice chooseDevice (const InstanceInterface& vkInstance, VkInstance instance, const tcu::CommandLine& cmdLine);
+
+} // vk
+
+#endif // _VKDEVICEUTIL_HPP
--- /dev/null
+/* WARNING: This is auto-generated file. Do not modify, since changes will
+ * be lost! Modify the generating script instead.
+ */
+typedef VK_APICALL VkResult (VK_APIENTRY* CreateInstanceFunc) (const VkInstanceCreateInfo* pCreateInfo, VkInstance* pInstance);
+typedef VK_APICALL VkResult (VK_APIENTRY* DestroyInstanceFunc) (VkInstance instance);
+typedef VK_APICALL VkResult (VK_APIENTRY* EnumeratePhysicalDevicesFunc) (VkInstance instance, deUint32* pPhysicalDeviceCount, VkPhysicalDevice* pPhysicalDevices);
+typedef VK_APICALL VkResult (VK_APIENTRY* GetPhysicalDeviceFeaturesFunc) (VkPhysicalDevice physicalDevice, VkPhysicalDeviceFeatures* pFeatures);
+typedef VK_APICALL VkResult (VK_APIENTRY* GetPhysicalDeviceFormatPropertiesFunc) (VkPhysicalDevice physicalDevice, VkFormat format, VkFormatProperties* pFormatProperties);
+typedef VK_APICALL VkResult (VK_APIENTRY* GetPhysicalDeviceImageFormatPropertiesFunc) (VkPhysicalDevice physicalDevice, VkFormat format, VkImageType type, VkImageTiling tiling, VkImageUsageFlags usage, VkImageFormatProperties* pImageFormatProperties);
+typedef VK_APICALL VkResult (VK_APIENTRY* GetPhysicalDeviceLimitsFunc) (VkPhysicalDevice physicalDevice, VkPhysicalDeviceLimits* pLimits);
+typedef VK_APICALL VkResult (VK_APIENTRY* GetPhysicalDevicePropertiesFunc) (VkPhysicalDevice physicalDevice, VkPhysicalDeviceProperties* pProperties);
+typedef VK_APICALL VkResult (VK_APIENTRY* GetPhysicalDeviceQueueCountFunc) (VkPhysicalDevice physicalDevice, deUint32* pCount);
+typedef VK_APICALL VkResult (VK_APIENTRY* GetPhysicalDeviceQueuePropertiesFunc) (VkPhysicalDevice physicalDevice, deUint32 count, VkPhysicalDeviceQueueProperties* pQueueProperties);
+typedef VK_APICALL VkResult (VK_APIENTRY* GetPhysicalDeviceMemoryPropertiesFunc) (VkPhysicalDevice physicalDevice, VkPhysicalDeviceMemoryProperties* pMemoryProperties);
+typedef VK_APICALL PFN_vkVoidFunction (VK_APIENTRY* GetInstanceProcAddrFunc) (VkInstance instance, const char* pName);
+typedef VK_APICALL PFN_vkVoidFunction (VK_APIENTRY* GetDeviceProcAddrFunc) (VkDevice device, const char* pName);
+typedef VK_APICALL VkResult (VK_APIENTRY* CreateDeviceFunc) (VkPhysicalDevice physicalDevice, const VkDeviceCreateInfo* pCreateInfo, VkDevice* pDevice);
+typedef VK_APICALL VkResult (VK_APIENTRY* DestroyDeviceFunc) (VkDevice device);
+typedef VK_APICALL VkResult (VK_APIENTRY* GetGlobalExtensionPropertiesFunc) (const char* pLayerName, deUint32* pCount, VkExtensionProperties* pProperties);
+typedef VK_APICALL VkResult (VK_APIENTRY* GetPhysicalDeviceExtensionPropertiesFunc) (VkPhysicalDevice physicalDevice, const char* pLayerName, deUint32* pCount, VkExtensionProperties* pProperties);
+typedef VK_APICALL VkResult (VK_APIENTRY* GetGlobalLayerPropertiesFunc) (deUint32* pCount, VkLayerProperties* pProperties);
+typedef VK_APICALL VkResult (VK_APIENTRY* GetPhysicalDeviceLayerPropertiesFunc) (VkPhysicalDevice physicalDevice, deUint32* pCount, VkLayerProperties* pProperties);
+typedef VK_APICALL VkResult (VK_APIENTRY* GetDeviceQueueFunc) (VkDevice device, deUint32 queueFamilyIndex, deUint32 queueIndex, VkQueue* pQueue);
+typedef VK_APICALL VkResult (VK_APIENTRY* QueueSubmitFunc) (VkQueue queue, deUint32 cmdBufferCount, const VkCmdBuffer* pCmdBuffers, VkFence fence);
+typedef VK_APICALL VkResult (VK_APIENTRY* QueueWaitIdleFunc) (VkQueue queue);
+typedef VK_APICALL VkResult (VK_APIENTRY* DeviceWaitIdleFunc) (VkDevice device);
+typedef VK_APICALL VkResult (VK_APIENTRY* AllocMemoryFunc) (VkDevice device, const VkMemoryAllocInfo* pAllocInfo, VkDeviceMemory* pMem);
+typedef VK_APICALL VkResult (VK_APIENTRY* FreeMemoryFunc) (VkDevice device, VkDeviceMemory mem);
+typedef VK_APICALL VkResult (VK_APIENTRY* MapMemoryFunc) (VkDevice device, VkDeviceMemory mem, VkDeviceSize offset, VkDeviceSize size, VkMemoryMapFlags flags, void** ppData);
+typedef VK_APICALL VkResult (VK_APIENTRY* UnmapMemoryFunc) (VkDevice device, VkDeviceMemory mem);
+typedef VK_APICALL VkResult (VK_APIENTRY* FlushMappedMemoryRangesFunc) (VkDevice device, deUint32 memRangeCount, const VkMappedMemoryRange* pMemRanges);
+typedef VK_APICALL VkResult (VK_APIENTRY* InvalidateMappedMemoryRangesFunc) (VkDevice device, deUint32 memRangeCount, const VkMappedMemoryRange* pMemRanges);
+typedef VK_APICALL VkResult (VK_APIENTRY* GetDeviceMemoryCommitmentFunc) (VkDevice device, VkDeviceMemory memory, VkDeviceSize* pCommittedMemoryInBytes);
+typedef VK_APICALL VkResult (VK_APIENTRY* BindBufferMemoryFunc) (VkDevice device, VkBuffer buffer, VkDeviceMemory mem, VkDeviceSize memOffset);
+typedef VK_APICALL VkResult (VK_APIENTRY* BindImageMemoryFunc) (VkDevice device, VkImage image, VkDeviceMemory mem, VkDeviceSize memOffset);
+typedef VK_APICALL VkResult (VK_APIENTRY* GetBufferMemoryRequirementsFunc) (VkDevice device, VkBuffer buffer, VkMemoryRequirements* pMemoryRequirements);
+typedef VK_APICALL VkResult (VK_APIENTRY* GetImageMemoryRequirementsFunc) (VkDevice device, VkImage image, VkMemoryRequirements* pMemoryRequirements);
+typedef VK_APICALL VkResult (VK_APIENTRY* GetImageSparseMemoryRequirementsFunc) (VkDevice device, VkImage image, deUint32* pNumRequirements, VkSparseImageMemoryRequirements* pSparseMemoryRequirements);
+typedef VK_APICALL VkResult (VK_APIENTRY* GetPhysicalDeviceSparseImageFormatPropertiesFunc) (VkPhysicalDevice physicalDevice, VkFormat format, VkImageType type, deUint32 samples, VkImageUsageFlags usage, VkImageTiling tiling, deUint32* pNumProperties, VkSparseImageFormatProperties* pProperties);
+typedef VK_APICALL VkResult (VK_APIENTRY* QueueBindSparseBufferMemoryFunc) (VkQueue queue, VkBuffer buffer, deUint32 numBindings, const VkSparseMemoryBindInfo* pBindInfo);
+typedef VK_APICALL VkResult (VK_APIENTRY* QueueBindSparseImageOpaqueMemoryFunc) (VkQueue queue, VkImage image, deUint32 numBindings, const VkSparseMemoryBindInfo* pBindInfo);
+typedef VK_APICALL VkResult (VK_APIENTRY* QueueBindSparseImageMemoryFunc) (VkQueue queue, VkImage image, deUint32 numBindings, const VkSparseImageMemoryBindInfo* pBindInfo);
+typedef VK_APICALL VkResult (VK_APIENTRY* CreateFenceFunc) (VkDevice device, const VkFenceCreateInfo* pCreateInfo, VkFence* pFence);
+typedef VK_APICALL VkResult (VK_APIENTRY* DestroyFenceFunc) (VkDevice device, VkFence fence);
+typedef VK_APICALL VkResult (VK_APIENTRY* ResetFencesFunc) (VkDevice device, deUint32 fenceCount, const VkFence* pFences);
+typedef VK_APICALL VkResult (VK_APIENTRY* GetFenceStatusFunc) (VkDevice device, VkFence fence);
+typedef VK_APICALL VkResult (VK_APIENTRY* WaitForFencesFunc) (VkDevice device, deUint32 fenceCount, const VkFence* pFences, VkBool32 waitAll, deUint64 timeout);
+typedef VK_APICALL VkResult (VK_APIENTRY* CreateSemaphoreFunc) (VkDevice device, const VkSemaphoreCreateInfo* pCreateInfo, VkSemaphore* pSemaphore);
+typedef VK_APICALL VkResult (VK_APIENTRY* DestroySemaphoreFunc) (VkDevice device, VkSemaphore semaphore);
+typedef VK_APICALL VkResult (VK_APIENTRY* QueueSignalSemaphoreFunc) (VkQueue queue, VkSemaphore semaphore);
+typedef VK_APICALL VkResult (VK_APIENTRY* QueueWaitSemaphoreFunc) (VkQueue queue, VkSemaphore semaphore);
+typedef VK_APICALL VkResult (VK_APIENTRY* CreateEventFunc) (VkDevice device, const VkEventCreateInfo* pCreateInfo, VkEvent* pEvent);
+typedef VK_APICALL VkResult (VK_APIENTRY* DestroyEventFunc) (VkDevice device, VkEvent event);
+typedef VK_APICALL VkResult (VK_APIENTRY* GetEventStatusFunc) (VkDevice device, VkEvent event);
+typedef VK_APICALL VkResult (VK_APIENTRY* SetEventFunc) (VkDevice device, VkEvent event);
+typedef VK_APICALL VkResult (VK_APIENTRY* ResetEventFunc) (VkDevice device, VkEvent event);
+typedef VK_APICALL VkResult (VK_APIENTRY* CreateQueryPoolFunc) (VkDevice device, const VkQueryPoolCreateInfo* pCreateInfo, VkQueryPool* pQueryPool);
+typedef VK_APICALL VkResult (VK_APIENTRY* DestroyQueryPoolFunc) (VkDevice device, VkQueryPool queryPool);
+typedef VK_APICALL VkResult (VK_APIENTRY* GetQueryPoolResultsFunc) (VkDevice device, VkQueryPool queryPool, deUint32 startQuery, deUint32 queryCount, deUintptr* pDataSize, void* pData, VkQueryResultFlags flags);
+typedef VK_APICALL VkResult (VK_APIENTRY* CreateBufferFunc) (VkDevice device, const VkBufferCreateInfo* pCreateInfo, VkBuffer* pBuffer);
+typedef VK_APICALL VkResult (VK_APIENTRY* DestroyBufferFunc) (VkDevice device, VkBuffer buffer);
+typedef VK_APICALL VkResult (VK_APIENTRY* CreateBufferViewFunc) (VkDevice device, const VkBufferViewCreateInfo* pCreateInfo, VkBufferView* pView);
+typedef VK_APICALL VkResult (VK_APIENTRY* DestroyBufferViewFunc) (VkDevice device, VkBufferView bufferView);
+typedef VK_APICALL VkResult (VK_APIENTRY* CreateImageFunc) (VkDevice device, const VkImageCreateInfo* pCreateInfo, VkImage* pImage);
+typedef VK_APICALL VkResult (VK_APIENTRY* DestroyImageFunc) (VkDevice device, VkImage image);
+typedef VK_APICALL VkResult (VK_APIENTRY* GetImageSubresourceLayoutFunc) (VkDevice device, VkImage image, const VkImageSubresource* pSubresource, VkSubresourceLayout* pLayout);
+typedef VK_APICALL VkResult (VK_APIENTRY* CreateImageViewFunc) (VkDevice device, const VkImageViewCreateInfo* pCreateInfo, VkImageView* pView);
+typedef VK_APICALL VkResult (VK_APIENTRY* DestroyImageViewFunc) (VkDevice device, VkImageView imageView);
+typedef VK_APICALL VkResult (VK_APIENTRY* CreateAttachmentViewFunc) (VkDevice device, const VkAttachmentViewCreateInfo* pCreateInfo, VkAttachmentView* pView);
+typedef VK_APICALL VkResult (VK_APIENTRY* DestroyAttachmentViewFunc) (VkDevice device, VkAttachmentView attachmentView);
+typedef VK_APICALL VkResult (VK_APIENTRY* CreateShaderModuleFunc) (VkDevice device, const VkShaderModuleCreateInfo* pCreateInfo, VkShaderModule* pShaderModule);
+typedef VK_APICALL VkResult (VK_APIENTRY* DestroyShaderModuleFunc) (VkDevice device, VkShaderModule shaderModule);
+typedef VK_APICALL VkResult (VK_APIENTRY* CreateShaderFunc) (VkDevice device, const VkShaderCreateInfo* pCreateInfo, VkShader* pShader);
+typedef VK_APICALL VkResult (VK_APIENTRY* DestroyShaderFunc) (VkDevice device, VkShader shader);
+typedef VK_APICALL VkResult (VK_APIENTRY* CreatePipelineCacheFunc) (VkDevice device, const VkPipelineCacheCreateInfo* pCreateInfo, VkPipelineCache* pPipelineCache);
+typedef VK_APICALL VkResult (VK_APIENTRY* DestroyPipelineCacheFunc) (VkDevice device, VkPipelineCache pipelineCache);
+typedef VK_APICALL deUintptr (VK_APIENTRY* GetPipelineCacheSizeFunc) (VkDevice device, VkPipelineCache pipelineCache);
+typedef VK_APICALL VkResult (VK_APIENTRY* GetPipelineCacheDataFunc) (VkDevice device, VkPipelineCache pipelineCache, void* pData);
+typedef VK_APICALL VkResult (VK_APIENTRY* MergePipelineCachesFunc) (VkDevice device, VkPipelineCache destCache, deUint32 srcCacheCount, const VkPipelineCache* pSrcCaches);
+typedef VK_APICALL VkResult (VK_APIENTRY* CreateGraphicsPipelinesFunc) (VkDevice device, VkPipelineCache pipelineCache, deUint32 count, const VkGraphicsPipelineCreateInfo* pCreateInfos, VkPipeline* pPipelines);
+typedef VK_APICALL VkResult (VK_APIENTRY* CreateComputePipelinesFunc) (VkDevice device, VkPipelineCache pipelineCache, deUint32 count, const VkComputePipelineCreateInfo* pCreateInfos, VkPipeline* pPipelines);
+typedef VK_APICALL VkResult (VK_APIENTRY* DestroyPipelineFunc) (VkDevice device, VkPipeline pipeline);
+typedef VK_APICALL VkResult (VK_APIENTRY* CreatePipelineLayoutFunc) (VkDevice device, const VkPipelineLayoutCreateInfo* pCreateInfo, VkPipelineLayout* pPipelineLayout);
+typedef VK_APICALL VkResult (VK_APIENTRY* DestroyPipelineLayoutFunc) (VkDevice device, VkPipelineLayout pipelineLayout);
+typedef VK_APICALL VkResult (VK_APIENTRY* CreateSamplerFunc) (VkDevice device, const VkSamplerCreateInfo* pCreateInfo, VkSampler* pSampler);
+typedef VK_APICALL VkResult (VK_APIENTRY* DestroySamplerFunc) (VkDevice device, VkSampler sampler);
+typedef VK_APICALL VkResult (VK_APIENTRY* CreateDescriptorSetLayoutFunc) (VkDevice device, const VkDescriptorSetLayoutCreateInfo* pCreateInfo, VkDescriptorSetLayout* pSetLayout);
+typedef VK_APICALL VkResult (VK_APIENTRY* DestroyDescriptorSetLayoutFunc) (VkDevice device, VkDescriptorSetLayout descriptorSetLayout);
+typedef VK_APICALL VkResult (VK_APIENTRY* CreateDescriptorPoolFunc) (VkDevice device, VkDescriptorPoolUsage poolUsage, deUint32 maxSets, const VkDescriptorPoolCreateInfo* pCreateInfo, VkDescriptorPool* pDescriptorPool);
+typedef VK_APICALL VkResult (VK_APIENTRY* DestroyDescriptorPoolFunc) (VkDevice device, VkDescriptorPool descriptorPool);
+typedef VK_APICALL VkResult (VK_APIENTRY* ResetDescriptorPoolFunc) (VkDevice device, VkDescriptorPool descriptorPool);
+typedef VK_APICALL VkResult (VK_APIENTRY* AllocDescriptorSetsFunc) (VkDevice device, VkDescriptorPool descriptorPool, VkDescriptorSetUsage setUsage, deUint32 count, const VkDescriptorSetLayout* pSetLayouts, VkDescriptorSet* pDescriptorSets, deUint32* pCount);
+typedef VK_APICALL VkResult (VK_APIENTRY* FreeDescriptorSetsFunc) (VkDevice device, VkDescriptorPool descriptorPool, deUint32 count, const VkDescriptorSet* pDescriptorSets);
+typedef VK_APICALL VkResult (VK_APIENTRY* UpdateDescriptorSetsFunc) (VkDevice device, deUint32 writeCount, const VkWriteDescriptorSet* pDescriptorWrites, deUint32 copyCount, const VkCopyDescriptorSet* pDescriptorCopies);
+typedef VK_APICALL VkResult (VK_APIENTRY* CreateDynamicViewportStateFunc) (VkDevice device, const VkDynamicViewportStateCreateInfo* pCreateInfo, VkDynamicViewportState* pState);
+typedef VK_APICALL VkResult (VK_APIENTRY* DestroyDynamicViewportStateFunc) (VkDevice device, VkDynamicViewportState dynamicViewportState);
+typedef VK_APICALL VkResult (VK_APIENTRY* CreateDynamicRasterStateFunc) (VkDevice device, const VkDynamicRasterStateCreateInfo* pCreateInfo, VkDynamicRasterState* pState);
+typedef VK_APICALL VkResult (VK_APIENTRY* DestroyDynamicRasterStateFunc) (VkDevice device, VkDynamicRasterState dynamicRasterState);
+typedef VK_APICALL VkResult (VK_APIENTRY* CreateDynamicColorBlendStateFunc) (VkDevice device, const VkDynamicColorBlendStateCreateInfo* pCreateInfo, VkDynamicColorBlendState* pState);
+typedef VK_APICALL VkResult (VK_APIENTRY* DestroyDynamicColorBlendStateFunc) (VkDevice device, VkDynamicColorBlendState dynamicColorBlendState);
+typedef VK_APICALL VkResult (VK_APIENTRY* CreateDynamicDepthStencilStateFunc) (VkDevice device, const VkDynamicDepthStencilStateCreateInfo* pCreateInfo, VkDynamicDepthStencilState* pState);
+typedef VK_APICALL VkResult (VK_APIENTRY* DestroyDynamicDepthStencilStateFunc) (VkDevice device, VkDynamicDepthStencilState dynamicDepthStencilState);
+typedef VK_APICALL VkResult (VK_APIENTRY* CreateFramebufferFunc) (VkDevice device, const VkFramebufferCreateInfo* pCreateInfo, VkFramebuffer* pFramebuffer);
+typedef VK_APICALL VkResult (VK_APIENTRY* DestroyFramebufferFunc) (VkDevice device, VkFramebuffer framebuffer);
+typedef VK_APICALL VkResult (VK_APIENTRY* CreateRenderPassFunc) (VkDevice device, const VkRenderPassCreateInfo* pCreateInfo, VkRenderPass* pRenderPass);
+typedef VK_APICALL VkResult (VK_APIENTRY* DestroyRenderPassFunc) (VkDevice device, VkRenderPass renderPass);
+typedef VK_APICALL VkResult (VK_APIENTRY* GetRenderAreaGranularityFunc) (VkDevice device, VkRenderPass renderPass, VkExtent2D* pGranularity);
+typedef VK_APICALL VkResult (VK_APIENTRY* CreateCommandPoolFunc) (VkDevice device, const VkCmdPoolCreateInfo* pCreateInfo, VkCmdPool* pCmdPool);
+typedef VK_APICALL VkResult (VK_APIENTRY* DestroyCommandPoolFunc) (VkDevice device, VkCmdPool cmdPool);
+typedef VK_APICALL VkResult (VK_APIENTRY* ResetCommandPoolFunc) (VkDevice device, VkCmdPool cmdPool, VkCmdPoolResetFlags flags);
+typedef VK_APICALL VkResult (VK_APIENTRY* CreateCommandBufferFunc) (VkDevice device, const VkCmdBufferCreateInfo* pCreateInfo, VkCmdBuffer* pCmdBuffer);
+typedef VK_APICALL VkResult (VK_APIENTRY* DestroyCommandBufferFunc) (VkDevice device, VkCmdBuffer commandBuffer);
+typedef VK_APICALL VkResult (VK_APIENTRY* BeginCommandBufferFunc) (VkCmdBuffer cmdBuffer, const VkCmdBufferBeginInfo* pBeginInfo);
+typedef VK_APICALL VkResult (VK_APIENTRY* EndCommandBufferFunc) (VkCmdBuffer cmdBuffer);
+typedef VK_APICALL VkResult (VK_APIENTRY* ResetCommandBufferFunc) (VkCmdBuffer cmdBuffer, VkCmdBufferResetFlags flags);
+typedef VK_APICALL void (VK_APIENTRY* CmdBindPipelineFunc) (VkCmdBuffer cmdBuffer, VkPipelineBindPoint pipelineBindPoint, VkPipeline pipeline);
+typedef VK_APICALL void (VK_APIENTRY* CmdBindDynamicViewportStateFunc) (VkCmdBuffer cmdBuffer, VkDynamicViewportState dynamicViewportState);
+typedef VK_APICALL void (VK_APIENTRY* CmdBindDynamicRasterStateFunc) (VkCmdBuffer cmdBuffer, VkDynamicRasterState dynamicRasterState);
+typedef VK_APICALL void (VK_APIENTRY* CmdBindDynamicColorBlendStateFunc) (VkCmdBuffer cmdBuffer, VkDynamicColorBlendState dynamicColorBlendState);
+typedef VK_APICALL void (VK_APIENTRY* CmdBindDynamicDepthStencilStateFunc) (VkCmdBuffer cmdBuffer, VkDynamicDepthStencilState dynamicDepthStencilState);
+typedef VK_APICALL void (VK_APIENTRY* CmdBindDescriptorSetsFunc) (VkCmdBuffer cmdBuffer, VkPipelineBindPoint pipelineBindPoint, VkPipelineLayout layout, deUint32 firstSet, deUint32 setCount, const VkDescriptorSet* pDescriptorSets, deUint32 dynamicOffsetCount, const deUint32* pDynamicOffsets);
+typedef VK_APICALL void (VK_APIENTRY* CmdBindIndexBufferFunc) (VkCmdBuffer cmdBuffer, VkBuffer buffer, VkDeviceSize offset, VkIndexType indexType);
+typedef VK_APICALL void (VK_APIENTRY* CmdBindVertexBuffersFunc) (VkCmdBuffer cmdBuffer, deUint32 startBinding, deUint32 bindingCount, const VkBuffer* pBuffers, const VkDeviceSize* pOffsets);
+typedef VK_APICALL void (VK_APIENTRY* CmdDrawFunc) (VkCmdBuffer cmdBuffer, deUint32 firstVertex, deUint32 vertexCount, deUint32 firstInstance, deUint32 instanceCount);
+typedef VK_APICALL void (VK_APIENTRY* CmdDrawIndexedFunc) (VkCmdBuffer cmdBuffer, deUint32 firstIndex, deUint32 indexCount, deInt32 vertexOffset, deUint32 firstInstance, deUint32 instanceCount);
+typedef VK_APICALL void (VK_APIENTRY* CmdDrawIndirectFunc) (VkCmdBuffer cmdBuffer, VkBuffer buffer, VkDeviceSize offset, deUint32 count, deUint32 stride);
+typedef VK_APICALL void (VK_APIENTRY* CmdDrawIndexedIndirectFunc) (VkCmdBuffer cmdBuffer, VkBuffer buffer, VkDeviceSize offset, deUint32 count, deUint32 stride);
+typedef VK_APICALL void (VK_APIENTRY* CmdDispatchFunc) (VkCmdBuffer cmdBuffer, deUint32 x, deUint32 y, deUint32 z);
+typedef VK_APICALL void (VK_APIENTRY* CmdDispatchIndirectFunc) (VkCmdBuffer cmdBuffer, VkBuffer buffer, VkDeviceSize offset);
+typedef VK_APICALL void (VK_APIENTRY* CmdCopyBufferFunc) (VkCmdBuffer cmdBuffer, VkBuffer srcBuffer, VkBuffer destBuffer, deUint32 regionCount, const VkBufferCopy* pRegions);
+typedef VK_APICALL void (VK_APIENTRY* CmdCopyImageFunc) (VkCmdBuffer cmdBuffer, VkImage srcImage, VkImageLayout srcImageLayout, VkImage destImage, VkImageLayout destImageLayout, deUint32 regionCount, const VkImageCopy* pRegions);
+typedef VK_APICALL void (VK_APIENTRY* CmdBlitImageFunc) (VkCmdBuffer cmdBuffer, VkImage srcImage, VkImageLayout srcImageLayout, VkImage destImage, VkImageLayout destImageLayout, deUint32 regionCount, const VkImageBlit* pRegions, VkTexFilter filter);
+typedef VK_APICALL void (VK_APIENTRY* CmdCopyBufferToImageFunc) (VkCmdBuffer cmdBuffer, VkBuffer srcBuffer, VkImage destImage, VkImageLayout destImageLayout, deUint32 regionCount, const VkBufferImageCopy* pRegions);
+typedef VK_APICALL void (VK_APIENTRY* CmdCopyImageToBufferFunc) (VkCmdBuffer cmdBuffer, VkImage srcImage, VkImageLayout srcImageLayout, VkBuffer destBuffer, deUint32 regionCount, const VkBufferImageCopy* pRegions);
+typedef VK_APICALL void (VK_APIENTRY* CmdUpdateBufferFunc) (VkCmdBuffer cmdBuffer, VkBuffer destBuffer, VkDeviceSize destOffset, VkDeviceSize dataSize, const deUint32* pData);
+typedef VK_APICALL void (VK_APIENTRY* CmdFillBufferFunc) (VkCmdBuffer cmdBuffer, VkBuffer destBuffer, VkDeviceSize destOffset, VkDeviceSize fillSize, deUint32 data);
+typedef VK_APICALL void (VK_APIENTRY* CmdClearColorImageFunc) (VkCmdBuffer cmdBuffer, VkImage image, VkImageLayout imageLayout, const VkClearColorValue* pColor, deUint32 rangeCount, const VkImageSubresourceRange* pRanges);
+typedef VK_APICALL void (VK_APIENTRY* CmdClearDepthStencilImageFunc) (VkCmdBuffer cmdBuffer, VkImage image, VkImageLayout imageLayout, float depth, deUint32 stencil, deUint32 rangeCount, const VkImageSubresourceRange* pRanges);
+typedef VK_APICALL void (VK_APIENTRY* CmdClearColorAttachmentFunc) (VkCmdBuffer cmdBuffer, deUint32 colorAttachment, VkImageLayout imageLayout, const VkClearColorValue* pColor, deUint32 rectCount, const VkRect3D* pRects);
+typedef VK_APICALL void (VK_APIENTRY* CmdClearDepthStencilAttachmentFunc) (VkCmdBuffer cmdBuffer, VkImageAspectFlags imageAspectMask, VkImageLayout imageLayout, float depth, deUint32 stencil, deUint32 rectCount, const VkRect3D* pRects);
+typedef VK_APICALL void (VK_APIENTRY* CmdResolveImageFunc) (VkCmdBuffer cmdBuffer, VkImage srcImage, VkImageLayout srcImageLayout, VkImage destImage, VkImageLayout destImageLayout, deUint32 regionCount, const VkImageResolve* pRegions);
+typedef VK_APICALL void (VK_APIENTRY* CmdSetEventFunc) (VkCmdBuffer cmdBuffer, VkEvent event, VkPipelineStageFlags stageMask);
+typedef VK_APICALL void (VK_APIENTRY* CmdResetEventFunc) (VkCmdBuffer cmdBuffer, VkEvent event, VkPipelineStageFlags stageMask);
+typedef VK_APICALL void (VK_APIENTRY* CmdWaitEventsFunc) (VkCmdBuffer cmdBuffer, deUint32 eventCount, const VkEvent* pEvents, VkPipelineStageFlags srcStageMask, VkPipelineStageFlags destStageMask, deUint32 memBarrierCount, const void* const* ppMemBarriers);
+typedef VK_APICALL void (VK_APIENTRY* CmdPipelineBarrierFunc) (VkCmdBuffer cmdBuffer, VkPipelineStageFlags srcStageMask, VkPipelineStageFlags destStageMask, VkBool32 byRegion, deUint32 memBarrierCount, const void* const* ppMemBarriers);
+typedef VK_APICALL void (VK_APIENTRY* CmdBeginQueryFunc) (VkCmdBuffer cmdBuffer, VkQueryPool queryPool, deUint32 slot, VkQueryControlFlags flags);
+typedef VK_APICALL void (VK_APIENTRY* CmdEndQueryFunc) (VkCmdBuffer cmdBuffer, VkQueryPool queryPool, deUint32 slot);
+typedef VK_APICALL void (VK_APIENTRY* CmdResetQueryPoolFunc) (VkCmdBuffer cmdBuffer, VkQueryPool queryPool, deUint32 startQuery, deUint32 queryCount);
+typedef VK_APICALL void (VK_APIENTRY* CmdWriteTimestampFunc) (VkCmdBuffer cmdBuffer, VkTimestampType timestampType, VkBuffer destBuffer, VkDeviceSize destOffset);
+typedef VK_APICALL void (VK_APIENTRY* CmdCopyQueryPoolResultsFunc) (VkCmdBuffer cmdBuffer, VkQueryPool queryPool, deUint32 startQuery, deUint32 queryCount, VkBuffer destBuffer, VkDeviceSize destOffset, VkDeviceSize destStride, VkQueryResultFlags flags);
+typedef VK_APICALL void (VK_APIENTRY* CmdPushConstantsFunc) (VkCmdBuffer cmdBuffer, VkPipelineLayout layout, VkShaderStageFlags stageFlags, deUint32 start, deUint32 length, const void* values);
+typedef VK_APICALL void (VK_APIENTRY* CmdBeginRenderPassFunc) (VkCmdBuffer cmdBuffer, const VkRenderPassBeginInfo* pRenderPassBegin, VkRenderPassContents contents);
+typedef VK_APICALL void (VK_APIENTRY* CmdNextSubpassFunc) (VkCmdBuffer cmdBuffer, VkRenderPassContents contents);
+typedef VK_APICALL void (VK_APIENTRY* CmdEndRenderPassFunc) (VkCmdBuffer cmdBuffer);
+typedef VK_APICALL void (VK_APIENTRY* CmdExecuteCommandsFunc) (VkCmdBuffer cmdBuffer, deUint32 cmdBuffersCount, const VkCmdBuffer* pCmdBuffers);
--- /dev/null
+/*-------------------------------------------------------------------------
+ * Vulkan CTS Framework
+ * --------------------
+ *
+ * Copyright (c) 2015 Google Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and/or associated documentation files (the
+ * "Materials"), to deal in the Materials without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Materials, and to
+ * permit persons to whom the Materials are furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice(s) and this permission notice shall be
+ * included in all copies or substantial portions of the Materials.
+ *
+ * The Materials are Confidential Information as defined by the
+ * Khronos Membership Agreement until designated non-confidential by
+ * Khronos, at which point this condition clause shall be removed.
+ *
+ * THE MATERIALS ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * MATERIALS OR THE USE OR OTHER DEALINGS IN THE MATERIALS.
+ *
+ *//*!
+ * \file
+ * \brief GLSL to SPIR-V.
+ *//*--------------------------------------------------------------------*/
+
+#include "vkGlslToSpirV.hpp"
+#include "deArrayUtil.hpp"
+#include "deMemory.h"
+#include "deClock.h"
+#include "qpDebugOut.h"
+
+#if defined(DEQP_HAVE_GLSLANG)
+# include "deSingleton.h"
+# include "deMutex.hpp"
+
+# include "SPIRV/GlslangToSpv.h"
+# include "SPIRV/disassemble.h"
+# include "SPIRV/doc.h"
+# include "glslang/Include/InfoSink.h"
+# include "glslang/Include/ShHandle.h"
+# include "glslang/MachineIndependent/localintermediate.h"
+# include "glslang/Public/ShaderLang.h"
+
+#endif
+
+namespace vk
+{
+
+using std::string;
+using std::vector;
+
+#if defined(DEQP_HAVE_GLSLANG)
+
+namespace
+{
+
+// Map a glu::ShaderType to the corresponding glslang shader stage.
+// \note stageMap entries must be listed in glu::SHADERTYPE_* enum order;
+//       getSizedArrayElement<SHADERTYPE_LAST> enforces at compile time that
+//       the array has exactly one entry per shader type.
+EShLanguage getGlslangStage (glu::ShaderType type)
+{
+	static const EShLanguage stageMap[] =
+	{
+		EShLangVertex,
+		EShLangFragment,
+		EShLangGeometry,
+		EShLangTessControl,
+		EShLangTessEvaluation,
+		EShLangCompute,
+	};
+	return de::getSizedArrayElement<glu::SHADERTYPE_LAST>(stageMap, type);
+}
+
+// One-time, process-wide glslang initialization state.
+static volatile deSingletonState	s_glslangInitState	= DE_SINGLETON_STATE_NOT_INITIALIZED;
+// Serializes access to the glslang compiler (held around ShCompile in
+// glslToSpirV below). NOTE(review): presumably glslang of this vintage is
+// not thread-safe — confirm before removing.
+static de::Mutex					s_glslangLock;
+
+// deInitSingleton callback: initializes the glslang compiler and the
+// SPIR-V disassembler tables.
+void initGlslang (void*)
+{
+	// Main compiler
+	ShInitialize();
+
+	// SPIR-V disassembly
+	spv::Parameterize();
+}
+
+// Ensure glslang has been initialized exactly once (safe to call from
+// multiple threads; deInitSingleton performs the synchronization).
+void prepareGlslang (void)
+{
+	deInitSingleton(&s_glslangInitState, initGlslang, DE_NULL);
+}
+
+// TCompiler implementation that lowers the parsed AST to a SPIR-V binary,
+// appending the 32-bit words to the destination vector supplied at
+// construction time (the vector must outlive the compiler).
+class SpvGenerator : public TCompiler
+{
+public:
+	SpvGenerator (EShLanguage language, std::vector<deUint32>& dst, TInfoSink& infoSink)
+		: TCompiler	(language, infoSink)
+		, m_dst		(dst)
+	{
+	}
+
+	// Invoked by ShCompile with the AST root of the translation unit.
+	bool compile (TIntermNode* root, int version = 0, EProfile profile = ENoProfile)
+	{
+		glslang::TIntermediate intermediate(getLanguage(), version, profile);
+		intermediate.setTreeRoot(root);
+		// NOTE(review): finalCheck()'s outcome is ignored and compile()
+		// unconditionally returns true, so semantic errors surface only
+		// through the info sink / ShCompile return value — confirm intended.
+		intermediate.finalCheck(getInfoSink());
+		glslang::GlslangToSpv(intermediate, m_dst);
+		return true;
+	}
+
+private:
+	std::vector<deUint32>& m_dst;	// Receives the generated SPIR-V words.
+};
+
+// \todo [2015-06-19 pyry] Specialize these per GLSL version
+
+// Fail compilation if more members are added to TLimits or TBuiltInResource:
+// the helper structs mirror the current member counts (9 bools / 83 ints +
+// limits), so a size mismatch means getDefaultLimits() or
+// getDefaultBuiltInResources() below must be updated to cover the new fields.
+struct LimitsSizeHelper_s { bool m0, m1, m2, m3, m4, m5, m6, m7, m8; };
+struct BuiltInResourceSizeHelper_s { int m[83]; LimitsSizeHelper_s l; };
+
+DE_STATIC_ASSERT(sizeof(TLimits) == sizeof(LimitsSizeHelper_s));
+DE_STATIC_ASSERT(sizeof(TBuiltInResource) == sizeof(BuiltInResourceSizeHelper_s));
+
+// Fill in permissive default shader limits: all dynamic indexing forms and
+// all loop constructs are allowed.
+void getDefaultLimits (TLimits* limits)
+{
+	limits->nonInductiveForLoops					= true;
+	limits->whileLoops								= true;
+	limits->doWhileLoops							= true;
+	limits->generalUniformIndexing					= true;
+	limits->generalAttributeMatrixVectorIndexing	= true;
+	limits->generalVaryingIndexing					= true;
+	limits->generalSamplerIndexing					= true;
+	limits->generalVariableIndexing					= true;
+	limits->generalConstantMatrixVectorIndexing		= true;
+}
+
+// Fill in default built-in resource limits handed to the glslang compiler.
+// Values follow the reference defaults used by glslang's standalone compiler.
+// \note Every member of TBuiltInResource must be set here; the static size
+//       asserts above force this function to be revisited when members are
+//       added.
+void getDefaultBuiltInResources (TBuiltInResource* builtin)
+{
+	getDefaultLimits(&builtin->limits);
+
+	builtin->maxLights									= 32;
+	builtin->maxClipPlanes								= 6;
+	builtin->maxTextureUnits							= 32;
+	builtin->maxTextureCoords							= 32;
+	builtin->maxVertexAttribs							= 64;
+	builtin->maxVertexUniformComponents					= 4096;
+	builtin->maxVaryingFloats							= 64;
+	builtin->maxVertexTextureImageUnits					= 32;
+	builtin->maxCombinedTextureImageUnits				= 80;
+	builtin->maxTextureImageUnits						= 32;
+	builtin->maxFragmentUniformComponents				= 4096;
+	builtin->maxDrawBuffers								= 32;
+	builtin->maxVertexUniformVectors					= 128;
+	builtin->maxVaryingVectors							= 8;
+	builtin->maxFragmentUniformVectors					= 16;
+	builtin->maxVertexOutputVectors						= 16;
+	builtin->maxFragmentInputVectors					= 15;
+	builtin->minProgramTexelOffset						= -8;
+	builtin->maxProgramTexelOffset						= 7;
+	builtin->maxClipDistances							= 8;
+	builtin->maxComputeWorkGroupCountX					= 65535;
+	builtin->maxComputeWorkGroupCountY					= 65535;
+	builtin->maxComputeWorkGroupCountZ					= 65535;
+	builtin->maxComputeWorkGroupSizeX					= 1024;
+	builtin->maxComputeWorkGroupSizeY					= 1024;	// \note Was a duplicate assignment to SizeX, leaving SizeY uninitialized.
+	builtin->maxComputeWorkGroupSizeZ					= 64;
+	builtin->maxComputeUniformComponents				= 1024;
+	builtin->maxComputeTextureImageUnits				= 16;
+	builtin->maxComputeImageUniforms					= 8;
+	builtin->maxComputeAtomicCounters					= 8;
+	builtin->maxComputeAtomicCounterBuffers				= 1;
+	builtin->maxVaryingComponents						= 60;
+	builtin->maxVertexOutputComponents					= 64;
+	builtin->maxGeometryInputComponents					= 64;
+	builtin->maxGeometryOutputComponents				= 128;
+	builtin->maxFragmentInputComponents					= 128;
+	builtin->maxImageUnits								= 8;
+	builtin->maxCombinedImageUnitsAndFragmentOutputs	= 8;
+	builtin->maxCombinedShaderOutputResources			= 8;
+	builtin->maxImageSamples							= 0;
+	builtin->maxVertexImageUniforms						= 0;
+	builtin->maxTessControlImageUniforms				= 0;
+	builtin->maxTessEvaluationImageUniforms				= 0;
+	builtin->maxGeometryImageUniforms					= 0;
+	builtin->maxFragmentImageUniforms					= 8;
+	builtin->maxCombinedImageUniforms					= 8;
+	builtin->maxGeometryTextureImageUnits				= 16;
+	builtin->maxGeometryOutputVertices					= 256;
+	builtin->maxGeometryTotalOutputComponents			= 1024;
+	builtin->maxGeometryUniformComponents				= 1024;
+	builtin->maxGeometryVaryingComponents				= 64;
+	builtin->maxTessControlInputComponents				= 128;
+	builtin->maxTessControlOutputComponents				= 128;
+	builtin->maxTessControlTextureImageUnits			= 16;
+	builtin->maxTessControlUniformComponents			= 1024;
+	builtin->maxTessControlTotalOutputComponents		= 4096;
+	builtin->maxTessEvaluationInputComponents			= 128;
+	builtin->maxTessEvaluationOutputComponents			= 128;
+	builtin->maxTessEvaluationTextureImageUnits			= 16;
+	builtin->maxTessEvaluationUniformComponents			= 1024;
+	builtin->maxTessPatchComponents						= 120;
+	builtin->maxPatchVertices							= 32;
+	builtin->maxTessGenLevel							= 64;
+	builtin->maxViewports								= 16;
+	builtin->maxVertexAtomicCounters					= 0;
+	builtin->maxTessControlAtomicCounters				= 0;
+	builtin->maxTessEvaluationAtomicCounters			= 0;
+	builtin->maxGeometryAtomicCounters					= 0;
+	builtin->maxFragmentAtomicCounters					= 8;
+	builtin->maxCombinedAtomicCounters					= 8;
+	builtin->maxAtomicCounterBindings					= 1;
+	builtin->maxVertexAtomicCounterBuffers				= 0;
+	builtin->maxTessControlAtomicCounterBuffers			= 0;
+	builtin->maxTessEvaluationAtomicCounterBuffers		= 0;
+	builtin->maxGeometryAtomicCounterBuffers			= 0;
+	builtin->maxFragmentAtomicCounterBuffers			= 1;
+	builtin->maxCombinedAtomicCounterBuffers			= 1;
+	builtin->maxAtomicCounterBufferSize					= 16384;
+	builtin->maxTransformFeedbackBuffers				= 4;
+	builtin->maxTransformFeedbackInterleavedComponents	= 64;
+	builtin->maxCullDistances							= 8;
+	builtin->maxCombinedClipAndCullDistances			= 8;
+	builtin->maxSamples									= 4;
+}
+
+} // anonymous
+
+// Compiles the first non-empty shader found in 'program' to SPIR-V using
+// glslang's ShCompile, storing the binary bytes in '*dst' and the per-shader
+// build log/status/timing in '*buildInfo'. Throws (TCU_FAIL) on compile
+// failure and InternalError when all shader source lists are empty.
+void glslToSpirV (const glu::ProgramSources& program, std::vector<deUint8>* dst, glu::ShaderProgramInfo* buildInfo)
+{
+ TBuiltInResource builtinRes;
+
+ prepareGlslang();
+ getDefaultBuiltInResources(&builtinRes);
+
+ // \note Compiles only first found shader
+ for (int shaderType = 0; shaderType < glu::SHADERTYPE_LAST; shaderType++)
+ {
+ if (!program.sources[shaderType].empty())
+ {
+ // NOTE(review): s_glslangLock presumably serializes access to shared
+ // glslang state for the whole compile -- confirm against its definition.
+ de::ScopedLock compileLock (s_glslangLock);
+ const std::string& srcText = program.sources[shaderType][0];
+ const char* srcPtrs[] = { srcText.c_str() };
+ int srcLengths[] = { (int)srcText.size() };
+ vector<deUint32> spvBlob;
+ TInfoSink infoSink;
+ SpvGenerator compiler (getGlslangStage(glu::ShaderType(shaderType)), spvBlob, infoSink);
+ const deUint64 compileStartTime = deGetMicroseconds();
+ const int compileOk = ShCompile(static_cast<ShHandle>(&compiler), srcPtrs, DE_LENGTH_OF_ARRAY(srcPtrs), srcLengths, EShOptNone, &builtinRes, 0);
+
+ // Record per-shader build info regardless of compile result.
+ {
+ glu::ShaderInfo shaderBuildInfo;
+
+ shaderBuildInfo.type = (glu::ShaderType)shaderType;
+ shaderBuildInfo.source = srcText;
+ shaderBuildInfo.infoLog = infoSink.info.c_str(); // \todo [2015-07-13 pyry] Include debug log?
+ shaderBuildInfo.compileTimeUs = deGetMicroseconds()-compileStartTime;
+ shaderBuildInfo.compileOk = (compileOk != 0);
+
+ buildInfo->shaders.push_back(shaderBuildInfo);
+ }
+
+ // No separate link step here; program status mirrors the compile result.
+ buildInfo->program.infoLog = "(No linking performed)";
+ buildInfo->program.linkOk = (compileOk != 0);
+ buildInfo->program.linkTimeUs = 0;
+
+ if (compileOk == 0)
+ TCU_FAIL("Failed to compile shader");
+
+ // Copy 32-bit SPIR-V words out as raw bytes (little-endian hosts only).
+ dst->resize(spvBlob.size() * sizeof(deUint32));
+#if (DE_ENDIANNESS == DE_LITTLE_ENDIAN)
+ deMemcpy(&(*dst)[0], &spvBlob[0], dst->size());
+#else
+# error "Big-endian not supported"
+#endif
+
+ return;
+ }
+ }
+
+ TCU_THROW(InternalError, "Can't compile empty program");
+}
+
+// Disassembles a SPIR-V binary into human-readable text written to '*dst'.
+void disassembleSpirV (size_t binarySize, const deUint8* binary, std::ostream* dst)
+{
+ // SPIR-V is a stream of 32-bit words, so the byte size must be word-aligned.
+ DE_ASSERT(binarySize%4 == 0);
+
+ std::vector<deUint32> spvWords (binarySize/4);
+
+#if (DE_ENDIANNESS == DE_LITTLE_ENDIAN)
+ deMemcpy(&spvWords[0], binary, binarySize);
+#else
+# error "Big-endian not supported"
+#endif
+
+ spv::Disassemble(*dst, spvWords);
+}
+
+#else // defined(DEQP_HAVE_GLSLANG)
+
+// Stub implementations used when dEQP is built without glslang; both entry
+// points simply report the capability as unsupported.
+
+void glslToSpirV (const glu::ProgramSources&, std::vector<deUint8>*, glu::ShaderProgramInfo*)
+{
+ TCU_THROW(NotSupportedError, "GLSL to SPIR-V compilation not supported (DEQP_HAVE_GLSLANG not defined)");
+}
+
+void disassembleSpirV (size_t, const deUint8*, std::ostream*)
+{
+ TCU_THROW(NotSupportedError, "SPIR-V disassembling not supported (DEQP_HAVE_GLSLANG not defined)");
+}
+
+#endif
+
+} // vk
--- /dev/null
+#ifndef _VKGLSLTOSPIRV_HPP
+#define _VKGLSLTOSPIRV_HPP
+/*-------------------------------------------------------------------------
+ * Vulkan CTS Framework
+ * --------------------
+ *
+ * Copyright (c) 2015 Google Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and/or associated documentation files (the
+ * "Materials"), to deal in the Materials without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Materials, and to
+ * permit persons to whom the Materials are furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice(s) and this permission notice shall be
+ * included in all copies or substantial portions of the Materials.
+ *
+ * The Materials are Confidential Information as defined by the
+ * Khronos Membership Agreement until designated non-confidential by
+ * Khronos, at which point this condition clause shall be removed.
+ *
+ * THE MATERIALS ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * MATERIALS OR THE USE OR OTHER DEALINGS IN THE MATERIALS.
+ *
+ *//*!
+ * \file
+ * \brief GLSL to SPIR-V.
+ *//*--------------------------------------------------------------------*/
+
+#include "vkDefs.hpp"
+#include "vkPrograms.hpp"
+#include "gluShaderProgram.hpp"
+
+#include <ostream>
+
+namespace vk
+{
+
+//! Compile GLSL program to SPIR-V. Will fail with NotSupportedError if compiler is not available.
+//! On success *dst holds the SPIR-V binary bytes and *buildInfo the per-shader build logs/status.
+void glslToSpirV (const glu::ProgramSources& src, std::vector<deUint8>* dst, glu::ShaderProgramInfo* buildInfo);
+
+//! Disassemble SPIR-V binary into text written to *dst. binarySize must be a multiple of 4
+//! (SPIR-V is a stream of 32-bit words). Fails with NotSupportedError if disassembler is unavailable.
+void disassembleSpirV (size_t binarySize, const deUint8* binary, std::ostream* dst);
+
+} // vk
+
+#endif // _VKGLSLTOSPIRV_HPP
--- /dev/null
+/* WARNING: This is auto-generated file. Do not modify, since changes will
+ * be lost! Modify the generating script instead.
+ */
+// Enumerates the Vulkan object handle categories known to the framework.
+// HANDLE_TYPE_LAST is a count/sentinel value, not a real handle type.
+enum HandleType
+{
+ HANDLE_TYPE_INSTANCE = 0,
+ HANDLE_TYPE_PHYSICAL_DEVICE,
+ HANDLE_TYPE_DEVICE,
+ HANDLE_TYPE_QUEUE,
+ HANDLE_TYPE_CMD_BUFFER,
+ HANDLE_TYPE_FENCE,
+ HANDLE_TYPE_DEVICE_MEMORY,
+ HANDLE_TYPE_BUFFER,
+ HANDLE_TYPE_IMAGE,
+ HANDLE_TYPE_SEMAPHORE,
+ HANDLE_TYPE_EVENT,
+ HANDLE_TYPE_QUERY_POOL,
+ HANDLE_TYPE_BUFFER_VIEW,
+ HANDLE_TYPE_IMAGE_VIEW,
+ HANDLE_TYPE_ATTACHMENT_VIEW,
+ HANDLE_TYPE_SHADER_MODULE,
+ HANDLE_TYPE_SHADER,
+ HANDLE_TYPE_PIPELINE_CACHE,
+ HANDLE_TYPE_PIPELINE_LAYOUT,
+ HANDLE_TYPE_RENDER_PASS,
+ HANDLE_TYPE_PIPELINE,
+ HANDLE_TYPE_DESCRIPTOR_SET_LAYOUT,
+ HANDLE_TYPE_SAMPLER,
+ HANDLE_TYPE_DESCRIPTOR_POOL,
+ HANDLE_TYPE_DESCRIPTOR_SET,
+ HANDLE_TYPE_DYNAMIC_VIEWPORT_STATE,
+ HANDLE_TYPE_DYNAMIC_RASTER_STATE,
+ HANDLE_TYPE_DYNAMIC_COLOR_BLEND_STATE,
+ HANDLE_TYPE_DYNAMIC_DEPTH_STENCIL_STATE,
+ HANDLE_TYPE_FRAMEBUFFER,
+ HANDLE_TYPE_CMD_POOL,
+ HANDLE_TYPE_LAST
+};
+
--- /dev/null
+/*-------------------------------------------------------------------------
+ * Vulkan CTS Framework
+ * --------------------
+ *
+ * Copyright (c) 2015 The Khronos Group Inc.
+ * Copyright (c) 2015 Imagination Technologies Ltd.
+ * Copyright (c) 2015 Google Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and/or associated documentation files (the
+ * "Materials"), to deal in the Materials without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Materials, and to
+ * permit persons to whom the Materials are furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice(s) and this permission notice shall be
+ * included in all copies or substantial portions of the Materials.
+ *
+ * The Materials are Confidential Information as defined by the
+ * Khronos Membership Agreement until designated non-confidential by
+ * Khronos, at which point this condition clause shall be removed.
+ *
+ * THE MATERIALS ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * MATERIALS OR THE USE OR OTHER DEALINGS IN THE MATERIALS.
+ *
+ *//*!
+ * \file
+ * \brief Utilities for images.
+ *//*--------------------------------------------------------------------*/
+
+#include "vkImageUtil.hpp"
+#include "tcuTextureUtil.hpp"
+
+namespace vk
+{
+
+//! True when the format's channels are floating-point.
+bool isFloatFormat (VkFormat format)
+{
+ const tcu::TextureChannelClass chnClass = tcu::getTextureChannelClass(mapVkFormat(format).type);
+ return chnClass == tcu::TEXTURECHANNELCLASS_FLOATING_POINT;
+}
+
+//! True when the format's channels are unsigned normalized fixed-point.
+bool isUnormFormat (VkFormat format)
+{
+ const tcu::TextureChannelClass chnClass = tcu::getTextureChannelClass(mapVkFormat(format).type);
+ return chnClass == tcu::TEXTURECHANNELCLASS_UNSIGNED_FIXED_POINT;
+}
+
+//! True when the format's channels are signed normalized fixed-point.
+bool isSnormFormat (VkFormat format)
+{
+ const tcu::TextureChannelClass chnClass = tcu::getTextureChannelClass(mapVkFormat(format).type);
+ return chnClass == tcu::TEXTURECHANNELCLASS_SIGNED_FIXED_POINT;
+}
+
+//! True when the format's channels are signed integers.
+bool isIntFormat (VkFormat format)
+{
+ const tcu::TextureChannelClass chnClass = tcu::getTextureChannelClass(mapVkFormat(format).type);
+ return chnClass == tcu::TEXTURECHANNELCLASS_SIGNED_INTEGER;
+}
+
+//! True when the format's channels are unsigned integers.
+bool isUintFormat (VkFormat format)
+{
+ const tcu::TextureChannelClass chnClass = tcu::getTextureChannelClass(mapVkFormat(format).type);
+ return chnClass == tcu::TEXTURECHANNELCLASS_UNSIGNED_INTEGER;
+}
+
+//! True when the format has depth and/or stencil channels (D, S or DS order).
+bool isDepthStencilFormat (VkFormat format)
+{
+ switch (mapVkFormat(format).order)
+ {
+ case tcu::TextureFormat::D:
+ case tcu::TextureFormat::S:
+ case tcu::TextureFormat::DS:
+ return true;
+ default:
+ return false;
+ }
+}
+
+//! True for block-compressed formats (BC, ETC2/EAC and ASTC families).
+bool isCompressedFormat (VkFormat format)
+{
+ switch (format)
+ {
+ // BC (S3TC / RGTC / BPTC) formats
+ case VK_FORMAT_BC1_RGBA_SRGB:
+ case VK_FORMAT_BC1_RGBA_UNORM:
+ case VK_FORMAT_BC1_RGB_SRGB:
+ case VK_FORMAT_BC1_RGB_UNORM:
+ case VK_FORMAT_BC2_UNORM:
+ case VK_FORMAT_BC2_SRGB:
+ case VK_FORMAT_BC3_UNORM:
+ case VK_FORMAT_BC3_SRGB:
+ case VK_FORMAT_BC4_UNORM:
+ case VK_FORMAT_BC4_SNORM:
+ case VK_FORMAT_BC5_UNORM:
+ case VK_FORMAT_BC5_SNORM:
+ case VK_FORMAT_BC6H_UFLOAT:
+ case VK_FORMAT_BC6H_SFLOAT:
+ case VK_FORMAT_BC7_UNORM:
+ case VK_FORMAT_BC7_SRGB:
+ // ETC2 / EAC formats
+ case VK_FORMAT_ETC2_R8G8B8_UNORM:
+ case VK_FORMAT_ETC2_R8G8B8A1_UNORM:
+ case VK_FORMAT_ETC2_R8G8B8A8_UNORM:
+ case VK_FORMAT_EAC_R11_UNORM:
+ case VK_FORMAT_EAC_R11_SNORM:
+ case VK_FORMAT_EAC_R11G11_UNORM:
+ case VK_FORMAT_EAC_R11G11_SNORM:
+ // ASTC formats
+ case VK_FORMAT_ASTC_4x4_UNORM:
+ case VK_FORMAT_ASTC_4x4_SRGB:
+ case VK_FORMAT_ASTC_5x4_UNORM:
+ case VK_FORMAT_ASTC_5x4_SRGB:
+ case VK_FORMAT_ASTC_5x5_UNORM:
+ case VK_FORMAT_ASTC_5x5_SRGB:
+ case VK_FORMAT_ASTC_6x5_UNORM:
+ case VK_FORMAT_ASTC_6x5_SRGB:
+ case VK_FORMAT_ASTC_6x6_UNORM:
+ case VK_FORMAT_ASTC_6x6_SRGB:
+ case VK_FORMAT_ASTC_8x5_UNORM:
+ case VK_FORMAT_ASTC_8x5_SRGB:
+ case VK_FORMAT_ASTC_8x6_UNORM:
+ case VK_FORMAT_ASTC_8x6_SRGB:
+ case VK_FORMAT_ASTC_8x8_UNORM:
+ case VK_FORMAT_ASTC_8x8_SRGB:
+ case VK_FORMAT_ASTC_10x5_UNORM:
+ case VK_FORMAT_ASTC_10x5_SRGB:
+ case VK_FORMAT_ASTC_10x6_UNORM:
+ case VK_FORMAT_ASTC_10x6_SRGB:
+ case VK_FORMAT_ASTC_10x8_UNORM:
+ case VK_FORMAT_ASTC_10x8_SRGB:
+ case VK_FORMAT_ASTC_10x10_UNORM:
+ case VK_FORMAT_ASTC_10x10_SRGB:
+ case VK_FORMAT_ASTC_12x10_UNORM:
+ case VK_FORMAT_ASTC_12x10_SRGB:
+ case VK_FORMAT_ASTC_12x12_UNORM:
+ case VK_FORMAT_ASTC_12x12_SRGB:
+ return true;
+ default:
+ break;
+ }
+ return false;
+}
+
+//! Maps a tcu::TextureFormat (channel order + channel type pair) to the
+//! corresponding VkFormat. Throws InternalError if no mapping exists.
+VkFormat mapTextureFormat (const tcu::TextureFormat& format)
+{
+ DE_STATIC_ASSERT(tcu::TextureFormat::CHANNELORDER_LAST < (1<<16));
+ DE_STATIC_ASSERT(tcu::TextureFormat::CHANNELTYPE_LAST < (1<<16));
+
+// Pack (order, type) into a single int so one switch can match the pair;
+// both enums fit in 16 bits (asserted above).
+#define PACK_FMT(ORDER, TYPE) ((int(ORDER) << 16) | int(TYPE))
+#define FMT_CASE(ORDER, TYPE) PACK_FMT(tcu::TextureFormat::ORDER, tcu::TextureFormat::TYPE)
+
+ switch (PACK_FMT(format.order, format.type))
+ {
+ case FMT_CASE(RGB, UNORM_SHORT_565): return VK_FORMAT_R5G6B5_UNORM;
+ case FMT_CASE(RGBA, UNORM_SHORT_5551): return VK_FORMAT_R5G5B5A1_UNORM;
+
+ case FMT_CASE(R, UNORM_INT8): return VK_FORMAT_R8_UNORM;
+ case FMT_CASE(R, SNORM_INT8): return VK_FORMAT_R8_SNORM;
+ case FMT_CASE(R, UNSIGNED_INT8): return VK_FORMAT_R8_UINT;
+ case FMT_CASE(R, SIGNED_INT8): return VK_FORMAT_R8_SINT;
+ case FMT_CASE(sR, UNORM_INT8): return VK_FORMAT_R8_SRGB;
+
+ case FMT_CASE(RG, UNORM_INT8): return VK_FORMAT_R8G8_UNORM;
+ case FMT_CASE(RG, SNORM_INT8): return VK_FORMAT_R8G8_SNORM;
+ case FMT_CASE(RG, UNSIGNED_INT8): return VK_FORMAT_R8G8_UINT;
+ case FMT_CASE(RG, SIGNED_INT8): return VK_FORMAT_R8G8_SINT;
+ case FMT_CASE(sRG, UNORM_INT8): return VK_FORMAT_R8G8_SRGB;
+
+ case FMT_CASE(RGB, UNORM_INT8): return VK_FORMAT_R8G8B8_UNORM;
+ case FMT_CASE(RGB, SNORM_INT8): return VK_FORMAT_R8G8B8_SNORM;
+ case FMT_CASE(RGB, UNSIGNED_INT8): return VK_FORMAT_R8G8B8_UINT;
+ case FMT_CASE(RGB, SIGNED_INT8): return VK_FORMAT_R8G8B8_SINT;
+ case FMT_CASE(sRGB, UNORM_INT8): return VK_FORMAT_R8G8B8_SRGB;
+
+ case FMT_CASE(RGBA, UNORM_INT8): return VK_FORMAT_R8G8B8A8_UNORM;
+ case FMT_CASE(RGBA, SNORM_INT8): return VK_FORMAT_R8G8B8A8_SNORM;
+ case FMT_CASE(RGBA, UNSIGNED_INT8): return VK_FORMAT_R8G8B8A8_UINT;
+ case FMT_CASE(RGBA, SIGNED_INT8): return VK_FORMAT_R8G8B8A8_SINT;
+ case FMT_CASE(sRGBA, UNORM_INT8): return VK_FORMAT_R8G8B8A8_SRGB;
+
+ case FMT_CASE(RGBA, UNORM_INT_1010102_REV): return VK_FORMAT_R10G10B10A2_UNORM;
+ case FMT_CASE(RGBA, UNSIGNED_INT_1010102_REV): return VK_FORMAT_R10G10B10A2_UINT;
+
+ case FMT_CASE(R, UNORM_INT16): return VK_FORMAT_R16_UNORM;
+ case FMT_CASE(R, SNORM_INT16): return VK_FORMAT_R16_SNORM;
+ case FMT_CASE(R, UNSIGNED_INT16): return VK_FORMAT_R16_UINT;
+ case FMT_CASE(R, SIGNED_INT16): return VK_FORMAT_R16_SINT;
+ case FMT_CASE(R, HALF_FLOAT): return VK_FORMAT_R16_SFLOAT;
+
+ case FMT_CASE(RG, UNORM_INT16): return VK_FORMAT_R16G16_UNORM;
+ case FMT_CASE(RG, SNORM_INT16): return VK_FORMAT_R16G16_SNORM;
+ case FMT_CASE(RG, UNSIGNED_INT16): return VK_FORMAT_R16G16_UINT;
+ case FMT_CASE(RG, SIGNED_INT16): return VK_FORMAT_R16G16_SINT;
+ case FMT_CASE(RG, HALF_FLOAT): return VK_FORMAT_R16G16_SFLOAT;
+
+ case FMT_CASE(RGB, UNORM_INT16): return VK_FORMAT_R16G16B16_UNORM;
+ case FMT_CASE(RGB, SNORM_INT16): return VK_FORMAT_R16G16B16_SNORM;
+ case FMT_CASE(RGB, UNSIGNED_INT16): return VK_FORMAT_R16G16B16_UINT;
+ case FMT_CASE(RGB, SIGNED_INT16): return VK_FORMAT_R16G16B16_SINT;
+ case FMT_CASE(RGB, HALF_FLOAT): return VK_FORMAT_R16G16B16_SFLOAT;
+
+ case FMT_CASE(RGBA, UNORM_INT16): return VK_FORMAT_R16G16B16A16_UNORM;
+ case FMT_CASE(RGBA, SNORM_INT16): return VK_FORMAT_R16G16B16A16_SNORM;
+ case FMT_CASE(RGBA, UNSIGNED_INT16): return VK_FORMAT_R16G16B16A16_UINT;
+ case FMT_CASE(RGBA, SIGNED_INT16): return VK_FORMAT_R16G16B16A16_SINT;
+ case FMT_CASE(RGBA, HALF_FLOAT): return VK_FORMAT_R16G16B16A16_SFLOAT;
+
+ case FMT_CASE(R, UNSIGNED_INT32): return VK_FORMAT_R32_UINT;
+ case FMT_CASE(R, SIGNED_INT32): return VK_FORMAT_R32_SINT;
+ case FMT_CASE(R, FLOAT): return VK_FORMAT_R32_SFLOAT;
+
+ case FMT_CASE(RG, UNSIGNED_INT32): return VK_FORMAT_R32G32_UINT;
+ case FMT_CASE(RG, SIGNED_INT32): return VK_FORMAT_R32G32_SINT;
+ case FMT_CASE(RG, FLOAT): return VK_FORMAT_R32G32_SFLOAT;
+
+ case FMT_CASE(RGB, UNSIGNED_INT32): return VK_FORMAT_R32G32B32_UINT;
+ case FMT_CASE(RGB, SIGNED_INT32): return VK_FORMAT_R32G32B32_SINT;
+ case FMT_CASE(RGB, FLOAT): return VK_FORMAT_R32G32B32_SFLOAT;
+
+ case FMT_CASE(RGBA, UNSIGNED_INT32): return VK_FORMAT_R32G32B32A32_UINT;
+ case FMT_CASE(RGBA, SIGNED_INT32): return VK_FORMAT_R32G32B32A32_SINT;
+ case FMT_CASE(RGBA, FLOAT): return VK_FORMAT_R32G32B32A32_SFLOAT;
+
+ case FMT_CASE(RGB, UNSIGNED_INT_11F_11F_10F_REV): return VK_FORMAT_R11G11B10_UFLOAT;
+ case FMT_CASE(RGB, UNSIGNED_INT_999_E5_REV): return VK_FORMAT_R9G9B9E5_UFLOAT;
+
+ case FMT_CASE(D, UNORM_INT16): return VK_FORMAT_D16_UNORM;
+ case FMT_CASE(D, UNORM_INT24): return VK_FORMAT_D24_UNORM;
+ case FMT_CASE(D, FLOAT): return VK_FORMAT_D32_SFLOAT;
+
+ case FMT_CASE(S, UNSIGNED_INT8): return VK_FORMAT_S8_UINT;
+ // NOTE(review): mapVkFormat() maps VK_FORMAT_D24_UNORM_S8_UINT to
+ // (DS, UNSIGNED_INT_24_8), not FLOAT_UNSIGNED_INT_24_8_REV -- the two
+ // mappings are not round-trip symmetric; confirm this is intended.
+ case FMT_CASE(DS, FLOAT_UNSIGNED_INT_24_8_REV): return VK_FORMAT_D24_UNORM_S8_UINT;
+
+ case FMT_CASE(BGRA, UNORM_SHORT_4444): return VK_FORMAT_B4G4R4A4_UNORM;
+ case FMT_CASE(BGRA, UNORM_SHORT_5551): return VK_FORMAT_B5G5R5A1_UNORM;
+ default:
+ TCU_THROW(InternalError, "Unknown texture format");
+ }
+
+#undef PACK_FMT
+#undef FMT_CASE
+}
+
+//! Maps a VkFormat to the equivalent tcu::TextureFormat.
+//! \note USCALED/SSCALED variants are mapped to the plain integer channel
+//! types, and R10G10B10A2_USCALED to the same type as its UINT variant.
+//! Throws InternalError for formats with no tcu representation.
+tcu::TextureFormat mapVkFormat (VkFormat format)
+{
+ using tcu::TextureFormat;
+
+ switch (format)
+ {
+ // NOTE(review): UNORM_SHORT_4444 describes four 4-bit channels; for the
+ // two-channel R4G4 format an 8-bit 4+4 packed type would be expected --
+ // confirm this mapping.
+ case VK_FORMAT_R4G4_UNORM: return TextureFormat(TextureFormat::RG, TextureFormat::UNORM_SHORT_4444);
+ case VK_FORMAT_R4G4B4A4_UNORM: return TextureFormat(TextureFormat::RGBA, TextureFormat::UNORM_SHORT_4444);
+ case VK_FORMAT_R5G6B5_UNORM: return TextureFormat(TextureFormat::RGB, TextureFormat::UNORM_SHORT_565);
+ case VK_FORMAT_R5G5B5A1_UNORM: return TextureFormat(TextureFormat::RGBA, TextureFormat::UNORM_SHORT_5551);
+
+ case VK_FORMAT_R8_UNORM: return TextureFormat(TextureFormat::R, TextureFormat::UNORM_INT8);
+ case VK_FORMAT_R8_SNORM: return TextureFormat(TextureFormat::R, TextureFormat::SNORM_INT8);
+ case VK_FORMAT_R8_USCALED: return TextureFormat(TextureFormat::R, TextureFormat::UNSIGNED_INT8);
+ case VK_FORMAT_R8_SSCALED: return TextureFormat(TextureFormat::R, TextureFormat::SIGNED_INT8);
+ case VK_FORMAT_R8_UINT: return TextureFormat(TextureFormat::R, TextureFormat::UNSIGNED_INT8);
+ case VK_FORMAT_R8_SINT: return TextureFormat(TextureFormat::R, TextureFormat::SIGNED_INT8);
+ case VK_FORMAT_R8_SRGB: return TextureFormat(TextureFormat::sR, TextureFormat::UNORM_INT8);
+
+ case VK_FORMAT_R8G8_UNORM: return TextureFormat(TextureFormat::RG, TextureFormat::UNORM_INT8);
+ case VK_FORMAT_R8G8_SNORM: return TextureFormat(TextureFormat::RG, TextureFormat::SNORM_INT8);
+ case VK_FORMAT_R8G8_USCALED: return TextureFormat(TextureFormat::RG, TextureFormat::UNSIGNED_INT8);
+ case VK_FORMAT_R8G8_SSCALED: return TextureFormat(TextureFormat::RG, TextureFormat::SIGNED_INT8);
+ case VK_FORMAT_R8G8_UINT: return TextureFormat(TextureFormat::RG, TextureFormat::UNSIGNED_INT8);
+ case VK_FORMAT_R8G8_SINT: return TextureFormat(TextureFormat::RG, TextureFormat::SIGNED_INT8);
+ case VK_FORMAT_R8G8_SRGB: return TextureFormat(TextureFormat::sRG, TextureFormat::UNORM_INT8);
+
+ case VK_FORMAT_R8G8B8_UNORM: return TextureFormat(TextureFormat::RGB, TextureFormat::UNORM_INT8);
+ case VK_FORMAT_R8G8B8_SNORM: return TextureFormat(TextureFormat::RGB, TextureFormat::SNORM_INT8);
+ case VK_FORMAT_R8G8B8_USCALED: return TextureFormat(TextureFormat::RGB, TextureFormat::UNSIGNED_INT8);
+ case VK_FORMAT_R8G8B8_SSCALED: return TextureFormat(TextureFormat::RGB, TextureFormat::SIGNED_INT8);
+ case VK_FORMAT_R8G8B8_UINT: return TextureFormat(TextureFormat::RGB, TextureFormat::UNSIGNED_INT8);
+ case VK_FORMAT_R8G8B8_SINT: return TextureFormat(TextureFormat::RGB, TextureFormat::SIGNED_INT8);
+ case VK_FORMAT_R8G8B8_SRGB: return TextureFormat(TextureFormat::sRGB, TextureFormat::UNORM_INT8);
+
+ case VK_FORMAT_R8G8B8A8_UNORM: return TextureFormat(TextureFormat::RGBA, TextureFormat::UNORM_INT8);
+ case VK_FORMAT_R8G8B8A8_SNORM: return TextureFormat(TextureFormat::RGBA, TextureFormat::SNORM_INT8);
+ case VK_FORMAT_R8G8B8A8_USCALED: return TextureFormat(TextureFormat::RGBA, TextureFormat::UNSIGNED_INT8);
+ case VK_FORMAT_R8G8B8A8_SSCALED: return TextureFormat(TextureFormat::RGBA, TextureFormat::SIGNED_INT8);
+ case VK_FORMAT_R8G8B8A8_UINT: return TextureFormat(TextureFormat::RGBA, TextureFormat::UNSIGNED_INT8);
+ case VK_FORMAT_R8G8B8A8_SINT: return TextureFormat(TextureFormat::RGBA, TextureFormat::SIGNED_INT8);
+ case VK_FORMAT_R8G8B8A8_SRGB: return TextureFormat(TextureFormat::sRGBA, TextureFormat::UNORM_INT8);
+
+ case VK_FORMAT_R10G10B10A2_UNORM: return TextureFormat(TextureFormat::RGBA, TextureFormat::UNORM_INT_1010102_REV);
+ case VK_FORMAT_R10G10B10A2_UINT: return TextureFormat(TextureFormat::RGBA, TextureFormat::UNSIGNED_INT_1010102_REV);
+ case VK_FORMAT_R10G10B10A2_USCALED: return TextureFormat(TextureFormat::RGBA, TextureFormat::UNSIGNED_INT_1010102_REV);
+
+ case VK_FORMAT_R16_UNORM: return TextureFormat(TextureFormat::R, TextureFormat::UNORM_INT16);
+ case VK_FORMAT_R16_SNORM: return TextureFormat(TextureFormat::R, TextureFormat::SNORM_INT16);
+ case VK_FORMAT_R16_USCALED: return TextureFormat(TextureFormat::R, TextureFormat::UNSIGNED_INT16);
+ case VK_FORMAT_R16_SSCALED: return TextureFormat(TextureFormat::R, TextureFormat::SIGNED_INT16);
+ case VK_FORMAT_R16_UINT: return TextureFormat(TextureFormat::R, TextureFormat::UNSIGNED_INT16);
+ case VK_FORMAT_R16_SINT: return TextureFormat(TextureFormat::R, TextureFormat::SIGNED_INT16);
+ case VK_FORMAT_R16_SFLOAT: return TextureFormat(TextureFormat::R, TextureFormat::HALF_FLOAT);
+
+ case VK_FORMAT_R16G16_UNORM: return TextureFormat(TextureFormat::RG, TextureFormat::UNORM_INT16);
+ case VK_FORMAT_R16G16_SNORM: return TextureFormat(TextureFormat::RG, TextureFormat::SNORM_INT16);
+ case VK_FORMAT_R16G16_USCALED: return TextureFormat(TextureFormat::RG, TextureFormat::UNSIGNED_INT16);
+ case VK_FORMAT_R16G16_SSCALED: return TextureFormat(TextureFormat::RG, TextureFormat::SIGNED_INT16);
+ case VK_FORMAT_R16G16_UINT: return TextureFormat(TextureFormat::RG, TextureFormat::UNSIGNED_INT16);
+ case VK_FORMAT_R16G16_SINT: return TextureFormat(TextureFormat::RG, TextureFormat::SIGNED_INT16);
+ case VK_FORMAT_R16G16_SFLOAT: return TextureFormat(TextureFormat::RG, TextureFormat::HALF_FLOAT);
+
+ case VK_FORMAT_R16G16B16_UNORM: return TextureFormat(TextureFormat::RGB, TextureFormat::UNORM_INT16);
+ case VK_FORMAT_R16G16B16_SNORM: return TextureFormat(TextureFormat::RGB, TextureFormat::SNORM_INT16);
+ case VK_FORMAT_R16G16B16_USCALED: return TextureFormat(TextureFormat::RGB, TextureFormat::UNSIGNED_INT16);
+ case VK_FORMAT_R16G16B16_SSCALED: return TextureFormat(TextureFormat::RGB, TextureFormat::SIGNED_INT16);
+ case VK_FORMAT_R16G16B16_UINT: return TextureFormat(TextureFormat::RGB, TextureFormat::UNSIGNED_INT16);
+ case VK_FORMAT_R16G16B16_SINT: return TextureFormat(TextureFormat::RGB, TextureFormat::SIGNED_INT16);
+ case VK_FORMAT_R16G16B16_SFLOAT: return TextureFormat(TextureFormat::RGB, TextureFormat::HALF_FLOAT);
+
+ case VK_FORMAT_R16G16B16A16_UNORM: return TextureFormat(TextureFormat::RGBA, TextureFormat::UNORM_INT16);
+ case VK_FORMAT_R16G16B16A16_SNORM: return TextureFormat(TextureFormat::RGBA, TextureFormat::SNORM_INT16);
+ case VK_FORMAT_R16G16B16A16_USCALED:return TextureFormat(TextureFormat::RGBA, TextureFormat::UNSIGNED_INT16);
+ case VK_FORMAT_R16G16B16A16_SSCALED:return TextureFormat(TextureFormat::RGBA, TextureFormat::SIGNED_INT16);
+ case VK_FORMAT_R16G16B16A16_UINT: return TextureFormat(TextureFormat::RGBA, TextureFormat::UNSIGNED_INT16);
+ case VK_FORMAT_R16G16B16A16_SINT: return TextureFormat(TextureFormat::RGBA, TextureFormat::SIGNED_INT16);
+ case VK_FORMAT_R16G16B16A16_SFLOAT: return TextureFormat(TextureFormat::RGBA, TextureFormat::HALF_FLOAT);
+
+ case VK_FORMAT_R32_UINT: return TextureFormat(TextureFormat::R, TextureFormat::UNSIGNED_INT32);
+ case VK_FORMAT_R32_SINT: return TextureFormat(TextureFormat::R, TextureFormat::SIGNED_INT32);
+ case VK_FORMAT_R32_SFLOAT: return TextureFormat(TextureFormat::R, TextureFormat::FLOAT);
+
+ case VK_FORMAT_R32G32_UINT: return TextureFormat(TextureFormat::RG, TextureFormat::UNSIGNED_INT32);
+ case VK_FORMAT_R32G32_SINT: return TextureFormat(TextureFormat::RG, TextureFormat::SIGNED_INT32);
+ case VK_FORMAT_R32G32_SFLOAT: return TextureFormat(TextureFormat::RG, TextureFormat::FLOAT);
+
+ case VK_FORMAT_R32G32B32_UINT: return TextureFormat(TextureFormat::RGB, TextureFormat::UNSIGNED_INT32);
+ case VK_FORMAT_R32G32B32_SINT: return TextureFormat(TextureFormat::RGB, TextureFormat::SIGNED_INT32);
+ case VK_FORMAT_R32G32B32_SFLOAT: return TextureFormat(TextureFormat::RGB, TextureFormat::FLOAT);
+
+ case VK_FORMAT_R32G32B32A32_UINT: return TextureFormat(TextureFormat::RGBA, TextureFormat::UNSIGNED_INT32);
+ case VK_FORMAT_R32G32B32A32_SINT: return TextureFormat(TextureFormat::RGBA, TextureFormat::SIGNED_INT32);
+ case VK_FORMAT_R32G32B32A32_SFLOAT: return TextureFormat(TextureFormat::RGBA, TextureFormat::FLOAT);
+
+ case VK_FORMAT_R11G11B10_UFLOAT: return TextureFormat(TextureFormat::RGB, TextureFormat::UNSIGNED_INT_11F_11F_10F_REV);
+ case VK_FORMAT_R9G9B9E5_UFLOAT: return TextureFormat(TextureFormat::RGB, TextureFormat::UNSIGNED_INT_999_E5_REV);
+
+ case VK_FORMAT_D16_UNORM: return TextureFormat(TextureFormat::D, TextureFormat::UNORM_INT16);
+ case VK_FORMAT_D24_UNORM: return TextureFormat(TextureFormat::D, TextureFormat::UNORM_INT24);
+ case VK_FORMAT_D32_SFLOAT: return TextureFormat(TextureFormat::D, TextureFormat::FLOAT);
+
+ case VK_FORMAT_S8_UINT: return TextureFormat(TextureFormat::S, TextureFormat::UNSIGNED_INT8);
+
+ // \note There is no standard interleaved memory layout for DS formats; buffer-image copies
+ // will always operate on either D or S aspect only. See Khronos bug 12998
+ case VK_FORMAT_D16_UNORM_S8_UINT: return TextureFormat(TextureFormat::DS, TextureFormat::UNSIGNED_INT_16_8);
+ case VK_FORMAT_D24_UNORM_S8_UINT: return TextureFormat(TextureFormat::DS, TextureFormat::UNSIGNED_INT_24_8);
+ case VK_FORMAT_D32_SFLOAT_S8_UINT: return TextureFormat(TextureFormat::DS, TextureFormat::FLOAT_UNSIGNED_INT_8);
+
+ case VK_FORMAT_B4G4R4A4_UNORM: return TextureFormat(TextureFormat::BGRA, TextureFormat::UNORM_SHORT_4444);
+ case VK_FORMAT_B5G5R5A1_UNORM: return TextureFormat(TextureFormat::BGRA, TextureFormat::UNORM_SHORT_5551);
+
+ // \note After DE_FATAL these cases fall through to the default throw below
+ // (presumably DE_FATAL aborts in debug builds -- confirm release behavior).
+ case VK_FORMAT_R4G4_USCALED:
+ case VK_FORMAT_R4G4B4A4_USCALED:
+ case VK_FORMAT_R5G6B5_USCALED:
+ case VK_FORMAT_R5G5B5A1_USCALED:
+ DE_FATAL("Format not implemented");
+
+ default:
+ TCU_THROW(InternalError, "Unknown image format");
+ }
+}
+
+} // vk
--- /dev/null
+#ifndef _VKIMAGEUTIL_HPP
+#define _VKIMAGEUTIL_HPP
+/*-------------------------------------------------------------------------
+ * Vulkan CTS Framework
+ * --------------------
+ *
+ * Copyright (c) 2015 The Khronos Group Inc.
+ * Copyright (c) 2015 Imagination Technologies Ltd.
+ * Copyright (c) 2015 Google Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and/or associated documentation files (the
+ * "Materials"), to deal in the Materials without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Materials, and to
+ * permit persons to whom the Materials are furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice(s) and this permission notice shall be
+ * included in all copies or substantial portions of the Materials.
+ *
+ * The Materials are Confidential Information as defined by the
+ * Khronos Membership Agreement until designated non-confidential by
+ * Khronos, at which point this condition clause shall be removed.
+ *
+ * THE MATERIALS ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * MATERIALS OR THE USE OR OTHER DEALINGS IN THE MATERIALS.
+ *
+ *//*!
+ * \file
+ * \brief Utilities for images.
+ *//*--------------------------------------------------------------------*/
+
+#include "vkDefs.hpp"
+#include "tcuTexture.hpp"
+
+namespace vk
+{
+
+//! Format classification helpers for VkFormat values (implemented in
+//! vkImageUtil.cpp).
+bool isFloatFormat (VkFormat format);
+bool isUnormFormat (VkFormat format);
+bool isSnormFormat (VkFormat format);
+bool isIntFormat (VkFormat format);
+bool isUintFormat (VkFormat format);
+bool isDepthStencilFormat (VkFormat format);
+bool isCompressedFormat (VkFormat format);
+
+//! Conversions between Vulkan and tcu texture formats; both throw
+//! InternalError when the format has no mapping.
+tcu::TextureFormat mapVkFormat (VkFormat format);
+VkFormat mapTextureFormat (const tcu::TextureFormat& format);
+
+} // vk
+
+#endif // _VKIMAGEUTIL_HPP
--- /dev/null
+/* WARNING: This is auto-generated file. Do not modify, since changes will
+ * be lost! Modify the generating script instead.
+ */
+m_vk.destroyDevice = (DestroyDeviceFunc) GET_PROC_ADDR("vkDestroyDevice");
+m_vk.getGlobalExtensionProperties = (GetGlobalExtensionPropertiesFunc) GET_PROC_ADDR("vkGetGlobalExtensionProperties");
+m_vk.getPhysicalDeviceExtensionProperties = (GetPhysicalDeviceExtensionPropertiesFunc) GET_PROC_ADDR("vkGetPhysicalDeviceExtensionProperties");
+m_vk.getGlobalLayerProperties = (GetGlobalLayerPropertiesFunc) GET_PROC_ADDR("vkGetGlobalLayerProperties");
+m_vk.getPhysicalDeviceLayerProperties = (GetPhysicalDeviceLayerPropertiesFunc) GET_PROC_ADDR("vkGetPhysicalDeviceLayerProperties");
+m_vk.getDeviceQueue = (GetDeviceQueueFunc) GET_PROC_ADDR("vkGetDeviceQueue");
+m_vk.queueSubmit = (QueueSubmitFunc) GET_PROC_ADDR("vkQueueSubmit");
+m_vk.queueWaitIdle = (QueueWaitIdleFunc) GET_PROC_ADDR("vkQueueWaitIdle");
+m_vk.deviceWaitIdle = (DeviceWaitIdleFunc) GET_PROC_ADDR("vkDeviceWaitIdle");
+m_vk.allocMemory = (AllocMemoryFunc) GET_PROC_ADDR("vkAllocMemory");
+m_vk.freeMemory = (FreeMemoryFunc) GET_PROC_ADDR("vkFreeMemory");
+m_vk.mapMemory = (MapMemoryFunc) GET_PROC_ADDR("vkMapMemory");
+m_vk.unmapMemory = (UnmapMemoryFunc) GET_PROC_ADDR("vkUnmapMemory");
+m_vk.flushMappedMemoryRanges = (FlushMappedMemoryRangesFunc) GET_PROC_ADDR("vkFlushMappedMemoryRanges");
+m_vk.invalidateMappedMemoryRanges = (InvalidateMappedMemoryRangesFunc) GET_PROC_ADDR("vkInvalidateMappedMemoryRanges");
+m_vk.getDeviceMemoryCommitment = (GetDeviceMemoryCommitmentFunc) GET_PROC_ADDR("vkGetDeviceMemoryCommitment");
+m_vk.bindBufferMemory = (BindBufferMemoryFunc) GET_PROC_ADDR("vkBindBufferMemory");
+m_vk.bindImageMemory = (BindImageMemoryFunc) GET_PROC_ADDR("vkBindImageMemory");
+m_vk.getBufferMemoryRequirements = (GetBufferMemoryRequirementsFunc) GET_PROC_ADDR("vkGetBufferMemoryRequirements");
+m_vk.getImageMemoryRequirements = (GetImageMemoryRequirementsFunc) GET_PROC_ADDR("vkGetImageMemoryRequirements");
+m_vk.getImageSparseMemoryRequirements = (GetImageSparseMemoryRequirementsFunc) GET_PROC_ADDR("vkGetImageSparseMemoryRequirements");
+m_vk.getPhysicalDeviceSparseImageFormatProperties = (GetPhysicalDeviceSparseImageFormatPropertiesFunc) GET_PROC_ADDR("vkGetPhysicalDeviceSparseImageFormatProperties");
+m_vk.queueBindSparseBufferMemory = (QueueBindSparseBufferMemoryFunc) GET_PROC_ADDR("vkQueueBindSparseBufferMemory");
+m_vk.queueBindSparseImageOpaqueMemory = (QueueBindSparseImageOpaqueMemoryFunc) GET_PROC_ADDR("vkQueueBindSparseImageOpaqueMemory");
+m_vk.queueBindSparseImageMemory = (QueueBindSparseImageMemoryFunc) GET_PROC_ADDR("vkQueueBindSparseImageMemory");
+m_vk.createFence = (CreateFenceFunc) GET_PROC_ADDR("vkCreateFence");
+m_vk.destroyFence = (DestroyFenceFunc) GET_PROC_ADDR("vkDestroyFence");
+m_vk.resetFences = (ResetFencesFunc) GET_PROC_ADDR("vkResetFences");
+m_vk.getFenceStatus = (GetFenceStatusFunc) GET_PROC_ADDR("vkGetFenceStatus");
+m_vk.waitForFences = (WaitForFencesFunc) GET_PROC_ADDR("vkWaitForFences");
+m_vk.createSemaphore = (CreateSemaphoreFunc) GET_PROC_ADDR("vkCreateSemaphore");
+m_vk.destroySemaphore = (DestroySemaphoreFunc) GET_PROC_ADDR("vkDestroySemaphore");
+m_vk.queueSignalSemaphore = (QueueSignalSemaphoreFunc) GET_PROC_ADDR("vkQueueSignalSemaphore");
+m_vk.queueWaitSemaphore = (QueueWaitSemaphoreFunc) GET_PROC_ADDR("vkQueueWaitSemaphore");
+m_vk.createEvent = (CreateEventFunc) GET_PROC_ADDR("vkCreateEvent");
+m_vk.destroyEvent = (DestroyEventFunc) GET_PROC_ADDR("vkDestroyEvent");
+m_vk.getEventStatus = (GetEventStatusFunc) GET_PROC_ADDR("vkGetEventStatus");
+m_vk.setEvent = (SetEventFunc) GET_PROC_ADDR("vkSetEvent");
+m_vk.resetEvent = (ResetEventFunc) GET_PROC_ADDR("vkResetEvent");
+m_vk.createQueryPool = (CreateQueryPoolFunc) GET_PROC_ADDR("vkCreateQueryPool");
+m_vk.destroyQueryPool = (DestroyQueryPoolFunc) GET_PROC_ADDR("vkDestroyQueryPool");
+m_vk.getQueryPoolResults = (GetQueryPoolResultsFunc) GET_PROC_ADDR("vkGetQueryPoolResults");
+m_vk.createBuffer = (CreateBufferFunc) GET_PROC_ADDR("vkCreateBuffer");
+m_vk.destroyBuffer = (DestroyBufferFunc) GET_PROC_ADDR("vkDestroyBuffer");
+m_vk.createBufferView = (CreateBufferViewFunc) GET_PROC_ADDR("vkCreateBufferView");
+m_vk.destroyBufferView = (DestroyBufferViewFunc) GET_PROC_ADDR("vkDestroyBufferView");
+m_vk.createImage = (CreateImageFunc) GET_PROC_ADDR("vkCreateImage");
+m_vk.destroyImage = (DestroyImageFunc) GET_PROC_ADDR("vkDestroyImage");
+m_vk.getImageSubresourceLayout = (GetImageSubresourceLayoutFunc) GET_PROC_ADDR("vkGetImageSubresourceLayout");
+m_vk.createImageView = (CreateImageViewFunc) GET_PROC_ADDR("vkCreateImageView");
+m_vk.destroyImageView = (DestroyImageViewFunc) GET_PROC_ADDR("vkDestroyImageView");
+m_vk.createAttachmentView = (CreateAttachmentViewFunc) GET_PROC_ADDR("vkCreateAttachmentView");
+m_vk.destroyAttachmentView = (DestroyAttachmentViewFunc) GET_PROC_ADDR("vkDestroyAttachmentView");
+m_vk.createShaderModule = (CreateShaderModuleFunc) GET_PROC_ADDR("vkCreateShaderModule");
+m_vk.destroyShaderModule = (DestroyShaderModuleFunc) GET_PROC_ADDR("vkDestroyShaderModule");
+m_vk.createShader = (CreateShaderFunc) GET_PROC_ADDR("vkCreateShader");
+m_vk.destroyShader = (DestroyShaderFunc) GET_PROC_ADDR("vkDestroyShader");
+m_vk.createPipelineCache = (CreatePipelineCacheFunc) GET_PROC_ADDR("vkCreatePipelineCache");
+m_vk.destroyPipelineCache = (DestroyPipelineCacheFunc) GET_PROC_ADDR("vkDestroyPipelineCache");
+m_vk.getPipelineCacheSize = (GetPipelineCacheSizeFunc) GET_PROC_ADDR("vkGetPipelineCacheSize");
+m_vk.getPipelineCacheData = (GetPipelineCacheDataFunc) GET_PROC_ADDR("vkGetPipelineCacheData");
+m_vk.mergePipelineCaches = (MergePipelineCachesFunc) GET_PROC_ADDR("vkMergePipelineCaches");
+m_vk.createGraphicsPipelines = (CreateGraphicsPipelinesFunc) GET_PROC_ADDR("vkCreateGraphicsPipelines");
+m_vk.createComputePipelines = (CreateComputePipelinesFunc) GET_PROC_ADDR("vkCreateComputePipelines");
+m_vk.destroyPipeline = (DestroyPipelineFunc) GET_PROC_ADDR("vkDestroyPipeline");
+m_vk.createPipelineLayout = (CreatePipelineLayoutFunc) GET_PROC_ADDR("vkCreatePipelineLayout");
+m_vk.destroyPipelineLayout = (DestroyPipelineLayoutFunc) GET_PROC_ADDR("vkDestroyPipelineLayout");
+m_vk.createSampler = (CreateSamplerFunc) GET_PROC_ADDR("vkCreateSampler");
+m_vk.destroySampler = (DestroySamplerFunc) GET_PROC_ADDR("vkDestroySampler");
+m_vk.createDescriptorSetLayout = (CreateDescriptorSetLayoutFunc) GET_PROC_ADDR("vkCreateDescriptorSetLayout");
+m_vk.destroyDescriptorSetLayout = (DestroyDescriptorSetLayoutFunc) GET_PROC_ADDR("vkDestroyDescriptorSetLayout");
+m_vk.createDescriptorPool = (CreateDescriptorPoolFunc) GET_PROC_ADDR("vkCreateDescriptorPool");
+m_vk.destroyDescriptorPool = (DestroyDescriptorPoolFunc) GET_PROC_ADDR("vkDestroyDescriptorPool");
+m_vk.resetDescriptorPool = (ResetDescriptorPoolFunc) GET_PROC_ADDR("vkResetDescriptorPool");
+m_vk.allocDescriptorSets = (AllocDescriptorSetsFunc) GET_PROC_ADDR("vkAllocDescriptorSets");
+m_vk.freeDescriptorSets = (FreeDescriptorSetsFunc) GET_PROC_ADDR("vkFreeDescriptorSets");
+m_vk.updateDescriptorSets = (UpdateDescriptorSetsFunc) GET_PROC_ADDR("vkUpdateDescriptorSets");
+m_vk.createDynamicViewportState = (CreateDynamicViewportStateFunc) GET_PROC_ADDR("vkCreateDynamicViewportState");
+m_vk.destroyDynamicViewportState = (DestroyDynamicViewportStateFunc) GET_PROC_ADDR("vkDestroyDynamicViewportState");
+m_vk.createDynamicRasterState = (CreateDynamicRasterStateFunc) GET_PROC_ADDR("vkCreateDynamicRasterState");
+m_vk.destroyDynamicRasterState = (DestroyDynamicRasterStateFunc) GET_PROC_ADDR("vkDestroyDynamicRasterState");
+m_vk.createDynamicColorBlendState = (CreateDynamicColorBlendStateFunc) GET_PROC_ADDR("vkCreateDynamicColorBlendState");
+m_vk.destroyDynamicColorBlendState = (DestroyDynamicColorBlendStateFunc) GET_PROC_ADDR("vkDestroyDynamicColorBlendState");
+m_vk.createDynamicDepthStencilState = (CreateDynamicDepthStencilStateFunc) GET_PROC_ADDR("vkCreateDynamicDepthStencilState");
+m_vk.destroyDynamicDepthStencilState = (DestroyDynamicDepthStencilStateFunc) GET_PROC_ADDR("vkDestroyDynamicDepthStencilState");
+m_vk.createFramebuffer = (CreateFramebufferFunc) GET_PROC_ADDR("vkCreateFramebuffer");
+m_vk.destroyFramebuffer = (DestroyFramebufferFunc) GET_PROC_ADDR("vkDestroyFramebuffer");
+m_vk.createRenderPass = (CreateRenderPassFunc) GET_PROC_ADDR("vkCreateRenderPass");
+m_vk.destroyRenderPass = (DestroyRenderPassFunc) GET_PROC_ADDR("vkDestroyRenderPass");
+m_vk.getRenderAreaGranularity = (GetRenderAreaGranularityFunc) GET_PROC_ADDR("vkGetRenderAreaGranularity");
+m_vk.createCommandPool = (CreateCommandPoolFunc) GET_PROC_ADDR("vkCreateCommandPool");
+m_vk.destroyCommandPool = (DestroyCommandPoolFunc) GET_PROC_ADDR("vkDestroyCommandPool");
+m_vk.resetCommandPool = (ResetCommandPoolFunc) GET_PROC_ADDR("vkResetCommandPool");
+m_vk.createCommandBuffer = (CreateCommandBufferFunc) GET_PROC_ADDR("vkCreateCommandBuffer");
+m_vk.destroyCommandBuffer = (DestroyCommandBufferFunc) GET_PROC_ADDR("vkDestroyCommandBuffer");
+m_vk.beginCommandBuffer = (BeginCommandBufferFunc) GET_PROC_ADDR("vkBeginCommandBuffer");
+m_vk.endCommandBuffer = (EndCommandBufferFunc) GET_PROC_ADDR("vkEndCommandBuffer");
+m_vk.resetCommandBuffer = (ResetCommandBufferFunc) GET_PROC_ADDR("vkResetCommandBuffer");
+m_vk.cmdBindPipeline = (CmdBindPipelineFunc) GET_PROC_ADDR("vkCmdBindPipeline");
+m_vk.cmdBindDynamicViewportState = (CmdBindDynamicViewportStateFunc) GET_PROC_ADDR("vkCmdBindDynamicViewportState");
+m_vk.cmdBindDynamicRasterState = (CmdBindDynamicRasterStateFunc) GET_PROC_ADDR("vkCmdBindDynamicRasterState");
+m_vk.cmdBindDynamicColorBlendState = (CmdBindDynamicColorBlendStateFunc) GET_PROC_ADDR("vkCmdBindDynamicColorBlendState");
+m_vk.cmdBindDynamicDepthStencilState = (CmdBindDynamicDepthStencilStateFunc) GET_PROC_ADDR("vkCmdBindDynamicDepthStencilState");
+m_vk.cmdBindDescriptorSets = (CmdBindDescriptorSetsFunc) GET_PROC_ADDR("vkCmdBindDescriptorSets");
+m_vk.cmdBindIndexBuffer = (CmdBindIndexBufferFunc) GET_PROC_ADDR("vkCmdBindIndexBuffer");
+m_vk.cmdBindVertexBuffers = (CmdBindVertexBuffersFunc) GET_PROC_ADDR("vkCmdBindVertexBuffers");
+m_vk.cmdDraw = (CmdDrawFunc) GET_PROC_ADDR("vkCmdDraw");
+m_vk.cmdDrawIndexed = (CmdDrawIndexedFunc) GET_PROC_ADDR("vkCmdDrawIndexed");
+m_vk.cmdDrawIndirect = (CmdDrawIndirectFunc) GET_PROC_ADDR("vkCmdDrawIndirect");
+m_vk.cmdDrawIndexedIndirect = (CmdDrawIndexedIndirectFunc) GET_PROC_ADDR("vkCmdDrawIndexedIndirect");
+m_vk.cmdDispatch = (CmdDispatchFunc) GET_PROC_ADDR("vkCmdDispatch");
+m_vk.cmdDispatchIndirect = (CmdDispatchIndirectFunc) GET_PROC_ADDR("vkCmdDispatchIndirect");
+m_vk.cmdCopyBuffer = (CmdCopyBufferFunc) GET_PROC_ADDR("vkCmdCopyBuffer");
+m_vk.cmdCopyImage = (CmdCopyImageFunc) GET_PROC_ADDR("vkCmdCopyImage");
+m_vk.cmdBlitImage = (CmdBlitImageFunc) GET_PROC_ADDR("vkCmdBlitImage");
+m_vk.cmdCopyBufferToImage = (CmdCopyBufferToImageFunc) GET_PROC_ADDR("vkCmdCopyBufferToImage");
+m_vk.cmdCopyImageToBuffer = (CmdCopyImageToBufferFunc) GET_PROC_ADDR("vkCmdCopyImageToBuffer");
+m_vk.cmdUpdateBuffer = (CmdUpdateBufferFunc) GET_PROC_ADDR("vkCmdUpdateBuffer");
+m_vk.cmdFillBuffer = (CmdFillBufferFunc) GET_PROC_ADDR("vkCmdFillBuffer");
+m_vk.cmdClearColorImage = (CmdClearColorImageFunc) GET_PROC_ADDR("vkCmdClearColorImage");
+m_vk.cmdClearDepthStencilImage = (CmdClearDepthStencilImageFunc) GET_PROC_ADDR("vkCmdClearDepthStencilImage");
+m_vk.cmdClearColorAttachment = (CmdClearColorAttachmentFunc) GET_PROC_ADDR("vkCmdClearColorAttachment");
+m_vk.cmdClearDepthStencilAttachment = (CmdClearDepthStencilAttachmentFunc) GET_PROC_ADDR("vkCmdClearDepthStencilAttachment");
+m_vk.cmdResolveImage = (CmdResolveImageFunc) GET_PROC_ADDR("vkCmdResolveImage");
+m_vk.cmdSetEvent = (CmdSetEventFunc) GET_PROC_ADDR("vkCmdSetEvent");
+m_vk.cmdResetEvent = (CmdResetEventFunc) GET_PROC_ADDR("vkCmdResetEvent");
+m_vk.cmdWaitEvents = (CmdWaitEventsFunc) GET_PROC_ADDR("vkCmdWaitEvents");
+m_vk.cmdPipelineBarrier = (CmdPipelineBarrierFunc) GET_PROC_ADDR("vkCmdPipelineBarrier");
+m_vk.cmdBeginQuery = (CmdBeginQueryFunc) GET_PROC_ADDR("vkCmdBeginQuery");
+m_vk.cmdEndQuery = (CmdEndQueryFunc) GET_PROC_ADDR("vkCmdEndQuery");
+m_vk.cmdResetQueryPool = (CmdResetQueryPoolFunc) GET_PROC_ADDR("vkCmdResetQueryPool");
+m_vk.cmdWriteTimestamp = (CmdWriteTimestampFunc) GET_PROC_ADDR("vkCmdWriteTimestamp");
+m_vk.cmdCopyQueryPoolResults = (CmdCopyQueryPoolResultsFunc) GET_PROC_ADDR("vkCmdCopyQueryPoolResults");
+m_vk.cmdPushConstants = (CmdPushConstantsFunc) GET_PROC_ADDR("vkCmdPushConstants");
+m_vk.cmdBeginRenderPass = (CmdBeginRenderPassFunc) GET_PROC_ADDR("vkCmdBeginRenderPass");
+m_vk.cmdNextSubpass = (CmdNextSubpassFunc) GET_PROC_ADDR("vkCmdNextSubpass");
+m_vk.cmdEndRenderPass = (CmdEndRenderPassFunc) GET_PROC_ADDR("vkCmdEndRenderPass");
+m_vk.cmdExecuteCommands = (CmdExecuteCommandsFunc) GET_PROC_ADDR("vkCmdExecuteCommands");
--- /dev/null
+/* WARNING: This is auto-generated file. Do not modify, since changes will
+ * be lost! Modify the generating script instead.
+ */
+m_vk.destroyInstance = (DestroyInstanceFunc) GET_PROC_ADDR("vkDestroyInstance");
+m_vk.enumeratePhysicalDevices = (EnumeratePhysicalDevicesFunc) GET_PROC_ADDR("vkEnumeratePhysicalDevices");
+m_vk.getPhysicalDeviceFeatures = (GetPhysicalDeviceFeaturesFunc) GET_PROC_ADDR("vkGetPhysicalDeviceFeatures");
+m_vk.getPhysicalDeviceFormatProperties = (GetPhysicalDeviceFormatPropertiesFunc) GET_PROC_ADDR("vkGetPhysicalDeviceFormatProperties");
+m_vk.getPhysicalDeviceImageFormatProperties = (GetPhysicalDeviceImageFormatPropertiesFunc) GET_PROC_ADDR("vkGetPhysicalDeviceImageFormatProperties");
+m_vk.getPhysicalDeviceLimits = (GetPhysicalDeviceLimitsFunc) GET_PROC_ADDR("vkGetPhysicalDeviceLimits");
+m_vk.getPhysicalDeviceProperties = (GetPhysicalDevicePropertiesFunc) GET_PROC_ADDR("vkGetPhysicalDeviceProperties");
+m_vk.getPhysicalDeviceQueueCount = (GetPhysicalDeviceQueueCountFunc) GET_PROC_ADDR("vkGetPhysicalDeviceQueueCount");
+m_vk.getPhysicalDeviceQueueProperties = (GetPhysicalDeviceQueuePropertiesFunc) GET_PROC_ADDR("vkGetPhysicalDeviceQueueProperties");
+m_vk.getPhysicalDeviceMemoryProperties = (GetPhysicalDeviceMemoryPropertiesFunc) GET_PROC_ADDR("vkGetPhysicalDeviceMemoryProperties");
+m_vk.getDeviceProcAddr = (GetDeviceProcAddrFunc) GET_PROC_ADDR("vkGetDeviceProcAddr");
+m_vk.createDevice = (CreateDeviceFunc) GET_PROC_ADDR("vkCreateDevice");
--- /dev/null
+/* WARNING: This is auto-generated file. Do not modify, since changes will
+ * be lost! Modify the generating script instead.
+ */
+m_vk.createInstance = (CreateInstanceFunc) GET_PROC_ADDR("vkCreateInstance");
+m_vk.getInstanceProcAddr = (GetInstanceProcAddrFunc) GET_PROC_ADDR("vkGetInstanceProcAddr");
--- /dev/null
+/* WARNING: This is auto-generated file. Do not modify, since changes will
+ * be lost! Modify the generating script instead.
+ */
+
+VkResult InstanceDriver::destroyInstance (VkInstance instance) const
+{
+ return m_vk.destroyInstance(instance);
+}
+
+VkResult InstanceDriver::enumeratePhysicalDevices (VkInstance instance, deUint32* pPhysicalDeviceCount, VkPhysicalDevice* pPhysicalDevices) const
+{
+ return m_vk.enumeratePhysicalDevices(instance, pPhysicalDeviceCount, pPhysicalDevices);
+}
+
+VkResult InstanceDriver::getPhysicalDeviceFeatures (VkPhysicalDevice physicalDevice, VkPhysicalDeviceFeatures* pFeatures) const
+{
+ return m_vk.getPhysicalDeviceFeatures(physicalDevice, pFeatures);
+}
+
+VkResult InstanceDriver::getPhysicalDeviceFormatProperties (VkPhysicalDevice physicalDevice, VkFormat format, VkFormatProperties* pFormatProperties) const
+{
+ return m_vk.getPhysicalDeviceFormatProperties(physicalDevice, format, pFormatProperties);
+}
+
+VkResult InstanceDriver::getPhysicalDeviceImageFormatProperties (VkPhysicalDevice physicalDevice, VkFormat format, VkImageType type, VkImageTiling tiling, VkImageUsageFlags usage, VkImageFormatProperties* pImageFormatProperties) const
+{
+ return m_vk.getPhysicalDeviceImageFormatProperties(physicalDevice, format, type, tiling, usage, pImageFormatProperties);
+}
+
+VkResult InstanceDriver::getPhysicalDeviceLimits (VkPhysicalDevice physicalDevice, VkPhysicalDeviceLimits* pLimits) const
+{
+ return m_vk.getPhysicalDeviceLimits(physicalDevice, pLimits);
+}
+
+VkResult InstanceDriver::getPhysicalDeviceProperties (VkPhysicalDevice physicalDevice, VkPhysicalDeviceProperties* pProperties) const
+{
+ return m_vk.getPhysicalDeviceProperties(physicalDevice, pProperties);
+}
+
+VkResult InstanceDriver::getPhysicalDeviceQueueCount (VkPhysicalDevice physicalDevice, deUint32* pCount) const
+{
+ return m_vk.getPhysicalDeviceQueueCount(physicalDevice, pCount);
+}
+
+VkResult InstanceDriver::getPhysicalDeviceQueueProperties (VkPhysicalDevice physicalDevice, deUint32 count, VkPhysicalDeviceQueueProperties* pQueueProperties) const
+{
+ return m_vk.getPhysicalDeviceQueueProperties(physicalDevice, count, pQueueProperties);
+}
+
+VkResult InstanceDriver::getPhysicalDeviceMemoryProperties (VkPhysicalDevice physicalDevice, VkPhysicalDeviceMemoryProperties* pMemoryProperties) const
+{
+ return m_vk.getPhysicalDeviceMemoryProperties(physicalDevice, pMemoryProperties);
+}
+
+PFN_vkVoidFunction InstanceDriver::getDeviceProcAddr (VkDevice device, const char* pName) const
+{
+ return m_vk.getDeviceProcAddr(device, pName);
+}
+
+VkResult InstanceDriver::createDevice (VkPhysicalDevice physicalDevice, const VkDeviceCreateInfo* pCreateInfo, VkDevice* pDevice) const
+{
+ return m_vk.createDevice(physicalDevice, pCreateInfo, pDevice);
+}
--- /dev/null
+/* WARNING: This is auto-generated file. Do not modify, since changes will
+ * be lost! Modify the generating script instead.
+ */
+DestroyInstanceFunc destroyInstance;
+EnumeratePhysicalDevicesFunc enumeratePhysicalDevices;
+GetPhysicalDeviceFeaturesFunc getPhysicalDeviceFeatures;
+GetPhysicalDeviceFormatPropertiesFunc getPhysicalDeviceFormatProperties;
+GetPhysicalDeviceImageFormatPropertiesFunc getPhysicalDeviceImageFormatProperties;
+GetPhysicalDeviceLimitsFunc getPhysicalDeviceLimits;
+GetPhysicalDevicePropertiesFunc getPhysicalDeviceProperties;
+GetPhysicalDeviceQueueCountFunc getPhysicalDeviceQueueCount;
+GetPhysicalDeviceQueuePropertiesFunc getPhysicalDeviceQueueProperties;
+GetPhysicalDeviceMemoryPropertiesFunc getPhysicalDeviceMemoryProperties;
+GetDeviceProcAddrFunc getDeviceProcAddr;
+CreateDeviceFunc createDevice;
--- /dev/null
+/*-------------------------------------------------------------------------
+ * Vulkan CTS Framework
+ * --------------------
+ *
+ * Copyright (c) 2015 Google Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and/or associated documentation files (the
+ * "Materials"), to deal in the Materials without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Materials, and to
+ * permit persons to whom the Materials are furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice(s) and this permission notice shall be
+ * included in all copies or substantial portions of the Materials.
+ *
+ * The Materials are Confidential Information as defined by the
+ * Khronos Membership Agreement until designated non-confidential by
+ * Khronos, at which point this condition clause shall be removed.
+ *
+ * THE MATERIALS ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * MATERIALS OR THE USE OR OTHER DEALINGS IN THE MATERIALS.
+ *
+ *//*!
+ * \file
+ * \brief Memory management utilities.
+ *//*--------------------------------------------------------------------*/
+
+#include "vkMemUtil.hpp"
+#include "vkStrUtil.hpp"
+#include "vkQueryUtil.hpp"
+#include "vkRef.hpp"
+#include "vkRefUtil.hpp"
+#include "deInt32.h"
+
+#include <sstream>
+
+namespace vk
+{
+
+using de::UniquePtr;
+using de::MovePtr;
+
+namespace
+{
+
+class HostPtr
+{
+public:
+ HostPtr (const DeviceInterface& vkd, VkDevice device, VkDeviceMemory memory, VkDeviceSize offset, VkDeviceSize size, VkMemoryMapFlags flags);
+ ~HostPtr (void);
+
+ void* get (void) const { return m_ptr; }
+
+private:
+ const DeviceInterface& m_vkd;
+ const VkDevice m_device;
+ const VkDeviceMemory m_memory;
+ void* const m_ptr;
+};
+
+void* mapMemory (const DeviceInterface& vkd, VkDevice device, VkDeviceMemory mem, VkDeviceSize offset, VkDeviceSize size, VkMemoryMapFlags flags)
+{
+ void* hostPtr = DE_NULL;
+ VK_CHECK(vkd.mapMemory(device, mem, offset, size, flags, &hostPtr));
+ TCU_CHECK(hostPtr);
+ return hostPtr;
+}
+
+HostPtr::HostPtr (const DeviceInterface& vkd, VkDevice device, VkDeviceMemory memory, VkDeviceSize offset, VkDeviceSize size, VkMemoryMapFlags flags)
+ : m_vkd (vkd)
+ , m_device (device)
+ , m_memory (memory)
+ , m_ptr (mapMemory(vkd, device, memory, offset, size, flags))
+{
+}
+
+HostPtr::~HostPtr (void)
+{
+ m_vkd.unmapMemory(m_device, m_memory);
+}
+
+deUint32 selectMatchingMemoryType (const VkPhysicalDeviceMemoryProperties& deviceMemProps, deUint32 allowedMemTypeBits, MemoryRequirement requirement)
+{
+ for (deUint32 memoryTypeNdx = 0; memoryTypeNdx < deviceMemProps.memoryTypeCount; memoryTypeNdx++)
+ {
+ if ((allowedMemTypeBits & (1u << memoryTypeNdx)) != 0 &&
+ requirement.matchesHeap(deviceMemProps.memoryTypes[memoryTypeNdx].propertyFlags))
+ return memoryTypeNdx;
+ }
+
+ TCU_THROW(NotSupportedError, "No compatible memory type found");
+}
+
+bool isHostVisibleMemory (const VkPhysicalDeviceMemoryProperties& deviceMemProps, deUint32 memoryTypeNdx)
+{
+ DE_ASSERT(memoryTypeNdx < deviceMemProps.memoryTypeCount);
+ return (deviceMemProps.memoryTypes[memoryTypeNdx].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0u;
+}
+
+} // anonymous
+
+// Allocation
+
+Allocation::Allocation (VkDeviceMemory memory, VkDeviceSize offset, void* hostPtr)
+ : m_memory (memory)
+ , m_offset (offset)
+ , m_hostPtr (hostPtr)
+{
+}
+
+Allocation::~Allocation (void)
+{
+}
+
+// MemoryRequirement
+
+const MemoryRequirement MemoryRequirement::Any = MemoryRequirement(0x0u);
+const MemoryRequirement MemoryRequirement::HostVisible = MemoryRequirement(MemoryRequirement::FLAG_HOST_VISIBLE);
+const MemoryRequirement MemoryRequirement::Coherent = MemoryRequirement(MemoryRequirement::FLAG_COHERENT);
+const MemoryRequirement MemoryRequirement::LazilyAllocated = MemoryRequirement(MemoryRequirement::FLAG_LAZY_ALLOCATION);
+
+bool MemoryRequirement::matchesHeap (VkMemoryPropertyFlags heapFlags) const
+{
+ // sanity check
+ if ((m_flags & FLAG_COHERENT) && !(m_flags & FLAG_HOST_VISIBLE))
+ DE_FATAL("Coherent memory must be host-visible");
+ if ((m_flags & FLAG_HOST_VISIBLE) && (m_flags & FLAG_LAZY_ALLOCATION))
+ DE_FATAL("Lazily allocated memory cannot be mappable");
+
+ // host-visible
+ if ((m_flags & FLAG_HOST_VISIBLE) && !(heapFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT))
+ return false;
+
+ // coherent
+ if ((m_flags & FLAG_COHERENT) && (heapFlags & VK_MEMORY_PROPERTY_HOST_NON_COHERENT_BIT))
+ return false;
+
+ // lazy
+ if ((m_flags & FLAG_LAZY_ALLOCATION) && !(heapFlags & VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT))
+ return false;
+
+ return true;
+}
+
+MemoryRequirement::MemoryRequirement (deUint32 flags)
+ : m_flags(flags)
+{
+}
+
+// SimpleAllocator
+
+class SimpleAllocation : public Allocation
+{
+public:
+ SimpleAllocation (Move<VkDeviceMemory> mem, MovePtr<HostPtr> hostPtr);
+ virtual ~SimpleAllocation (void);
+
+private:
+ const Unique<VkDeviceMemory> m_memHolder;
+ const UniquePtr<HostPtr> m_hostPtr;
+};
+
+SimpleAllocation::SimpleAllocation (Move<VkDeviceMemory> mem, MovePtr<HostPtr> hostPtr)
+ : Allocation (*mem, (VkDeviceSize)0, hostPtr ? hostPtr->get() : DE_NULL)
+ , m_memHolder (mem)
+ , m_hostPtr (hostPtr)
+{
+}
+
+SimpleAllocation::~SimpleAllocation (void)
+{
+}
+
+SimpleAllocator::SimpleAllocator (const DeviceInterface& vk, VkDevice device, const VkPhysicalDeviceMemoryProperties& deviceMemProps)
+ : m_vk (vk)
+ , m_device (device)
+ , m_memProps(deviceMemProps)
+{
+}
+
+MovePtr<Allocation> SimpleAllocator::allocate (const VkMemoryAllocInfo& allocInfo, VkDeviceSize alignment)
+{
+ DE_UNREF(alignment);
+
+ Move<VkDeviceMemory> mem = allocMemory(m_vk, m_device, &allocInfo);
+ MovePtr<HostPtr> hostPtr;
+
+ if (isHostVisibleMemory(m_memProps, allocInfo.memoryTypeIndex))
+ hostPtr = MovePtr<HostPtr>(new HostPtr(m_vk, m_device, *mem, 0u, allocInfo.allocationSize, 0u));
+
+ return MovePtr<Allocation>(new SimpleAllocation(mem, hostPtr));
+}
+
+MovePtr<Allocation> SimpleAllocator::allocate (const VkMemoryRequirements& memReqs, MemoryRequirement requirement)
+{
+ const deUint32 memoryTypeNdx = selectMatchingMemoryType(m_memProps, memReqs.memoryTypeBits, requirement);
+ const VkMemoryAllocInfo allocInfo =
+ {
+ VK_STRUCTURE_TYPE_MEMORY_ALLOC_INFO, // VkStructureType sType;
+ DE_NULL, // const void* pNext;
+ memReqs.size, // VkDeviceSize allocationSize;
+ memoryTypeNdx, // deUint32 memoryTypeIndex;
+ };
+
+ Move<VkDeviceMemory> mem = allocMemory(m_vk, m_device, &allocInfo);
+ MovePtr<HostPtr> hostPtr;
+
+ if (requirement & MemoryRequirement::HostVisible)
+ {
+ DE_ASSERT(isHostVisibleMemory(m_memProps, allocInfo.memoryTypeIndex));
+ hostPtr = MovePtr<HostPtr>(new HostPtr(m_vk, m_device, *mem, 0u, allocInfo.allocationSize, 0u));
+ }
+
+ return MovePtr<Allocation>(new SimpleAllocation(mem, hostPtr));
+}
+
+void flushMappedMemoryRange (const DeviceInterface& vkd, VkDevice device, VkDeviceMemory memory, VkDeviceSize offset, VkDeviceSize size)
+{
+ const VkMappedMemoryRange range =
+ {
+ VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE,
+ DE_NULL,
+ memory,
+ offset,
+ size
+ };
+
+ VK_CHECK(vkd.flushMappedMemoryRanges(device, 1u, &range));
+}
+
+void invalidateMappedMemoryRange (const DeviceInterface& vkd, VkDevice device, VkDeviceMemory memory, VkDeviceSize offset, VkDeviceSize size)
+{
+ const VkMappedMemoryRange range =
+ {
+ VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE,
+ DE_NULL,
+ memory,
+ offset,
+ size
+ };
+
+ VK_CHECK(vkd.invalidateMappedMemoryRanges(device, 1u, &range));
+}
+
+} // vk
--- /dev/null
+#ifndef _VKMEMUTIL_HPP
+#define _VKMEMUTIL_HPP
+/*-------------------------------------------------------------------------
+ * Vulkan CTS Framework
+ * --------------------
+ *
+ * Copyright (c) 2015 Google Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and/or associated documentation files (the
+ * "Materials"), to deal in the Materials without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Materials, and to
+ * permit persons to whom the Materials are furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice(s) and this permission notice shall be
+ * included in all copies or substantial portions of the Materials.
+ *
+ * The Materials are Confidential Information as defined by the
+ * Khronos Membership Agreement until designated non-confidential by
+ * Khronos, at which point this condition clause shall be removed.
+ *
+ * THE MATERIALS ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * MATERIALS OR THE USE OR OTHER DEALINGS IN THE MATERIALS.
+ *
+ *//*!
+ * \file
+ * \brief Memory management utilities.
+ *//*--------------------------------------------------------------------*/
+
+#include "vkDefs.hpp"
+#include "deUniquePtr.hpp"
+
+namespace vk
+{
+
+/*--------------------------------------------------------------------*//*!
+ * \brief Memory allocation interface
+ *
+ * Allocation represents a block of device memory and is allocated by an
+ * Allocator implementation. Test code should use an Allocator for allocating
+ * memory, unless there is a reason not to (for example testing vkAllocMemory).
+ *
+ * Allocation doesn't necessarily correspond to a whole VkDeviceMemory; it
+ * may instead represent a sub-allocation. Thus whenever VkDeviceMemory
+ * (getMemory()) managed by Allocation is passed to Vulkan API calls,
+ * offset given by getOffset() must be used.
+ *
+ * If host-visible memory was requested, host pointer to the memory can
+ * be queried with getHostPtr(). No offset is needed when accessing host
+ * pointer, i.e. the pointer is already adjusted in case of sub-allocation.
+ *
+ * Memory mappings are managed solely by Allocation, i.e. unmapping or
+ * re-mapping VkDeviceMemory owned by Allocation is not allowed.
+ *//*--------------------------------------------------------------------*/
+class Allocation
+{
+public:
+ virtual ~Allocation (void);
+
+ //! Get VkDeviceMemory backing this allocation
+ VkDeviceMemory getMemory (void) const { return m_memory; }
+
+ //! Get offset in VkDeviceMemory for this allocation
+ VkDeviceSize getOffset (void) const { return m_offset; }
+
+ //! Get host pointer for this allocation. Only available for host-visible allocations
+ void* getHostPtr (void) const { DE_ASSERT(m_hostPtr); return m_hostPtr; }
+
+protected:
+ Allocation (VkDeviceMemory memory, VkDeviceSize offset, void* hostPtr);
+
+private:
+ const VkDeviceMemory m_memory;
+ const VkDeviceSize m_offset;
+ void* const m_hostPtr;
+};
+
+//! Memory allocation requirements
+class MemoryRequirement
+{
+public:
+ static const MemoryRequirement Any;
+ static const MemoryRequirement HostVisible;
+ static const MemoryRequirement Coherent;
+ static const MemoryRequirement LazilyAllocated;
+
+ inline MemoryRequirement operator| (MemoryRequirement requirement) const
+ {
+ return MemoryRequirement(m_flags | requirement.m_flags);
+ }
+
+ inline MemoryRequirement operator& (MemoryRequirement requirement) const
+ {
+ return MemoryRequirement(m_flags & requirement.m_flags);
+ }
+
+ bool matchesHeap (VkMemoryPropertyFlags heapFlags) const;
+
+ inline operator bool (void) const { return m_flags != 0u; }
+
+private:
+ explicit MemoryRequirement (deUint32 flags);
+
+ const deUint32 m_flags;
+
+ enum Flags
+ {
+ FLAG_HOST_VISIBLE = 1u << 0u,
+ FLAG_COHERENT = 1u << 1u,
+ FLAG_LAZY_ALLOCATION = 1u << 2u,
+ };
+};
+
+//! Memory allocator interface
+class Allocator
+{
+public:
+ Allocator (void) {}
+ virtual ~Allocator (void) {}
+
+ virtual de::MovePtr<Allocation> allocate (const VkMemoryAllocInfo& allocInfo, VkDeviceSize alignment) = 0;
+ virtual de::MovePtr<Allocation> allocate (const VkMemoryRequirements& memRequirements, MemoryRequirement requirement) = 0;
+};
+
+//! Allocator that backs every allocation with its own VkDeviceMemory
+class SimpleAllocator : public Allocator
+{
+public:
+ SimpleAllocator (const DeviceInterface& vk, VkDevice device, const VkPhysicalDeviceMemoryProperties& deviceMemProps);
+
+ de::MovePtr<Allocation> allocate (const VkMemoryAllocInfo& allocInfo, VkDeviceSize alignment);
+ de::MovePtr<Allocation> allocate (const VkMemoryRequirements& memRequirements, MemoryRequirement requirement);
+
+private:
+ const DeviceInterface& m_vk;
+ const VkDevice m_device;
+ const VkPhysicalDeviceMemoryProperties m_memProps;
+};
+
+void flushMappedMemoryRange (const DeviceInterface& vkd, VkDevice device, VkDeviceMemory memory, VkDeviceSize offset, VkDeviceSize size);
+void invalidateMappedMemoryRange (const DeviceInterface& vkd, VkDevice device, VkDeviceMemory memory, VkDeviceSize offset, VkDeviceSize size);
+
+} // vk
+
+#endif // _VKMEMUTIL_HPP
--- /dev/null
+/*-------------------------------------------------------------------------
+ * Vulkan CTS Framework
+ * --------------------
+ *
+ * Copyright (c) 2015 Google Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and/or associated documentation files (the
+ * "Materials"), to deal in the Materials without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Materials, and to
+ * permit persons to whom the Materials are furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice(s) and this permission notice shall be
+ * included in all copies or substantial portions of the Materials.
+ *
+ * The Materials are Confidential Information as defined by the
+ * Khronos Membership Agreement until designated non-confidential by
+ * Khronos, at which point this condition clause shall be removed.
+ *
+ * THE MATERIALS ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * MATERIALS OR THE USE OR OTHER DEALINGS IN THE MATERIALS.
+ *
+ *//*!
+ * \file
+ * \brief Null (dummy) Vulkan implementation.
+ *//*--------------------------------------------------------------------*/
+
+#include "vkNullDriver.hpp"
+#include "vkPlatform.hpp"
+#include "tcuFunctionLibrary.hpp"
+#include "deMemory.h"
+
+#include <stdexcept>
+
+namespace vk
+{
+
+namespace
+{
+
+#define VK_NULL_RETURN(STMT) \
+ do { \
+ try { \
+ STMT; \
+ return VK_SUCCESS; \
+ } catch (const std::bad_alloc&) { \
+ return VK_ERROR_OUT_OF_HOST_MEMORY; \
+ } catch (VkResult res) { \
+ return res; \
+ } \
+ } while (deGetFalse())
+
+// \todo [2015-07-14 pyry] Check FUNC type by checkedCastToPtr<T>() or similar
+#define VK_NULL_FUNC_ENTRY(NAME, FUNC) { #NAME, (deFunctionPtr)FUNC }
+
+#define VK_NULL_DEFINE_DEVICE_OBJ(NAME) \
+struct NAME \
+{ \
+ NAME (VkDevice, const Vk##NAME##CreateInfo*) {} \
+}
+
+class Instance
+{
+public:
+ Instance (const VkInstanceCreateInfo* instanceInfo);
+ ~Instance (void) {}
+
+ PFN_vkVoidFunction getProcAddr (const char* name) const { return (PFN_vkVoidFunction)m_functions.getFunction(name); }
+
+private:
+ const tcu::StaticFunctionLibrary m_functions;
+};
+
+class Device
+{
+public:
+ Device (VkPhysicalDevice physicalDevice, const VkDeviceCreateInfo* deviceInfo);
+ ~Device (void) {}
+
+ PFN_vkVoidFunction getProcAddr (const char* name) const { return (PFN_vkVoidFunction)m_functions.getFunction(name); }
+
+private:
+ const tcu::StaticFunctionLibrary m_functions;
+};
+
+class DescriptorPool
+{
+public:
+ DescriptorPool (VkDevice, VkDescriptorPoolUsage, deUint32, const VkDescriptorPoolCreateInfo*) {}
+};
+
+class DescriptorSet
+{
+public:
+ DescriptorSet (VkDevice, VkDescriptorPool, VkDescriptorSetUsage, VkDescriptorSetLayout) {}
+};
+
+class Pipeline
+{
+public:
+ Pipeline (VkDevice, const VkGraphicsPipelineCreateInfo*) {}
+ Pipeline (VkDevice, const VkComputePipelineCreateInfo*) {}
+};
+
+class DeviceMemory
+{
+public:
+ DeviceMemory (VkDevice, const VkMemoryAllocInfo* pAllocInfo)
+ : m_memory(deMalloc((size_t)pAllocInfo->allocationSize))
+ {
+ if (!m_memory)
+ throw std::bad_alloc();
+ }
+ ~DeviceMemory (void)
+ {
+ deFree(m_memory);
+ }
+
+ void* getPtr (void) const { return m_memory; }
+
+private:
+ void* const m_memory;
+};
+
+// Buffer placeholder that remembers only its requested size, which
+// getBufferMemoryRequirements() reports back verbatim.
+class Buffer
+{
+public:
+ Buffer (VkDevice, const VkBufferCreateInfo* pCreateInfo)
+ : m_size(pCreateInfo->size)
+ {}
+
+ VkDeviceSize getSize (void) const { return m_size; }
+
+private:
+ const VkDeviceSize m_size;
+};
+
+// Stateless placeholder types for the remaining device-level objects; each
+// expands to a struct whose constructor ignores (VkDevice, Vk*CreateInfo*).
+VK_NULL_DEFINE_DEVICE_OBJ(CmdBuffer);
+VK_NULL_DEFINE_DEVICE_OBJ(Fence);
+VK_NULL_DEFINE_DEVICE_OBJ(Image);
+VK_NULL_DEFINE_DEVICE_OBJ(Semaphore);
+VK_NULL_DEFINE_DEVICE_OBJ(Event);
+VK_NULL_DEFINE_DEVICE_OBJ(QueryPool);
+VK_NULL_DEFINE_DEVICE_OBJ(BufferView);
+VK_NULL_DEFINE_DEVICE_OBJ(ImageView);
+VK_NULL_DEFINE_DEVICE_OBJ(AttachmentView);
+VK_NULL_DEFINE_DEVICE_OBJ(ShaderModule);
+VK_NULL_DEFINE_DEVICE_OBJ(Shader);
+VK_NULL_DEFINE_DEVICE_OBJ(PipelineCache);
+VK_NULL_DEFINE_DEVICE_OBJ(PipelineLayout);
+VK_NULL_DEFINE_DEVICE_OBJ(RenderPass);
+VK_NULL_DEFINE_DEVICE_OBJ(DescriptorSetLayout);
+VK_NULL_DEFINE_DEVICE_OBJ(Sampler);
+VK_NULL_DEFINE_DEVICE_OBJ(DynamicViewportState);
+VK_NULL_DEFINE_DEVICE_OBJ(DynamicRasterState);
+VK_NULL_DEFINE_DEVICE_OBJ(DynamicColorBlendState);
+VK_NULL_DEFINE_DEVICE_OBJ(DynamicDepthStencilState);
+VK_NULL_DEFINE_DEVICE_OBJ(Framebuffer);
+VK_NULL_DEFINE_DEVICE_OBJ(CmdPool);
+
+// API entry points below have C linkage so their addresses can be stored in
+// the deFunctionPtr tables consumed by tcu::StaticFunctionLibrary.
+extern "C"
+{
+
+// Dispatchable handles carry the object pointer directly, so the handle is
+// cast back to the implementation object and queried for the entry point.
+PFN_vkVoidFunction getInstanceProcAddr (VkInstance instance, const char* pName)
+{
+ return reinterpret_cast<Instance*>(instance)->getProcAddr(pName);
+}
+
+PFN_vkVoidFunction getDeviceProcAddr (VkDevice device, const char* pName)
+{
+ return reinterpret_cast<Device*>(device)->getProcAddr(pName);
+}
+
+// Creates count placeholder graphics pipelines.
+// Fix over the original: "new Pipeline" can throw std::bad_alloc, which must
+// not escape across the extern "C" API boundary; translate it to
+// VK_ERROR_OUT_OF_HOST_MEMORY (matching VK_NULL_RETURN / allocDescriptorSets)
+// and free any pipelines created before the failure.
+VkResult createGraphicsPipelines (VkDevice device, VkPipelineCache, deUint32 count, const VkGraphicsPipelineCreateInfo* pCreateInfos, VkPipeline* pPipelines)
+{
+ deUint32 ndx = 0;
+ try
+ {
+ for (; ndx < count; ndx++)
+ pPipelines[ndx] = VkPipeline((deUint64)(deUintptr)new Pipeline(device, pCreateInfos+ndx));
+ return VK_SUCCESS;
+ }
+ catch (const std::bad_alloc&)
+ {
+ // Roll back partial creation to avoid leaking on failure.
+ for (deUint32 freeNdx = 0; freeNdx < ndx; freeNdx++)
+ delete reinterpret_cast<Pipeline*>((deUintptr)pPipelines[freeNdx].getInternal());
+ return VK_ERROR_OUT_OF_HOST_MEMORY;
+ }
+}
+
+// Creates count placeholder compute pipelines.
+// Fix over the original: translate std::bad_alloc from "new" into
+// VK_ERROR_OUT_OF_HOST_MEMORY instead of letting it escape the C API,
+// and free any pipelines created before the failure.
+VkResult createComputePipelines (VkDevice device, VkPipelineCache, deUint32 count, const VkComputePipelineCreateInfo* pCreateInfos, VkPipeline* pPipelines)
+{
+ deUint32 ndx = 0;
+ try
+ {
+ for (; ndx < count; ndx++)
+ pPipelines[ndx] = VkPipeline((deUint64)(deUintptr)new Pipeline(device, pCreateInfos+ndx));
+ return VK_SUCCESS;
+ }
+ catch (const std::bad_alloc&)
+ {
+ // Roll back partial creation to avoid leaking on failure.
+ for (deUint32 freeNdx = 0; freeNdx < ndx; freeNdx++)
+ delete reinterpret_cast<Pipeline*>((deUintptr)pPipelines[freeNdx].getInternal());
+ return VK_ERROR_OUT_OF_HOST_MEMORY;
+ }
+}
+
+// Reports exactly one fake physical device, identified by handle value 1.
+VkResult enumeratePhysicalDevices (VkInstance, deUint32* pPhysicalDeviceCount, VkPhysicalDevice* pDevices)
+{
+ if (pDevices && *pPhysicalDeviceCount >= 1u)
+ *pDevices = reinterpret_cast<VkPhysicalDevice>((void*)(deUintptr)1u);
+
+ *pPhysicalDeviceCount = 1;
+
+ return VK_SUCCESS;
+}
+
+// The null device exposes a single queue family.
+VkResult getPhysicalDeviceQueueCount (VkPhysicalDevice, deUint32* count)
+{
+ if (count)
+ *count = 1u;
+
+ return VK_SUCCESS;
+}
+
+// Fills in fixed placeholder device properties ("null" device, OTHER type).
+VkResult getPhysicalDeviceProperties (VkPhysicalDevice, VkPhysicalDeviceProperties* props)
+{
+ const VkPhysicalDeviceProperties defaultProps =
+ {
+ VK_API_VERSION, // deUint32 apiVersion;
+ 1u, // deUint32 driverVersion;
+ 0u, // deUint32 vendorId;
+ 0u, // deUint32 deviceId;
+ VK_PHYSICAL_DEVICE_TYPE_OTHER, // VkPhysicalDeviceType deviceType;
+ "null", // char deviceName[VK_MAX_PHYSICAL_DEVICE_NAME];
+ { 0 } // deUint8 pipelineCacheUUID[VK_UUID_LENGTH];
+ };
+
+ deMemcpy(props, &defaultProps, sizeof(defaultProps));
+
+ return VK_SUCCESS;
+}
+
+// Describes the single queue family: one universal queue supporting
+// graphics, compute and DMA, with timestamp support.
+VkResult getPhysicalDeviceQueueProperties (VkPhysicalDevice, deUint32 count, VkPhysicalDeviceQueueProperties* props)
+{
+ if (count >= 1u)
+ {
+ deMemset(props, 0, sizeof(VkPhysicalDeviceQueueProperties));
+
+ props->queueCount = 1u;
+ props->queueFlags = VK_QUEUE_GRAPHICS_BIT|VK_QUEUE_COMPUTE_BIT|VK_QUEUE_DMA_BIT;
+ props->supportsTimestamps = DE_TRUE;
+ }
+
+ return VK_SUCCESS;
+}
+
+// Advertises a single host-local heap (2 GiB) with one host-visible
+// memory type; allocations are actually served from host memory.
+VkResult getPhysicalDeviceMemoryProperties (VkPhysicalDevice, VkPhysicalDeviceMemoryProperties* props)
+{
+ deMemset(props, 0, sizeof(VkPhysicalDeviceMemoryProperties));
+
+ props->memoryTypeCount = 1u;
+ props->memoryTypes[0].heapIndex = 0u;
+ props->memoryTypes[0].propertyFlags = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
+
+ props->memoryHeapCount = 1u;
+ props->memoryHeaps[0].size = 1ull << 31;
+ props->memoryHeaps[0].flags = VK_MEMORY_HEAP_HOST_LOCAL;
+
+ return VK_SUCCESS;
+}
+
+// Reports the buffer's real requested size with trivial alignment; the only
+// memory type (index 0) is always compatible.
+VkResult getBufferMemoryRequirements (VkDevice, VkBuffer bufferHandle, VkMemoryRequirements* requirements)
+{
+ const Buffer* buffer = reinterpret_cast<Buffer*>(bufferHandle.getInternal());
+
+ requirements->memoryTypeBits = 1u;
+ requirements->size = buffer->getSize();
+ requirements->alignment = (VkDeviceSize)1u;
+
+ return VK_SUCCESS;
+}
+
+// Images store no pixels in the null driver, so a fixed token-sized
+// requirement is reported regardless of the image.
+VkResult getImageMemoryRequirements (VkDevice, VkImage, VkMemoryRequirements* requirements)
+{
+ requirements->memoryTypeBits = 1u;
+ requirements->size = 4u;
+ requirements->alignment = 4u;
+
+ return VK_SUCCESS;
+}
+
+// Returns a pointer into the host-side backing store at the given offset.
+// NOTE(review): offset/size are not validated against the allocation size —
+// confirm callers always pass in-range values.
+VkResult mapMemory (VkDevice, VkDeviceMemory memHandle, VkDeviceSize offset, VkDeviceSize size, VkMemoryMapFlags flags, void** ppData)
+{
+ const DeviceMemory* memory = reinterpret_cast<DeviceMemory*>(memHandle.getInternal());
+
+ DE_UNREF(size);
+ DE_UNREF(flags);
+
+ *ppData = (deUint8*)memory->getPtr() + offset;
+
+ return VK_SUCCESS;
+}
+
+// Allocates count placeholder descriptor sets one at a time. On failure,
+// *pCount reports how many sets were successfully created before the error,
+// so the caller can free exactly those.
+VkResult allocDescriptorSets (VkDevice device, VkDescriptorPool descriptorPool, VkDescriptorSetUsage setUsage, deUint32 count, const VkDescriptorSetLayout* pSetLayouts, VkDescriptorSet* pDescriptorSets, deUint32* pCount)
+{
+ for (deUint32 ndx = 0; ndx < count; ++ndx)
+ {
+ try
+ {
+ pDescriptorSets[ndx] = VkDescriptorSet((deUint64)(deUintptr)new DescriptorSet(device, descriptorPool, setUsage, pSetLayouts[ndx]));
+ }
+ catch (const std::bad_alloc&)
+ {
+ *pCount = ndx;
+ return VK_ERROR_OUT_OF_HOST_MEMORY;
+ }
+ catch (VkResult res)
+ {
+ *pCount = ndx;
+ return res;
+ }
+ }
+
+ *pCount = count;
+ return VK_SUCCESS;
+}
+
+// Deletes each descriptor set object recovered from its handle.
+VkResult freeDescriptorSets (VkDevice, VkDescriptorPool, deUint32 count, const VkDescriptorSet* pDescriptorSets)
+{
+ for (deUint32 ndx = 0; ndx < count; ++ndx)
+ {
+ // \note: delete cannot fail
+ delete reinterpret_cast<DescriptorSet*>((deUintptr)pDescriptorSets[ndx].getInternal());
+ }
+
+ return VK_SUCCESS;
+}
+
+// Pulls in the generated entry-point bodies and the s_platformFunctions /
+// s_instanceFunctions / s_deviceFunctions tables (still inside extern "C").
+#include "vkNullDriverImpl.inl"
+
+} // extern "C"
+
+// Constructors are defined after the .inl include because they reference the
+// generated static function tables.
+Instance::Instance (const VkInstanceCreateInfo*)
+ : m_functions(s_instanceFunctions, DE_LENGTH_OF_ARRAY(s_instanceFunctions))
+{
+}
+
+Device::Device (VkPhysicalDevice, const VkDeviceCreateInfo*)
+ : m_functions(s_deviceFunctions, DE_LENGTH_OF_ARRAY(s_deviceFunctions))
+{
+}
+
+// Library implementation that exposes the null driver's platform-level
+// entry points through the standard PlatformInterface.
+class NullDriverLibrary : public Library
+{
+public:
+ NullDriverLibrary (void)
+ : m_library (s_platformFunctions, DE_LENGTH_OF_ARRAY(s_platformFunctions))
+ , m_driver (m_library)
+ {}
+
+ const PlatformInterface& getPlatformInterface (void) const { return m_driver; }
+
+private:
+ const tcu::StaticFunctionLibrary m_library;
+ const PlatformDriver m_driver;
+};
+
+} // anonymous
+
+// Factory for the null driver; caller owns the returned Library.
+Library* createNullDriver (void)
+{
+ return new NullDriverLibrary();
+}
+
+} // vk
--- /dev/null
+#ifndef _VKNULLDRIVER_HPP
+#define _VKNULLDRIVER_HPP
+/*-------------------------------------------------------------------------
+ * Vulkan CTS Framework
+ * --------------------
+ *
+ * Copyright (c) 2015 Google Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and/or associated documentation files (the
+ * "Materials"), to deal in the Materials without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Materials, and to
+ * permit persons to whom the Materials are furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice(s) and this permission notice shall be
+ * included in all copies or substantial portions of the Materials.
+ *
+ * The Materials are Confidential Information as defined by the
+ * Khronos Membership Agreement until designated non-confidential by
+ * Khronos, at which point this condition clause shall be removed.
+ *
+ * THE MATERIALS ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * MATERIALS OR THE USE OR OTHER DEALINGS IN THE MATERIALS.
+ *
+ *//*!
+ * \file
+ * \brief Null (dummy) Vulkan implementation.
+ *//*--------------------------------------------------------------------*/
+
+#include "vkDefs.hpp"
+
+namespace vk
+{
+
+class Library;
+
+// Creates a Library backed by the null (no-op) driver; the caller owns the
+// returned object. Used for testing the framework without a real Vulkan ICD.
+Library* createNullDriver (void);
+
+} // vk
+
+#endif // _VKNULLDRIVER_HPP
--- /dev/null
+/* WARNING: This is auto-generated file. Do not modify, since changes will
+ * be lost! Modify the generating script instead.
+ */
+// Generated object-creation entry points. Dispatchable handles (VkInstance,
+// VkDevice, VkCmdBuffer) carry the object pointer directly; non-dispatchable
+// handles wrap the pointer as a 64-bit integer. VK_NULL_RETURN translates
+// allocation failures into VkResult codes.
+VkResult createInstance (const VkInstanceCreateInfo* pCreateInfo, VkInstance* pInstance)
+{
+ VK_NULL_RETURN(*pInstance = reinterpret_cast<VkInstance>(new Instance(pCreateInfo)));
+}
+
+VkResult createDevice (VkPhysicalDevice physicalDevice, const VkDeviceCreateInfo* pCreateInfo, VkDevice* pDevice)
+{
+ VK_NULL_RETURN(*pDevice = reinterpret_cast<VkDevice>(new Device(physicalDevice, pCreateInfo)));
+}
+
+VkResult allocMemory (VkDevice device, const VkMemoryAllocInfo* pAllocInfo, VkDeviceMemory* pMem)
+{
+ VK_NULL_RETURN(*pMem = VkDeviceMemory((deUint64)(deUintptr)new DeviceMemory(device, pAllocInfo)));
+}
+
+VkResult createFence (VkDevice device, const VkFenceCreateInfo* pCreateInfo, VkFence* pFence)
+{
+ VK_NULL_RETURN(*pFence = VkFence((deUint64)(deUintptr)new Fence(device, pCreateInfo)));
+}
+
+VkResult createSemaphore (VkDevice device, const VkSemaphoreCreateInfo* pCreateInfo, VkSemaphore* pSemaphore)
+{
+ VK_NULL_RETURN(*pSemaphore = VkSemaphore((deUint64)(deUintptr)new Semaphore(device, pCreateInfo)));
+}
+
+VkResult createEvent (VkDevice device, const VkEventCreateInfo* pCreateInfo, VkEvent* pEvent)
+{
+ VK_NULL_RETURN(*pEvent = VkEvent((deUint64)(deUintptr)new Event(device, pCreateInfo)));
+}
+
+VkResult createQueryPool (VkDevice device, const VkQueryPoolCreateInfo* pCreateInfo, VkQueryPool* pQueryPool)
+{
+ VK_NULL_RETURN(*pQueryPool = VkQueryPool((deUint64)(deUintptr)new QueryPool(device, pCreateInfo)));
+}
+
+VkResult createBuffer (VkDevice device, const VkBufferCreateInfo* pCreateInfo, VkBuffer* pBuffer)
+{
+ VK_NULL_RETURN(*pBuffer = VkBuffer((deUint64)(deUintptr)new Buffer(device, pCreateInfo)));
+}
+
+VkResult createBufferView (VkDevice device, const VkBufferViewCreateInfo* pCreateInfo, VkBufferView* pView)
+{
+ VK_NULL_RETURN(*pView = VkBufferView((deUint64)(deUintptr)new BufferView(device, pCreateInfo)));
+}
+
+VkResult createImage (VkDevice device, const VkImageCreateInfo* pCreateInfo, VkImage* pImage)
+{
+ VK_NULL_RETURN(*pImage = VkImage((deUint64)(deUintptr)new Image(device, pCreateInfo)));
+}
+
+VkResult createImageView (VkDevice device, const VkImageViewCreateInfo* pCreateInfo, VkImageView* pView)
+{
+ VK_NULL_RETURN(*pView = VkImageView((deUint64)(deUintptr)new ImageView(device, pCreateInfo)));
+}
+
+VkResult createAttachmentView (VkDevice device, const VkAttachmentViewCreateInfo* pCreateInfo, VkAttachmentView* pView)
+{
+ VK_NULL_RETURN(*pView = VkAttachmentView((deUint64)(deUintptr)new AttachmentView(device, pCreateInfo)));
+}
+
+VkResult createShaderModule (VkDevice device, const VkShaderModuleCreateInfo* pCreateInfo, VkShaderModule* pShaderModule)
+{
+ VK_NULL_RETURN(*pShaderModule = VkShaderModule((deUint64)(deUintptr)new ShaderModule(device, pCreateInfo)));
+}
+
+VkResult createShader (VkDevice device, const VkShaderCreateInfo* pCreateInfo, VkShader* pShader)
+{
+ VK_NULL_RETURN(*pShader = VkShader((deUint64)(deUintptr)new Shader(device, pCreateInfo)));
+}
+
+VkResult createPipelineCache (VkDevice device, const VkPipelineCacheCreateInfo* pCreateInfo, VkPipelineCache* pPipelineCache)
+{
+ VK_NULL_RETURN(*pPipelineCache = VkPipelineCache((deUint64)(deUintptr)new PipelineCache(device, pCreateInfo)));
+}
+
+VkResult createPipelineLayout (VkDevice device, const VkPipelineLayoutCreateInfo* pCreateInfo, VkPipelineLayout* pPipelineLayout)
+{
+ VK_NULL_RETURN(*pPipelineLayout = VkPipelineLayout((deUint64)(deUintptr)new PipelineLayout(device, pCreateInfo)));
+}
+
+VkResult createSampler (VkDevice device, const VkSamplerCreateInfo* pCreateInfo, VkSampler* pSampler)
+{
+ VK_NULL_RETURN(*pSampler = VkSampler((deUint64)(deUintptr)new Sampler(device, pCreateInfo)));
+}
+
+VkResult createDescriptorSetLayout (VkDevice device, const VkDescriptorSetLayoutCreateInfo* pCreateInfo, VkDescriptorSetLayout* pSetLayout)
+{
+ VK_NULL_RETURN(*pSetLayout = VkDescriptorSetLayout((deUint64)(deUintptr)new DescriptorSetLayout(device, pCreateInfo)));
+}
+
+VkResult createDescriptorPool (VkDevice device, VkDescriptorPoolUsage poolUsage, deUint32 maxSets, const VkDescriptorPoolCreateInfo* pCreateInfo, VkDescriptorPool* pDescriptorPool)
+{
+ VK_NULL_RETURN(*pDescriptorPool = VkDescriptorPool((deUint64)(deUintptr)new DescriptorPool(device, poolUsage, maxSets, pCreateInfo)));
+}
+
+VkResult createDynamicViewportState (VkDevice device, const VkDynamicViewportStateCreateInfo* pCreateInfo, VkDynamicViewportState* pState)
+{
+ VK_NULL_RETURN(*pState = VkDynamicViewportState((deUint64)(deUintptr)new DynamicViewportState(device, pCreateInfo)));
+}
+
+VkResult createDynamicRasterState (VkDevice device, const VkDynamicRasterStateCreateInfo* pCreateInfo, VkDynamicRasterState* pState)
+{
+ VK_NULL_RETURN(*pState = VkDynamicRasterState((deUint64)(deUintptr)new DynamicRasterState(device, pCreateInfo)));
+}
+
+VkResult createDynamicColorBlendState (VkDevice device, const VkDynamicColorBlendStateCreateInfo* pCreateInfo, VkDynamicColorBlendState* pState)
+{
+ VK_NULL_RETURN(*pState = VkDynamicColorBlendState((deUint64)(deUintptr)new DynamicColorBlendState(device, pCreateInfo)));
+}
+
+VkResult createDynamicDepthStencilState (VkDevice device, const VkDynamicDepthStencilStateCreateInfo* pCreateInfo, VkDynamicDepthStencilState* pState)
+{
+ VK_NULL_RETURN(*pState = VkDynamicDepthStencilState((deUint64)(deUintptr)new DynamicDepthStencilState(device, pCreateInfo)));
+}
+
+VkResult createFramebuffer (VkDevice device, const VkFramebufferCreateInfo* pCreateInfo, VkFramebuffer* pFramebuffer)
+{
+ VK_NULL_RETURN(*pFramebuffer = VkFramebuffer((deUint64)(deUintptr)new Framebuffer(device, pCreateInfo)));
+}
+
+VkResult createRenderPass (VkDevice device, const VkRenderPassCreateInfo* pCreateInfo, VkRenderPass* pRenderPass)
+{
+ VK_NULL_RETURN(*pRenderPass = VkRenderPass((deUint64)(deUintptr)new RenderPass(device, pCreateInfo)));
+}
+
+VkResult createCommandPool (VkDevice device, const VkCmdPoolCreateInfo* pCreateInfo, VkCmdPool* pCmdPool)
+{
+ VK_NULL_RETURN(*pCmdPool = VkCmdPool((deUint64)(deUintptr)new CmdPool(device, pCreateInfo)));
+}
+
+// Command buffers are dispatchable, hence the direct pointer cast.
+VkResult createCommandBuffer (VkDevice device, const VkCmdBufferCreateInfo* pCreateInfo, VkCmdBuffer* pCmdBuffer)
+{
+ VK_NULL_RETURN(*pCmdBuffer = reinterpret_cast<VkCmdBuffer>(new CmdBuffer(device, pCreateInfo)));
+}
+
+// Generated object-destruction entry points: each recovers the object
+// pointer from its handle and deletes it (delete on a null pointer is a
+// no-op, so destroying a zero handle is safe).
+VkResult destroyInstance (VkInstance instance)
+{
+ VK_NULL_RETURN(delete reinterpret_cast<Instance*>(instance));
+}
+
+VkResult destroyDevice (VkDevice device)
+{
+ VK_NULL_RETURN(delete reinterpret_cast<Device*>(device));
+}
+
+VkResult freeMemory (VkDevice device, VkDeviceMemory mem)
+{
+ DE_UNREF(device);
+ VK_NULL_RETURN(delete reinterpret_cast<DeviceMemory*>((deUintptr)mem.getInternal()));
+}
+
+VkResult destroyFence (VkDevice device, VkFence fence)
+{
+ DE_UNREF(device);
+ VK_NULL_RETURN(delete reinterpret_cast<Fence*>((deUintptr)fence.getInternal()));
+}
+
+VkResult destroySemaphore (VkDevice device, VkSemaphore semaphore)
+{
+ DE_UNREF(device);
+ VK_NULL_RETURN(delete reinterpret_cast<Semaphore*>((deUintptr)semaphore.getInternal()));
+}
+
+VkResult destroyEvent (VkDevice device, VkEvent event)
+{
+ DE_UNREF(device);
+ VK_NULL_RETURN(delete reinterpret_cast<Event*>((deUintptr)event.getInternal()));
+}
+
+VkResult destroyQueryPool (VkDevice device, VkQueryPool queryPool)
+{
+ DE_UNREF(device);
+ VK_NULL_RETURN(delete reinterpret_cast<QueryPool*>((deUintptr)queryPool.getInternal()));
+}
+
+VkResult destroyBuffer (VkDevice device, VkBuffer buffer)
+{
+ DE_UNREF(device);
+ VK_NULL_RETURN(delete reinterpret_cast<Buffer*>((deUintptr)buffer.getInternal()));
+}
+
+VkResult destroyBufferView (VkDevice device, VkBufferView bufferView)
+{
+ DE_UNREF(device);
+ VK_NULL_RETURN(delete reinterpret_cast<BufferView*>((deUintptr)bufferView.getInternal()));
+}
+
+VkResult destroyImage (VkDevice device, VkImage image)
+{
+ DE_UNREF(device);
+ VK_NULL_RETURN(delete reinterpret_cast<Image*>((deUintptr)image.getInternal()));
+}
+
+VkResult destroyImageView (VkDevice device, VkImageView imageView)
+{
+ DE_UNREF(device);
+ VK_NULL_RETURN(delete reinterpret_cast<ImageView*>((deUintptr)imageView.getInternal()));
+}
+
+VkResult destroyAttachmentView (VkDevice device, VkAttachmentView attachmentView)
+{
+ DE_UNREF(device);
+ VK_NULL_RETURN(delete reinterpret_cast<AttachmentView*>((deUintptr)attachmentView.getInternal()));
+}
+
+VkResult destroyShaderModule (VkDevice device, VkShaderModule shaderModule)
+{
+ DE_UNREF(device);
+ VK_NULL_RETURN(delete reinterpret_cast<ShaderModule*>((deUintptr)shaderModule.getInternal()));
+}
+
+VkResult destroyShader (VkDevice device, VkShader shader)
+{
+ DE_UNREF(device);
+ VK_NULL_RETURN(delete reinterpret_cast<Shader*>((deUintptr)shader.getInternal()));
+}
+
+VkResult destroyPipelineCache (VkDevice device, VkPipelineCache pipelineCache)
+{
+ DE_UNREF(device);
+ VK_NULL_RETURN(delete reinterpret_cast<PipelineCache*>((deUintptr)pipelineCache.getInternal()));
+}
+
+VkResult destroyPipeline (VkDevice device, VkPipeline pipeline)
+{
+ DE_UNREF(device);
+ VK_NULL_RETURN(delete reinterpret_cast<Pipeline*>((deUintptr)pipeline.getInternal()));
+}
+
+VkResult destroyPipelineLayout (VkDevice device, VkPipelineLayout pipelineLayout)
+{
+ DE_UNREF(device);
+ VK_NULL_RETURN(delete reinterpret_cast<PipelineLayout*>((deUintptr)pipelineLayout.getInternal()));
+}
+
+VkResult destroySampler (VkDevice device, VkSampler sampler)
+{
+ DE_UNREF(device);
+ VK_NULL_RETURN(delete reinterpret_cast<Sampler*>((deUintptr)sampler.getInternal()));
+}
+
+VkResult destroyDescriptorSetLayout (VkDevice device, VkDescriptorSetLayout descriptorSetLayout)
+{
+ DE_UNREF(device);
+ VK_NULL_RETURN(delete reinterpret_cast<DescriptorSetLayout*>((deUintptr)descriptorSetLayout.getInternal()));
+}
+
+VkResult destroyDescriptorPool (VkDevice device, VkDescriptorPool descriptorPool)
+{
+ DE_UNREF(device);
+ VK_NULL_RETURN(delete reinterpret_cast<DescriptorPool*>((deUintptr)descriptorPool.getInternal()));
+}
+
+VkResult destroyDynamicViewportState (VkDevice device, VkDynamicViewportState dynamicViewportState)
+{
+ DE_UNREF(device);
+ VK_NULL_RETURN(delete reinterpret_cast<DynamicViewportState*>((deUintptr)dynamicViewportState.getInternal()));
+}
+
+VkResult destroyDynamicRasterState (VkDevice device, VkDynamicRasterState dynamicRasterState)
+{
+ DE_UNREF(device);
+ VK_NULL_RETURN(delete reinterpret_cast<DynamicRasterState*>((deUintptr)dynamicRasterState.getInternal()));
+}
+
+VkResult destroyDynamicColorBlendState (VkDevice device, VkDynamicColorBlendState dynamicColorBlendState)
+{
+ DE_UNREF(device);
+ VK_NULL_RETURN(delete reinterpret_cast<DynamicColorBlendState*>((deUintptr)dynamicColorBlendState.getInternal()));
+}
+
+VkResult destroyDynamicDepthStencilState (VkDevice device, VkDynamicDepthStencilState dynamicDepthStencilState)
+{
+ DE_UNREF(device);
+ VK_NULL_RETURN(delete reinterpret_cast<DynamicDepthStencilState*>((deUintptr)dynamicDepthStencilState.getInternal()));
+}
+
+VkResult destroyFramebuffer (VkDevice device, VkFramebuffer framebuffer)
+{
+ DE_UNREF(device);
+ VK_NULL_RETURN(delete reinterpret_cast<Framebuffer*>((deUintptr)framebuffer.getInternal()));
+}
+
+VkResult destroyRenderPass (VkDevice device, VkRenderPass renderPass)
+{
+ DE_UNREF(device);
+ VK_NULL_RETURN(delete reinterpret_cast<RenderPass*>((deUintptr)renderPass.getInternal()));
+}
+
+VkResult destroyCommandPool (VkDevice device, VkCmdPool cmdPool)
+{
+ DE_UNREF(device);
+ VK_NULL_RETURN(delete reinterpret_cast<CmdPool*>((deUintptr)cmdPool.getInternal()));
+}
+
+// Command buffers are dispatchable, hence the direct pointer cast.
+VkResult destroyCommandBuffer (VkDevice device, VkCmdBuffer commandBuffer)
+{
+ DE_UNREF(device);
+ VK_NULL_RETURN(delete reinterpret_cast<CmdBuffer*>(commandBuffer));
+}
+
+// Generated no-op stubs: the null driver accepts these calls without doing
+// any work. DE_UNREF silences unused-parameter warnings.
+// NOTE(review): the query stubs do not write through their output pointers
+// (e.g. *pCount, *pFeatures stay unchanged) — confirm callers tolerate this.
+VkResult getPhysicalDeviceFeatures (VkPhysicalDevice physicalDevice, VkPhysicalDeviceFeatures* pFeatures)
+{
+ DE_UNREF(physicalDevice);
+ DE_UNREF(pFeatures);
+ return VK_SUCCESS;
+}
+
+VkResult getPhysicalDeviceFormatProperties (VkPhysicalDevice physicalDevice, VkFormat format, VkFormatProperties* pFormatProperties)
+{
+ DE_UNREF(physicalDevice);
+ DE_UNREF(format);
+ DE_UNREF(pFormatProperties);
+ return VK_SUCCESS;
+}
+
+VkResult getPhysicalDeviceImageFormatProperties (VkPhysicalDevice physicalDevice, VkFormat format, VkImageType type, VkImageTiling tiling, VkImageUsageFlags usage, VkImageFormatProperties* pImageFormatProperties)
+{
+ DE_UNREF(physicalDevice);
+ DE_UNREF(format);
+ DE_UNREF(type);
+ DE_UNREF(tiling);
+ DE_UNREF(usage);
+ DE_UNREF(pImageFormatProperties);
+ return VK_SUCCESS;
+}
+
+VkResult getPhysicalDeviceLimits (VkPhysicalDevice physicalDevice, VkPhysicalDeviceLimits* pLimits)
+{
+ DE_UNREF(physicalDevice);
+ DE_UNREF(pLimits);
+ return VK_SUCCESS;
+}
+
+VkResult getGlobalExtensionProperties (const char* pLayerName, deUint32* pCount, VkExtensionProperties* pProperties)
+{
+ DE_UNREF(pLayerName);
+ DE_UNREF(pCount);
+ DE_UNREF(pProperties);
+ return VK_SUCCESS;
+}
+
+VkResult getPhysicalDeviceExtensionProperties (VkPhysicalDevice physicalDevice, const char* pLayerName, deUint32* pCount, VkExtensionProperties* pProperties)
+{
+ DE_UNREF(physicalDevice);
+ DE_UNREF(pLayerName);
+ DE_UNREF(pCount);
+ DE_UNREF(pProperties);
+ return VK_SUCCESS;
+}
+
+VkResult getGlobalLayerProperties (deUint32* pCount, VkLayerProperties* pProperties)
+{
+ DE_UNREF(pCount);
+ DE_UNREF(pProperties);
+ return VK_SUCCESS;
+}
+
+VkResult getPhysicalDeviceLayerProperties (VkPhysicalDevice physicalDevice, deUint32* pCount, VkLayerProperties* pProperties)
+{
+ DE_UNREF(physicalDevice);
+ DE_UNREF(pCount);
+ DE_UNREF(pProperties);
+ return VK_SUCCESS;
+}
+
+VkResult getDeviceQueue (VkDevice device, deUint32 queueFamilyIndex, deUint32 queueIndex, VkQueue* pQueue)
+{
+ DE_UNREF(device);
+ DE_UNREF(queueFamilyIndex);
+ DE_UNREF(queueIndex);
+ DE_UNREF(pQueue);
+ return VK_SUCCESS;
+}
+
+// Submitted work is silently discarded; no execution ever happens.
+VkResult queueSubmit (VkQueue queue, deUint32 cmdBufferCount, const VkCmdBuffer* pCmdBuffers, VkFence fence)
+{
+ DE_UNREF(queue);
+ DE_UNREF(cmdBufferCount);
+ DE_UNREF(pCmdBuffers);
+ DE_UNREF(fence);
+ return VK_SUCCESS;
+}
+
+// Nothing is ever in flight, so waits complete immediately.
+VkResult queueWaitIdle (VkQueue queue)
+{
+ DE_UNREF(queue);
+ return VK_SUCCESS;
+}
+
+VkResult deviceWaitIdle (VkDevice device)
+{
+ DE_UNREF(device);
+ return VK_SUCCESS;
+}
+
+// Memory is plain host memory, so unmap/flush/invalidate have nothing to do.
+VkResult unmapMemory (VkDevice device, VkDeviceMemory mem)
+{
+ DE_UNREF(device);
+ DE_UNREF(mem);
+ return VK_SUCCESS;
+}
+
+VkResult flushMappedMemoryRanges (VkDevice device, deUint32 memRangeCount, const VkMappedMemoryRange* pMemRanges)
+{
+ DE_UNREF(device);
+ DE_UNREF(memRangeCount);
+ DE_UNREF(pMemRanges);
+ return VK_SUCCESS;
+}
+
+VkResult invalidateMappedMemoryRanges (VkDevice device, deUint32 memRangeCount, const VkMappedMemoryRange* pMemRanges)
+{
+ DE_UNREF(device);
+ DE_UNREF(memRangeCount);
+ DE_UNREF(pMemRanges);
+ return VK_SUCCESS;
+}
+
+VkResult getDeviceMemoryCommitment (VkDevice device, VkDeviceMemory memory, VkDeviceSize* pCommittedMemoryInBytes)
+{
+ DE_UNREF(device);
+ DE_UNREF(memory);
+ DE_UNREF(pCommittedMemoryInBytes);
+ return VK_SUCCESS;
+}
+
+// Bindings are not tracked; objects never dereference bound memory.
+VkResult bindBufferMemory (VkDevice device, VkBuffer buffer, VkDeviceMemory mem, VkDeviceSize memOffset)
+{
+ DE_UNREF(device);
+ DE_UNREF(buffer);
+ DE_UNREF(mem);
+ DE_UNREF(memOffset);
+ return VK_SUCCESS;
+}
+
+VkResult bindImageMemory (VkDevice device, VkImage image, VkDeviceMemory mem, VkDeviceSize memOffset)
+{
+ DE_UNREF(device);
+ DE_UNREF(image);
+ DE_UNREF(mem);
+ DE_UNREF(memOffset);
+ return VK_SUCCESS;
+}
+
+VkResult getImageSparseMemoryRequirements (VkDevice device, VkImage image, deUint32* pNumRequirements, VkSparseImageMemoryRequirements* pSparseMemoryRequirements)
+{
+ DE_UNREF(device);
+ DE_UNREF(image);
+ DE_UNREF(pNumRequirements);
+ DE_UNREF(pSparseMemoryRequirements);
+ return VK_SUCCESS;
+}
+
+VkResult getPhysicalDeviceSparseImageFormatProperties (VkPhysicalDevice physicalDevice, VkFormat format, VkImageType type, deUint32 samples, VkImageUsageFlags usage, VkImageTiling tiling, deUint32* pNumProperties, VkSparseImageFormatProperties* pProperties)
+{
+ DE_UNREF(physicalDevice);
+ DE_UNREF(format);
+ DE_UNREF(type);
+ DE_UNREF(samples);
+ DE_UNREF(usage);
+ DE_UNREF(tiling);
+ DE_UNREF(pNumProperties);
+ DE_UNREF(pProperties);
+ return VK_SUCCESS;
+}
+
+VkResult queueBindSparseBufferMemory (VkQueue queue, VkBuffer buffer, deUint32 numBindings, const VkSparseMemoryBindInfo* pBindInfo)
+{
+ DE_UNREF(queue);
+ DE_UNREF(buffer);
+ DE_UNREF(numBindings);
+ DE_UNREF(pBindInfo);
+ return VK_SUCCESS;
+}
+
+VkResult queueBindSparseImageOpaqueMemory (VkQueue queue, VkImage image, deUint32 numBindings, const VkSparseMemoryBindInfo* pBindInfo)
+{
+ DE_UNREF(queue);
+ DE_UNREF(image);
+ DE_UNREF(numBindings);
+ DE_UNREF(pBindInfo);
+ return VK_SUCCESS;
+}
+
+VkResult queueBindSparseImageMemory (VkQueue queue, VkImage image, deUint32 numBindings, const VkSparseImageMemoryBindInfo* pBindInfo)
+{
+ DE_UNREF(queue);
+ DE_UNREF(image);
+ DE_UNREF(numBindings);
+ DE_UNREF(pBindInfo);
+ return VK_SUCCESS;
+}
+
+// Fences/events carry no state; status queries and waits succeed at once.
+VkResult resetFences (VkDevice device, deUint32 fenceCount, const VkFence* pFences)
+{
+ DE_UNREF(device);
+ DE_UNREF(fenceCount);
+ DE_UNREF(pFences);
+ return VK_SUCCESS;
+}
+
+VkResult getFenceStatus (VkDevice device, VkFence fence)
+{
+ DE_UNREF(device);
+ DE_UNREF(fence);
+ return VK_SUCCESS;
+}
+
+VkResult waitForFences (VkDevice device, deUint32 fenceCount, const VkFence* pFences, VkBool32 waitAll, deUint64 timeout)
+{
+ DE_UNREF(device);
+ DE_UNREF(fenceCount);
+ DE_UNREF(pFences);
+ DE_UNREF(waitAll);
+ DE_UNREF(timeout);
+ return VK_SUCCESS;
+}
+
+VkResult queueSignalSemaphore (VkQueue queue, VkSemaphore semaphore)
+{
+ DE_UNREF(queue);
+ DE_UNREF(semaphore);
+ return VK_SUCCESS;
+}
+
+VkResult queueWaitSemaphore (VkQueue queue, VkSemaphore semaphore)
+{
+ DE_UNREF(queue);
+ DE_UNREF(semaphore);
+ return VK_SUCCESS;
+}
+
+VkResult getEventStatus (VkDevice device, VkEvent event)
+{
+ DE_UNREF(device);
+ DE_UNREF(event);
+ return VK_SUCCESS;
+}
+
+VkResult setEvent (VkDevice device, VkEvent event)
+{
+ DE_UNREF(device);
+ DE_UNREF(event);
+ return VK_SUCCESS;
+}
+
+VkResult resetEvent (VkDevice device, VkEvent event)
+{
+ DE_UNREF(device);
+ DE_UNREF(event);
+ return VK_SUCCESS;
+}
+
+VkResult getQueryPoolResults (VkDevice device, VkQueryPool queryPool, deUint32 startQuery, deUint32 queryCount, deUintptr* pDataSize, void* pData, VkQueryResultFlags flags)
+{
+ DE_UNREF(device);
+ DE_UNREF(queryPool);
+ DE_UNREF(startQuery);
+ DE_UNREF(queryCount);
+ DE_UNREF(pDataSize);
+ DE_UNREF(pData);
+ DE_UNREF(flags);
+ return VK_SUCCESS;
+}
+
+VkResult getImageSubresourceLayout (VkDevice device, VkImage image, const VkImageSubresource* pSubresource, VkSubresourceLayout* pLayout)
+{
+ DE_UNREF(device);
+ DE_UNREF(image);
+ DE_UNREF(pSubresource);
+ DE_UNREF(pLayout);
+ return VK_SUCCESS;
+}
+
+// The null driver keeps no pipeline cache data; report a zero-byte cache.
+// Fix over the original: it returned VK_SUCCESS (a VkResult enumerant) from
+// a function whose return type is a byte count. The value is identical (0),
+// but 0u states the intent. This file is generated — mirror the change in
+// the generating script.
+deUintptr getPipelineCacheSize (VkDevice device, VkPipelineCache pipelineCache)
+{
+ DE_UNREF(device);
+ DE_UNREF(pipelineCache);
+ return 0u;
+}
+
+// Further generated no-op stubs: pipeline cache data, descriptor updates and
+// command-buffer lifecycle are accepted but ignored.
+VkResult getPipelineCacheData (VkDevice device, VkPipelineCache pipelineCache, void* pData)
+{
+ DE_UNREF(device);
+ DE_UNREF(pipelineCache);
+ DE_UNREF(pData);
+ return VK_SUCCESS;
+}
+
+VkResult mergePipelineCaches (VkDevice device, VkPipelineCache destCache, deUint32 srcCacheCount, const VkPipelineCache* pSrcCaches)
+{
+ DE_UNREF(device);
+ DE_UNREF(destCache);
+ DE_UNREF(srcCacheCount);
+ DE_UNREF(pSrcCaches);
+ return VK_SUCCESS;
+}
+
+// NOTE(review): allocated descriptor sets are NOT freed here; the pool does
+// not track its sets — confirm the test framework frees sets explicitly.
+VkResult resetDescriptorPool (VkDevice device, VkDescriptorPool descriptorPool)
+{
+ DE_UNREF(device);
+ DE_UNREF(descriptorPool);
+ return VK_SUCCESS;
+}
+
+VkResult updateDescriptorSets (VkDevice device, deUint32 writeCount, const VkWriteDescriptorSet* pDescriptorWrites, deUint32 copyCount, const VkCopyDescriptorSet* pDescriptorCopies)
+{
+ DE_UNREF(device);
+ DE_UNREF(writeCount);
+ DE_UNREF(pDescriptorWrites);
+ DE_UNREF(copyCount);
+ DE_UNREF(pDescriptorCopies);
+ return VK_SUCCESS;
+}
+
+VkResult getRenderAreaGranularity (VkDevice device, VkRenderPass renderPass, VkExtent2D* pGranularity)
+{
+ DE_UNREF(device);
+ DE_UNREF(renderPass);
+ DE_UNREF(pGranularity);
+ return VK_SUCCESS;
+}
+
+VkResult resetCommandPool (VkDevice device, VkCmdPool cmdPool, VkCmdPoolResetFlags flags)
+{
+ DE_UNREF(device);
+ DE_UNREF(cmdPool);
+ DE_UNREF(flags);
+ return VK_SUCCESS;
+}
+
+VkResult beginCommandBuffer (VkCmdBuffer cmdBuffer, const VkCmdBufferBeginInfo* pBeginInfo)
+{
+ DE_UNREF(cmdBuffer);
+ DE_UNREF(pBeginInfo);
+ return VK_SUCCESS;
+}
+
+VkResult endCommandBuffer (VkCmdBuffer cmdBuffer)
+{
+ DE_UNREF(cmdBuffer);
+ return VK_SUCCESS;
+}
+
+VkResult resetCommandBuffer (VkCmdBuffer cmdBuffer, VkCmdBufferResetFlags flags)
+{
+ DE_UNREF(cmdBuffer);
+ DE_UNREF(flags);
+ return VK_SUCCESS;
+}
+
+void cmdBindPipeline (VkCmdBuffer cmdBuffer, VkPipelineBindPoint pipelineBindPoint, VkPipeline pipeline)
+{
+ DE_UNREF(cmdBuffer);
+ DE_UNREF(pipelineBindPoint);
+ DE_UNREF(pipeline);
+}
+
+void cmdBindDynamicViewportState (VkCmdBuffer cmdBuffer, VkDynamicViewportState dynamicViewportState)
+{
+ DE_UNREF(cmdBuffer);
+ DE_UNREF(dynamicViewportState);
+}
+
+void cmdBindDynamicRasterState (VkCmdBuffer cmdBuffer, VkDynamicRasterState dynamicRasterState)
+{
+ DE_UNREF(cmdBuffer);
+ DE_UNREF(dynamicRasterState);
+}
+
+void cmdBindDynamicColorBlendState (VkCmdBuffer cmdBuffer, VkDynamicColorBlendState dynamicColorBlendState)
+{
+ DE_UNREF(cmdBuffer);
+ DE_UNREF(dynamicColorBlendState);
+}
+
+void cmdBindDynamicDepthStencilState (VkCmdBuffer cmdBuffer, VkDynamicDepthStencilState dynamicDepthStencilState)
+{
+ DE_UNREF(cmdBuffer);
+ DE_UNREF(dynamicDepthStencilState);
+}
+
+void cmdBindDescriptorSets (VkCmdBuffer cmdBuffer, VkPipelineBindPoint pipelineBindPoint, VkPipelineLayout layout, deUint32 firstSet, deUint32 setCount, const VkDescriptorSet* pDescriptorSets, deUint32 dynamicOffsetCount, const deUint32* pDynamicOffsets)
+{
+ DE_UNREF(cmdBuffer);
+ DE_UNREF(pipelineBindPoint);
+ DE_UNREF(layout);
+ DE_UNREF(firstSet);
+ DE_UNREF(setCount);
+ DE_UNREF(pDescriptorSets);
+ DE_UNREF(dynamicOffsetCount);
+ DE_UNREF(pDynamicOffsets);
+}
+
+void cmdBindIndexBuffer (VkCmdBuffer cmdBuffer, VkBuffer buffer, VkDeviceSize offset, VkIndexType indexType)
+{
+ DE_UNREF(cmdBuffer);
+ DE_UNREF(buffer);
+ DE_UNREF(offset);
+ DE_UNREF(indexType);
+}
+
+void cmdBindVertexBuffers (VkCmdBuffer cmdBuffer, deUint32 startBinding, deUint32 bindingCount, const VkBuffer* pBuffers, const VkDeviceSize* pOffsets)
+{
+ DE_UNREF(cmdBuffer);
+ DE_UNREF(startBinding);
+ DE_UNREF(bindingCount);
+ DE_UNREF(pBuffers);
+ DE_UNREF(pOffsets);
+}
+
+// Null-driver stubs for draw and dispatch commands (no-ops by design).
+void cmdDraw (VkCmdBuffer cmdBuffer, deUint32 firstVertex, deUint32 vertexCount, deUint32 firstInstance, deUint32 instanceCount)
+{
+ DE_UNREF(cmdBuffer);
+ DE_UNREF(firstVertex);
+ DE_UNREF(vertexCount);
+ DE_UNREF(firstInstance);
+ DE_UNREF(instanceCount);
+}
+
+void cmdDrawIndexed (VkCmdBuffer cmdBuffer, deUint32 firstIndex, deUint32 indexCount, deInt32 vertexOffset, deUint32 firstInstance, deUint32 instanceCount)
+{
+ DE_UNREF(cmdBuffer);
+ DE_UNREF(firstIndex);
+ DE_UNREF(indexCount);
+ DE_UNREF(vertexOffset);
+ DE_UNREF(firstInstance);
+ DE_UNREF(instanceCount);
+}
+
+void cmdDrawIndirect (VkCmdBuffer cmdBuffer, VkBuffer buffer, VkDeviceSize offset, deUint32 count, deUint32 stride)
+{
+ DE_UNREF(cmdBuffer);
+ DE_UNREF(buffer);
+ DE_UNREF(offset);
+ DE_UNREF(count);
+ DE_UNREF(stride);
+}
+
+void cmdDrawIndexedIndirect (VkCmdBuffer cmdBuffer, VkBuffer buffer, VkDeviceSize offset, deUint32 count, deUint32 stride)
+{
+ DE_UNREF(cmdBuffer);
+ DE_UNREF(buffer);
+ DE_UNREF(offset);
+ DE_UNREF(count);
+ DE_UNREF(stride);
+}
+
+void cmdDispatch (VkCmdBuffer cmdBuffer, deUint32 x, deUint32 y, deUint32 z)
+{
+ DE_UNREF(cmdBuffer);
+ DE_UNREF(x);
+ DE_UNREF(y);
+ DE_UNREF(z);
+}
+
+void cmdDispatchIndirect (VkCmdBuffer cmdBuffer, VkBuffer buffer, VkDeviceSize offset)
+{
+ DE_UNREF(cmdBuffer);
+ DE_UNREF(buffer);
+ DE_UNREF(offset);
+}
+
+// Null-driver stubs for copy/blit/update/fill transfer commands (no-ops).
+void cmdCopyBuffer (VkCmdBuffer cmdBuffer, VkBuffer srcBuffer, VkBuffer destBuffer, deUint32 regionCount, const VkBufferCopy* pRegions)
+{
+ DE_UNREF(cmdBuffer);
+ DE_UNREF(srcBuffer);
+ DE_UNREF(destBuffer);
+ DE_UNREF(regionCount);
+ DE_UNREF(pRegions);
+}
+
+void cmdCopyImage (VkCmdBuffer cmdBuffer, VkImage srcImage, VkImageLayout srcImageLayout, VkImage destImage, VkImageLayout destImageLayout, deUint32 regionCount, const VkImageCopy* pRegions)
+{
+ DE_UNREF(cmdBuffer);
+ DE_UNREF(srcImage);
+ DE_UNREF(srcImageLayout);
+ DE_UNREF(destImage);
+ DE_UNREF(destImageLayout);
+ DE_UNREF(regionCount);
+ DE_UNREF(pRegions);
+}
+
+void cmdBlitImage (VkCmdBuffer cmdBuffer, VkImage srcImage, VkImageLayout srcImageLayout, VkImage destImage, VkImageLayout destImageLayout, deUint32 regionCount, const VkImageBlit* pRegions, VkTexFilter filter)
+{
+ DE_UNREF(cmdBuffer);
+ DE_UNREF(srcImage);
+ DE_UNREF(srcImageLayout);
+ DE_UNREF(destImage);
+ DE_UNREF(destImageLayout);
+ DE_UNREF(regionCount);
+ DE_UNREF(pRegions);
+ DE_UNREF(filter);
+}
+
+void cmdCopyBufferToImage (VkCmdBuffer cmdBuffer, VkBuffer srcBuffer, VkImage destImage, VkImageLayout destImageLayout, deUint32 regionCount, const VkBufferImageCopy* pRegions)
+{
+ DE_UNREF(cmdBuffer);
+ DE_UNREF(srcBuffer);
+ DE_UNREF(destImage);
+ DE_UNREF(destImageLayout);
+ DE_UNREF(regionCount);
+ DE_UNREF(pRegions);
+}
+
+void cmdCopyImageToBuffer (VkCmdBuffer cmdBuffer, VkImage srcImage, VkImageLayout srcImageLayout, VkBuffer destBuffer, deUint32 regionCount, const VkBufferImageCopy* pRegions)
+{
+ DE_UNREF(cmdBuffer);
+ DE_UNREF(srcImage);
+ DE_UNREF(srcImageLayout);
+ DE_UNREF(destBuffer);
+ DE_UNREF(regionCount);
+ DE_UNREF(pRegions);
+}
+
+void cmdUpdateBuffer (VkCmdBuffer cmdBuffer, VkBuffer destBuffer, VkDeviceSize destOffset, VkDeviceSize dataSize, const deUint32* pData)
+{
+ DE_UNREF(cmdBuffer);
+ DE_UNREF(destBuffer);
+ DE_UNREF(destOffset);
+ DE_UNREF(dataSize);
+ DE_UNREF(pData);
+}
+
+void cmdFillBuffer (VkCmdBuffer cmdBuffer, VkBuffer destBuffer, VkDeviceSize destOffset, VkDeviceSize fillSize, deUint32 data)
+{
+ DE_UNREF(cmdBuffer);
+ DE_UNREF(destBuffer);
+ DE_UNREF(destOffset);
+ DE_UNREF(fillSize);
+ DE_UNREF(data);
+}
+
+// Null-driver stubs for clear and resolve commands (no-ops).
+void cmdClearColorImage (VkCmdBuffer cmdBuffer, VkImage image, VkImageLayout imageLayout, const VkClearColorValue* pColor, deUint32 rangeCount, const VkImageSubresourceRange* pRanges)
+{
+ DE_UNREF(cmdBuffer);
+ DE_UNREF(image);
+ DE_UNREF(imageLayout);
+ DE_UNREF(pColor);
+ DE_UNREF(rangeCount);
+ DE_UNREF(pRanges);
+}
+
+void cmdClearDepthStencilImage (VkCmdBuffer cmdBuffer, VkImage image, VkImageLayout imageLayout, float depth, deUint32 stencil, deUint32 rangeCount, const VkImageSubresourceRange* pRanges)
+{
+ DE_UNREF(cmdBuffer);
+ DE_UNREF(image);
+ DE_UNREF(imageLayout);
+ DE_UNREF(depth);
+ DE_UNREF(stencil);
+ DE_UNREF(rangeCount);
+ DE_UNREF(pRanges);
+}
+
+void cmdClearColorAttachment (VkCmdBuffer cmdBuffer, deUint32 colorAttachment, VkImageLayout imageLayout, const VkClearColorValue* pColor, deUint32 rectCount, const VkRect3D* pRects)
+{
+ DE_UNREF(cmdBuffer);
+ DE_UNREF(colorAttachment);
+ DE_UNREF(imageLayout);
+ DE_UNREF(pColor);
+ DE_UNREF(rectCount);
+ DE_UNREF(pRects);
+}
+
+void cmdClearDepthStencilAttachment (VkCmdBuffer cmdBuffer, VkImageAspectFlags imageAspectMask, VkImageLayout imageLayout, float depth, deUint32 stencil, deUint32 rectCount, const VkRect3D* pRects)
+{
+ DE_UNREF(cmdBuffer);
+ DE_UNREF(imageAspectMask);
+ DE_UNREF(imageLayout);
+ DE_UNREF(depth);
+ DE_UNREF(stencil);
+ DE_UNREF(rectCount);
+ DE_UNREF(pRects);
+}
+
+void cmdResolveImage (VkCmdBuffer cmdBuffer, VkImage srcImage, VkImageLayout srcImageLayout, VkImage destImage, VkImageLayout destImageLayout, deUint32 regionCount, const VkImageResolve* pRegions)
+{
+ DE_UNREF(cmdBuffer);
+ DE_UNREF(srcImage);
+ DE_UNREF(srcImageLayout);
+ DE_UNREF(destImage);
+ DE_UNREF(destImageLayout);
+ DE_UNREF(regionCount);
+ DE_UNREF(pRegions);
+}
+
+// Null-driver stubs for event and synchronization-barrier commands (no-ops).
+void cmdSetEvent (VkCmdBuffer cmdBuffer, VkEvent event, VkPipelineStageFlags stageMask)
+{
+ DE_UNREF(cmdBuffer);
+ DE_UNREF(event);
+ DE_UNREF(stageMask);
+}
+
+void cmdResetEvent (VkCmdBuffer cmdBuffer, VkEvent event, VkPipelineStageFlags stageMask)
+{
+ DE_UNREF(cmdBuffer);
+ DE_UNREF(event);
+ DE_UNREF(stageMask);
+}
+
+void cmdWaitEvents (VkCmdBuffer cmdBuffer, deUint32 eventCount, const VkEvent* pEvents, VkPipelineStageFlags srcStageMask, VkPipelineStageFlags destStageMask, deUint32 memBarrierCount, const void* const* ppMemBarriers)
+{
+ DE_UNREF(cmdBuffer);
+ DE_UNREF(eventCount);
+ DE_UNREF(pEvents);
+ DE_UNREF(srcStageMask);
+ DE_UNREF(destStageMask);
+ DE_UNREF(memBarrierCount);
+ DE_UNREF(ppMemBarriers);
+}
+
+void cmdPipelineBarrier (VkCmdBuffer cmdBuffer, VkPipelineStageFlags srcStageMask, VkPipelineStageFlags destStageMask, VkBool32 byRegion, deUint32 memBarrierCount, const void* const* ppMemBarriers)
+{
+ DE_UNREF(cmdBuffer);
+ DE_UNREF(srcStageMask);
+ DE_UNREF(destStageMask);
+ DE_UNREF(byRegion);
+ DE_UNREF(memBarrierCount);
+ DE_UNREF(ppMemBarriers);
+}
+
+// Null-driver stubs for query and timestamp commands (no-ops).
+void cmdBeginQuery (VkCmdBuffer cmdBuffer, VkQueryPool queryPool, deUint32 slot, VkQueryControlFlags flags)
+{
+ DE_UNREF(cmdBuffer);
+ DE_UNREF(queryPool);
+ DE_UNREF(slot);
+ DE_UNREF(flags);
+}
+
+void cmdEndQuery (VkCmdBuffer cmdBuffer, VkQueryPool queryPool, deUint32 slot)
+{
+ DE_UNREF(cmdBuffer);
+ DE_UNREF(queryPool);
+ DE_UNREF(slot);
+}
+
+void cmdResetQueryPool (VkCmdBuffer cmdBuffer, VkQueryPool queryPool, deUint32 startQuery, deUint32 queryCount)
+{
+ DE_UNREF(cmdBuffer);
+ DE_UNREF(queryPool);
+ DE_UNREF(startQuery);
+ DE_UNREF(queryCount);
+}
+
+void cmdWriteTimestamp (VkCmdBuffer cmdBuffer, VkTimestampType timestampType, VkBuffer destBuffer, VkDeviceSize destOffset)
+{
+ DE_UNREF(cmdBuffer);
+ DE_UNREF(timestampType);
+ DE_UNREF(destBuffer);
+ DE_UNREF(destOffset);
+}
+
+void cmdCopyQueryPoolResults (VkCmdBuffer cmdBuffer, VkQueryPool queryPool, deUint32 startQuery, deUint32 queryCount, VkBuffer destBuffer, VkDeviceSize destOffset, VkDeviceSize destStride, VkQueryResultFlags flags)
+{
+ DE_UNREF(cmdBuffer);
+ DE_UNREF(queryPool);
+ DE_UNREF(startQuery);
+ DE_UNREF(queryCount);
+ DE_UNREF(destBuffer);
+ DE_UNREF(destOffset);
+ DE_UNREF(destStride);
+ DE_UNREF(flags);
+}
+
+// Null-driver stubs for push constants, render-pass control and secondary
+// command buffer execution (no-ops).
+void cmdPushConstants (VkCmdBuffer cmdBuffer, VkPipelineLayout layout, VkShaderStageFlags stageFlags, deUint32 start, deUint32 length, const void* values)
+{
+ DE_UNREF(cmdBuffer);
+ DE_UNREF(layout);
+ DE_UNREF(stageFlags);
+ DE_UNREF(start);
+ DE_UNREF(length);
+ DE_UNREF(values);
+}
+
+void cmdBeginRenderPass (VkCmdBuffer cmdBuffer, const VkRenderPassBeginInfo* pRenderPassBegin, VkRenderPassContents contents)
+{
+ DE_UNREF(cmdBuffer);
+ DE_UNREF(pRenderPassBegin);
+ DE_UNREF(contents);
+}
+
+void cmdNextSubpass (VkCmdBuffer cmdBuffer, VkRenderPassContents contents)
+{
+ DE_UNREF(cmdBuffer);
+ DE_UNREF(contents);
+}
+
+void cmdEndRenderPass (VkCmdBuffer cmdBuffer)
+{
+ DE_UNREF(cmdBuffer);
+}
+
+void cmdExecuteCommands (VkCmdBuffer cmdBuffer, deUint32 cmdBuffersCount, const VkCmdBuffer* pCmdBuffers)
+{
+ DE_UNREF(cmdBuffer);
+ DE_UNREF(cmdBuffersCount);
+ DE_UNREF(pCmdBuffers);
+}
+
+// Dispatch table for platform-level entry points exposed by the null driver.
+static const tcu::StaticFunctionLibrary::Entry s_platformFunctions[] =
+{
+ VK_NULL_FUNC_ENTRY(vkCreateInstance, createInstance),
+ VK_NULL_FUNC_ENTRY(vkGetInstanceProcAddr, getInstanceProcAddr),
+};
+
+// Dispatch table for instance-level entry points (resolved via
+// getInstanceProcAddr in the null driver).
+static const tcu::StaticFunctionLibrary::Entry s_instanceFunctions[] =
+{
+ VK_NULL_FUNC_ENTRY(vkDestroyInstance, destroyInstance),
+ VK_NULL_FUNC_ENTRY(vkEnumeratePhysicalDevices, enumeratePhysicalDevices),
+ VK_NULL_FUNC_ENTRY(vkGetPhysicalDeviceFeatures, getPhysicalDeviceFeatures),
+ VK_NULL_FUNC_ENTRY(vkGetPhysicalDeviceFormatProperties, getPhysicalDeviceFormatProperties),
+ VK_NULL_FUNC_ENTRY(vkGetPhysicalDeviceImageFormatProperties, getPhysicalDeviceImageFormatProperties),
+ VK_NULL_FUNC_ENTRY(vkGetPhysicalDeviceLimits, getPhysicalDeviceLimits),
+ VK_NULL_FUNC_ENTRY(vkGetPhysicalDeviceProperties, getPhysicalDeviceProperties),
+ VK_NULL_FUNC_ENTRY(vkGetPhysicalDeviceQueueCount, getPhysicalDeviceQueueCount),
+ VK_NULL_FUNC_ENTRY(vkGetPhysicalDeviceQueueProperties, getPhysicalDeviceQueueProperties),
+ VK_NULL_FUNC_ENTRY(vkGetPhysicalDeviceMemoryProperties, getPhysicalDeviceMemoryProperties),
+ VK_NULL_FUNC_ENTRY(vkGetDeviceProcAddr, getDeviceProcAddr),
+ VK_NULL_FUNC_ENTRY(vkCreateDevice, createDevice),
+};
+
+// Dispatch table for device-level entry points. Entries map each vk* entry
+// point to the corresponding null-driver stub defined above; keep this list
+// in sync with the stub definitions.
+static const tcu::StaticFunctionLibrary::Entry s_deviceFunctions[] =
+{
+ VK_NULL_FUNC_ENTRY(vkDestroyDevice, destroyDevice),
+ VK_NULL_FUNC_ENTRY(vkGetGlobalExtensionProperties, getGlobalExtensionProperties),
+ VK_NULL_FUNC_ENTRY(vkGetPhysicalDeviceExtensionProperties, getPhysicalDeviceExtensionProperties),
+ VK_NULL_FUNC_ENTRY(vkGetGlobalLayerProperties, getGlobalLayerProperties),
+ VK_NULL_FUNC_ENTRY(vkGetPhysicalDeviceLayerProperties, getPhysicalDeviceLayerProperties),
+ VK_NULL_FUNC_ENTRY(vkGetDeviceQueue, getDeviceQueue),
+ VK_NULL_FUNC_ENTRY(vkQueueSubmit, queueSubmit),
+ VK_NULL_FUNC_ENTRY(vkQueueWaitIdle, queueWaitIdle),
+ VK_NULL_FUNC_ENTRY(vkDeviceWaitIdle, deviceWaitIdle),
+ VK_NULL_FUNC_ENTRY(vkAllocMemory, allocMemory),
+ VK_NULL_FUNC_ENTRY(vkFreeMemory, freeMemory),
+ VK_NULL_FUNC_ENTRY(vkMapMemory, mapMemory),
+ VK_NULL_FUNC_ENTRY(vkUnmapMemory, unmapMemory),
+ VK_NULL_FUNC_ENTRY(vkFlushMappedMemoryRanges, flushMappedMemoryRanges),
+ VK_NULL_FUNC_ENTRY(vkInvalidateMappedMemoryRanges, invalidateMappedMemoryRanges),
+ VK_NULL_FUNC_ENTRY(vkGetDeviceMemoryCommitment, getDeviceMemoryCommitment),
+ VK_NULL_FUNC_ENTRY(vkBindBufferMemory, bindBufferMemory),
+ VK_NULL_FUNC_ENTRY(vkBindImageMemory, bindImageMemory),
+ VK_NULL_FUNC_ENTRY(vkGetBufferMemoryRequirements, getBufferMemoryRequirements),
+ VK_NULL_FUNC_ENTRY(vkGetImageMemoryRequirements, getImageMemoryRequirements),
+ VK_NULL_FUNC_ENTRY(vkGetImageSparseMemoryRequirements, getImageSparseMemoryRequirements),
+ VK_NULL_FUNC_ENTRY(vkGetPhysicalDeviceSparseImageFormatProperties, getPhysicalDeviceSparseImageFormatProperties),
+ VK_NULL_FUNC_ENTRY(vkQueueBindSparseBufferMemory, queueBindSparseBufferMemory),
+ VK_NULL_FUNC_ENTRY(vkQueueBindSparseImageOpaqueMemory, queueBindSparseImageOpaqueMemory),
+ VK_NULL_FUNC_ENTRY(vkQueueBindSparseImageMemory, queueBindSparseImageMemory),
+ VK_NULL_FUNC_ENTRY(vkCreateFence, createFence),
+ VK_NULL_FUNC_ENTRY(vkDestroyFence, destroyFence),
+ VK_NULL_FUNC_ENTRY(vkResetFences, resetFences),
+ VK_NULL_FUNC_ENTRY(vkGetFenceStatus, getFenceStatus),
+ VK_NULL_FUNC_ENTRY(vkWaitForFences, waitForFences),
+ VK_NULL_FUNC_ENTRY(vkCreateSemaphore, createSemaphore),
+ VK_NULL_FUNC_ENTRY(vkDestroySemaphore, destroySemaphore),
+ VK_NULL_FUNC_ENTRY(vkQueueSignalSemaphore, queueSignalSemaphore),
+ VK_NULL_FUNC_ENTRY(vkQueueWaitSemaphore, queueWaitSemaphore),
+ VK_NULL_FUNC_ENTRY(vkCreateEvent, createEvent),
+ VK_NULL_FUNC_ENTRY(vkDestroyEvent, destroyEvent),
+ VK_NULL_FUNC_ENTRY(vkGetEventStatus, getEventStatus),
+ VK_NULL_FUNC_ENTRY(vkSetEvent, setEvent),
+ VK_NULL_FUNC_ENTRY(vkResetEvent, resetEvent),
+ VK_NULL_FUNC_ENTRY(vkCreateQueryPool, createQueryPool),
+ VK_NULL_FUNC_ENTRY(vkDestroyQueryPool, destroyQueryPool),
+ VK_NULL_FUNC_ENTRY(vkGetQueryPoolResults, getQueryPoolResults),
+ VK_NULL_FUNC_ENTRY(vkCreateBuffer, createBuffer),
+ VK_NULL_FUNC_ENTRY(vkDestroyBuffer, destroyBuffer),
+ VK_NULL_FUNC_ENTRY(vkCreateBufferView, createBufferView),
+ VK_NULL_FUNC_ENTRY(vkDestroyBufferView, destroyBufferView),
+ VK_NULL_FUNC_ENTRY(vkCreateImage, createImage),
+ VK_NULL_FUNC_ENTRY(vkDestroyImage, destroyImage),
+ VK_NULL_FUNC_ENTRY(vkGetImageSubresourceLayout, getImageSubresourceLayout),
+ VK_NULL_FUNC_ENTRY(vkCreateImageView, createImageView),
+ VK_NULL_FUNC_ENTRY(vkDestroyImageView, destroyImageView),
+ VK_NULL_FUNC_ENTRY(vkCreateAttachmentView, createAttachmentView),
+ VK_NULL_FUNC_ENTRY(vkDestroyAttachmentView, destroyAttachmentView),
+ VK_NULL_FUNC_ENTRY(vkCreateShaderModule, createShaderModule),
+ VK_NULL_FUNC_ENTRY(vkDestroyShaderModule, destroyShaderModule),
+ VK_NULL_FUNC_ENTRY(vkCreateShader, createShader),
+ VK_NULL_FUNC_ENTRY(vkDestroyShader, destroyShader),
+ VK_NULL_FUNC_ENTRY(vkCreatePipelineCache, createPipelineCache),
+ VK_NULL_FUNC_ENTRY(vkDestroyPipelineCache, destroyPipelineCache),
+ VK_NULL_FUNC_ENTRY(vkGetPipelineCacheSize, getPipelineCacheSize),
+ VK_NULL_FUNC_ENTRY(vkGetPipelineCacheData, getPipelineCacheData),
+ VK_NULL_FUNC_ENTRY(vkMergePipelineCaches, mergePipelineCaches),
+ VK_NULL_FUNC_ENTRY(vkCreateGraphicsPipelines, createGraphicsPipelines),
+ VK_NULL_FUNC_ENTRY(vkCreateComputePipelines, createComputePipelines),
+ VK_NULL_FUNC_ENTRY(vkDestroyPipeline, destroyPipeline),
+ VK_NULL_FUNC_ENTRY(vkCreatePipelineLayout, createPipelineLayout),
+ VK_NULL_FUNC_ENTRY(vkDestroyPipelineLayout, destroyPipelineLayout),
+ VK_NULL_FUNC_ENTRY(vkCreateSampler, createSampler),
+ VK_NULL_FUNC_ENTRY(vkDestroySampler, destroySampler),
+ VK_NULL_FUNC_ENTRY(vkCreateDescriptorSetLayout, createDescriptorSetLayout),
+ VK_NULL_FUNC_ENTRY(vkDestroyDescriptorSetLayout, destroyDescriptorSetLayout),
+ VK_NULL_FUNC_ENTRY(vkCreateDescriptorPool, createDescriptorPool),
+ VK_NULL_FUNC_ENTRY(vkDestroyDescriptorPool, destroyDescriptorPool),
+ VK_NULL_FUNC_ENTRY(vkResetDescriptorPool, resetDescriptorPool),
+ VK_NULL_FUNC_ENTRY(vkAllocDescriptorSets, allocDescriptorSets),
+ VK_NULL_FUNC_ENTRY(vkFreeDescriptorSets, freeDescriptorSets),
+ VK_NULL_FUNC_ENTRY(vkUpdateDescriptorSets, updateDescriptorSets),
+ VK_NULL_FUNC_ENTRY(vkCreateDynamicViewportState, createDynamicViewportState),
+ VK_NULL_FUNC_ENTRY(vkDestroyDynamicViewportState, destroyDynamicViewportState),
+ VK_NULL_FUNC_ENTRY(vkCreateDynamicRasterState, createDynamicRasterState),
+ VK_NULL_FUNC_ENTRY(vkDestroyDynamicRasterState, destroyDynamicRasterState),
+ VK_NULL_FUNC_ENTRY(vkCreateDynamicColorBlendState, createDynamicColorBlendState),
+ VK_NULL_FUNC_ENTRY(vkDestroyDynamicColorBlendState, destroyDynamicColorBlendState),
+ VK_NULL_FUNC_ENTRY(vkCreateDynamicDepthStencilState, createDynamicDepthStencilState),
+ VK_NULL_FUNC_ENTRY(vkDestroyDynamicDepthStencilState, destroyDynamicDepthStencilState),
+ VK_NULL_FUNC_ENTRY(vkCreateFramebuffer, createFramebuffer),
+ VK_NULL_FUNC_ENTRY(vkDestroyFramebuffer, destroyFramebuffer),
+ VK_NULL_FUNC_ENTRY(vkCreateRenderPass, createRenderPass),
+ VK_NULL_FUNC_ENTRY(vkDestroyRenderPass, destroyRenderPass),
+ VK_NULL_FUNC_ENTRY(vkGetRenderAreaGranularity, getRenderAreaGranularity),
+ VK_NULL_FUNC_ENTRY(vkCreateCommandPool, createCommandPool),
+ VK_NULL_FUNC_ENTRY(vkDestroyCommandPool, destroyCommandPool),
+ VK_NULL_FUNC_ENTRY(vkResetCommandPool, resetCommandPool),
+ VK_NULL_FUNC_ENTRY(vkCreateCommandBuffer, createCommandBuffer),
+ VK_NULL_FUNC_ENTRY(vkDestroyCommandBuffer, destroyCommandBuffer),
+ VK_NULL_FUNC_ENTRY(vkBeginCommandBuffer, beginCommandBuffer),
+ VK_NULL_FUNC_ENTRY(vkEndCommandBuffer, endCommandBuffer),
+ VK_NULL_FUNC_ENTRY(vkResetCommandBuffer, resetCommandBuffer),
+ VK_NULL_FUNC_ENTRY(vkCmdBindPipeline, cmdBindPipeline),
+ VK_NULL_FUNC_ENTRY(vkCmdBindDynamicViewportState, cmdBindDynamicViewportState),
+ VK_NULL_FUNC_ENTRY(vkCmdBindDynamicRasterState, cmdBindDynamicRasterState),
+ VK_NULL_FUNC_ENTRY(vkCmdBindDynamicColorBlendState, cmdBindDynamicColorBlendState),
+ VK_NULL_FUNC_ENTRY(vkCmdBindDynamicDepthStencilState, cmdBindDynamicDepthStencilState),
+ VK_NULL_FUNC_ENTRY(vkCmdBindDescriptorSets, cmdBindDescriptorSets),
+ VK_NULL_FUNC_ENTRY(vkCmdBindIndexBuffer, cmdBindIndexBuffer),
+ VK_NULL_FUNC_ENTRY(vkCmdBindVertexBuffers, cmdBindVertexBuffers),
+ VK_NULL_FUNC_ENTRY(vkCmdDraw, cmdDraw),
+ VK_NULL_FUNC_ENTRY(vkCmdDrawIndexed, cmdDrawIndexed),
+ VK_NULL_FUNC_ENTRY(vkCmdDrawIndirect, cmdDrawIndirect),
+ VK_NULL_FUNC_ENTRY(vkCmdDrawIndexedIndirect, cmdDrawIndexedIndirect),
+ VK_NULL_FUNC_ENTRY(vkCmdDispatch, cmdDispatch),
+ VK_NULL_FUNC_ENTRY(vkCmdDispatchIndirect, cmdDispatchIndirect),
+ VK_NULL_FUNC_ENTRY(vkCmdCopyBuffer, cmdCopyBuffer),
+ VK_NULL_FUNC_ENTRY(vkCmdCopyImage, cmdCopyImage),
+ VK_NULL_FUNC_ENTRY(vkCmdBlitImage, cmdBlitImage),
+ VK_NULL_FUNC_ENTRY(vkCmdCopyBufferToImage, cmdCopyBufferToImage),
+ VK_NULL_FUNC_ENTRY(vkCmdCopyImageToBuffer, cmdCopyImageToBuffer),
+ VK_NULL_FUNC_ENTRY(vkCmdUpdateBuffer, cmdUpdateBuffer),
+ VK_NULL_FUNC_ENTRY(vkCmdFillBuffer, cmdFillBuffer),
+ VK_NULL_FUNC_ENTRY(vkCmdClearColorImage, cmdClearColorImage),
+ VK_NULL_FUNC_ENTRY(vkCmdClearDepthStencilImage, cmdClearDepthStencilImage),
+ VK_NULL_FUNC_ENTRY(vkCmdClearColorAttachment, cmdClearColorAttachment),
+ VK_NULL_FUNC_ENTRY(vkCmdClearDepthStencilAttachment, cmdClearDepthStencilAttachment),
+ VK_NULL_FUNC_ENTRY(vkCmdResolveImage, cmdResolveImage),
+ VK_NULL_FUNC_ENTRY(vkCmdSetEvent, cmdSetEvent),
+ VK_NULL_FUNC_ENTRY(vkCmdResetEvent, cmdResetEvent),
+ VK_NULL_FUNC_ENTRY(vkCmdWaitEvents, cmdWaitEvents),
+ VK_NULL_FUNC_ENTRY(vkCmdPipelineBarrier, cmdPipelineBarrier),
+ VK_NULL_FUNC_ENTRY(vkCmdBeginQuery, cmdBeginQuery),
+ VK_NULL_FUNC_ENTRY(vkCmdEndQuery, cmdEndQuery),
+ VK_NULL_FUNC_ENTRY(vkCmdResetQueryPool, cmdResetQueryPool),
+ VK_NULL_FUNC_ENTRY(vkCmdWriteTimestamp, cmdWriteTimestamp),
+ VK_NULL_FUNC_ENTRY(vkCmdCopyQueryPoolResults, cmdCopyQueryPoolResults),
+ VK_NULL_FUNC_ENTRY(vkCmdPushConstants, cmdPushConstants),
+ VK_NULL_FUNC_ENTRY(vkCmdBeginRenderPass, cmdBeginRenderPass),
+ VK_NULL_FUNC_ENTRY(vkCmdNextSubpass, cmdNextSubpass),
+ VK_NULL_FUNC_ENTRY(vkCmdEndRenderPass, cmdEndRenderPass),
+ VK_NULL_FUNC_ENTRY(vkCmdExecuteCommands, cmdExecuteCommands),
+};
+
--- /dev/null
+/*-------------------------------------------------------------------------
+ * Vulkan CTS Framework
+ * --------------------
+ *
+ * Copyright (c) 2015 Google Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and/or associated documentation files (the
+ * "Materials"), to deal in the Materials without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Materials, and to
+ * permit persons to whom the Materials are furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice(s) and this permission notice shall be
+ * included in all copies or substantial portions of the Materials.
+ *
+ * The Materials are Confidential Information as defined by the
+ * Khronos Membership Agreement until designated non-confidential by
+ * Khronos, at which point this condition clause shall be removed.
+ *
+ * THE MATERIALS ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * MATERIALS OR THE USE OR OTHER DEALINGS IN THE MATERIALS.
+ *
+ *//*!
+ * \file
+ * \brief Vulkan platform abstraction.
+ *//*--------------------------------------------------------------------*/
+
+#include "vkPlatform.hpp"
+#include "tcuFunctionLibrary.hpp"
+
+namespace vk
+{
+
+// PlatformDriver: loads the platform-level entry points (vkCreateInstance,
+// vkGetInstanceProcAddr) directly from the driver library. The generated
+// .inl file expands GET_PROC_ADDR for each function pointer in m_vk.
+PlatformDriver::PlatformDriver (const tcu::FunctionLibrary& library)
+{
+#define GET_PROC_ADDR(NAME) library.getFunction(NAME)
+#include "vkInitPlatformFunctionPointers.inl"
+#undef GET_PROC_ADDR
+}
+
+PlatformDriver::~PlatformDriver (void)
+{
+}
+
+// InstanceDriver: resolves instance-level entry points through
+// vkGetInstanceProcAddr for the given instance.
+InstanceDriver::InstanceDriver (const PlatformInterface& platformInterface, VkInstance instance)
+{
+#define GET_PROC_ADDR(NAME) platformInterface.getInstanceProcAddr(instance, NAME)
+#include "vkInitInstanceFunctionPointers.inl"
+#undef GET_PROC_ADDR
+}
+
+InstanceDriver::~InstanceDriver (void)
+{
+}
+
+// DeviceDriver: resolves device-level entry points through
+// vkGetDeviceProcAddr for the given device.
+DeviceDriver::DeviceDriver (const InstanceInterface& instanceInterface, VkDevice device)
+{
+#define GET_PROC_ADDR(NAME) instanceInterface.getDeviceProcAddr(device, NAME)
+#include "vkInitDeviceFunctionPointers.inl"
+#undef GET_PROC_ADDR
+}
+
+DeviceDriver::~DeviceDriver (void)
+{
+}
+
+#include "vkPlatformDriverImpl.inl"
+#include "vkInstanceDriverImpl.inl"
+#include "vkDeviceDriverImpl.inl"
+
+} // vk
--- /dev/null
+#ifndef _VKPLATFORM_HPP
+#define _VKPLATFORM_HPP
+/*-------------------------------------------------------------------------
+ * Vulkan CTS Framework
+ * --------------------
+ *
+ * Copyright (c) 2015 Google Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and/or associated documentation files (the
+ * "Materials"), to deal in the Materials without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Materials, and to
+ * permit persons to whom the Materials are furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice(s) and this permission notice shall be
+ * included in all copies or substantial portions of the Materials.
+ *
+ * The Materials are Confidential Information as defined by the
+ * Khronos Membership Agreement until designated non-confidential by
+ * Khronos, at which point this condition clause shall be removed.
+ *
+ * THE MATERIALS ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * MATERIALS OR THE USE OR OTHER DEALINGS IN THE MATERIALS.
+ *
+ *//*!
+ * \file
+ * \brief Vulkan platform abstraction.
+ *//*--------------------------------------------------------------------*/
+
+#include "vkDefs.hpp"
+
+namespace tcu
+{
+class FunctionLibrary;
+}
+
+namespace vk
+{
+
+// Represents a loaded Vulkan driver library. Implementations own the
+// underlying library handle and expose its platform-level function table.
+class Library
+{
+public:
+ Library (void) {}
+ virtual ~Library (void) {}
+
+ // Returns the platform-level entry points (valid for this library's lifetime).
+ virtual const PlatformInterface& getPlatformInterface (void) const = 0;
+};
+
+// Concrete PlatformInterface backed by function pointers loaded from a
+// tcu::FunctionLibrary. Method declarations and the m_vk pointer struct
+// come from generated .inl files; see the generating script for changes.
+class PlatformDriver : public PlatformInterface
+{
+public:
+ PlatformDriver (const tcu::FunctionLibrary& library);
+ ~PlatformDriver (void);
+
+#include "vkConcretePlatformInterface.inl"
+
+protected:
+ struct Functions
+ {
+#include "vkPlatformFunctionPointers.inl"
+ };
+
+ Functions m_vk;
+};
+
+// Concrete InstanceInterface; function pointers are fetched with
+// vkGetInstanceProcAddr in the constructor (see vkPlatform.cpp).
+class InstanceDriver : public InstanceInterface
+{
+public:
+ InstanceDriver (const PlatformInterface& platformInterface, VkInstance instance);
+ ~InstanceDriver (void);
+
+#include "vkConcreteInstanceInterface.inl"
+
+protected:
+ struct Functions
+ {
+#include "vkInstanceFunctionPointers.inl"
+ };
+
+ Functions m_vk;
+};
+
+// Concrete DeviceInterface; function pointers are fetched with
+// vkGetDeviceProcAddr in the constructor (see vkPlatform.cpp).
+class DeviceDriver : public DeviceInterface
+{
+public:
+ DeviceDriver (const InstanceInterface& instanceInterface, VkDevice device);
+ ~DeviceDriver (void);
+
+#include "vkConcreteDeviceInterface.inl"
+
+protected:
+ struct Functions
+ {
+#include "vkDeviceFunctionPointers.inl"
+ };
+
+ Functions m_vk;
+};
+
+/*--------------------------------------------------------------------*//*!
+ * \brief Vulkan platform interface
+ *
+ * Entry point for acquiring a Vulkan driver library on the current
+ * platform. Test platforms subclass this and implement createLibrary().
+ *//*--------------------------------------------------------------------*/
+class Platform
+{
+public:
+ Platform (void) {}
+
+ // Destructor must be virtual: Platform is an abstract base class
+ // (createLibrary() is pure virtual), and concrete platforms may be
+ // destroyed through a Platform* — a non-virtual destructor would make
+ // that undefined behavior.
+ virtual ~Platform (void) {}
+
+ // Caller takes ownership of the returned Library.
+ // \todo [2015-01-05 pyry] Parametrize this to select for example debug library / interface?
+ virtual Library* createLibrary (void) const = 0;
+};
+
+} // vk
+
+#endif // _VKPLATFORM_HPP
--- /dev/null
+/* WARNING: This is auto-generated file. Do not modify, since changes will
+ * be lost! Modify the generating script instead.
+ */
+
+// NOTE(review): generated trampolines — they simply forward to the function
+// pointers loaded in PlatformDriver's constructor. Any change belongs in the
+// generating script, not here.
+VkResult PlatformDriver::createInstance (const VkInstanceCreateInfo* pCreateInfo, VkInstance* pInstance) const
+{
+ return m_vk.createInstance(pCreateInfo, pInstance);
+}
+
+PFN_vkVoidFunction PlatformDriver::getInstanceProcAddr (VkInstance instance, const char* pName) const
+{
+ return m_vk.getInstanceProcAddr(instance, pName);
+}
--- /dev/null
+/* WARNING: This is auto-generated file. Do not modify, since changes will
+ * be lost! Modify the generating script instead.
+ */
+CreateInstanceFunc createInstance;
+GetInstanceProcAddrFunc getInstanceProcAddr;
--- /dev/null
+/*-------------------------------------------------------------------------
+ * Vulkan CTS Framework
+ * --------------------
+ *
+ * Copyright (c) 2015 Google Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and/or associated documentation files (the
+ * "Materials"), to deal in the Materials without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Materials, and to
+ * permit persons to whom the Materials are furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice(s) and this permission notice shall be
+ * included in all copies or substantial portions of the Materials.
+ *
+ * The Materials are Confidential Information as defined by the
+ * Khronos Membership Agreement until designated non-confidential by
+ * Khronos, at which point this condition clause shall be removed.
+ *
+ * THE MATERIALS ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * MATERIALS OR THE USE OR OTHER DEALINGS IN THE MATERIALS.
+ *
+ *//*!
+ * \file
+ * \brief Program utilities.
+ *//*--------------------------------------------------------------------*/
+
+#include "vkPrograms.hpp"
+#include "vkGlslToSpirV.hpp"
+#include "vkSpirVAsm.hpp"
+#include "vkRefUtil.hpp"
+
+#include "tcuTestLog.hpp"
+
+#include "deArrayUtil.hpp"
+#include "deMemory.h"
+
+namespace vk
+{
+
+using std::string;
+using std::vector;
+using tcu::TestLog;
+
+// ProgramBinary
+
+// Copies binarySize bytes starting at binary into an internally owned buffer;
+// the caller's storage may be freed after construction.
+ProgramBinary::ProgramBinary (ProgramFormat format, size_t binarySize, const deUint8* binary)
+ : m_format (format)
+ , m_binary (binary, binary+binarySize)
+{
+}
+
+// Utils
+
+// Compiles GLSL sources into a program binary of the requested format.
+// Currently only PROGRAM_FORMAT_SPIRV is supported; any other format throws
+// NotSupportedError. Build diagnostics are written into *buildInfo.
+// Caller takes ownership of the returned ProgramBinary.
+ProgramBinary* buildProgram (const glu::ProgramSources& program, ProgramFormat binaryFormat, glu::ShaderProgramInfo* buildInfo)
+{
+ if (binaryFormat == PROGRAM_FORMAT_SPIRV)
+ {
+ vector<deUint8> binary;
+ glslToSpirV(program, &binary, buildInfo);
+ // &binary[0] on an empty vector is undefined behavior; pass DE_NULL
+ // instead (ProgramBinary handles a zero-sized range correctly).
+ return new ProgramBinary(binaryFormat, binary.size(), binary.empty() ? DE_NULL : &binary[0]);
+ }
+ else
+ TCU_THROW(NotSupportedError, "Unsupported program format");
+}
+
+// Assembles SPIR-V assembly text into a SPIR-V binary. Assembly diagnostics
+// are written into *buildInfo. Caller takes ownership of the result.
+ProgramBinary* assembleProgram (const SpirVAsmSource& program, SpirVProgramInfo* buildInfo)
+{
+ vector<deUint8> binary;
+ assembleSpirV(&program, &binary, buildInfo);
+ // &binary[0] on an empty vector is undefined behavior; pass DE_NULL
+ // instead (ProgramBinary handles a zero-sized range correctly).
+ return new ProgramBinary(PROGRAM_FORMAT_SPIRV, binary.size(), binary.empty() ? DE_NULL : &binary[0]);
+}
+
+// Creates a VkShaderModule from a SPIR-V program binary; throws
+// NotSupportedError for any other binary format. Delegates to the
+// VkShaderModuleCreateInfo overload of createShaderModule.
+Move<VkShaderModule> createShaderModule (const DeviceInterface& deviceInterface, VkDevice device, const ProgramBinary& binary, VkShaderModuleCreateFlags flags)
+{
+ if (binary.getFormat() == PROGRAM_FORMAT_SPIRV)
+ {
+ const struct VkShaderModuleCreateInfo shaderModuleInfo =
+ {
+ VK_STRUCTURE_TYPE_SHADER_MODULE_CREATE_INFO,
+ DE_NULL,
+ (deUintptr)binary.getSize(),
+ binary.getBinary(),
+ flags,
+ };
+
+ return createShaderModule(deviceInterface, device, &shaderModuleInfo);
+ }
+ else
+ TCU_THROW(NotSupportedError, "Unsupported program format");
+}
+
+// Maps a VkShaderStage to the corresponding glu::ShaderType.
+// The table is indexed by shaderStage, so entry order must match the
+// VkShaderStage enum order; getSizedArrayElement asserts the table has
+// exactly VK_SHADER_STAGE_LAST entries.
+glu::ShaderType getGluShaderType (VkShaderStage shaderStage)
+{
+ static const glu::ShaderType s_shaderTypes[] =
+ {
+ glu::SHADERTYPE_VERTEX,
+ glu::SHADERTYPE_TESSELLATION_CONTROL,
+ glu::SHADERTYPE_TESSELLATION_EVALUATION,
+ glu::SHADERTYPE_GEOMETRY,
+ glu::SHADERTYPE_FRAGMENT,
+ glu::SHADERTYPE_COMPUTE
+ };
+
+ return de::getSizedArrayElement<VK_SHADER_STAGE_LAST>(s_shaderTypes, shaderStage);
+}
+
+// Maps a glu::ShaderType to the corresponding VkShaderStage.
+// The table is indexed by shaderType, so entry order must match the
+// glu::ShaderType enum order (note: this differs from VkShaderStage order);
+// getSizedArrayElement asserts the table has glu::SHADERTYPE_LAST entries.
+VkShaderStage getVkShaderStage (glu::ShaderType shaderType)
+{
+ static const VkShaderStage s_shaderStages[] =
+ {
+ VK_SHADER_STAGE_VERTEX,
+ VK_SHADER_STAGE_FRAGMENT,
+ VK_SHADER_STAGE_GEOMETRY,
+ VK_SHADER_STAGE_TESS_CONTROL,
+ VK_SHADER_STAGE_TESS_EVALUATION,
+ VK_SHADER_STAGE_COMPUTE
+ };
+
+ return de::getSizedArrayElement<glu::SHADERTYPE_LAST>(s_shaderStages, shaderType);
+}
+
+} // vk
--- /dev/null
+#ifndef _VKPROGRAMS_HPP
+#define _VKPROGRAMS_HPP
+/*-------------------------------------------------------------------------
+ * Vulkan CTS Framework
+ * --------------------
+ *
+ * Copyright (c) 2015 Google Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and/or associated documentation files (the
+ * "Materials"), to deal in the Materials without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Materials, and to
+ * permit persons to whom the Materials are furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice(s) and this permission notice shall be
+ * included in all copies or substantial portions of the Materials.
+ *
+ * The Materials are Confidential Information as defined by the
+ * Khronos Membership Agreement until designated non-confidential by
+ * Khronos, at which point this condition clause shall be removed.
+ *
+ * THE MATERIALS ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * MATERIALS OR THE USE OR OTHER DEALINGS IN THE MATERIALS.
+ *
+ *//*!
+ * \file
+ * \brief Program utilities.
+ *//*--------------------------------------------------------------------*/
+
+#include "vkDefs.hpp"
+#include "vkRef.hpp"
+#include "vkSpirVProgram.hpp"
+#include "gluShaderProgram.hpp"
+#include "deUniquePtr.hpp"
+#include "deSTLUtil.hpp"
+
+#include <vector>
+#include <map>
+
+namespace tcu
+{
+class TestLog;
+} // tcu
+
+namespace vk
+{
+
+enum ProgramFormat
+{
+ PROGRAM_FORMAT_SPIRV = 0,
+
+ PROGRAM_FORMAT_LAST
+};
+
+// Immutable compiled program blob plus its format tag. Owns a copy of the
+// binary data; getBinary() returns DE_NULL for an empty binary.
+class ProgramBinary
+{
+public:
+ ProgramBinary (ProgramFormat format, size_t binarySize, const deUint8* binary);
+
+ ProgramFormat getFormat (void) const { return m_format; }
+ size_t getSize (void) const { return m_binary.size(); }
+ const deUint8* getBinary (void) const { return m_binary.empty() ? DE_NULL : &m_binary[0]; }
+
+private:
+ const ProgramFormat m_format;
+ const std::vector<deUint8> m_binary;
+};
+
+// Owning, name-keyed collection of programs. Programs added via either
+// add() overload are owned by the collection and deleted in clear()/dtor.
+// Iteration order follows std::map ordering of the names.
+template<typename Program>
+class ProgramCollection
+{
+public:
+ ProgramCollection (void);
+ ~ProgramCollection (void);
+
+ void clear (void);
+
+ Program& add (const std::string& name);
+ void add (const std::string& name, de::MovePtr<Program>& program);
+
+ bool contains (const std::string& name) const;
+ const Program& get (const std::string& name) const;
+
+ // Const forward iterator over (name, program) pairs.
+ class Iterator
+ {
+ private:
+ typedef typename std::map<std::string, Program*>::const_iterator IteratorImpl;
+
+ public:
+ explicit Iterator (const IteratorImpl& i) : m_impl(i) {}
+
+ Iterator& operator++ (void) { ++m_impl; return *this; }
+ const Program& operator* (void) const { return getProgram(); }
+
+ const std::string& getName (void) const { return m_impl->first; }
+ const Program& getProgram (void) const { return *m_impl->second; }
+
+ bool operator== (const Iterator& other) const { return m_impl == other.m_impl; }
+ bool operator!= (const Iterator& other) const { return m_impl != other.m_impl; }
+
+ private:
+
+ IteratorImpl m_impl;
+ };
+
+ Iterator begin (void) const { return Iterator(m_programs.begin()); }
+ Iterator end (void) const { return Iterator(m_programs.end()); }
+
+private:
+ typedef std::map<std::string, Program*> ProgramMap;
+
+ // Values are owned raw pointers; released in clear().
+ ProgramMap m_programs;
+};
+
+// Starts empty.
+template<typename Program>
+ProgramCollection<Program>::ProgramCollection (void)
+{
+}
+
+// Destructor deletes all owned programs via clear().
+template<typename Program>
+ProgramCollection<Program>::~ProgramCollection (void)
+{
+	clear();
+}
+
+// Deletes every owned program and empties the map.
+template<typename Program>
+void ProgramCollection<Program>::clear (void)
+{
+	for (typename ProgramMap::const_iterator i = m_programs.begin(); i != m_programs.end(); ++i)
+		delete i->second;
+	m_programs.clear();
+}
+
+// Default-constructs a program under 'name' (asserts the name is not taken) and
+// returns a reference to the newly owned object. The MovePtr guards against a
+// leak if map insertion throws; ownership is released to the map afterwards.
+template<typename Program>
+Program& ProgramCollection<Program>::add (const std::string& name)
+{
+	DE_ASSERT(!contains(name));
+	de::MovePtr<Program> prog = de::newMovePtr<Program>();
+	m_programs[name] = prog.get();
+	prog.release();
+	return *m_programs[name];
+}
+
+// Transfers ownership of 'program' into the collection (asserts the name is not taken).
+template<typename Program>
+void ProgramCollection<Program>::add (const std::string& name, de::MovePtr<Program>& program)
+{
+	DE_ASSERT(!contains(name));
+	m_programs[name] = program.get();
+	program.release();
+}
+
+template<typename Program>
+bool ProgramCollection<Program>::contains (const std::string& name) const
+{
+	return de::contains(m_programs, name);
+}
+
+// Asserts presence; a missing name is a programming error, not a runtime condition.
+template<typename Program>
+const Program& ProgramCollection<Program>::get (const std::string& name) const
+{
+	DE_ASSERT(contains(name));
+	return *m_programs.find(name)->second;
+}
+
+typedef vk::ProgramCollection<glu::ProgramSources> GlslSourceCollection;
+typedef vk::ProgramCollection<vk::SpirVAsmSource> SpirVAsmCollection;
+
+struct SourceCollections
+{
+ GlslSourceCollection glslSources;
+ SpirVAsmCollection spirvAsmSources;
+};
+
+typedef ProgramCollection<ProgramBinary> BinaryCollection;
+
+// \todo [2015-03-13 pyry] Likely need BinaryBuilder abstraction for this
+ProgramBinary* buildProgram (const glu::ProgramSources& program, ProgramFormat binaryFormat, glu::ShaderProgramInfo* buildInfo);
+ProgramBinary* assembleProgram (const vk::SpirVAsmSource& program, SpirVProgramInfo* buildInfo);
+Move<VkShaderModule> createShaderModule (const DeviceInterface& deviceInterface, VkDevice device, const ProgramBinary& binary, VkShaderModuleCreateFlags flags);
+
+glu::ShaderType getGluShaderType (VkShaderStage shaderStage);
+VkShaderStage getVkShaderStage (glu::ShaderType shaderType);
+
+} // vk
+
+#endif // _VKPROGRAMS_HPP
--- /dev/null
+/*-------------------------------------------------------------------------
+ * Vulkan CTS Framework
+ * --------------------
+ *
+ * Copyright (c) 2015 Google Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and/or associated documentation files (the
+ * "Materials"), to deal in the Materials without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Materials, and to
+ * permit persons to whom the Materials are furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice(s) and this permission notice shall be
+ * included in all copies or substantial portions of the Materials.
+ *
+ * The Materials are Confidential Information as defined by the
+ * Khronos Membership Agreement until designated non-confidential by
+ * Khronos, at which point this condition clause shall be removed.
+ *
+ * THE MATERIALS ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * MATERIALS OR THE USE OR OTHER DEALINGS IN THE MATERIALS.
+ *
+ *//*!
+ * \file
+ * \brief Vulkan query utilities.
+ *//*--------------------------------------------------------------------*/
+
+#include "vkQueryUtil.hpp"
+#include "deMemory.h"
+
+namespace vk
+{
+
+//! Enumerates all physical devices of 'instance' using the standard two-call
+//! idiom: first query the count, then fetch that many handles. Fails the test
+//! if the driver reports a different count on the second call.
+std::vector<VkPhysicalDevice> enumeratePhysicalDevices (const InstanceInterface& vk, VkInstance instance)
+{
+	deUint32						numDevices	= 0;
+	std::vector<VkPhysicalDevice>	devices;
+
+	VK_CHECK(vk.enumeratePhysicalDevices(instance, &numDevices, DE_NULL));
+
+	if (numDevices > 0)
+	{
+		devices.resize(numDevices);
+		VK_CHECK(vk.enumeratePhysicalDevices(instance, &numDevices, &devices[0]));
+
+		// Guard against the device count changing between the two queries.
+		if ((size_t)numDevices != devices.size())
+			TCU_FAIL("Returned device count changed between queries");
+	}
+
+	return devices;
+}
+
+
+//! Queries the queue family count of 'physicalDevice' and then its per-queue
+//! properties. Returns an empty vector when zero queues are reported.
+// NOTE(review): unlike enumeratePhysicalDevices() above, this does not
+// re-verify the count after the second call — assumed stable; confirm if needed.
+std::vector<VkPhysicalDeviceQueueProperties> getPhysicalDeviceQueueProperties (const InstanceInterface& vk, VkPhysicalDevice physicalDevice)
+{
+	deUint32										numQueues	= 0;
+	std::vector<VkPhysicalDeviceQueueProperties>	properties;
+
+	VK_CHECK(vk.getPhysicalDeviceQueueCount(physicalDevice, &numQueues));
+
+	if (numQueues > 0)
+	{
+		properties.resize(numQueues);
+		VK_CHECK(vk.getPhysicalDeviceQueueProperties(physicalDevice, numQueues, &properties[0]));
+	}
+
+	return properties;
+}
+
+//! Returns the memory properties of 'physicalDevice'. The struct is zeroed
+//! first so any fields the driver leaves untouched are deterministic.
+VkPhysicalDeviceMemoryProperties getPhysicalDeviceMemoryProperties (const InstanceInterface& vk, VkPhysicalDevice physicalDevice)
+{
+	VkPhysicalDeviceMemoryProperties	properties;
+
+	deMemset(&properties, 0, sizeof(properties));
+
+	VK_CHECK(vk.getPhysicalDeviceMemoryProperties(physicalDevice, &properties));
+	return properties;
+}
+
+//! Convenience wrapper: returns the memory requirements of 'buffer' by value,
+//! checking the API result.
+VkMemoryRequirements getBufferMemoryRequirements (const DeviceInterface& vk, VkDevice device, VkBuffer buffer)
+{
+	VkMemoryRequirements req;
+	VK_CHECK(vk.getBufferMemoryRequirements(device, buffer, &req));
+	return req;
+}
+//! Convenience wrapper: returns the memory requirements of 'image' by value,
+//! checking the API result.
+VkMemoryRequirements getImageMemoryRequirements (const DeviceInterface& vk, VkDevice device, VkImage image)
+{
+	VkMemoryRequirements req;
+	VK_CHECK(vk.getImageMemoryRequirements(device, image, &req));
+	return req;
+}
+
+} // vk
--- /dev/null
+#ifndef _VKQUERYUTIL_HPP
+#define _VKQUERYUTIL_HPP
+/*-------------------------------------------------------------------------
+ * Vulkan CTS Framework
+ * --------------------
+ *
+ * Copyright (c) 2015 Google Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and/or associated documentation files (the
+ * "Materials"), to deal in the Materials without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Materials, and to
+ * permit persons to whom the Materials are furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice(s) and this permission notice shall be
+ * included in all copies or substantial portions of the Materials.
+ *
+ * The Materials are Confidential Information as defined by the
+ * Khronos Membership Agreement until designated non-confidential by
+ * Khronos, at which point this condition clause shall be removed.
+ *
+ * THE MATERIALS ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * MATERIALS OR THE USE OR OTHER DEALINGS IN THE MATERIALS.
+ *
+ *//*!
+ * \file
+ * \brief Vulkan query utilities.
+ *//*--------------------------------------------------------------------*/
+
+#include "vkDefs.hpp"
+
+#include <vector>
+
+namespace vk
+{
+
+std::vector<VkPhysicalDevice> enumeratePhysicalDevices (const InstanceInterface& vk, VkInstance instance);
+std::vector<VkPhysicalDeviceQueueProperties> getPhysicalDeviceQueueProperties (const InstanceInterface& vk, VkPhysicalDevice physicalDevice);
+VkPhysicalDeviceMemoryProperties getPhysicalDeviceMemoryProperties (const InstanceInterface& vk, VkPhysicalDevice physicalDevice);
+
+VkMemoryRequirements getBufferMemoryRequirements (const DeviceInterface& vk, VkDevice device, VkBuffer buffer);
+VkMemoryRequirements getImageMemoryRequirements (const DeviceInterface& vk, VkDevice device, VkImage image);
+
+} // vk
+
+#endif // _VKQUERYUTIL_HPP
--- /dev/null
+/*-------------------------------------------------------------------------
+ * Vulkan CTS Framework
+ * --------------------
+ *
+ * Copyright (c) 2015 Google Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and/or associated documentation files (the
+ * "Materials"), to deal in the Materials without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Materials, and to
+ * permit persons to whom the Materials are furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice(s) and this permission notice shall be
+ * included in all copies or substantial portions of the Materials.
+ *
+ * The Materials are Confidential Information as defined by the
+ * Khronos Membership Agreement until designated non-confidential by
+ * Khronos, at which point this condition clause shall be removed.
+ *
+ * THE MATERIALS ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * MATERIALS OR THE USE OR OTHER DEALINGS IN THE MATERIALS.
+ *
+ *//*!
+ * \file
+ * \brief Vulkan object reference holder.
+ *//*--------------------------------------------------------------------*/
+
+#include "vkRef.hpp"
+
+DE_EMPTY_CPP_FILE
--- /dev/null
+#ifndef _VKREF_HPP
+#define _VKREF_HPP
+/*-------------------------------------------------------------------------
+ * Vulkan CTS Framework
+ * --------------------
+ *
+ * Copyright (c) 2015 Google Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and/or associated documentation files (the
+ * "Materials"), to deal in the Materials without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Materials, and to
+ * permit persons to whom the Materials are furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice(s) and this permission notice shall be
+ * included in all copies or substantial portions of the Materials.
+ *
+ * The Materials are Confidential Information as defined by the
+ * Khronos Membership Agreement until designated non-confidential by
+ * Khronos, at which point this condition clause shall be removed.
+ *
+ * THE MATERIALS ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * MATERIALS OR THE USE OR OTHER DEALINGS IN THE MATERIALS.
+ *
+ *//*!
+ * \file
+ * \brief Vulkan object reference holder.
+ *//*--------------------------------------------------------------------*/
+
+#include "vkDefs.hpp"
+#include "vkStrUtil.hpp"
+#include "deMeta.hpp"
+
+#include <algorithm>
+
+namespace vk
+{
+
+namespace refdetails
+{
+
+using std::swap;
+
+//! Tag wrapper marking a handle as having passed one of the validation helpers
+//! below (check() / notNull() / allowNull()) before being handed to Move/Unique.
+template<typename T>
+struct Checked
+{
+	explicit inline		Checked		(T object_) : object(object_) {}
+
+	T					object;
+};
+
+//! Check that object is not null
+// Throws tcu::TestError (a test failure) if the handle is null.
+template<typename T>
+inline Checked<T> check (T object)
+{
+	if (!object)
+		throw tcu::TestError("Object check() failed", (std::string(getTypeName<T>()) + " = 0").c_str(), __FILE__, __LINE__);
+	return Checked<T>(object);
+}
+
+//! Declare object as checked earlier
+// Throws tcu::InternalError (a framework bug) if the handle is unexpectedly null.
+template<typename T>
+inline Checked<T> notNull (T object)
+{
+	if (!object)
+		throw tcu::InternalError("Null object was given to notNull()", (std::string(getTypeName<T>()) + " = 0").c_str(), __FILE__, __LINE__);
+	return Checked<T>(object);
+}
+
+//! Allow null object
+template<typename T>
+inline Checked<T> allowNull (T object)
+{
+	return Checked<T>(object);
+}
+
+//! Generic destroy-functor for device-level Vulkan objects. operator() is only
+//! declared here; per-type specializations live in vkRefUtilImpl.inl. The
+//! default constructor yields a null deleter for empty references.
+template<typename T>
+class Deleter
+{
+public:
+							Deleter		(const DeviceInterface& deviceIface, VkDevice device)
+								: m_deviceIface	(&deviceIface)
+								, m_device		(device)
+							{}
+							Deleter		(void)
+								: m_deviceIface	(DE_NULL)
+								, m_device		(DE_NULL)
+							{}
+
+	void					operator()	(T obj) const;
+
+private:
+	const DeviceInterface*	m_deviceIface;
+	VkDevice				m_device;
+};
+
+//! Instance deleter: resolves vkDestroyInstance via getInstanceProcAddr at
+//! construction time, since instance destruction outlives other dispatch state.
+template<>
+class Deleter<VkInstance>
+{
+public:
+							Deleter		(const PlatformInterface& platformIface, VkInstance instance)
+								: m_destroyInstance((DestroyInstanceFunc)platformIface.getInstanceProcAddr(instance, "vkDestroyInstance"))
+							{}
+							Deleter		(void)
+								: m_destroyInstance((DestroyInstanceFunc)DE_NULL)
+							{}
+
+	void					operator()	(VkInstance obj) const { DE_TEST_ASSERT(m_destroyInstance(obj) == VK_SUCCESS); }
+
+private:
+	DestroyInstanceFunc		m_destroyInstance;
+};
+
+//! Device deleter: resolves vkDestroyDevice via getDeviceProcAddr at
+//! construction time, analogous to the VkInstance specialization above.
+template<>
+class Deleter<VkDevice>
+{
+public:
+							Deleter		(const InstanceInterface& instanceIface, VkDevice device)
+								: m_destroyDevice((DestroyDeviceFunc)instanceIface.getDeviceProcAddr(device, "vkDestroyDevice"))
+							{}
+							Deleter		(void)
+								: m_destroyDevice((DestroyDeviceFunc)DE_NULL)
+							{}
+
+	void					operator()	(VkDevice obj) const { DE_TEST_ASSERT(m_destroyDevice(obj) == VK_SUCCESS); }
+
+private:
+	DestroyDeviceFunc		m_destroyDevice;
+};
+
+//! Descriptor-set deleter: descriptor sets are not destroyed directly but
+//! freed back into the pool they were allocated from, so the pool handle is
+//! carried alongside the device.
+template<>
+class Deleter<VkDescriptorSet>
+{
+public:
+							Deleter		(const DeviceInterface& deviceIface, VkDevice device, VkDescriptorPool pool)
+								: m_deviceIface	(&deviceIface)
+								, m_device		(device)
+								, m_pool		(pool)
+							{}
+							Deleter		(void)
+								: m_deviceIface	(DE_NULL)
+								, m_device		(DE_NULL)
+								, m_pool		(DE_NULL)
+							{}
+
+	void					operator()	(VkDescriptorSet obj) const { DE_TEST_ASSERT(m_deviceIface->freeDescriptorSets(m_device, m_pool, 1, &obj) == VK_SUCCESS); }
+
+private:
+	const DeviceInterface*	m_deviceIface;
+	VkDevice				m_device;
+	VkDescriptorPool		m_pool;
+};
+
+//! Plain value pair of (object handle, deleter) passed between reference
+//! holders when ownership is transferred. Default state is a null handle with
+//! a default-constructed (null) deleter.
+template<typename T>
+struct RefData
+{
+				RefData		(T object_, Deleter<T> deleter_)
+								: object	(object_)
+								, deleter	(deleter_)
+				{}
+				RefData		(void)
+								: object	(0)
+				{}
+
+	T			object;
+	Deleter<T>	deleter;
+};
+
+//! Common base for Move/Unique: owns a (handle, deleter) pair and runs the
+//! deleter on the handle when reset or destroyed.
+template<typename T>
+class RefBase
+{
+public:
+						~RefBase	(void);
+
+	inline const T&		get			(void) const throw() { return m_data.object;	}
+	inline const T&		operator*	(void) const throw() { return get();			}
+	inline operator		bool		(void) const throw() { return !!get();			}
+
+protected:
+						RefBase		(RefData<T> data) : m_data(data)	{}
+
+	void				reset		(void);				//!< Release previous object, set to null.
+	RefData<T>			disown		(void) throw();		//!< Disown and return object (ownership transferred to caller).
+	void				assign		(RefData<T> data);	//!< Set new pointer, release previous pointer.
+
+private:
+	RefData<T>			m_data;
+};
+
+template<typename T>
+inline RefBase<T>::~RefBase (void)
+{
+	this->reset();
+}
+
+// Destroys the held object (if any) via the stored deleter, then nulls the state.
+template<typename T>
+inline void RefBase<T>::reset (void)
+{
+	if (!!m_data.object)
+		m_data.deleter(m_data.object);
+
+	m_data = RefData<T>();
+}
+
+// Swap-based: leaves *this holding null state and returns the previous state.
+template<typename T>
+inline RefData<T> RefBase<T>::disown (void) throw()
+{
+	RefData<T> tmp;
+	swap(m_data, tmp);
+	return tmp;
+}
+
+// Releases the currently held object before adopting the new state.
+template<typename T>
+inline void RefBase<T>::assign (RefData<T> data)
+{
+	this->reset();
+	m_data = data;
+}
+
+/*--------------------------------------------------------------------*//*!
+ * \brief Movable Vulkan object reference.
+ *
+ * Similar to de::MovePtr.
+ *
+ * NOTE: like pre-C++11 auto_ptr, "copying" a Move (copy ctor, operator=,
+ * or conversion to RefData) transfers ownership and leaves the source null.
+ *//*--------------------------------------------------------------------*/
+template<typename T>
+class Move : public RefBase<T>
+{
+public:
+	template<typename U>
+				Move		(Checked<U> object, Deleter<U> deleter)
+								: RefBase<T>(RefData<T>(object.object, deleter))
+				{}
+
+				Move		(RefData<T> data)
+								: RefBase<T>(data)
+				{}
+				Move		(Move<T>& other)
+								: RefBase<T>(other.RefBase<T>::disown())
+				{}
+				Move		(void)
+								: RefBase<T>(RefData<T>())
+				{}
+
+	// Gives up ownership; caller becomes responsible for destroying the handle.
+	T			disown		(void) { return this->RefBase<T>::disown().object; }
+	Move<T>&	operator=	(Move<T>& other);
+	Move<T>&	operator=	(RefData<T> data);
+
+	operator	RefData<T>	(void) { return this->RefBase<T>::disown(); }
+};
+
+// Self-assignment safe: only transfers when 'other' is a distinct object.
+template<typename T>
+inline Move<T>& Move<T>::operator= (Move<T>& other)
+{
+	if (this != &other)
+		this->assign(other.RefBase<T>::disown());
+
+	return *this;
+}
+
+template<typename T>
+inline Move<T>& Move<T>::operator= (RefData<T> data)
+{
+	this->assign(data);
+	return *this;
+}
+
+/*--------------------------------------------------------------------*//*!
+ * \brief Unique Vulkan object reference.
+ *
+ * Similar to de::UniquePtr.
+ *
+ * Non-copyable and non-assignable (both declared private, not defined):
+ * ownership is fixed for the holder's lifetime.
+ *//*--------------------------------------------------------------------*/
+template<typename T>
+class Unique : public RefBase<T>
+{
+public:
+	template<typename U>
+				Unique		(Checked<U> object, Deleter<U> deleter)
+								: RefBase<T>(RefData<T>(object.object, deleter))
+				{}
+
+				Unique		(RefData<T> data)
+								: RefBase<T>(data)
+				{}
+
+private:
+				Unique		(const Unique<T>&);
+	Unique<T>&	operator=	(const Unique<T>&);
+};
+
+} // refdetails
+
+using refdetails::Move;
+using refdetails::Unique;
+using refdetails::Deleter;
+using refdetails::check;
+using refdetails::notNull;
+using refdetails::allowNull;
+
+} // vk
+
+#endif // _VKREF_HPP
--- /dev/null
+/*-------------------------------------------------------------------------
+ * Vulkan CTS Framework
+ * --------------------
+ *
+ * Copyright (c) 2015 Google Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and/or associated documentation files (the
+ * "Materials"), to deal in the Materials without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Materials, and to
+ * permit persons to whom the Materials are furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice(s) and this permission notice shall be
+ * included in all copies or substantial portions of the Materials.
+ *
+ * The Materials are Confidential Information as defined by the
+ * Khronos Membership Agreement until designated non-confidential by
+ * Khronos, at which point this condition clause shall be removed.
+ *
+ * THE MATERIALS ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * MATERIALS OR THE USE OR OTHER DEALINGS IN THE MATERIALS.
+ *
+ *//*!
+ * \file
+ * \brief Vulkan object reference holder utilities.
+ *//*--------------------------------------------------------------------*/
+
+#include "vkRefUtil.hpp"
+
+namespace vk
+{
+
+#include "vkRefUtilImpl.inl"
+
+//! Creates a single graphics pipeline (count 1 through the plural
+//! vkCreateGraphicsPipelines entry point) and wraps it in a Move<> that
+//! destroys it on the given device.
+Move<VkPipeline> createGraphicsPipeline (const DeviceInterface& vk, VkDevice device, VkPipelineCache pipelineCache, const VkGraphicsPipelineCreateInfo* pCreateInfo)
+{
+	VkPipeline object = 0;
+	VK_CHECK(vk.createGraphicsPipelines(device, pipelineCache, 1u, pCreateInfo, &object));
+	return Move<VkPipeline>(check<VkPipeline>(object), Deleter<VkPipeline>(vk, device));
+}
+
+//! Creates a single compute pipeline (count 1 through the plural
+//! vkCreateComputePipelines entry point) and wraps it in a Move<> that
+//! destroys it on the given device.
+Move<VkPipeline> createComputePipeline (const DeviceInterface& vk, VkDevice device, VkPipelineCache pipelineCache, const VkComputePipelineCreateInfo* pCreateInfo)
+{
+	VkPipeline object = 0;
+	VK_CHECK(vk.createComputePipelines(device, pipelineCache, 1u, pCreateInfo, &object));
+	return Move<VkPipeline>(check<VkPipeline>(object), Deleter<VkPipeline>(vk, device));
+}
+
+//! Allocates exactly one descriptor set from 'descriptorPool' and wraps it in
+//! a Move<> whose deleter frees it back into the same pool. Fails the test if
+//! the driver reports a created count other than 1.
+Move<VkDescriptorSet> allocDescriptorSet (const DeviceInterface& vk, VkDevice device, VkDescriptorPool descriptorPool, VkDescriptorSetUsage setUsage, VkDescriptorSetLayout layout)
+{
+	VkDescriptorSet	descriptorSet	= 0u;
+	deUint32		numCreated		= 0;
+
+	VK_CHECK(vk.allocDescriptorSets(device, descriptorPool, setUsage, 1, &layout, &descriptorSet, &numCreated));
+	if (numCreated != 1u)
+		throw tcu::TestError("failed to allocate descriptor sets");
+
+	return Move<VkDescriptorSet>(check<VkDescriptorSet>(descriptorSet), Deleter<VkDescriptorSet>(vk, device, descriptorPool));
+}
+
+} // vk
--- /dev/null
+#ifndef _VKREFUTIL_HPP
+#define _VKREFUTIL_HPP
+/*-------------------------------------------------------------------------
+ * Vulkan CTS Framework
+ * --------------------
+ *
+ * Copyright (c) 2015 Google Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and/or associated documentation files (the
+ * "Materials"), to deal in the Materials without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Materials, and to
+ * permit persons to whom the Materials are furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice(s) and this permission notice shall be
+ * included in all copies or substantial portions of the Materials.
+ *
+ * The Materials are Confidential Information as defined by the
+ * Khronos Membership Agreement until designated non-confidential by
+ * Khronos, at which point this condition clause shall be removed.
+ *
+ * THE MATERIALS ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * MATERIALS OR THE USE OR OTHER DEALINGS IN THE MATERIALS.
+ *
+ *//*!
+ * \file
+ * \brief Vulkan object reference holder utilities.
+ *//*--------------------------------------------------------------------*/
+
+#include "vkDefs.hpp"
+#include "vkRef.hpp"
+
+namespace vk
+{
+
+#include "vkRefUtil.inl"
+
+Move<VkPipeline> createGraphicsPipeline (const DeviceInterface& vk, VkDevice device, VkPipelineCache pipelineCache, const VkGraphicsPipelineCreateInfo* pCreateInfo);
+Move<VkPipeline> createComputePipeline (const DeviceInterface& vk, VkDevice device, VkPipelineCache pipelineCache, const VkComputePipelineCreateInfo* pCreateInfo);
+Move<VkDescriptorSet> allocDescriptorSet (const DeviceInterface& vk, VkDevice device, VkDescriptorPool descriptorPool, VkDescriptorSetUsage setUsage, VkDescriptorSetLayout layout);
+
+} // vk
+
+#endif // _VKREFUTIL_HPP
--- /dev/null
+/* WARNING: This is auto-generated file. Do not modify, since changes will
+ * be lost! Modify the generating script instead.
+ */
+Move<VkInstance> createInstance (const PlatformInterface& vk, const VkInstanceCreateInfo* pCreateInfo);
+Move<VkDevice> createDevice (const InstanceInterface& vk, VkPhysicalDevice physicalDevice, const VkDeviceCreateInfo* pCreateInfo);
+Move<VkDeviceMemory> allocMemory (const DeviceInterface& vk, VkDevice device, const VkMemoryAllocInfo* pAllocInfo);
+Move<VkFence> createFence (const DeviceInterface& vk, VkDevice device, const VkFenceCreateInfo* pCreateInfo);
+Move<VkSemaphore> createSemaphore (const DeviceInterface& vk, VkDevice device, const VkSemaphoreCreateInfo* pCreateInfo);
+Move<VkEvent> createEvent (const DeviceInterface& vk, VkDevice device, const VkEventCreateInfo* pCreateInfo);
+Move<VkQueryPool> createQueryPool (const DeviceInterface& vk, VkDevice device, const VkQueryPoolCreateInfo* pCreateInfo);
+Move<VkBuffer> createBuffer (const DeviceInterface& vk, VkDevice device, const VkBufferCreateInfo* pCreateInfo);
+Move<VkBufferView> createBufferView (const DeviceInterface& vk, VkDevice device, const VkBufferViewCreateInfo* pCreateInfo);
+Move<VkImage> createImage (const DeviceInterface& vk, VkDevice device, const VkImageCreateInfo* pCreateInfo);
+Move<VkImageView> createImageView (const DeviceInterface& vk, VkDevice device, const VkImageViewCreateInfo* pCreateInfo);
+Move<VkAttachmentView> createAttachmentView (const DeviceInterface& vk, VkDevice device, const VkAttachmentViewCreateInfo* pCreateInfo);
+Move<VkShaderModule> createShaderModule (const DeviceInterface& vk, VkDevice device, const VkShaderModuleCreateInfo* pCreateInfo);
+Move<VkShader> createShader (const DeviceInterface& vk, VkDevice device, const VkShaderCreateInfo* pCreateInfo);
+Move<VkPipelineCache> createPipelineCache (const DeviceInterface& vk, VkDevice device, const VkPipelineCacheCreateInfo* pCreateInfo);
+Move<VkPipelineLayout> createPipelineLayout (const DeviceInterface& vk, VkDevice device, const VkPipelineLayoutCreateInfo* pCreateInfo);
+Move<VkSampler> createSampler (const DeviceInterface& vk, VkDevice device, const VkSamplerCreateInfo* pCreateInfo);
+Move<VkDescriptorSetLayout> createDescriptorSetLayout (const DeviceInterface& vk, VkDevice device, const VkDescriptorSetLayoutCreateInfo* pCreateInfo);
+Move<VkDescriptorPool> createDescriptorPool (const DeviceInterface& vk, VkDevice device, VkDescriptorPoolUsage poolUsage, deUint32 maxSets, const VkDescriptorPoolCreateInfo* pCreateInfo);
+Move<VkDynamicViewportState> createDynamicViewportState (const DeviceInterface& vk, VkDevice device, const VkDynamicViewportStateCreateInfo* pCreateInfo);
+Move<VkDynamicRasterState> createDynamicRasterState (const DeviceInterface& vk, VkDevice device, const VkDynamicRasterStateCreateInfo* pCreateInfo);
+Move<VkDynamicColorBlendState> createDynamicColorBlendState (const DeviceInterface& vk, VkDevice device, const VkDynamicColorBlendStateCreateInfo* pCreateInfo);
+Move<VkDynamicDepthStencilState> createDynamicDepthStencilState (const DeviceInterface& vk, VkDevice device, const VkDynamicDepthStencilStateCreateInfo* pCreateInfo);
+Move<VkFramebuffer> createFramebuffer (const DeviceInterface& vk, VkDevice device, const VkFramebufferCreateInfo* pCreateInfo);
+Move<VkRenderPass> createRenderPass (const DeviceInterface& vk, VkDevice device, const VkRenderPassCreateInfo* pCreateInfo);
+Move<VkCmdPool> createCommandPool (const DeviceInterface& vk, VkDevice device, const VkCmdPoolCreateInfo* pCreateInfo);
+Move<VkCmdBuffer> createCommandBuffer (const DeviceInterface& vk, VkDevice device, const VkCmdBufferCreateInfo* pCreateInfo);
--- /dev/null
+/* WARNING: This is auto-generated file. Do not modify, since changes will
+ * be lost! Modify the generating script instead.
+ */
+namespace refdetails
+{
+
+template<>
+void Deleter<VkDeviceMemory>::operator() (VkDeviceMemory obj) const
+{
+ DE_TEST_ASSERT(m_deviceIface->freeMemory(m_device, obj) == VK_SUCCESS);
+}
+
+template<>
+void Deleter<VkFence>::operator() (VkFence obj) const
+{
+ DE_TEST_ASSERT(m_deviceIface->destroyFence(m_device, obj) == VK_SUCCESS);
+}
+
+template<>
+void Deleter<VkSemaphore>::operator() (VkSemaphore obj) const
+{
+ DE_TEST_ASSERT(m_deviceIface->destroySemaphore(m_device, obj) == VK_SUCCESS);
+}
+
+template<>
+void Deleter<VkEvent>::operator() (VkEvent obj) const
+{
+ DE_TEST_ASSERT(m_deviceIface->destroyEvent(m_device, obj) == VK_SUCCESS);
+}
+
+template<>
+void Deleter<VkQueryPool>::operator() (VkQueryPool obj) const
+{
+ DE_TEST_ASSERT(m_deviceIface->destroyQueryPool(m_device, obj) == VK_SUCCESS);
+}
+
+template<>
+void Deleter<VkBuffer>::operator() (VkBuffer obj) const
+{
+ DE_TEST_ASSERT(m_deviceIface->destroyBuffer(m_device, obj) == VK_SUCCESS);
+}
+
+template<>
+void Deleter<VkBufferView>::operator() (VkBufferView obj) const
+{
+ DE_TEST_ASSERT(m_deviceIface->destroyBufferView(m_device, obj) == VK_SUCCESS);
+}
+
+template<>
+void Deleter<VkImage>::operator() (VkImage obj) const
+{
+ DE_TEST_ASSERT(m_deviceIface->destroyImage(m_device, obj) == VK_SUCCESS);
+}
+
+template<>
+void Deleter<VkImageView>::operator() (VkImageView obj) const
+{
+ DE_TEST_ASSERT(m_deviceIface->destroyImageView(m_device, obj) == VK_SUCCESS);
+}
+
+template<>
+void Deleter<VkAttachmentView>::operator() (VkAttachmentView obj) const
+{
+ DE_TEST_ASSERT(m_deviceIface->destroyAttachmentView(m_device, obj) == VK_SUCCESS);
+}
+
+template<>
+void Deleter<VkShaderModule>::operator() (VkShaderModule obj) const
+{
+ DE_TEST_ASSERT(m_deviceIface->destroyShaderModule(m_device, obj) == VK_SUCCESS);
+}
+
+template<>
+void Deleter<VkShader>::operator() (VkShader obj) const
+{
+ DE_TEST_ASSERT(m_deviceIface->destroyShader(m_device, obj) == VK_SUCCESS);
+}
+
+template<>
+void Deleter<VkPipelineCache>::operator() (VkPipelineCache obj) const
+{
+ DE_TEST_ASSERT(m_deviceIface->destroyPipelineCache(m_device, obj) == VK_SUCCESS);
+}
+
+template<>
+void Deleter<VkPipeline>::operator() (VkPipeline obj) const
+{
+ DE_TEST_ASSERT(m_deviceIface->destroyPipeline(m_device, obj) == VK_SUCCESS);
+}
+
+template<>
+void Deleter<VkPipelineLayout>::operator() (VkPipelineLayout obj) const
+{
+ DE_TEST_ASSERT(m_deviceIface->destroyPipelineLayout(m_device, obj) == VK_SUCCESS);
+}
+
+template<>
+void Deleter<VkSampler>::operator() (VkSampler obj) const
+{
+ DE_TEST_ASSERT(m_deviceIface->destroySampler(m_device, obj) == VK_SUCCESS);
+}
+
+template<>
+void Deleter<VkDescriptorSetLayout>::operator() (VkDescriptorSetLayout obj) const
+{
+ DE_TEST_ASSERT(m_deviceIface->destroyDescriptorSetLayout(m_device, obj) == VK_SUCCESS);
+}
+
+template<>
+void Deleter<VkDescriptorPool>::operator() (VkDescriptorPool obj) const
+{
+ DE_TEST_ASSERT(m_deviceIface->destroyDescriptorPool(m_device, obj) == VK_SUCCESS);
+}
+
+template<>
+void Deleter<VkDynamicViewportState>::operator() (VkDynamicViewportState obj) const
+{
+ DE_TEST_ASSERT(m_deviceIface->destroyDynamicViewportState(m_device, obj) == VK_SUCCESS);
+}
+
+template<>
+void Deleter<VkDynamicRasterState>::operator() (VkDynamicRasterState obj) const
+{
+ DE_TEST_ASSERT(m_deviceIface->destroyDynamicRasterState(m_device, obj) == VK_SUCCESS);
+}
+
+template<>
+void Deleter<VkDynamicColorBlendState>::operator() (VkDynamicColorBlendState obj) const
+{
+ DE_TEST_ASSERT(m_deviceIface->destroyDynamicColorBlendState(m_device, obj) == VK_SUCCESS);
+}
+
+template<>
+void Deleter<VkDynamicDepthStencilState>::operator() (VkDynamicDepthStencilState obj) const
+{
+ DE_TEST_ASSERT(m_deviceIface->destroyDynamicDepthStencilState(m_device, obj) == VK_SUCCESS);
+}
+
+template<>
+void Deleter<VkFramebuffer>::operator() (VkFramebuffer obj) const
+{
+ DE_TEST_ASSERT(m_deviceIface->destroyFramebuffer(m_device, obj) == VK_SUCCESS);
+}
+
+template<>
+void Deleter<VkRenderPass>::operator() (VkRenderPass obj) const
+{
+ DE_TEST_ASSERT(m_deviceIface->destroyRenderPass(m_device, obj) == VK_SUCCESS);
+}
+
+template<>
+void Deleter<VkCmdPool>::operator() (VkCmdPool obj) const
+{
+ DE_TEST_ASSERT(m_deviceIface->destroyCommandPool(m_device, obj) == VK_SUCCESS);
+}
+
+template<>
+void Deleter<VkCmdBuffer>::operator() (VkCmdBuffer obj) const
+{
+ DE_TEST_ASSERT(m_deviceIface->destroyCommandBuffer(m_device, obj) == VK_SUCCESS);
+}
+
+} // refdetails
+
+Move<VkInstance> createInstance (const PlatformInterface& vk, const VkInstanceCreateInfo* pCreateInfo)
+{
+ VkInstance object = 0;
+ VK_CHECK(vk.createInstance(pCreateInfo, &object));
+ return Move<VkInstance>(check<VkInstance>(object), Deleter<VkInstance>(vk, object));
+}
+
+Move<VkDevice> createDevice (const InstanceInterface& vk, VkPhysicalDevice physicalDevice, const VkDeviceCreateInfo* pCreateInfo)
+{
+ VkDevice object = 0;
+ VK_CHECK(vk.createDevice(physicalDevice, pCreateInfo, &object));
+ return Move<VkDevice>(check<VkDevice>(object), Deleter<VkDevice>(vk, object));
+}
+
+Move<VkDeviceMemory> allocMemory (const DeviceInterface& vk, VkDevice device, const VkMemoryAllocInfo* pAllocInfo)
+{
+ VkDeviceMemory object = 0;
+ VK_CHECK(vk.allocMemory(device, pAllocInfo, &object));
+ return Move<VkDeviceMemory>(check<VkDeviceMemory>(object), Deleter<VkDeviceMemory>(vk, device));
+}
+
+Move<VkFence> createFence (const DeviceInterface& vk, VkDevice device, const VkFenceCreateInfo* pCreateInfo)
+{
+ VkFence object = 0;
+ VK_CHECK(vk.createFence(device, pCreateInfo, &object));
+ return Move<VkFence>(check<VkFence>(object), Deleter<VkFence>(vk, device));
+}
+
+Move<VkSemaphore> createSemaphore (const DeviceInterface& vk, VkDevice device, const VkSemaphoreCreateInfo* pCreateInfo)
+{
+ VkSemaphore object = 0;
+ VK_CHECK(vk.createSemaphore(device, pCreateInfo, &object));
+ return Move<VkSemaphore>(check<VkSemaphore>(object), Deleter<VkSemaphore>(vk, device));
+}
+
+Move<VkEvent> createEvent (const DeviceInterface& vk, VkDevice device, const VkEventCreateInfo* pCreateInfo)
+{
+ VkEvent object = 0;
+ VK_CHECK(vk.createEvent(device, pCreateInfo, &object));
+ return Move<VkEvent>(check<VkEvent>(object), Deleter<VkEvent>(vk, device));
+}
+
+Move<VkQueryPool> createQueryPool (const DeviceInterface& vk, VkDevice device, const VkQueryPoolCreateInfo* pCreateInfo)
+{
+ VkQueryPool object = 0;
+ VK_CHECK(vk.createQueryPool(device, pCreateInfo, &object));
+ return Move<VkQueryPool>(check<VkQueryPool>(object), Deleter<VkQueryPool>(vk, device));
+}
+
+Move<VkBuffer> createBuffer (const DeviceInterface& vk, VkDevice device, const VkBufferCreateInfo* pCreateInfo)
+{
+ VkBuffer object = 0;
+ VK_CHECK(vk.createBuffer(device, pCreateInfo, &object));
+ return Move<VkBuffer>(check<VkBuffer>(object), Deleter<VkBuffer>(vk, device));
+}
+
+Move<VkBufferView> createBufferView (const DeviceInterface& vk, VkDevice device, const VkBufferViewCreateInfo* pCreateInfo)
+{
+ VkBufferView object = 0;
+ VK_CHECK(vk.createBufferView(device, pCreateInfo, &object));
+ return Move<VkBufferView>(check<VkBufferView>(object), Deleter<VkBufferView>(vk, device));
+}
+
+Move<VkImage> createImage (const DeviceInterface& vk, VkDevice device, const VkImageCreateInfo* pCreateInfo)
+{
+ VkImage object = 0;
+ VK_CHECK(vk.createImage(device, pCreateInfo, &object));
+ return Move<VkImage>(check<VkImage>(object), Deleter<VkImage>(vk, device));
+}
+
+Move<VkImageView> createImageView (const DeviceInterface& vk, VkDevice device, const VkImageViewCreateInfo* pCreateInfo)
+{
+ VkImageView object = 0;
+ VK_CHECK(vk.createImageView(device, pCreateInfo, &object));
+ return Move<VkImageView>(check<VkImageView>(object), Deleter<VkImageView>(vk, device));
+}
+
+Move<VkAttachmentView> createAttachmentView (const DeviceInterface& vk, VkDevice device, const VkAttachmentViewCreateInfo* pCreateInfo)
+{
+ VkAttachmentView object = 0;
+ VK_CHECK(vk.createAttachmentView(device, pCreateInfo, &object));
+ return Move<VkAttachmentView>(check<VkAttachmentView>(object), Deleter<VkAttachmentView>(vk, device));
+}
+
+Move<VkShaderModule> createShaderModule (const DeviceInterface& vk, VkDevice device, const VkShaderModuleCreateInfo* pCreateInfo)
+{
+ VkShaderModule object = 0;
+ VK_CHECK(vk.createShaderModule(device, pCreateInfo, &object));
+ return Move<VkShaderModule>(check<VkShaderModule>(object), Deleter<VkShaderModule>(vk, device));
+}
+
+Move<VkShader> createShader (const DeviceInterface& vk, VkDevice device, const VkShaderCreateInfo* pCreateInfo)
+{
+ VkShader object = 0;
+ VK_CHECK(vk.createShader(device, pCreateInfo, &object));
+ return Move<VkShader>(check<VkShader>(object), Deleter<VkShader>(vk, device));
+}
+
+Move<VkPipelineCache> createPipelineCache (const DeviceInterface& vk, VkDevice device, const VkPipelineCacheCreateInfo* pCreateInfo)
+{
+ VkPipelineCache object = 0;
+ VK_CHECK(vk.createPipelineCache(device, pCreateInfo, &object));
+ return Move<VkPipelineCache>(check<VkPipelineCache>(object), Deleter<VkPipelineCache>(vk, device));
+}
+
+Move<VkPipelineLayout> createPipelineLayout (const DeviceInterface& vk, VkDevice device, const VkPipelineLayoutCreateInfo* pCreateInfo)
+{
+ VkPipelineLayout object = 0;
+ VK_CHECK(vk.createPipelineLayout(device, pCreateInfo, &object));
+ return Move<VkPipelineLayout>(check<VkPipelineLayout>(object), Deleter<VkPipelineLayout>(vk, device));
+}
+
+Move<VkSampler> createSampler (const DeviceInterface& vk, VkDevice device, const VkSamplerCreateInfo* pCreateInfo)
+{
+ VkSampler object = 0;
+ VK_CHECK(vk.createSampler(device, pCreateInfo, &object));
+ return Move<VkSampler>(check<VkSampler>(object), Deleter<VkSampler>(vk, device));
+}
+
+Move<VkDescriptorSetLayout> createDescriptorSetLayout (const DeviceInterface& vk, VkDevice device, const VkDescriptorSetLayoutCreateInfo* pCreateInfo)
+{
+ VkDescriptorSetLayout object = 0;
+ VK_CHECK(vk.createDescriptorSetLayout(device, pCreateInfo, &object));
+ return Move<VkDescriptorSetLayout>(check<VkDescriptorSetLayout>(object), Deleter<VkDescriptorSetLayout>(vk, device));
+}
+
+Move<VkDescriptorPool> createDescriptorPool (const DeviceInterface& vk, VkDevice device, VkDescriptorPoolUsage poolUsage, deUint32 maxSets, const VkDescriptorPoolCreateInfo* pCreateInfo)
+{
+ VkDescriptorPool object = 0;
+ VK_CHECK(vk.createDescriptorPool(device, poolUsage, maxSets, pCreateInfo, &object));
+ return Move<VkDescriptorPool>(check<VkDescriptorPool>(object), Deleter<VkDescriptorPool>(vk, device));
+}
+
+Move<VkDynamicViewportState> createDynamicViewportState (const DeviceInterface& vk, VkDevice device, const VkDynamicViewportStateCreateInfo* pCreateInfo)
+{
+ VkDynamicViewportState object = 0;
+ VK_CHECK(vk.createDynamicViewportState(device, pCreateInfo, &object));
+ return Move<VkDynamicViewportState>(check<VkDynamicViewportState>(object), Deleter<VkDynamicViewportState>(vk, device));
+}
+
+Move<VkDynamicRasterState> createDynamicRasterState (const DeviceInterface& vk, VkDevice device, const VkDynamicRasterStateCreateInfo* pCreateInfo)
+{
+ VkDynamicRasterState object = 0;
+ VK_CHECK(vk.createDynamicRasterState(device, pCreateInfo, &object));
+ return Move<VkDynamicRasterState>(check<VkDynamicRasterState>(object), Deleter<VkDynamicRasterState>(vk, device));
+}
+
+Move<VkDynamicColorBlendState> createDynamicColorBlendState (const DeviceInterface& vk, VkDevice device, const VkDynamicColorBlendStateCreateInfo* pCreateInfo)
+{
+ VkDynamicColorBlendState object = 0;
+ VK_CHECK(vk.createDynamicColorBlendState(device, pCreateInfo, &object));
+ return Move<VkDynamicColorBlendState>(check<VkDynamicColorBlendState>(object), Deleter<VkDynamicColorBlendState>(vk, device));
+}
+
+Move<VkDynamicDepthStencilState> createDynamicDepthStencilState (const DeviceInterface& vk, VkDevice device, const VkDynamicDepthStencilStateCreateInfo* pCreateInfo)
+{
+ VkDynamicDepthStencilState object = 0;
+ VK_CHECK(vk.createDynamicDepthStencilState(device, pCreateInfo, &object));
+ return Move<VkDynamicDepthStencilState>(check<VkDynamicDepthStencilState>(object), Deleter<VkDynamicDepthStencilState>(vk, device));
+}
+
+Move<VkFramebuffer> createFramebuffer (const DeviceInterface& vk, VkDevice device, const VkFramebufferCreateInfo* pCreateInfo)
+{
+ VkFramebuffer object = 0;
+ VK_CHECK(vk.createFramebuffer(device, pCreateInfo, &object));
+ return Move<VkFramebuffer>(check<VkFramebuffer>(object), Deleter<VkFramebuffer>(vk, device));
+}
+
+Move<VkRenderPass> createRenderPass (const DeviceInterface& vk, VkDevice device, const VkRenderPassCreateInfo* pCreateInfo)
+{
+ VkRenderPass object = 0;
+ VK_CHECK(vk.createRenderPass(device, pCreateInfo, &object));
+ return Move<VkRenderPass>(check<VkRenderPass>(object), Deleter<VkRenderPass>(vk, device));
+}
+
+Move<VkCmdPool> createCommandPool (const DeviceInterface& vk, VkDevice device, const VkCmdPoolCreateInfo* pCreateInfo)
+{
+ VkCmdPool object = 0;
+ VK_CHECK(vk.createCommandPool(device, pCreateInfo, &object));
+ return Move<VkCmdPool>(check<VkCmdPool>(object), Deleter<VkCmdPool>(vk, device));
+}
+
+Move<VkCmdBuffer> createCommandBuffer (const DeviceInterface& vk, VkDevice device, const VkCmdBufferCreateInfo* pCreateInfo)
+{
+ VkCmdBuffer object = 0;
+ VK_CHECK(vk.createCommandBuffer(device, pCreateInfo, &object));
+ return Move<VkCmdBuffer>(check<VkCmdBuffer>(object), Deleter<VkCmdBuffer>(vk, device));
+}
+
--- /dev/null
+/*-------------------------------------------------------------------------
+ * Vulkan CTS Framework
+ * --------------------
+ *
+ * Copyright (c) 2015 Google Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and/or associated documentation files (the
+ * "Materials"), to deal in the Materials without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Materials, and to
+ * permit persons to whom the Materials are furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice(s) and this permission notice shall be
+ * included in all copies or substantial portions of the Materials.
+ *
+ * The Materials are Confidential Information as defined by the
+ * Khronos Membership Agreement until designated non-confidential by
+ * Khronos, at which point this condition clause shall be removed.
+ *
+ * THE MATERIALS ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * MATERIALS OR THE USE OR OTHER DEALINGS IN THE MATERIALS.
+ *
+ *//*!
+ * \file
+ * \brief SPIR-V assembly to binary.
+ *//*--------------------------------------------------------------------*/
+
+#include "vkSpirVAsm.hpp"
+#include "vkSpirVProgram.hpp"
+#include "deArrayUtil.hpp"
+#include "deMemory.h"
+#include "deClock.h"
+#include "qpDebugOut.h"
+
+#if defined(DEQP_HAVE_SPIRV_TOOLS)
+# include "deSingleton.h"
+
+# include "libspirv/libspirv.h"
+#endif
+
+namespace vk
+{
+
+using std::string;
+using std::vector;
+
+#if defined(DEQP_HAVE_SPIRV_TOOLS)
+
+namespace
+{
+static volatile deSingletonState s_spirvInitState = DE_SINGLETON_STATE_NOT_INITIALIZED;
+static spv_opcode_table s_spirvOpcodeTable;
+static spv_operand_table s_spirvOperandTable;
+static spv_ext_inst_table s_spirvExtInstTable;
+
+void initSpirVTools (void*)
+{
+ if (spvOpcodeTableGet(&s_spirvOpcodeTable) != SPV_SUCCESS)
+ TCU_THROW(InternalError, "Cannot get opcode table for assembly");
+
+ if (spvOperandTableGet(&s_spirvOperandTable) != SPV_SUCCESS)
+ TCU_THROW(InternalError, "Cannot get operand table for assembly");
+
+ if (spvExtInstTableGet(&s_spirvExtInstTable) != SPV_SUCCESS)
+ TCU_THROW(InternalError, "Cannot get external instruction table for assembly");
+}
+
+void prepareSpirvTools (void)
+{
+ deInitSingleton(&s_spirvInitState, initSpirVTools, DE_NULL);
+}
+
+} // anonymous
+
+void assembleSpirV (const SpirVAsmSource* program, std::vector<deUint8>* dst, SpirVProgramInfo* buildInfo)
+{
+ prepareSpirvTools();
+
+ const std::string& spvSource = program->program.str();
+ spv_binary binary = DE_NULL;
+ spv_diagnostic diagnostic = DE_NULL;
+ const deUint64 compileStartTime = deGetMicroseconds();
+ const spv_result_t compileOk = spvTextToBinary(spvSource.c_str(), spvSource.size(), s_spirvOpcodeTable, s_spirvOperandTable, s_spirvExtInstTable, &binary, &diagnostic);
+
+ {
+ buildInfo->source = program;
+ buildInfo->infoLog = diagnostic? diagnostic->error : ""; // \todo [2015-07-13 pyry] Include debug log?
+ buildInfo->compileTimeUs = deGetMicroseconds() - compileStartTime;
+ buildInfo->compileOk = (compileOk == SPV_SUCCESS);
+ }
+
+ if (compileOk != SPV_SUCCESS)
+ TCU_FAIL("Failed to compile shader");
+
+ dst->resize((int)binary->wordCount * sizeof(deUint32));
+#if (DE_ENDIANNESS == DE_LITTLE_ENDIAN)
+ deMemcpy(&(*dst)[0], &binary->code[0], dst->size());
+#else
+# error "Big-endian not supported"
+#endif
+ spvBinaryDestroy(binary);
+ spvDiagnosticDestroy(diagnostic);
+ return;
+}
+
+#else // defined(DEQP_HAVE_SPIRV_TOOLS)
+
+void assembleSpirV (const SpirVAsmSource*, std::vector<deUint8>*, SpirVProgramInfo*)
+{
+ TCU_THROW(NotSupportedError, "SPIR-V assembly not supported (DEQP_HAVE_SPIRV_TOOLS not defined)");
+}
+
+#endif
+
+} // vk
--- /dev/null
+#ifndef _VKSPIRVASM_HPP
+#define _VKSPIRVASM_HPP
+/*-------------------------------------------------------------------------
+ * Vulkan CTS Framework
+ * --------------------
+ *
+ * Copyright (c) 2015 Google Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and/or associated documentation files (the
+ * "Materials"), to deal in the Materials without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Materials, and to
+ * permit persons to whom the Materials are furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice(s) and this permission notice shall be
+ * included in all copies or substantial portions of the Materials.
+ *
+ * The Materials are Confidential Information as defined by the
+ * Khronos Membership Agreement until designated non-confidential by
+ * Khronos, at which point this condition clause shall be removed.
+ *
+ * THE MATERIALS ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * MATERIALS OR THE USE OR OTHER DEALINGS IN THE MATERIALS.
+ *
+ *//*!
+ * \file
+ * \brief SPIR-V assembly to binary.
+ *//*--------------------------------------------------------------------*/
+
+#include "vkDefs.hpp"
+#include "vkPrograms.hpp"
+
+namespace vk
+{
+
+//! Assemble SPIR-V program. Will fail with NotSupportedError if compiler is not available.
+void assembleSpirV (const SpirVAsmSource* program, std::vector<deUint8>* dst, SpirVProgramInfo* buildInfo);
+
+} // vk
+
+#endif // _VKSPIRVASM_HPP
--- /dev/null
+/*-------------------------------------------------------------------------
+ * Vulkan CTS Framework
+ * --------------------
+ *
+ * Copyright (c) 2015 Google Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and/or associated documentation files (the
+ * "Materials"), to deal in the Materials without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Materials, and to
+ * permit persons to whom the Materials are furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice(s) and this permission notice shall be
+ * included in all copies or substantial portions of the Materials.
+ *
+ * The Materials are Confidential Information as defined by the
+ * Khronos Membership Agreement until designated non-confidential by
+ * Khronos, at which point this condition clause shall be removed.
+ *
+ * THE MATERIALS ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * MATERIALS OR THE USE OR OTHER DEALINGS IN THE MATERIALS.
+ *
+ *//*!
+ * \file
+ * \brief SPIR-V program and binary info.
+ *//*--------------------------------------------------------------------*/
+
+#include "vkSpirVProgram.hpp"
+
+#include "tcuTestLog.hpp"
+
+namespace vk
+{
+
+tcu::TestLog& operator<< (tcu::TestLog& log, const SpirVProgramInfo& shaderInfo)
+{
+ log << tcu::TestLog::ShaderProgram(shaderInfo.compileOk , shaderInfo.infoLog) << tcu::TestLog::EndShaderProgram;
+
+ // Write statistics
+ log << tcu::TestLog::Float( "SpirVAssemblyTime",
+ "SpirV assembly time",
+ "ms", QP_KEY_TAG_TIME, (float)shaderInfo.compileTimeUs / 1000.0f);
+ return log;
+}
+
+tcu::TestLog& operator<< (tcu::TestLog& log, const SpirVAsmSource& source)
+{
+ log << tcu::TestLog::KernelSource(source.program.str());
+
+ return log;
+}
+
+} // vk
--- /dev/null
+#ifndef _VKSPIRVPROGRAM_HPP
+#define _VKSPIRVPROGRAM_HPP
+/*-------------------------------------------------------------------------
+ * Vulkan CTS Framework
+ * --------------------
+ *
+ * Copyright (c) 2015 Google Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and/or associated documentation files (the
+ * "Materials"), to deal in the Materials without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Materials, and to
+ * permit persons to whom the Materials are furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice(s) and this permission notice shall be
+ * included in all copies or substantial portions of the Materials.
+ *
+ * The Materials are Confidential Information as defined by the
+ * Khronos Membership Agreement until designated non-confidential by
+ * Khronos, at which point this condition clause shall be removed.
+ *
+ * THE MATERIALS ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * MATERIALS OR THE USE OR OTHER DEALINGS IN THE MATERIALS.
+ *
+ *//*!
+ * \file
+ * \brief SPIR-V program and binary info.
+ *//*--------------------------------------------------------------------*/
+
+#include <sstream>
+
+#include "vkDefs.hpp"
+#include "tcuTestLog.hpp"
+
+namespace vk
+{
+
+struct SpirVAsmSource
+{
+ SpirVAsmSource& operator<<(const char* val)
+ {
+ program << val;
+ return *this;
+ }
+ std::ostringstream program;
+};
+
+struct SpirVProgramInfo
+{
+ SpirVProgramInfo()
+ : source (DE_NULL)
+ , compileTimeUs (0)
+ , compileOk (false)
+ {
+ }
+
+ const SpirVAsmSource* source;
+ std::string infoLog;
+ deUint64 compileTimeUs;
+ bool compileOk;
+};
+
+tcu::TestLog& operator<< (tcu::TestLog& log, const SpirVProgramInfo& shaderInfo);
+tcu::TestLog& operator<< (tcu::TestLog& log, const SpirVAsmSource& program);
+
+}
+
+#endif // _VKSPIRVPROGRAM_HPP
--- /dev/null
+/*-------------------------------------------------------------------------
+ * Vulkan CTS Framework
+ * --------------------
+ *
+ * Copyright (c) 2015 Google Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and/or associated documentation files (the
+ * "Materials"), to deal in the Materials without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Materials, and to
+ * permit persons to whom the Materials are furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice(s) and this permission notice shall be
+ * included in all copies or substantial portions of the Materials.
+ *
+ * The Materials are Confidential Information as defined by the
+ * Khronos Membership Agreement until designated non-confidential by
+ * Khronos, at which point this condition clause shall be removed.
+ *
+ * THE MATERIALS ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * MATERIALS OR THE USE OR OTHER DEALINGS IN THE MATERIALS.
+ *
+ *//*!
+ * \file
+ * \brief Pretty-printing and logging utilities.
+ *//*--------------------------------------------------------------------*/
+
+#include "vkStrUtil.hpp"
+
+namespace vk
+{
+
+struct CharPtr
+{
+ const char* ptr;
+
+ CharPtr (const char* ptr_) : ptr(ptr_) {}
+};
+
+std::ostream& operator<< (std::ostream& str, const CharPtr& ptr)
+{
+ if (!ptr.ptr)
+ return str << "(null)";
+ else
+ return str << '"' << ptr.ptr << '"';
+}
+
+inline CharPtr getCharPtrStr (const char* ptr)
+{
+ return CharPtr(ptr);
+}
+
+#include "vkStrUtilImpl.inl"
+
+} // vk
--- /dev/null
+#ifndef _VKSTRUTIL_HPP
+#define _VKSTRUTIL_HPP
+/*-------------------------------------------------------------------------
+ * Vulkan CTS Framework
+ * --------------------
+ *
+ * Copyright (c) 2015 Google Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and/or associated documentation files (the
+ * "Materials"), to deal in the Materials without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Materials, and to
+ * permit persons to whom the Materials are furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice(s) and this permission notice shall be
+ * included in all copies or substantial portions of the Materials.
+ *
+ * The Materials are Confidential Information as defined by the
+ * Khronos Membership Agreement until designated non-confidential by
+ * Khronos, at which point this condition clause shall be removed.
+ *
+ * THE MATERIALS ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * MATERIALS OR THE USE OR OTHER DEALINGS IN THE MATERIALS.
+ *
+ *//*!
+ * \file
+ * \brief Pretty-printing and logging utilities.
+ *//*--------------------------------------------------------------------*/
+
+#include "vkDefs.hpp"
+#include "tcuFormatUtil.hpp"
+
+namespace vk
+{
+
+#include "vkStrUtil.inl"
+
+template<typename T>
+const char* getTypeName (void);
+
+template<HandleType Type>
+inline std::ostream& operator<< (std::ostream& s, const Handle<Type>& handle)
+{
+ return s << tcu::toHex(handle.getInternal());
+}
+
+} // vk
+
+#endif // _VKSTRUTIL_HPP
--- /dev/null
+/* WARNING: This is auto-generated file. Do not modify, since changes will
+ * be lost! Modify the generating script instead.
+ */
+const char* getResultName (VkResult value);
+const char* getStructureTypeName (VkStructureType value);
+const char* getSystemAllocTypeName (VkSystemAllocType value);
+const char* getFormatName (VkFormat value);
+const char* getImageTypeName (VkImageType value);
+const char* getImageTilingName (VkImageTiling value);
+const char* getPhysicalDeviceTypeName (VkPhysicalDeviceType value);
+const char* getImageAspectName (VkImageAspect value);
+const char* getQueryTypeName (VkQueryType value);
+const char* getSharingModeName (VkSharingMode value);
+const char* getBufferViewTypeName (VkBufferViewType value);
+const char* getImageViewTypeName (VkImageViewType value);
+const char* getChannelSwizzleName (VkChannelSwizzle value);
+const char* getShaderStageName (VkShaderStage value);
+const char* getVertexInputStepRateName (VkVertexInputStepRate value);
+const char* getPrimitiveTopologyName (VkPrimitiveTopology value);
+const char* getFillModeName (VkFillMode value);
+const char* getCullModeName (VkCullMode value);
+const char* getFrontFaceName (VkFrontFace value);
+const char* getCompareOpName (VkCompareOp value);
+const char* getStencilOpName (VkStencilOp value);
+const char* getLogicOpName (VkLogicOp value);
+const char* getBlendName (VkBlend value);
+const char* getBlendOpName (VkBlendOp value);
+const char* getTexFilterName (VkTexFilter value);
+const char* getTexMipmapModeName (VkTexMipmapMode value);
+const char* getTexAddressName (VkTexAddress value);
+const char* getBorderColorName (VkBorderColor value);
+const char* getDescriptorTypeName (VkDescriptorType value);
+const char* getDescriptorPoolUsageName (VkDescriptorPoolUsage value);
+const char* getDescriptorSetUsageName (VkDescriptorSetUsage value);
+const char* getImageLayoutName (VkImageLayout value);
+const char* getAttachmentLoadOpName (VkAttachmentLoadOp value);
+const char* getAttachmentStoreOpName (VkAttachmentStoreOp value);
+const char* getPipelineBindPointName (VkPipelineBindPoint value);
+const char* getCmdBufferLevelName (VkCmdBufferLevel value);
+const char* getIndexTypeName (VkIndexType value);
+const char* getTimestampTypeName (VkTimestampType value);
+const char* getRenderPassContentsName (VkRenderPassContents value);
+
+inline tcu::Format::Enum<VkResult> getResultStr (VkResult value) { return tcu::Format::Enum<VkResult>(getResultName, value); }
+inline tcu::Format::Enum<VkStructureType> getStructureTypeStr (VkStructureType value) { return tcu::Format::Enum<VkStructureType>(getStructureTypeName, value); }
+inline tcu::Format::Enum<VkSystemAllocType> getSystemAllocTypeStr (VkSystemAllocType value) { return tcu::Format::Enum<VkSystemAllocType>(getSystemAllocTypeName, value); }
+inline tcu::Format::Enum<VkFormat> getFormatStr (VkFormat value) { return tcu::Format::Enum<VkFormat>(getFormatName, value); }
+inline tcu::Format::Enum<VkImageType> getImageTypeStr (VkImageType value) { return tcu::Format::Enum<VkImageType>(getImageTypeName, value); }
+inline tcu::Format::Enum<VkImageTiling> getImageTilingStr (VkImageTiling value) { return tcu::Format::Enum<VkImageTiling>(getImageTilingName, value); }
+inline tcu::Format::Enum<VkPhysicalDeviceType> getPhysicalDeviceTypeStr (VkPhysicalDeviceType value) { return tcu::Format::Enum<VkPhysicalDeviceType>(getPhysicalDeviceTypeName, value); }
+inline tcu::Format::Enum<VkImageAspect> getImageAspectStr (VkImageAspect value) { return tcu::Format::Enum<VkImageAspect>(getImageAspectName, value); }
+inline tcu::Format::Enum<VkQueryType> getQueryTypeStr (VkQueryType value) { return tcu::Format::Enum<VkQueryType>(getQueryTypeName, value); }
+inline tcu::Format::Enum<VkSharingMode> getSharingModeStr (VkSharingMode value) { return tcu::Format::Enum<VkSharingMode>(getSharingModeName, value); }
+inline tcu::Format::Enum<VkBufferViewType> getBufferViewTypeStr (VkBufferViewType value) { return tcu::Format::Enum<VkBufferViewType>(getBufferViewTypeName, value); }
+inline tcu::Format::Enum<VkImageViewType> getImageViewTypeStr (VkImageViewType value) { return tcu::Format::Enum<VkImageViewType>(getImageViewTypeName, value); }
+inline tcu::Format::Enum<VkChannelSwizzle> getChannelSwizzleStr (VkChannelSwizzle value) { return tcu::Format::Enum<VkChannelSwizzle>(getChannelSwizzleName, value); }
+inline tcu::Format::Enum<VkShaderStage> getShaderStageStr (VkShaderStage value) { return tcu::Format::Enum<VkShaderStage>(getShaderStageName, value); }
+inline tcu::Format::Enum<VkVertexInputStepRate> getVertexInputStepRateStr (VkVertexInputStepRate value) { return tcu::Format::Enum<VkVertexInputStepRate>(getVertexInputStepRateName, value); }
+inline tcu::Format::Enum<VkPrimitiveTopology> getPrimitiveTopologyStr (VkPrimitiveTopology value) { return tcu::Format::Enum<VkPrimitiveTopology>(getPrimitiveTopologyName, value); }
+inline tcu::Format::Enum<VkFillMode> getFillModeStr (VkFillMode value) { return tcu::Format::Enum<VkFillMode>(getFillModeName, value); }
+inline tcu::Format::Enum<VkCullMode> getCullModeStr (VkCullMode value) { return tcu::Format::Enum<VkCullMode>(getCullModeName, value); }
+inline tcu::Format::Enum<VkFrontFace> getFrontFaceStr (VkFrontFace value) { return tcu::Format::Enum<VkFrontFace>(getFrontFaceName, value); }
+inline tcu::Format::Enum<VkCompareOp> getCompareOpStr (VkCompareOp value) { return tcu::Format::Enum<VkCompareOp>(getCompareOpName, value); }
+inline tcu::Format::Enum<VkStencilOp> getStencilOpStr (VkStencilOp value) { return tcu::Format::Enum<VkStencilOp>(getStencilOpName, value); }
+inline tcu::Format::Enum<VkLogicOp> getLogicOpStr (VkLogicOp value) { return tcu::Format::Enum<VkLogicOp>(getLogicOpName, value); }
+inline tcu::Format::Enum<VkBlend> getBlendStr (VkBlend value) { return tcu::Format::Enum<VkBlend>(getBlendName, value); }
+inline tcu::Format::Enum<VkBlendOp> getBlendOpStr (VkBlendOp value) { return tcu::Format::Enum<VkBlendOp>(getBlendOpName, value); }
+inline tcu::Format::Enum<VkTexFilter> getTexFilterStr (VkTexFilter value) { return tcu::Format::Enum<VkTexFilter>(getTexFilterName, value); }
+inline tcu::Format::Enum<VkTexMipmapMode> getTexMipmapModeStr (VkTexMipmapMode value) { return tcu::Format::Enum<VkTexMipmapMode>(getTexMipmapModeName, value); }
+inline tcu::Format::Enum<VkTexAddress> getTexAddressStr (VkTexAddress value) { return tcu::Format::Enum<VkTexAddress>(getTexAddressName, value); }
+inline tcu::Format::Enum<VkBorderColor> getBorderColorStr (VkBorderColor value) { return tcu::Format::Enum<VkBorderColor>(getBorderColorName, value); }
+inline tcu::Format::Enum<VkDescriptorType> getDescriptorTypeStr (VkDescriptorType value) { return tcu::Format::Enum<VkDescriptorType>(getDescriptorTypeName, value); }
+inline tcu::Format::Enum<VkDescriptorPoolUsage> getDescriptorPoolUsageStr (VkDescriptorPoolUsage value) { return tcu::Format::Enum<VkDescriptorPoolUsage>(getDescriptorPoolUsageName, value); }
+inline tcu::Format::Enum<VkDescriptorSetUsage> getDescriptorSetUsageStr (VkDescriptorSetUsage value) { return tcu::Format::Enum<VkDescriptorSetUsage>(getDescriptorSetUsageName, value); }
+inline tcu::Format::Enum<VkImageLayout> getImageLayoutStr (VkImageLayout value) { return tcu::Format::Enum<VkImageLayout>(getImageLayoutName, value); }
+inline tcu::Format::Enum<VkAttachmentLoadOp> getAttachmentLoadOpStr (VkAttachmentLoadOp value) { return tcu::Format::Enum<VkAttachmentLoadOp>(getAttachmentLoadOpName, value); }
+inline tcu::Format::Enum<VkAttachmentStoreOp> getAttachmentStoreOpStr (VkAttachmentStoreOp value) { return tcu::Format::Enum<VkAttachmentStoreOp>(getAttachmentStoreOpName, value); }
+inline tcu::Format::Enum<VkPipelineBindPoint> getPipelineBindPointStr (VkPipelineBindPoint value) { return tcu::Format::Enum<VkPipelineBindPoint>(getPipelineBindPointName, value); }
+inline tcu::Format::Enum<VkCmdBufferLevel> getCmdBufferLevelStr (VkCmdBufferLevel value) { return tcu::Format::Enum<VkCmdBufferLevel>(getCmdBufferLevelName, value); }
+inline tcu::Format::Enum<VkIndexType> getIndexTypeStr (VkIndexType value) { return tcu::Format::Enum<VkIndexType>(getIndexTypeName, value); }
+inline tcu::Format::Enum<VkTimestampType> getTimestampTypeStr (VkTimestampType value) { return tcu::Format::Enum<VkTimestampType>(getTimestampTypeName, value); }
+inline tcu::Format::Enum<VkRenderPassContents> getRenderPassContentsStr (VkRenderPassContents value) { return tcu::Format::Enum<VkRenderPassContents>(getRenderPassContentsName, value); }
+
+// std::ostream insertion operators for plain Vulkan enum types; each delegates
+// to the matching get*Str decorator so enums log symbolically by default.
+// NOTE(review): auto-generated code -- change the generating script, not this file.
+inline std::ostream& operator<< (std::ostream& s, VkResult value) { return s << getResultStr(value); }
+inline std::ostream& operator<< (std::ostream& s, VkStructureType value) { return s << getStructureTypeStr(value); }
+inline std::ostream& operator<< (std::ostream& s, VkSystemAllocType value) { return s << getSystemAllocTypeStr(value); }
+inline std::ostream& operator<< (std::ostream& s, VkFormat value) { return s << getFormatStr(value); }
+inline std::ostream& operator<< (std::ostream& s, VkImageType value) { return s << getImageTypeStr(value); }
+inline std::ostream& operator<< (std::ostream& s, VkImageTiling value) { return s << getImageTilingStr(value); }
+inline std::ostream& operator<< (std::ostream& s, VkPhysicalDeviceType value) { return s << getPhysicalDeviceTypeStr(value); }
+inline std::ostream& operator<< (std::ostream& s, VkImageAspect value) { return s << getImageAspectStr(value); }
+inline std::ostream& operator<< (std::ostream& s, VkQueryType value) { return s << getQueryTypeStr(value); }
+inline std::ostream& operator<< (std::ostream& s, VkSharingMode value) { return s << getSharingModeStr(value); }
+inline std::ostream& operator<< (std::ostream& s, VkBufferViewType value) { return s << getBufferViewTypeStr(value); }
+inline std::ostream& operator<< (std::ostream& s, VkImageViewType value) { return s << getImageViewTypeStr(value); }
+inline std::ostream& operator<< (std::ostream& s, VkChannelSwizzle value) { return s << getChannelSwizzleStr(value); }
+inline std::ostream& operator<< (std::ostream& s, VkShaderStage value) { return s << getShaderStageStr(value); }
+inline std::ostream& operator<< (std::ostream& s, VkVertexInputStepRate value) { return s << getVertexInputStepRateStr(value); }
+inline std::ostream& operator<< (std::ostream& s, VkPrimitiveTopology value) { return s << getPrimitiveTopologyStr(value); }
+inline std::ostream& operator<< (std::ostream& s, VkFillMode value) { return s << getFillModeStr(value); }
+inline std::ostream& operator<< (std::ostream& s, VkCullMode value) { return s << getCullModeStr(value); }
+inline std::ostream& operator<< (std::ostream& s, VkFrontFace value) { return s << getFrontFaceStr(value); }
+inline std::ostream& operator<< (std::ostream& s, VkCompareOp value) { return s << getCompareOpStr(value); }
+inline std::ostream& operator<< (std::ostream& s, VkStencilOp value) { return s << getStencilOpStr(value); }
+inline std::ostream& operator<< (std::ostream& s, VkLogicOp value) { return s << getLogicOpStr(value); }
+inline std::ostream& operator<< (std::ostream& s, VkBlend value) { return s << getBlendStr(value); }
+inline std::ostream& operator<< (std::ostream& s, VkBlendOp value) { return s << getBlendOpStr(value); }
+inline std::ostream& operator<< (std::ostream& s, VkTexFilter value) { return s << getTexFilterStr(value); }
+inline std::ostream& operator<< (std::ostream& s, VkTexMipmapMode value) { return s << getTexMipmapModeStr(value); }
+inline std::ostream& operator<< (std::ostream& s, VkTexAddress value) { return s << getTexAddressStr(value); }
+inline std::ostream& operator<< (std::ostream& s, VkBorderColor value) { return s << getBorderColorStr(value); }
+inline std::ostream& operator<< (std::ostream& s, VkDescriptorType value) { return s << getDescriptorTypeStr(value); }
+inline std::ostream& operator<< (std::ostream& s, VkDescriptorPoolUsage value) { return s << getDescriptorPoolUsageStr(value); }
+inline std::ostream& operator<< (std::ostream& s, VkDescriptorSetUsage value) { return s << getDescriptorSetUsageStr(value); }
+inline std::ostream& operator<< (std::ostream& s, VkImageLayout value) { return s << getImageLayoutStr(value); }
+inline std::ostream& operator<< (std::ostream& s, VkAttachmentLoadOp value) { return s << getAttachmentLoadOpStr(value); }
+inline std::ostream& operator<< (std::ostream& s, VkAttachmentStoreOp value) { return s << getAttachmentStoreOpStr(value); }
+inline std::ostream& operator<< (std::ostream& s, VkPipelineBindPoint value) { return s << getPipelineBindPointStr(value); }
+inline std::ostream& operator<< (std::ostream& s, VkCmdBufferLevel value) { return s << getCmdBufferLevelStr(value); }
+inline std::ostream& operator<< (std::ostream& s, VkIndexType value) { return s << getIndexTypeStr(value); }
+inline std::ostream& operator<< (std::ostream& s, VkTimestampType value) { return s << getTimestampTypeStr(value); }
+inline std::ostream& operator<< (std::ostream& s, VkRenderPassContents value) { return s << getRenderPassContentsStr(value); }
+
+// Declarations of bitfield decoders: each renders a VkFlags value as a
+// human-readable combination of its set bits via tcu::Format::Bitfield<32>.
+// Definitions are presumably in the generated implementation file -- not
+// visible in this chunk.
+tcu::Format::Bitfield<32> getFormatFeatureFlagsStr (VkFormatFeatureFlags value);
+tcu::Format::Bitfield<32> getImageUsageFlagsStr (VkImageUsageFlags value);
+tcu::Format::Bitfield<32> getQueueFlagsStr (VkQueueFlags value);
+tcu::Format::Bitfield<32> getMemoryPropertyFlagsStr (VkMemoryPropertyFlags value);
+tcu::Format::Bitfield<32> getMemoryHeapFlagsStr (VkMemoryHeapFlags value);
+tcu::Format::Bitfield<32> getDeviceCreateFlagsStr (VkDeviceCreateFlags value);
+tcu::Format::Bitfield<32> getSparseImageFormatFlagsStr (VkSparseImageFormatFlags value);
+tcu::Format::Bitfield<32> getSparseMemoryBindFlagsStr (VkSparseMemoryBindFlags value);
+tcu::Format::Bitfield<32> getFenceCreateFlagsStr (VkFenceCreateFlags value);
+tcu::Format::Bitfield<32> getQueryPipelineStatisticFlagsStr (VkQueryPipelineStatisticFlags value);
+tcu::Format::Bitfield<32> getQueryResultFlagsStr (VkQueryResultFlags value);
+tcu::Format::Bitfield<32> getBufferUsageFlagsStr (VkBufferUsageFlags value);
+tcu::Format::Bitfield<32> getBufferCreateFlagsStr (VkBufferCreateFlags value);
+tcu::Format::Bitfield<32> getImageCreateFlagsStr (VkImageCreateFlags value);
+tcu::Format::Bitfield<32> getAttachmentViewCreateFlagsStr (VkAttachmentViewCreateFlags value);
+tcu::Format::Bitfield<32> getChannelFlagsStr (VkChannelFlags value);
+tcu::Format::Bitfield<32> getPipelineCreateFlagsStr (VkPipelineCreateFlags value);
+tcu::Format::Bitfield<32> getShaderStageFlagsStr (VkShaderStageFlags value);
+tcu::Format::Bitfield<32> getSubpassDescriptionFlagsStr (VkSubpassDescriptionFlags value);
+tcu::Format::Bitfield<32> getPipelineStageFlagsStr (VkPipelineStageFlags value);
+tcu::Format::Bitfield<32> getMemoryOutputFlagsStr (VkMemoryOutputFlags value);
+tcu::Format::Bitfield<32> getMemoryInputFlagsStr (VkMemoryInputFlags value);
+tcu::Format::Bitfield<32> getCmdPoolCreateFlagsStr (VkCmdPoolCreateFlags value);
+tcu::Format::Bitfield<32> getCmdPoolResetFlagsStr (VkCmdPoolResetFlags value);
+tcu::Format::Bitfield<32> getCmdBufferOptimizeFlagsStr (VkCmdBufferOptimizeFlags value);
+tcu::Format::Bitfield<32> getCmdBufferResetFlagsStr (VkCmdBufferResetFlags value);
+tcu::Format::Bitfield<32> getImageAspectFlagsStr (VkImageAspectFlags value);
+tcu::Format::Bitfield<32> getQueryControlFlagsStr (VkQueryControlFlags value);
+
+// Declarations of std::ostream insertion operators for Vulkan API structures,
+// enabling structured logging of create-info and descriptor structs.
+// Definitions are presumably in the generated implementation file -- not
+// visible in this chunk.
+std::ostream& operator<< (std::ostream& s, const VkApplicationInfo& value);
+std::ostream& operator<< (std::ostream& s, const VkAllocCallbacks& value);
+std::ostream& operator<< (std::ostream& s, const VkInstanceCreateInfo& value);
+std::ostream& operator<< (std::ostream& s, const VkPhysicalDeviceFeatures& value);
+std::ostream& operator<< (std::ostream& s, const VkFormatProperties& value);
+std::ostream& operator<< (std::ostream& s, const VkImageFormatProperties& value);
+std::ostream& operator<< (std::ostream& s, const VkPhysicalDeviceLimits& value);
+std::ostream& operator<< (std::ostream& s, const VkPhysicalDeviceProperties& value);
+std::ostream& operator<< (std::ostream& s, const VkPhysicalDeviceQueueProperties& value);
+std::ostream& operator<< (std::ostream& s, const VkMemoryType& value);
+std::ostream& operator<< (std::ostream& s, const VkMemoryHeap& value);
+std::ostream& operator<< (std::ostream& s, const VkPhysicalDeviceMemoryProperties& value);
+std::ostream& operator<< (std::ostream& s, const VkDeviceQueueCreateInfo& value);
+std::ostream& operator<< (std::ostream& s, const VkDeviceCreateInfo& value);
+std::ostream& operator<< (std::ostream& s, const VkExtensionProperties& value);
+std::ostream& operator<< (std::ostream& s, const VkLayerProperties& value);
+std::ostream& operator<< (std::ostream& s, const VkMemoryAllocInfo& value);
+std::ostream& operator<< (std::ostream& s, const VkMappedMemoryRange& value);
+std::ostream& operator<< (std::ostream& s, const VkMemoryRequirements& value);
+std::ostream& operator<< (std::ostream& s, const VkExtent3D& value);
+std::ostream& operator<< (std::ostream& s, const VkSparseImageFormatProperties& value);
+std::ostream& operator<< (std::ostream& s, const VkSparseImageMemoryRequirements& value);
+std::ostream& operator<< (std::ostream& s, const VkSparseMemoryBindInfo& value);
+std::ostream& operator<< (std::ostream& s, const VkImageSubresource& value);
+std::ostream& operator<< (std::ostream& s, const VkOffset3D& value);
+std::ostream& operator<< (std::ostream& s, const VkSparseImageMemoryBindInfo& value);
+std::ostream& operator<< (std::ostream& s, const VkFenceCreateInfo& value);
+std::ostream& operator<< (std::ostream& s, const VkSemaphoreCreateInfo& value);
+std::ostream& operator<< (std::ostream& s, const VkEventCreateInfo& value);
+std::ostream& operator<< (std::ostream& s, const VkQueryPoolCreateInfo& value);
+std::ostream& operator<< (std::ostream& s, const VkBufferCreateInfo& value);
+std::ostream& operator<< (std::ostream& s, const VkBufferViewCreateInfo& value);
+std::ostream& operator<< (std::ostream& s, const VkImageCreateInfo& value);
+std::ostream& operator<< (std::ostream& s, const VkSubresourceLayout& value);
+std::ostream& operator<< (std::ostream& s, const VkChannelMapping& value);
+std::ostream& operator<< (std::ostream& s, const VkImageSubresourceRange& value);
+std::ostream& operator<< (std::ostream& s, const VkImageViewCreateInfo& value);
+std::ostream& operator<< (std::ostream& s, const VkAttachmentViewCreateInfo& value);
+std::ostream& operator<< (std::ostream& s, const VkShaderModuleCreateInfo& value);
+std::ostream& operator<< (std::ostream& s, const VkShaderCreateInfo& value);
+std::ostream& operator<< (std::ostream& s, const VkPipelineCacheCreateInfo& value);
+std::ostream& operator<< (std::ostream& s, const VkSpecializationMapEntry& value);
+std::ostream& operator<< (std::ostream& s, const VkSpecializationInfo& value);
+std::ostream& operator<< (std::ostream& s, const VkPipelineShaderStageCreateInfo& value);
+std::ostream& operator<< (std::ostream& s, const VkVertexInputBindingDescription& value);
+std::ostream& operator<< (std::ostream& s, const VkVertexInputAttributeDescription& value);
+std::ostream& operator<< (std::ostream& s, const VkPipelineVertexInputStateCreateInfo& value);
+std::ostream& operator<< (std::ostream& s, const VkPipelineInputAssemblyStateCreateInfo& value);
+std::ostream& operator<< (std::ostream& s, const VkPipelineTessellationStateCreateInfo& value);
+std::ostream& operator<< (std::ostream& s, const VkPipelineViewportStateCreateInfo& value);
+std::ostream& operator<< (std::ostream& s, const VkPipelineRasterStateCreateInfo& value);
+std::ostream& operator<< (std::ostream& s, const VkPipelineMultisampleStateCreateInfo& value);
+std::ostream& operator<< (std::ostream& s, const VkStencilOpState& value);
+std::ostream& operator<< (std::ostream& s, const VkPipelineDepthStencilStateCreateInfo& value);
+std::ostream& operator<< (std::ostream& s, const VkPipelineColorBlendAttachmentState& value);
+std::ostream& operator<< (std::ostream& s, const VkPipelineColorBlendStateCreateInfo& value);
+std::ostream& operator<< (std::ostream& s, const VkGraphicsPipelineCreateInfo& value);
+std::ostream& operator<< (std::ostream& s, const VkComputePipelineCreateInfo& value);
+std::ostream& operator<< (std::ostream& s, const VkPushConstantRange& value);
+std::ostream& operator<< (std::ostream& s, const VkPipelineLayoutCreateInfo& value);
+std::ostream& operator<< (std::ostream& s, const VkSamplerCreateInfo& value);
+std::ostream& operator<< (std::ostream& s, const VkDescriptorSetLayoutBinding& value);
+std::ostream& operator<< (std::ostream& s, const VkDescriptorSetLayoutCreateInfo& value);
+std::ostream& operator<< (std::ostream& s, const VkDescriptorTypeCount& value);
+std::ostream& operator<< (std::ostream& s, const VkDescriptorPoolCreateInfo& value);
+std::ostream& operator<< (std::ostream& s, const VkDescriptorInfo& value);
+std::ostream& operator<< (std::ostream& s, const VkWriteDescriptorSet& value);
+std::ostream& operator<< (std::ostream& s, const VkCopyDescriptorSet& value);
+std::ostream& operator<< (std::ostream& s, const VkViewport& value);
+std::ostream& operator<< (std::ostream& s, const VkOffset2D& value);
+std::ostream& operator<< (std::ostream& s, const VkExtent2D& value);
+std::ostream& operator<< (std::ostream& s, const VkRect2D& value);
+std::ostream& operator<< (std::ostream& s, const VkDynamicViewportStateCreateInfo& value);
+std::ostream& operator<< (std::ostream& s, const VkDynamicRasterStateCreateInfo& value);
+std::ostream& operator<< (std::ostream& s, const VkDynamicColorBlendStateCreateInfo& value);
+std::ostream& operator<< (std::ostream& s, const VkDynamicDepthStencilStateCreateInfo& value);
+std::ostream& operator<< (std::ostream& s, const VkAttachmentBindInfo& value);
+std::ostream& operator<< (std::ostream& s, const VkFramebufferCreateInfo& value);
+std::ostream& operator<< (std::ostream& s, const VkAttachmentDescription& value);
+std::ostream& operator<< (std::ostream& s, const VkAttachmentReference& value);
+std::ostream& operator<< (std::ostream& s, const VkSubpassDescription& value);
+std::ostream& operator<< (std::ostream& s, const VkSubpassDependency& value);
+std::ostream& operator<< (std::ostream& s, const VkRenderPassCreateInfo& value);
+std::ostream& operator<< (std::ostream& s, const VkCmdPoolCreateInfo& value);
+std::ostream& operator<< (std::ostream& s, const VkCmdBufferCreateInfo& value);
+std::ostream& operator<< (std::ostream& s, const VkCmdBufferBeginInfo& value);
+std::ostream& operator<< (std::ostream& s, const VkBufferCopy& value);
+std::ostream& operator<< (std::ostream& s, const VkImageCopy& value);
+std::ostream& operator<< (std::ostream& s, const VkImageBlit& value);
+std::ostream& operator<< (std::ostream& s, const VkBufferImageCopy& value);
+std::ostream& operator<< (std::ostream& s, const VkClearColorValue& value);
+std::ostream& operator<< (std::ostream& s, const VkRect3D& value);
+std::ostream& operator<< (std::ostream& s, const VkImageResolve& value);
+std::ostream& operator<< (std::ostream& s, const VkClearDepthStencilValue& value);
+std::ostream& operator<< (std::ostream& s, const VkClearValue& value);
+std::ostream& operator<< (std::ostream& s, const VkRenderPassBeginInfo& value);
+std::ostream& operator<< (std::ostream& s, const VkBufferMemoryBarrier& value);
+std::ostream& operator<< (std::ostream& s, const VkDispatchIndirectCmd& value);
+std::ostream& operator<< (std::ostream& s, const VkDrawIndexedIndirectCmd& value);
+std::ostream& operator<< (std::ostream& s, const VkDrawIndirectCmd& value);
+std::ostream& operator<< (std::ostream& s, const VkImageMemoryBarrier& value);
+std::ostream& operator<< (std::ostream& s, const VkMemoryBarrier& value);
--- /dev/null
+/* WARNING: This is auto-generated file. Do not modify, since changes will
+ * be lost! Modify the generating script instead.
+ */
+// getTypeName<T> specializations: map each Vulkan handle type to its literal
+// type-name string. NOTE(review): the primary template is declared elsewhere
++// (not visible in this chunk); auto-generated -- edit the generating script.
+template<> const char* getTypeName<VkInstance> (void) { return "VkInstance"; }
+template<> const char* getTypeName<VkPhysicalDevice> (void) { return "VkPhysicalDevice"; }
+template<> const char* getTypeName<VkDevice> (void) { return "VkDevice"; }
+template<> const char* getTypeName<VkQueue> (void) { return "VkQueue"; }
+template<> const char* getTypeName<VkCmdBuffer> (void) { return "VkCmdBuffer"; }
+template<> const char* getTypeName<VkFence> (void) { return "VkFence"; }
+template<> const char* getTypeName<VkDeviceMemory> (void) { return "VkDeviceMemory"; }
+template<> const char* getTypeName<VkBuffer> (void) { return "VkBuffer"; }
+template<> const char* getTypeName<VkImage> (void) { return "VkImage"; }
+template<> const char* getTypeName<VkSemaphore> (void) { return "VkSemaphore"; }
+template<> const char* getTypeName<VkEvent> (void) { return "VkEvent"; }
+template<> const char* getTypeName<VkQueryPool> (void) { return "VkQueryPool"; }
+template<> const char* getTypeName<VkBufferView> (void) { return "VkBufferView"; }
+template<> const char* getTypeName<VkImageView> (void) { return "VkImageView"; }
+template<> const char* getTypeName<VkAttachmentView> (void) { return "VkAttachmentView"; }
+template<> const char* getTypeName<VkShaderModule> (void) { return "VkShaderModule"; }
+template<> const char* getTypeName<VkShader> (void) { return "VkShader"; }
+template<> const char* getTypeName<VkPipelineCache> (void) { return "VkPipelineCache"; }
+template<> const char* getTypeName<VkPipelineLayout> (void) { return "VkPipelineLayout"; }
+template<> const char* getTypeName<VkRenderPass> (void) { return "VkRenderPass"; }
+template<> const char* getTypeName<VkPipeline> (void) { return "VkPipeline"; }
+template<> const char* getTypeName<VkDescriptorSetLayout> (void) { return "VkDescriptorSetLayout"; }
+template<> const char* getTypeName<VkSampler> (void) { return "VkSampler"; }
+template<> const char* getTypeName<VkDescriptorPool> (void) { return "VkDescriptorPool"; }
+template<> const char* getTypeName<VkDescriptorSet> (void) { return "VkDescriptorSet"; }
+template<> const char* getTypeName<VkDynamicViewportState> (void) { return "VkDynamicViewportState"; }
+template<> const char* getTypeName<VkDynamicRasterState> (void) { return "VkDynamicRasterState"; }
+template<> const char* getTypeName<VkDynamicColorBlendState> (void) { return "VkDynamicColorBlendState"; }
+template<> const char* getTypeName<VkDynamicDepthStencilState> (void) { return "VkDynamicDepthStencilState"; }
+template<> const char* getTypeName<VkFramebuffer> (void) { return "VkFramebuffer"; }
+template<> const char* getTypeName<VkCmdPool> (void) { return "VkCmdPool"; }
+
+// Returns the symbolic name of a VkResult value ("VK_SUCCESS", ...), or
+// DE_NULL when the value is not a recognized enumerator (callers can use
+// DE_NULL as an "unknown value" sentinel).
+const char* getResultName (VkResult value)
+{
+ switch (value)
+ {
+ case VK_SUCCESS: return "VK_SUCCESS";
+ case VK_UNSUPPORTED: return "VK_UNSUPPORTED";
+ case VK_NOT_READY: return "VK_NOT_READY";
+ case VK_TIMEOUT: return "VK_TIMEOUT";
+ case VK_EVENT_SET: return "VK_EVENT_SET";
+ case VK_EVENT_RESET: return "VK_EVENT_RESET";
+ case VK_INCOMPLETE: return "VK_INCOMPLETE";
+ case VK_ERROR_UNKNOWN: return "VK_ERROR_UNKNOWN";
+ case VK_ERROR_UNAVAILABLE: return "VK_ERROR_UNAVAILABLE";
+ case VK_ERROR_INITIALIZATION_FAILED: return "VK_ERROR_INITIALIZATION_FAILED";
+ case VK_ERROR_OUT_OF_HOST_MEMORY: return "VK_ERROR_OUT_OF_HOST_MEMORY";
+ case VK_ERROR_OUT_OF_DEVICE_MEMORY: return "VK_ERROR_OUT_OF_DEVICE_MEMORY";
+ case VK_ERROR_DEVICE_ALREADY_CREATED: return "VK_ERROR_DEVICE_ALREADY_CREATED";
+ case VK_ERROR_DEVICE_LOST: return "VK_ERROR_DEVICE_LOST";
+ case VK_ERROR_INVALID_POINTER: return "VK_ERROR_INVALID_POINTER";
+ case VK_ERROR_INVALID_VALUE: return "VK_ERROR_INVALID_VALUE";
+ case VK_ERROR_INVALID_HANDLE: return "VK_ERROR_INVALID_HANDLE";
+ case VK_ERROR_INVALID_ORDINAL: return "VK_ERROR_INVALID_ORDINAL";
+ case VK_ERROR_INVALID_MEMORY_SIZE: return "VK_ERROR_INVALID_MEMORY_SIZE";
+ case VK_ERROR_INVALID_EXTENSION: return "VK_ERROR_INVALID_EXTENSION";
+ case VK_ERROR_INVALID_FLAGS: return "VK_ERROR_INVALID_FLAGS";
+ case VK_ERROR_INVALID_ALIGNMENT: return "VK_ERROR_INVALID_ALIGNMENT";
+ case VK_ERROR_INVALID_FORMAT: return "VK_ERROR_INVALID_FORMAT";
+ case VK_ERROR_INVALID_IMAGE: return "VK_ERROR_INVALID_IMAGE";
+ case VK_ERROR_INVALID_DESCRIPTOR_SET_DATA: return "VK_ERROR_INVALID_DESCRIPTOR_SET_DATA";
+ case VK_ERROR_INVALID_QUEUE_TYPE: return "VK_ERROR_INVALID_QUEUE_TYPE";
+ case VK_ERROR_UNSUPPORTED_SHADER_IL_VERSION: return "VK_ERROR_UNSUPPORTED_SHADER_IL_VERSION";
+ case VK_ERROR_BAD_SHADER_CODE: return "VK_ERROR_BAD_SHADER_CODE";
+ case VK_ERROR_BAD_PIPELINE_DATA: return "VK_ERROR_BAD_PIPELINE_DATA";
+ case VK_ERROR_NOT_MAPPABLE: return "VK_ERROR_NOT_MAPPABLE";
+ case VK_ERROR_MEMORY_MAP_FAILED: return "VK_ERROR_MEMORY_MAP_FAILED";
+ case VK_ERROR_MEMORY_UNMAP_FAILED: return "VK_ERROR_MEMORY_UNMAP_FAILED";
+ case VK_ERROR_INCOMPATIBLE_DEVICE: return "VK_ERROR_INCOMPATIBLE_DEVICE";
+ case VK_ERROR_INCOMPATIBLE_DRIVER: return "VK_ERROR_INCOMPATIBLE_DRIVER";
+ case VK_ERROR_INCOMPLETE_COMMAND_BUFFER: return "VK_ERROR_INCOMPLETE_COMMAND_BUFFER";
+ case VK_ERROR_BUILDING_COMMAND_BUFFER: return "VK_ERROR_BUILDING_COMMAND_BUFFER";
+ case VK_ERROR_MEMORY_NOT_BOUND: return "VK_ERROR_MEMORY_NOT_BOUND";
+ case VK_ERROR_INCOMPATIBLE_QUEUE: return "VK_ERROR_INCOMPATIBLE_QUEUE";
+ case VK_ERROR_INVALID_LAYER: return "VK_ERROR_INVALID_LAYER";
+ default: return DE_NULL;
+ }
+}
+
+// Returns the symbolic name of a VkStructureType value, or DE_NULL for
+// unrecognized values. Case order follows the generator's source data, not
+// numeric enum order.
+const char* getStructureTypeName (VkStructureType value)
+{
+ switch (value)
+ {
+ case VK_STRUCTURE_TYPE_APPLICATION_INFO: return "VK_STRUCTURE_TYPE_APPLICATION_INFO";
+ case VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO: return "VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO";
+ case VK_STRUCTURE_TYPE_MEMORY_ALLOC_INFO: return "VK_STRUCTURE_TYPE_MEMORY_ALLOC_INFO";
+ case VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO: return "VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO";
+ case VK_STRUCTURE_TYPE_ATTACHMENT_VIEW_CREATE_INFO: return "VK_STRUCTURE_TYPE_ATTACHMENT_VIEW_CREATE_INFO";
+ case VK_STRUCTURE_TYPE_SHADER_MODULE_CREATE_INFO: return "VK_STRUCTURE_TYPE_SHADER_MODULE_CREATE_INFO";
+ case VK_STRUCTURE_TYPE_SHADER_CREATE_INFO: return "VK_STRUCTURE_TYPE_SHADER_CREATE_INFO";
+ case VK_STRUCTURE_TYPE_COMPUTE_PIPELINE_CREATE_INFO: return "VK_STRUCTURE_TYPE_COMPUTE_PIPELINE_CREATE_INFO";
+ case VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO: return "VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO";
+ case VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO: return "VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO";
+ case VK_STRUCTURE_TYPE_DYNAMIC_VIEWPORT_STATE_CREATE_INFO: return "VK_STRUCTURE_TYPE_DYNAMIC_VIEWPORT_STATE_CREATE_INFO";
+ case VK_STRUCTURE_TYPE_DYNAMIC_RASTER_STATE_CREATE_INFO: return "VK_STRUCTURE_TYPE_DYNAMIC_RASTER_STATE_CREATE_INFO";
+ case VK_STRUCTURE_TYPE_DYNAMIC_COLOR_BLEND_STATE_CREATE_INFO: return "VK_STRUCTURE_TYPE_DYNAMIC_COLOR_BLEND_STATE_CREATE_INFO";
+ case VK_STRUCTURE_TYPE_DYNAMIC_DEPTH_STENCIL_STATE_CREATE_INFO: return "VK_STRUCTURE_TYPE_DYNAMIC_DEPTH_STENCIL_STATE_CREATE_INFO";
+ case VK_STRUCTURE_TYPE_CMD_BUFFER_CREATE_INFO: return "VK_STRUCTURE_TYPE_CMD_BUFFER_CREATE_INFO";
+ case VK_STRUCTURE_TYPE_EVENT_CREATE_INFO: return "VK_STRUCTURE_TYPE_EVENT_CREATE_INFO";
+ case VK_STRUCTURE_TYPE_FENCE_CREATE_INFO: return "VK_STRUCTURE_TYPE_FENCE_CREATE_INFO";
+ case VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO: return "VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO";
+ case VK_STRUCTURE_TYPE_QUERY_POOL_CREATE_INFO: return "VK_STRUCTURE_TYPE_QUERY_POOL_CREATE_INFO";
+ case VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO: return "VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO";
+ case VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO: return "VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO";
+ case VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO: return "VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO";
+ case VK_STRUCTURE_TYPE_PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO: return "VK_STRUCTURE_TYPE_PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO";
+ case VK_STRUCTURE_TYPE_PIPELINE_TESSELLATION_STATE_CREATE_INFO: return "VK_STRUCTURE_TYPE_PIPELINE_TESSELLATION_STATE_CREATE_INFO";
+ case VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_STATE_CREATE_INFO: return "VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_STATE_CREATE_INFO";
+ case VK_STRUCTURE_TYPE_PIPELINE_RASTER_STATE_CREATE_INFO: return "VK_STRUCTURE_TYPE_PIPELINE_RASTER_STATE_CREATE_INFO";
+ case VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO: return "VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO";
+ case VK_STRUCTURE_TYPE_PIPELINE_COLOR_BLEND_STATE_CREATE_INFO: return "VK_STRUCTURE_TYPE_PIPELINE_COLOR_BLEND_STATE_CREATE_INFO";
+ case VK_STRUCTURE_TYPE_PIPELINE_DEPTH_STENCIL_STATE_CREATE_INFO: return "VK_STRUCTURE_TYPE_PIPELINE_DEPTH_STENCIL_STATE_CREATE_INFO";
+ case VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO: return "VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO";
+ case VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO: return "VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO";
+ case VK_STRUCTURE_TYPE_BUFFER_VIEW_CREATE_INFO: return "VK_STRUCTURE_TYPE_BUFFER_VIEW_CREATE_INFO";
+ case VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO: return "VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO";
+ case VK_STRUCTURE_TYPE_CMD_BUFFER_BEGIN_INFO: return "VK_STRUCTURE_TYPE_CMD_BUFFER_BEGIN_INFO";
+ case VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO: return "VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO";
+ case VK_STRUCTURE_TYPE_MEMORY_BARRIER: return "VK_STRUCTURE_TYPE_MEMORY_BARRIER";
+ case VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER: return "VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER";
+ case VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER: return "VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER";
+ case VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO: return "VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO";
+ case VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET: return "VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET";
+ case VK_STRUCTURE_TYPE_COPY_DESCRIPTOR_SET: return "VK_STRUCTURE_TYPE_COPY_DESCRIPTOR_SET";
+ case VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO: return "VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO";
+ case VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO: return "VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO";
+ case VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE: return "VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE";
+ case VK_STRUCTURE_TYPE_PIPELINE_CACHE_CREATE_INFO: return "VK_STRUCTURE_TYPE_PIPELINE_CACHE_CREATE_INFO";
+ case VK_STRUCTURE_TYPE_ATTACHMENT_DESCRIPTION: return "VK_STRUCTURE_TYPE_ATTACHMENT_DESCRIPTION";
+ case VK_STRUCTURE_TYPE_SUBPASS_DESCRIPTION: return "VK_STRUCTURE_TYPE_SUBPASS_DESCRIPTION";
+ case VK_STRUCTURE_TYPE_SUBPASS_DEPENDENCY: return "VK_STRUCTURE_TYPE_SUBPASS_DEPENDENCY";
+ case VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO: return "VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO";
+ case VK_STRUCTURE_TYPE_CMD_POOL_CREATE_INFO: return "VK_STRUCTURE_TYPE_CMD_POOL_CREATE_INFO";
+ default: return DE_NULL;
+ }
+}
+
+// Returns the symbolic name of a VkSystemAllocType value, or DE_NULL for
+// unrecognized values.
+const char* getSystemAllocTypeName (VkSystemAllocType value)
+{
+ switch (value)
+ {
+ case VK_SYSTEM_ALLOC_TYPE_API_OBJECT: return "VK_SYSTEM_ALLOC_TYPE_API_OBJECT";
+ case VK_SYSTEM_ALLOC_TYPE_INTERNAL: return "VK_SYSTEM_ALLOC_TYPE_INTERNAL";
+ case VK_SYSTEM_ALLOC_TYPE_INTERNAL_TEMP: return "VK_SYSTEM_ALLOC_TYPE_INTERNAL_TEMP";
+ case VK_SYSTEM_ALLOC_TYPE_INTERNAL_SHADER: return "VK_SYSTEM_ALLOC_TYPE_INTERNAL_SHADER";
+ case VK_SYSTEM_ALLOC_TYPE_DEBUG: return "VK_SYSTEM_ALLOC_TYPE_DEBUG";
+ default: return DE_NULL;
+ }
+}
+
+const char* getFormatName (VkFormat value)
+{
+ switch (value)
+ {
+ case VK_FORMAT_UNDEFINED: return "VK_FORMAT_UNDEFINED";
+ case VK_FORMAT_R4G4_UNORM: return "VK_FORMAT_R4G4_UNORM";
+ case VK_FORMAT_R4G4_USCALED: return "VK_FORMAT_R4G4_USCALED";
+ case VK_FORMAT_R4G4B4A4_UNORM: return "VK_FORMAT_R4G4B4A4_UNORM";
+ case VK_FORMAT_R4G4B4A4_USCALED: return "VK_FORMAT_R4G4B4A4_USCALED";
+ case VK_FORMAT_R5G6B5_UNORM: return "VK_FORMAT_R5G6B5_UNORM";
+ case VK_FORMAT_R5G6B5_USCALED: return "VK_FORMAT_R5G6B5_USCALED";
+ case VK_FORMAT_R5G5B5A1_UNORM: return "VK_FORMAT_R5G5B5A1_UNORM";
+ case VK_FORMAT_R5G5B5A1_USCALED: return "VK_FORMAT_R5G5B5A1_USCALED";
+ case VK_FORMAT_R8_UNORM: return "VK_FORMAT_R8_UNORM";
+ case VK_FORMAT_R8_SNORM: return "VK_FORMAT_R8_SNORM";
+ case VK_FORMAT_R8_USCALED: return "VK_FORMAT_R8_USCALED";
+ case VK_FORMAT_R8_SSCALED: return "VK_FORMAT_R8_SSCALED";
+ case VK_FORMAT_R8_UINT: return "VK_FORMAT_R8_UINT";
+ case VK_FORMAT_R8_SINT: return "VK_FORMAT_R8_SINT";
+ case VK_FORMAT_R8_SRGB: return "VK_FORMAT_R8_SRGB";
+ case VK_FORMAT_R8G8_UNORM: return "VK_FORMAT_R8G8_UNORM";
+ case VK_FORMAT_R8G8_SNORM: return "VK_FORMAT_R8G8_SNORM";
+ case VK_FORMAT_R8G8_USCALED: return "VK_FORMAT_R8G8_USCALED";
+ case VK_FORMAT_R8G8_SSCALED: return "VK_FORMAT_R8G8_SSCALED";
+ case VK_FORMAT_R8G8_UINT: return "VK_FORMAT_R8G8_UINT";
+ case VK_FORMAT_R8G8_SINT: return "VK_FORMAT_R8G8_SINT";
+ case VK_FORMAT_R8G8_SRGB: return "VK_FORMAT_R8G8_SRGB";
+ case VK_FORMAT_R8G8B8_UNORM: return "VK_FORMAT_R8G8B8_UNORM";
+ case VK_FORMAT_R8G8B8_SNORM: return "VK_FORMAT_R8G8B8_SNORM";
+ case VK_FORMAT_R8G8B8_USCALED: return "VK_FORMAT_R8G8B8_USCALED";
+ case VK_FORMAT_R8G8B8_SSCALED: return "VK_FORMAT_R8G8B8_SSCALED";
+ case VK_FORMAT_R8G8B8_UINT: return "VK_FORMAT_R8G8B8_UINT";
+ case VK_FORMAT_R8G8B8_SINT: return "VK_FORMAT_R8G8B8_SINT";
+ case VK_FORMAT_R8G8B8_SRGB: return "VK_FORMAT_R8G8B8_SRGB";
+ case VK_FORMAT_R8G8B8A8_UNORM: return "VK_FORMAT_R8G8B8A8_UNORM";
+ case VK_FORMAT_R8G8B8A8_SNORM: return "VK_FORMAT_R8G8B8A8_SNORM";
+ case VK_FORMAT_R8G8B8A8_USCALED: return "VK_FORMAT_R8G8B8A8_USCALED";
+ case VK_FORMAT_R8G8B8A8_SSCALED: return "VK_FORMAT_R8G8B8A8_SSCALED";
+ case VK_FORMAT_R8G8B8A8_UINT: return "VK_FORMAT_R8G8B8A8_UINT";
+ case VK_FORMAT_R8G8B8A8_SINT: return "VK_FORMAT_R8G8B8A8_SINT";
+ case VK_FORMAT_R8G8B8A8_SRGB: return "VK_FORMAT_R8G8B8A8_SRGB";
+ case VK_FORMAT_R10G10B10A2_UNORM: return "VK_FORMAT_R10G10B10A2_UNORM";
+ case VK_FORMAT_R10G10B10A2_SNORM: return "VK_FORMAT_R10G10B10A2_SNORM";
+ case VK_FORMAT_R10G10B10A2_USCALED: return "VK_FORMAT_R10G10B10A2_USCALED";
+ case VK_FORMAT_R10G10B10A2_SSCALED: return "VK_FORMAT_R10G10B10A2_SSCALED";
+ case VK_FORMAT_R10G10B10A2_UINT: return "VK_FORMAT_R10G10B10A2_UINT";
+ case VK_FORMAT_R10G10B10A2_SINT: return "VK_FORMAT_R10G10B10A2_SINT";
+ case VK_FORMAT_R16_UNORM: return "VK_FORMAT_R16_UNORM";
+ case VK_FORMAT_R16_SNORM: return "VK_FORMAT_R16_SNORM";
+ case VK_FORMAT_R16_USCALED: return "VK_FORMAT_R16_USCALED";
+ case VK_FORMAT_R16_SSCALED: return "VK_FORMAT_R16_SSCALED";
+ case VK_FORMAT_R16_UINT: return "VK_FORMAT_R16_UINT";
+ case VK_FORMAT_R16_SINT: return "VK_FORMAT_R16_SINT";
+ case VK_FORMAT_R16_SFLOAT: return "VK_FORMAT_R16_SFLOAT";
+ case VK_FORMAT_R16G16_UNORM: return "VK_FORMAT_R16G16_UNORM";
+ case VK_FORMAT_R16G16_SNORM: return "VK_FORMAT_R16G16_SNORM";
+ case VK_FORMAT_R16G16_USCALED: return "VK_FORMAT_R16G16_USCALED";
+ case VK_FORMAT_R16G16_SSCALED: return "VK_FORMAT_R16G16_SSCALED";
+ case VK_FORMAT_R16G16_UINT: return "VK_FORMAT_R16G16_UINT";
+ case VK_FORMAT_R16G16_SINT: return "VK_FORMAT_R16G16_SINT";
+ case VK_FORMAT_R16G16_SFLOAT: return "VK_FORMAT_R16G16_SFLOAT";
+ case VK_FORMAT_R16G16B16_UNORM: return "VK_FORMAT_R16G16B16_UNORM";
+ case VK_FORMAT_R16G16B16_SNORM: return "VK_FORMAT_R16G16B16_SNORM";
+ case VK_FORMAT_R16G16B16_USCALED: return "VK_FORMAT_R16G16B16_USCALED";
+ case VK_FORMAT_R16G16B16_SSCALED: return "VK_FORMAT_R16G16B16_SSCALED";
+ case VK_FORMAT_R16G16B16_UINT: return "VK_FORMAT_R16G16B16_UINT";
+ case VK_FORMAT_R16G16B16_SINT: return "VK_FORMAT_R16G16B16_SINT";
+ case VK_FORMAT_R16G16B16_SFLOAT: return "VK_FORMAT_R16G16B16_SFLOAT";
+ case VK_FORMAT_R16G16B16A16_UNORM: return "VK_FORMAT_R16G16B16A16_UNORM";
+ case VK_FORMAT_R16G16B16A16_SNORM: return "VK_FORMAT_R16G16B16A16_SNORM";
+ case VK_FORMAT_R16G16B16A16_USCALED: return "VK_FORMAT_R16G16B16A16_USCALED";
+ case VK_FORMAT_R16G16B16A16_SSCALED: return "VK_FORMAT_R16G16B16A16_SSCALED";
+ case VK_FORMAT_R16G16B16A16_UINT: return "VK_FORMAT_R16G16B16A16_UINT";
+ case VK_FORMAT_R16G16B16A16_SINT: return "VK_FORMAT_R16G16B16A16_SINT";
+ case VK_FORMAT_R16G16B16A16_SFLOAT: return "VK_FORMAT_R16G16B16A16_SFLOAT";
+ case VK_FORMAT_R32_UINT: return "VK_FORMAT_R32_UINT";
+ case VK_FORMAT_R32_SINT: return "VK_FORMAT_R32_SINT";
+ case VK_FORMAT_R32_SFLOAT: return "VK_FORMAT_R32_SFLOAT";
+ case VK_FORMAT_R32G32_UINT: return "VK_FORMAT_R32G32_UINT";
+ case VK_FORMAT_R32G32_SINT: return "VK_FORMAT_R32G32_SINT";
+ case VK_FORMAT_R32G32_SFLOAT: return "VK_FORMAT_R32G32_SFLOAT";
+ case VK_FORMAT_R32G32B32_UINT: return "VK_FORMAT_R32G32B32_UINT";
+ case VK_FORMAT_R32G32B32_SINT: return "VK_FORMAT_R32G32B32_SINT";
+ case VK_FORMAT_R32G32B32_SFLOAT: return "VK_FORMAT_R32G32B32_SFLOAT";
+ case VK_FORMAT_R32G32B32A32_UINT: return "VK_FORMAT_R32G32B32A32_UINT";
+ case VK_FORMAT_R32G32B32A32_SINT: return "VK_FORMAT_R32G32B32A32_SINT";
+ case VK_FORMAT_R32G32B32A32_SFLOAT: return "VK_FORMAT_R32G32B32A32_SFLOAT";
+ case VK_FORMAT_R64_SFLOAT: return "VK_FORMAT_R64_SFLOAT";
+ case VK_FORMAT_R64G64_SFLOAT: return "VK_FORMAT_R64G64_SFLOAT";
+ case VK_FORMAT_R64G64B64_SFLOAT: return "VK_FORMAT_R64G64B64_SFLOAT";
+ case VK_FORMAT_R64G64B64A64_SFLOAT: return "VK_FORMAT_R64G64B64A64_SFLOAT";
+ case VK_FORMAT_R11G11B10_UFLOAT: return "VK_FORMAT_R11G11B10_UFLOAT";
+ case VK_FORMAT_R9G9B9E5_UFLOAT: return "VK_FORMAT_R9G9B9E5_UFLOAT";
+ case VK_FORMAT_D16_UNORM: return "VK_FORMAT_D16_UNORM";
+ case VK_FORMAT_D24_UNORM: return "VK_FORMAT_D24_UNORM";
+ case VK_FORMAT_D32_SFLOAT: return "VK_FORMAT_D32_SFLOAT";
+ case VK_FORMAT_S8_UINT: return "VK_FORMAT_S8_UINT";
+ case VK_FORMAT_D16_UNORM_S8_UINT: return "VK_FORMAT_D16_UNORM_S8_UINT";
+ case VK_FORMAT_D24_UNORM_S8_UINT: return "VK_FORMAT_D24_UNORM_S8_UINT";
+ case VK_FORMAT_D32_SFLOAT_S8_UINT: return "VK_FORMAT_D32_SFLOAT_S8_UINT";
+ case VK_FORMAT_BC1_RGB_UNORM: return "VK_FORMAT_BC1_RGB_UNORM";
+ case VK_FORMAT_BC1_RGB_SRGB: return "VK_FORMAT_BC1_RGB_SRGB";
+ case VK_FORMAT_BC1_RGBA_UNORM: return "VK_FORMAT_BC1_RGBA_UNORM";
+ case VK_FORMAT_BC1_RGBA_SRGB: return "VK_FORMAT_BC1_RGBA_SRGB";
+ case VK_FORMAT_BC2_UNORM: return "VK_FORMAT_BC2_UNORM";
+ case VK_FORMAT_BC2_SRGB: return "VK_FORMAT_BC2_SRGB";
+ case VK_FORMAT_BC3_UNORM: return "VK_FORMAT_BC3_UNORM";
+ case VK_FORMAT_BC3_SRGB: return "VK_FORMAT_BC3_SRGB";
+ case VK_FORMAT_BC4_UNORM: return "VK_FORMAT_BC4_UNORM";
+ case VK_FORMAT_BC4_SNORM: return "VK_FORMAT_BC4_SNORM";
+ case VK_FORMAT_BC5_UNORM: return "VK_FORMAT_BC5_UNORM";
+ case VK_FORMAT_BC5_SNORM: return "VK_FORMAT_BC5_SNORM";
+ case VK_FORMAT_BC6H_UFLOAT: return "VK_FORMAT_BC6H_UFLOAT";
+ case VK_FORMAT_BC6H_SFLOAT: return "VK_FORMAT_BC6H_SFLOAT";
+ case VK_FORMAT_BC7_UNORM: return "VK_FORMAT_BC7_UNORM";
+ case VK_FORMAT_BC7_SRGB: return "VK_FORMAT_BC7_SRGB";
+ case VK_FORMAT_ETC2_R8G8B8_UNORM: return "VK_FORMAT_ETC2_R8G8B8_UNORM";
+ case VK_FORMAT_ETC2_R8G8B8_SRGB: return "VK_FORMAT_ETC2_R8G8B8_SRGB";
+ case VK_FORMAT_ETC2_R8G8B8A1_UNORM: return "VK_FORMAT_ETC2_R8G8B8A1_UNORM";
+ case VK_FORMAT_ETC2_R8G8B8A1_SRGB: return "VK_FORMAT_ETC2_R8G8B8A1_SRGB";
+ case VK_FORMAT_ETC2_R8G8B8A8_UNORM: return "VK_FORMAT_ETC2_R8G8B8A8_UNORM";
+ case VK_FORMAT_ETC2_R8G8B8A8_SRGB: return "VK_FORMAT_ETC2_R8G8B8A8_SRGB";
+ case VK_FORMAT_EAC_R11_UNORM: return "VK_FORMAT_EAC_R11_UNORM";
+ case VK_FORMAT_EAC_R11_SNORM: return "VK_FORMAT_EAC_R11_SNORM";
+ case VK_FORMAT_EAC_R11G11_UNORM: return "VK_FORMAT_EAC_R11G11_UNORM";
+ case VK_FORMAT_EAC_R11G11_SNORM: return "VK_FORMAT_EAC_R11G11_SNORM";
+ case VK_FORMAT_ASTC_4x4_UNORM: return "VK_FORMAT_ASTC_4x4_UNORM";
+ case VK_FORMAT_ASTC_4x4_SRGB: return "VK_FORMAT_ASTC_4x4_SRGB";
+ case VK_FORMAT_ASTC_5x4_UNORM: return "VK_FORMAT_ASTC_5x4_UNORM";
+ case VK_FORMAT_ASTC_5x4_SRGB: return "VK_FORMAT_ASTC_5x4_SRGB";
+ case VK_FORMAT_ASTC_5x5_UNORM: return "VK_FORMAT_ASTC_5x5_UNORM";
+ case VK_FORMAT_ASTC_5x5_SRGB: return "VK_FORMAT_ASTC_5x5_SRGB";
+ case VK_FORMAT_ASTC_6x5_UNORM: return "VK_FORMAT_ASTC_6x5_UNORM";
+ case VK_FORMAT_ASTC_6x5_SRGB: return "VK_FORMAT_ASTC_6x5_SRGB";
+ case VK_FORMAT_ASTC_6x6_UNORM: return "VK_FORMAT_ASTC_6x6_UNORM";
+ case VK_FORMAT_ASTC_6x6_SRGB: return "VK_FORMAT_ASTC_6x6_SRGB";
+ case VK_FORMAT_ASTC_8x5_UNORM: return "VK_FORMAT_ASTC_8x5_UNORM";
+ case VK_FORMAT_ASTC_8x5_SRGB: return "VK_FORMAT_ASTC_8x5_SRGB";
+ case VK_FORMAT_ASTC_8x6_UNORM: return "VK_FORMAT_ASTC_8x6_UNORM";
+ case VK_FORMAT_ASTC_8x6_SRGB: return "VK_FORMAT_ASTC_8x6_SRGB";
+ case VK_FORMAT_ASTC_8x8_UNORM: return "VK_FORMAT_ASTC_8x8_UNORM";
+ case VK_FORMAT_ASTC_8x8_SRGB: return "VK_FORMAT_ASTC_8x8_SRGB";
+ case VK_FORMAT_ASTC_10x5_UNORM: return "VK_FORMAT_ASTC_10x5_UNORM";
+ case VK_FORMAT_ASTC_10x5_SRGB: return "VK_FORMAT_ASTC_10x5_SRGB";
+ case VK_FORMAT_ASTC_10x6_UNORM: return "VK_FORMAT_ASTC_10x6_UNORM";
+ case VK_FORMAT_ASTC_10x6_SRGB: return "VK_FORMAT_ASTC_10x6_SRGB";
+ case VK_FORMAT_ASTC_10x8_UNORM: return "VK_FORMAT_ASTC_10x8_UNORM";
+ case VK_FORMAT_ASTC_10x8_SRGB: return "VK_FORMAT_ASTC_10x8_SRGB";
+ case VK_FORMAT_ASTC_10x10_UNORM: return "VK_FORMAT_ASTC_10x10_UNORM";
+ case VK_FORMAT_ASTC_10x10_SRGB: return "VK_FORMAT_ASTC_10x10_SRGB";
+ case VK_FORMAT_ASTC_12x10_UNORM: return "VK_FORMAT_ASTC_12x10_UNORM";
+ case VK_FORMAT_ASTC_12x10_SRGB: return "VK_FORMAT_ASTC_12x10_SRGB";
+ case VK_FORMAT_ASTC_12x12_UNORM: return "VK_FORMAT_ASTC_12x12_UNORM";
+ case VK_FORMAT_ASTC_12x12_SRGB: return "VK_FORMAT_ASTC_12x12_SRGB";
+ case VK_FORMAT_B4G4R4A4_UNORM: return "VK_FORMAT_B4G4R4A4_UNORM";
+ case VK_FORMAT_B5G5R5A1_UNORM: return "VK_FORMAT_B5G5R5A1_UNORM";
+ case VK_FORMAT_B5G6R5_UNORM: return "VK_FORMAT_B5G6R5_UNORM";
+ case VK_FORMAT_B5G6R5_USCALED: return "VK_FORMAT_B5G6R5_USCALED";
+ case VK_FORMAT_B8G8R8_UNORM: return "VK_FORMAT_B8G8R8_UNORM";
+ case VK_FORMAT_B8G8R8_SNORM: return "VK_FORMAT_B8G8R8_SNORM";
+ case VK_FORMAT_B8G8R8_USCALED: return "VK_FORMAT_B8G8R8_USCALED";
+ case VK_FORMAT_B8G8R8_SSCALED: return "VK_FORMAT_B8G8R8_SSCALED";
+ case VK_FORMAT_B8G8R8_UINT: return "VK_FORMAT_B8G8R8_UINT";
+ case VK_FORMAT_B8G8R8_SINT: return "VK_FORMAT_B8G8R8_SINT";
+ case VK_FORMAT_B8G8R8_SRGB: return "VK_FORMAT_B8G8R8_SRGB";
+ case VK_FORMAT_B8G8R8A8_UNORM: return "VK_FORMAT_B8G8R8A8_UNORM";
+ case VK_FORMAT_B8G8R8A8_SNORM: return "VK_FORMAT_B8G8R8A8_SNORM";
+ case VK_FORMAT_B8G8R8A8_USCALED: return "VK_FORMAT_B8G8R8A8_USCALED";
+ case VK_FORMAT_B8G8R8A8_SSCALED: return "VK_FORMAT_B8G8R8A8_SSCALED";
+ case VK_FORMAT_B8G8R8A8_UINT: return "VK_FORMAT_B8G8R8A8_UINT";
+ case VK_FORMAT_B8G8R8A8_SINT: return "VK_FORMAT_B8G8R8A8_SINT";
+ case VK_FORMAT_B8G8R8A8_SRGB: return "VK_FORMAT_B8G8R8A8_SRGB";
+ case VK_FORMAT_B10G10R10A2_UNORM: return "VK_FORMAT_B10G10R10A2_UNORM";
+ case VK_FORMAT_B10G10R10A2_SNORM: return "VK_FORMAT_B10G10R10A2_SNORM";
+ case VK_FORMAT_B10G10R10A2_USCALED: return "VK_FORMAT_B10G10R10A2_USCALED";
+ case VK_FORMAT_B10G10R10A2_SSCALED: return "VK_FORMAT_B10G10R10A2_SSCALED";
+ case VK_FORMAT_B10G10R10A2_UINT: return "VK_FORMAT_B10G10R10A2_UINT";
+ case VK_FORMAT_B10G10R10A2_SINT: return "VK_FORMAT_B10G10R10A2_SINT";
+ default: return DE_NULL;
+ }
+}
+
+// --- Enum-to-string helpers -------------------------------------------------
+// Each function maps a Vulkan enum value to the literal spelling of the enum
+// constant (e.g. VK_IMAGE_TYPE_1D -> "VK_IMAGE_TYPE_1D"). Values outside the
+// known set return DE_NULL, so callers must be prepared for a null pointer.
+// NOTE(review): this section looks auto-generated; keep edits in the generator.
+const char* getImageTypeName (VkImageType value)
+{
+ switch (value)
+ {
+ case VK_IMAGE_TYPE_1D: return "VK_IMAGE_TYPE_1D";
+ case VK_IMAGE_TYPE_2D: return "VK_IMAGE_TYPE_2D";
+ case VK_IMAGE_TYPE_3D: return "VK_IMAGE_TYPE_3D";
+ default: return DE_NULL;
+ }
+}
+
+const char* getImageTilingName (VkImageTiling value)
+{
+ switch (value)
+ {
+ case VK_IMAGE_TILING_LINEAR: return "VK_IMAGE_TILING_LINEAR";
+ case VK_IMAGE_TILING_OPTIMAL: return "VK_IMAGE_TILING_OPTIMAL";
+ default: return DE_NULL;
+ }
+}
+
+const char* getPhysicalDeviceTypeName (VkPhysicalDeviceType value)
+{
+ switch (value)
+ {
+ case VK_PHYSICAL_DEVICE_TYPE_OTHER: return "VK_PHYSICAL_DEVICE_TYPE_OTHER";
+ case VK_PHYSICAL_DEVICE_TYPE_INTEGRATED_GPU: return "VK_PHYSICAL_DEVICE_TYPE_INTEGRATED_GPU";
+ case VK_PHYSICAL_DEVICE_TYPE_DISCRETE_GPU: return "VK_PHYSICAL_DEVICE_TYPE_DISCRETE_GPU";
+ case VK_PHYSICAL_DEVICE_TYPE_VIRTUAL_GPU: return "VK_PHYSICAL_DEVICE_TYPE_VIRTUAL_GPU";
+ case VK_PHYSICAL_DEVICE_TYPE_CPU: return "VK_PHYSICAL_DEVICE_TYPE_CPU";
+ default: return DE_NULL;
+ }
+}
+
+const char* getImageAspectName (VkImageAspect value)
+{
+ switch (value)
+ {
+ case VK_IMAGE_ASPECT_COLOR: return "VK_IMAGE_ASPECT_COLOR";
+ case VK_IMAGE_ASPECT_DEPTH: return "VK_IMAGE_ASPECT_DEPTH";
+ case VK_IMAGE_ASPECT_STENCIL: return "VK_IMAGE_ASPECT_STENCIL";
+ case VK_IMAGE_ASPECT_METADATA: return "VK_IMAGE_ASPECT_METADATA";
+ default: return DE_NULL;
+ }
+}
+
+const char* getQueryTypeName (VkQueryType value)
+{
+ switch (value)
+ {
+ case VK_QUERY_TYPE_OCCLUSION: return "VK_QUERY_TYPE_OCCLUSION";
+ case VK_QUERY_TYPE_PIPELINE_STATISTICS: return "VK_QUERY_TYPE_PIPELINE_STATISTICS";
+ default: return DE_NULL;
+ }
+}
+
+const char* getSharingModeName (VkSharingMode value)
+{
+ switch (value)
+ {
+ case VK_SHARING_MODE_EXCLUSIVE: return "VK_SHARING_MODE_EXCLUSIVE";
+ case VK_SHARING_MODE_CONCURRENT: return "VK_SHARING_MODE_CONCURRENT";
+ default: return DE_NULL;
+ }
+}
+
+const char* getBufferViewTypeName (VkBufferViewType value)
+{
+ switch (value)
+ {
+ case VK_BUFFER_VIEW_TYPE_RAW: return "VK_BUFFER_VIEW_TYPE_RAW";
+ case VK_BUFFER_VIEW_TYPE_FORMATTED: return "VK_BUFFER_VIEW_TYPE_FORMATTED";
+ default: return DE_NULL;
+ }
+}
+
+const char* getImageViewTypeName (VkImageViewType value)
+{
+ switch (value)
+ {
+ case VK_IMAGE_VIEW_TYPE_1D: return "VK_IMAGE_VIEW_TYPE_1D";
+ case VK_IMAGE_VIEW_TYPE_2D: return "VK_IMAGE_VIEW_TYPE_2D";
+ case VK_IMAGE_VIEW_TYPE_3D: return "VK_IMAGE_VIEW_TYPE_3D";
+ case VK_IMAGE_VIEW_TYPE_CUBE: return "VK_IMAGE_VIEW_TYPE_CUBE";
+ case VK_IMAGE_VIEW_TYPE_1D_ARRAY: return "VK_IMAGE_VIEW_TYPE_1D_ARRAY";
+ case VK_IMAGE_VIEW_TYPE_2D_ARRAY: return "VK_IMAGE_VIEW_TYPE_2D_ARRAY";
+ case VK_IMAGE_VIEW_TYPE_CUBE_ARRAY: return "VK_IMAGE_VIEW_TYPE_CUBE_ARRAY";
+ default: return DE_NULL;
+ }
+}
+
+const char* getChannelSwizzleName (VkChannelSwizzle value)
+{
+ switch (value)
+ {
+ case VK_CHANNEL_SWIZZLE_ZERO: return "VK_CHANNEL_SWIZZLE_ZERO";
+ case VK_CHANNEL_SWIZZLE_ONE: return "VK_CHANNEL_SWIZZLE_ONE";
+ case VK_CHANNEL_SWIZZLE_R: return "VK_CHANNEL_SWIZZLE_R";
+ case VK_CHANNEL_SWIZZLE_G: return "VK_CHANNEL_SWIZZLE_G";
+ case VK_CHANNEL_SWIZZLE_B: return "VK_CHANNEL_SWIZZLE_B";
+ case VK_CHANNEL_SWIZZLE_A: return "VK_CHANNEL_SWIZZLE_A";
+ default: return DE_NULL;
+ }
+}
+
+const char* getShaderStageName (VkShaderStage value)
+{
+ switch (value)
+ {
+ case VK_SHADER_STAGE_VERTEX: return "VK_SHADER_STAGE_VERTEX";
+ case VK_SHADER_STAGE_TESS_CONTROL: return "VK_SHADER_STAGE_TESS_CONTROL";
+ case VK_SHADER_STAGE_TESS_EVALUATION: return "VK_SHADER_STAGE_TESS_EVALUATION";
+ case VK_SHADER_STAGE_GEOMETRY: return "VK_SHADER_STAGE_GEOMETRY";
+ case VK_SHADER_STAGE_FRAGMENT: return "VK_SHADER_STAGE_FRAGMENT";
+ case VK_SHADER_STAGE_COMPUTE: return "VK_SHADER_STAGE_COMPUTE";
+ default: return DE_NULL;
+ }
+}
+
+const char* getVertexInputStepRateName (VkVertexInputStepRate value)
+{
+ switch (value)
+ {
+ case VK_VERTEX_INPUT_STEP_RATE_VERTEX: return "VK_VERTEX_INPUT_STEP_RATE_VERTEX";
+ case VK_VERTEX_INPUT_STEP_RATE_INSTANCE: return "VK_VERTEX_INPUT_STEP_RATE_INSTANCE";
+ default: return DE_NULL;
+ }
+}
+
+const char* getPrimitiveTopologyName (VkPrimitiveTopology value)
+{
+ switch (value)
+ {
+ case VK_PRIMITIVE_TOPOLOGY_POINT_LIST: return "VK_PRIMITIVE_TOPOLOGY_POINT_LIST";
+ case VK_PRIMITIVE_TOPOLOGY_LINE_LIST: return "VK_PRIMITIVE_TOPOLOGY_LINE_LIST";
+ case VK_PRIMITIVE_TOPOLOGY_LINE_STRIP: return "VK_PRIMITIVE_TOPOLOGY_LINE_STRIP";
+ case VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST: return "VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST";
+ case VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP: return "VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP";
+ case VK_PRIMITIVE_TOPOLOGY_TRIANGLE_FAN: return "VK_PRIMITIVE_TOPOLOGY_TRIANGLE_FAN";
+ case VK_PRIMITIVE_TOPOLOGY_LINE_LIST_ADJ: return "VK_PRIMITIVE_TOPOLOGY_LINE_LIST_ADJ";
+ case VK_PRIMITIVE_TOPOLOGY_LINE_STRIP_ADJ: return "VK_PRIMITIVE_TOPOLOGY_LINE_STRIP_ADJ";
+ case VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST_ADJ: return "VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST_ADJ";
+ case VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP_ADJ: return "VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP_ADJ";
+ case VK_PRIMITIVE_TOPOLOGY_PATCH: return "VK_PRIMITIVE_TOPOLOGY_PATCH";
+ default: return DE_NULL;
+ }
+}
+
+const char* getFillModeName (VkFillMode value)
+{
+ switch (value)
+ {
+ case VK_FILL_MODE_POINTS: return "VK_FILL_MODE_POINTS";
+ case VK_FILL_MODE_WIREFRAME: return "VK_FILL_MODE_WIREFRAME";
+ case VK_FILL_MODE_SOLID: return "VK_FILL_MODE_SOLID";
+ default: return DE_NULL;
+ }
+}
+
+const char* getCullModeName (VkCullMode value)
+{
+ switch (value)
+ {
+ case VK_CULL_MODE_NONE: return "VK_CULL_MODE_NONE";
+ case VK_CULL_MODE_FRONT: return "VK_CULL_MODE_FRONT";
+ case VK_CULL_MODE_BACK: return "VK_CULL_MODE_BACK";
+ case VK_CULL_MODE_FRONT_AND_BACK: return "VK_CULL_MODE_FRONT_AND_BACK";
+ default: return DE_NULL;
+ }
+}
+
+const char* getFrontFaceName (VkFrontFace value)
+{
+ switch (value)
+ {
+ case VK_FRONT_FACE_CCW: return "VK_FRONT_FACE_CCW";
+ case VK_FRONT_FACE_CW: return "VK_FRONT_FACE_CW";
+ default: return DE_NULL;
+ }
+}
+
+const char* getCompareOpName (VkCompareOp value)
+{
+ switch (value)
+ {
+ case VK_COMPARE_OP_NEVER: return "VK_COMPARE_OP_NEVER";
+ case VK_COMPARE_OP_LESS: return "VK_COMPARE_OP_LESS";
+ case VK_COMPARE_OP_EQUAL: return "VK_COMPARE_OP_EQUAL";
+ case VK_COMPARE_OP_LESS_EQUAL: return "VK_COMPARE_OP_LESS_EQUAL";
+ case VK_COMPARE_OP_GREATER: return "VK_COMPARE_OP_GREATER";
+ case VK_COMPARE_OP_NOT_EQUAL: return "VK_COMPARE_OP_NOT_EQUAL";
+ case VK_COMPARE_OP_GREATER_EQUAL: return "VK_COMPARE_OP_GREATER_EQUAL";
+ case VK_COMPARE_OP_ALWAYS: return "VK_COMPARE_OP_ALWAYS";
+ default: return DE_NULL;
+ }
+}
+
+const char* getStencilOpName (VkStencilOp value)
+{
+ switch (value)
+ {
+ case VK_STENCIL_OP_KEEP: return "VK_STENCIL_OP_KEEP";
+ case VK_STENCIL_OP_ZERO: return "VK_STENCIL_OP_ZERO";
+ case VK_STENCIL_OP_REPLACE: return "VK_STENCIL_OP_REPLACE";
+ case VK_STENCIL_OP_INC_CLAMP: return "VK_STENCIL_OP_INC_CLAMP";
+ case VK_STENCIL_OP_DEC_CLAMP: return "VK_STENCIL_OP_DEC_CLAMP";
+ case VK_STENCIL_OP_INVERT: return "VK_STENCIL_OP_INVERT";
+ case VK_STENCIL_OP_INC_WRAP: return "VK_STENCIL_OP_INC_WRAP";
+ case VK_STENCIL_OP_DEC_WRAP: return "VK_STENCIL_OP_DEC_WRAP";
+ default: return DE_NULL;
+ }
+}
+
+const char* getLogicOpName (VkLogicOp value)
+{
+ switch (value)
+ {
+ case VK_LOGIC_OP_CLEAR: return "VK_LOGIC_OP_CLEAR";
+ case VK_LOGIC_OP_AND: return "VK_LOGIC_OP_AND";
+ case VK_LOGIC_OP_AND_REVERSE: return "VK_LOGIC_OP_AND_REVERSE";
+ case VK_LOGIC_OP_COPY: return "VK_LOGIC_OP_COPY";
+ case VK_LOGIC_OP_AND_INVERTED: return "VK_LOGIC_OP_AND_INVERTED";
+ case VK_LOGIC_OP_NOOP: return "VK_LOGIC_OP_NOOP";
+ case VK_LOGIC_OP_XOR: return "VK_LOGIC_OP_XOR";
+ case VK_LOGIC_OP_OR: return "VK_LOGIC_OP_OR";
+ case VK_LOGIC_OP_NOR: return "VK_LOGIC_OP_NOR";
+ case VK_LOGIC_OP_EQUIV: return "VK_LOGIC_OP_EQUIV";
+ case VK_LOGIC_OP_INVERT: return "VK_LOGIC_OP_INVERT";
+ case VK_LOGIC_OP_OR_REVERSE: return "VK_LOGIC_OP_OR_REVERSE";
+ case VK_LOGIC_OP_COPY_INVERTED: return "VK_LOGIC_OP_COPY_INVERTED";
+ case VK_LOGIC_OP_OR_INVERTED: return "VK_LOGIC_OP_OR_INVERTED";
+ case VK_LOGIC_OP_NAND: return "VK_LOGIC_OP_NAND";
+ case VK_LOGIC_OP_SET: return "VK_LOGIC_OP_SET";
+ default: return DE_NULL;
+ }
+}
+
+// Further enum-to-string helpers; same contract as above: returns the literal
+// name of the enum constant, or DE_NULL for values outside the known set.
+const char* getBlendName (VkBlend value)
+{
+ switch (value)
+ {
+ case VK_BLEND_ZERO: return "VK_BLEND_ZERO";
+ case VK_BLEND_ONE: return "VK_BLEND_ONE";
+ case VK_BLEND_SRC_COLOR: return "VK_BLEND_SRC_COLOR";
+ case VK_BLEND_ONE_MINUS_SRC_COLOR: return "VK_BLEND_ONE_MINUS_SRC_COLOR";
+ case VK_BLEND_DEST_COLOR: return "VK_BLEND_DEST_COLOR";
+ case VK_BLEND_ONE_MINUS_DEST_COLOR: return "VK_BLEND_ONE_MINUS_DEST_COLOR";
+ case VK_BLEND_SRC_ALPHA: return "VK_BLEND_SRC_ALPHA";
+ case VK_BLEND_ONE_MINUS_SRC_ALPHA: return "VK_BLEND_ONE_MINUS_SRC_ALPHA";
+ case VK_BLEND_DEST_ALPHA: return "VK_BLEND_DEST_ALPHA";
+ case VK_BLEND_ONE_MINUS_DEST_ALPHA: return "VK_BLEND_ONE_MINUS_DEST_ALPHA";
+ case VK_BLEND_CONSTANT_COLOR: return "VK_BLEND_CONSTANT_COLOR";
+ case VK_BLEND_ONE_MINUS_CONSTANT_COLOR: return "VK_BLEND_ONE_MINUS_CONSTANT_COLOR";
+ case VK_BLEND_CONSTANT_ALPHA: return "VK_BLEND_CONSTANT_ALPHA";
+ case VK_BLEND_ONE_MINUS_CONSTANT_ALPHA: return "VK_BLEND_ONE_MINUS_CONSTANT_ALPHA";
+ case VK_BLEND_SRC_ALPHA_SATURATE: return "VK_BLEND_SRC_ALPHA_SATURATE";
+ case VK_BLEND_SRC1_COLOR: return "VK_BLEND_SRC1_COLOR";
+ case VK_BLEND_ONE_MINUS_SRC1_COLOR: return "VK_BLEND_ONE_MINUS_SRC1_COLOR";
+ case VK_BLEND_SRC1_ALPHA: return "VK_BLEND_SRC1_ALPHA";
+ case VK_BLEND_ONE_MINUS_SRC1_ALPHA: return "VK_BLEND_ONE_MINUS_SRC1_ALPHA";
+ default: return DE_NULL;
+ }
+}
+
+const char* getBlendOpName (VkBlendOp value)
+{
+ switch (value)
+ {
+ case VK_BLEND_OP_ADD: return "VK_BLEND_OP_ADD";
+ case VK_BLEND_OP_SUBTRACT: return "VK_BLEND_OP_SUBTRACT";
+ case VK_BLEND_OP_REVERSE_SUBTRACT: return "VK_BLEND_OP_REVERSE_SUBTRACT";
+ case VK_BLEND_OP_MIN: return "VK_BLEND_OP_MIN";
+ case VK_BLEND_OP_MAX: return "VK_BLEND_OP_MAX";
+ default: return DE_NULL;
+ }
+}
+
+const char* getTexFilterName (VkTexFilter value)
+{
+ switch (value)
+ {
+ case VK_TEX_FILTER_NEAREST: return "VK_TEX_FILTER_NEAREST";
+ case VK_TEX_FILTER_LINEAR: return "VK_TEX_FILTER_LINEAR";
+ default: return DE_NULL;
+ }
+}
+
+const char* getTexMipmapModeName (VkTexMipmapMode value)
+{
+ switch (value)
+ {
+ case VK_TEX_MIPMAP_MODE_BASE: return "VK_TEX_MIPMAP_MODE_BASE";
+ case VK_TEX_MIPMAP_MODE_NEAREST: return "VK_TEX_MIPMAP_MODE_NEAREST";
+ case VK_TEX_MIPMAP_MODE_LINEAR: return "VK_TEX_MIPMAP_MODE_LINEAR";
+ default: return DE_NULL;
+ }
+}
+
+const char* getTexAddressName (VkTexAddress value)
+{
+ switch (value)
+ {
+ case VK_TEX_ADDRESS_WRAP: return "VK_TEX_ADDRESS_WRAP";
+ case VK_TEX_ADDRESS_MIRROR: return "VK_TEX_ADDRESS_MIRROR";
+ case VK_TEX_ADDRESS_CLAMP: return "VK_TEX_ADDRESS_CLAMP";
+ case VK_TEX_ADDRESS_MIRROR_ONCE: return "VK_TEX_ADDRESS_MIRROR_ONCE";
+ case VK_TEX_ADDRESS_CLAMP_BORDER: return "VK_TEX_ADDRESS_CLAMP_BORDER";
+ default: return DE_NULL;
+ }
+}
+
+const char* getBorderColorName (VkBorderColor value)
+{
+ switch (value)
+ {
+ case VK_BORDER_COLOR_FLOAT_TRANSPARENT_BLACK: return "VK_BORDER_COLOR_FLOAT_TRANSPARENT_BLACK";
+ case VK_BORDER_COLOR_INT_TRANSPARENT_BLACK: return "VK_BORDER_COLOR_INT_TRANSPARENT_BLACK";
+ case VK_BORDER_COLOR_FLOAT_OPAQUE_BLACK: return "VK_BORDER_COLOR_FLOAT_OPAQUE_BLACK";
+ case VK_BORDER_COLOR_INT_OPAQUE_BLACK: return "VK_BORDER_COLOR_INT_OPAQUE_BLACK";
+ case VK_BORDER_COLOR_FLOAT_OPAQUE_WHITE: return "VK_BORDER_COLOR_FLOAT_OPAQUE_WHITE";
+ case VK_BORDER_COLOR_INT_OPAQUE_WHITE: return "VK_BORDER_COLOR_INT_OPAQUE_WHITE";
+ default: return DE_NULL;
+ }
+}
+
+const char* getDescriptorTypeName (VkDescriptorType value)
+{
+ switch (value)
+ {
+ case VK_DESCRIPTOR_TYPE_SAMPLER: return "VK_DESCRIPTOR_TYPE_SAMPLER";
+ case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER: return "VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER";
+ case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE: return "VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE";
+ case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE: return "VK_DESCRIPTOR_TYPE_STORAGE_IMAGE";
+ case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER: return "VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER";
+ case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER: return "VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER";
+ case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER: return "VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER";
+ case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER: return "VK_DESCRIPTOR_TYPE_STORAGE_BUFFER";
+ case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC: return "VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC";
+ case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC: return "VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC";
+ case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT: return "VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT";
+ default: return DE_NULL;
+ }
+}
+
+const char* getDescriptorPoolUsageName (VkDescriptorPoolUsage value)
+{
+ switch (value)
+ {
+ case VK_DESCRIPTOR_POOL_USAGE_ONE_SHOT: return "VK_DESCRIPTOR_POOL_USAGE_ONE_SHOT";
+ case VK_DESCRIPTOR_POOL_USAGE_DYNAMIC: return "VK_DESCRIPTOR_POOL_USAGE_DYNAMIC";
+ default: return DE_NULL;
+ }
+}
+
+const char* getDescriptorSetUsageName (VkDescriptorSetUsage value)
+{
+ switch (value)
+ {
+ case VK_DESCRIPTOR_SET_USAGE_ONE_SHOT: return "VK_DESCRIPTOR_SET_USAGE_ONE_SHOT";
+ case VK_DESCRIPTOR_SET_USAGE_STATIC: return "VK_DESCRIPTOR_SET_USAGE_STATIC";
+ default: return DE_NULL;
+ }
+}
+
+const char* getImageLayoutName (VkImageLayout value)
+{
+ switch (value)
+ {
+ case VK_IMAGE_LAYOUT_UNDEFINED: return "VK_IMAGE_LAYOUT_UNDEFINED";
+ case VK_IMAGE_LAYOUT_GENERAL: return "VK_IMAGE_LAYOUT_GENERAL";
+ case VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL: return "VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL";
+ case VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL: return "VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL";
+ case VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL: return "VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL";
+ case VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL: return "VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL";
+ case VK_IMAGE_LAYOUT_TRANSFER_SOURCE_OPTIMAL: return "VK_IMAGE_LAYOUT_TRANSFER_SOURCE_OPTIMAL";
+ case VK_IMAGE_LAYOUT_TRANSFER_DESTINATION_OPTIMAL: return "VK_IMAGE_LAYOUT_TRANSFER_DESTINATION_OPTIMAL";
+ default: return DE_NULL;
+ }
+}
+
+const char* getAttachmentLoadOpName (VkAttachmentLoadOp value)
+{
+ switch (value)
+ {
+ case VK_ATTACHMENT_LOAD_OP_LOAD: return "VK_ATTACHMENT_LOAD_OP_LOAD";
+ case VK_ATTACHMENT_LOAD_OP_CLEAR: return "VK_ATTACHMENT_LOAD_OP_CLEAR";
+ case VK_ATTACHMENT_LOAD_OP_DONT_CARE: return "VK_ATTACHMENT_LOAD_OP_DONT_CARE";
+ default: return DE_NULL;
+ }
+}
+
+const char* getAttachmentStoreOpName (VkAttachmentStoreOp value)
+{
+ switch (value)
+ {
+ case VK_ATTACHMENT_STORE_OP_STORE: return "VK_ATTACHMENT_STORE_OP_STORE";
+ case VK_ATTACHMENT_STORE_OP_DONT_CARE: return "VK_ATTACHMENT_STORE_OP_DONT_CARE";
+ default: return DE_NULL;
+ }
+}
+
+const char* getPipelineBindPointName (VkPipelineBindPoint value)
+{
+ switch (value)
+ {
+ case VK_PIPELINE_BIND_POINT_COMPUTE: return "VK_PIPELINE_BIND_POINT_COMPUTE";
+ case VK_PIPELINE_BIND_POINT_GRAPHICS: return "VK_PIPELINE_BIND_POINT_GRAPHICS";
+ default: return DE_NULL;
+ }
+}
+
+const char* getCmdBufferLevelName (VkCmdBufferLevel value)
+{
+ switch (value)
+ {
+ case VK_CMD_BUFFER_LEVEL_PRIMARY: return "VK_CMD_BUFFER_LEVEL_PRIMARY";
+ case VK_CMD_BUFFER_LEVEL_SECONDARY: return "VK_CMD_BUFFER_LEVEL_SECONDARY";
+ default: return DE_NULL;
+ }
+}
+
+const char* getIndexTypeName (VkIndexType value)
+{
+ switch (value)
+ {
+ case VK_INDEX_TYPE_UINT16: return "VK_INDEX_TYPE_UINT16";
+ case VK_INDEX_TYPE_UINT32: return "VK_INDEX_TYPE_UINT32";
+ default: return DE_NULL;
+ }
+}
+
+const char* getTimestampTypeName (VkTimestampType value)
+{
+ switch (value)
+ {
+ case VK_TIMESTAMP_TYPE_TOP: return "VK_TIMESTAMP_TYPE_TOP";
+ case VK_TIMESTAMP_TYPE_BOTTOM: return "VK_TIMESTAMP_TYPE_BOTTOM";
+ default: return DE_NULL;
+ }
+}
+
+const char* getRenderPassContentsName (VkRenderPassContents value)
+{
+ switch (value)
+ {
+ case VK_RENDER_PASS_CONTENTS_INLINE: return "VK_RENDER_PASS_CONTENTS_INLINE";
+ case VK_RENDER_PASS_CONTENTS_SECONDARY_CMD_BUFFERS: return "VK_RENDER_PASS_CONTENTS_SECONDARY_CMD_BUFFERS";
+ default: return DE_NULL;
+ }
+}
+
+// --- Bitfield decoders ------------------------------------------------------
+// Each helper pairs a raw flags value with a static table of (bit, name)
+// descriptors and returns a tcu::Format::Bitfield<32> for pretty-printing the
+// set bits by name. The descriptor tables are function-local statics, so they
+// are built once and shared across calls.
+tcu::Format::Bitfield<32> getFormatFeatureFlagsStr (VkFormatFeatureFlags value)
+{
+ static const tcu::Format::BitDesc s_desc[] =
+ {
+ tcu::Format::BitDesc(VK_FORMAT_FEATURE_SAMPLED_IMAGE_BIT, "VK_FORMAT_FEATURE_SAMPLED_IMAGE_BIT"),
+ tcu::Format::BitDesc(VK_FORMAT_FEATURE_STORAGE_IMAGE_BIT, "VK_FORMAT_FEATURE_STORAGE_IMAGE_BIT"),
+ tcu::Format::BitDesc(VK_FORMAT_FEATURE_STORAGE_IMAGE_ATOMIC_BIT, "VK_FORMAT_FEATURE_STORAGE_IMAGE_ATOMIC_BIT"),
+ tcu::Format::BitDesc(VK_FORMAT_FEATURE_UNIFORM_TEXEL_BUFFER_BIT, "VK_FORMAT_FEATURE_UNIFORM_TEXEL_BUFFER_BIT"),
+ tcu::Format::BitDesc(VK_FORMAT_FEATURE_STORAGE_TEXEL_BUFFER_BIT, "VK_FORMAT_FEATURE_STORAGE_TEXEL_BUFFER_BIT"),
+ tcu::Format::BitDesc(VK_FORMAT_FEATURE_STORAGE_TEXEL_BUFFER_ATOMIC_BIT, "VK_FORMAT_FEATURE_STORAGE_TEXEL_BUFFER_ATOMIC_BIT"),
+ tcu::Format::BitDesc(VK_FORMAT_FEATURE_VERTEX_BUFFER_BIT, "VK_FORMAT_FEATURE_VERTEX_BUFFER_BIT"),
+ tcu::Format::BitDesc(VK_FORMAT_FEATURE_COLOR_ATTACHMENT_BIT, "VK_FORMAT_FEATURE_COLOR_ATTACHMENT_BIT"),
+ tcu::Format::BitDesc(VK_FORMAT_FEATURE_COLOR_ATTACHMENT_BLEND_BIT, "VK_FORMAT_FEATURE_COLOR_ATTACHMENT_BLEND_BIT"),
+ tcu::Format::BitDesc(VK_FORMAT_FEATURE_DEPTH_STENCIL_ATTACHMENT_BIT, "VK_FORMAT_FEATURE_DEPTH_STENCIL_ATTACHMENT_BIT"),
+ tcu::Format::BitDesc(VK_FORMAT_FEATURE_CONVERSION_BIT, "VK_FORMAT_FEATURE_CONVERSION_BIT"),
+ };
+ return tcu::Format::Bitfield<32>(value, DE_ARRAY_BEGIN(s_desc), DE_ARRAY_END(s_desc));
+}
+
+tcu::Format::Bitfield<32> getImageUsageFlagsStr (VkImageUsageFlags value)
+{
+ static const tcu::Format::BitDesc s_desc[] =
+ {
+ tcu::Format::BitDesc(VK_IMAGE_USAGE_GENERAL, "VK_IMAGE_USAGE_GENERAL"),
+ tcu::Format::BitDesc(VK_IMAGE_USAGE_TRANSFER_SOURCE_BIT, "VK_IMAGE_USAGE_TRANSFER_SOURCE_BIT"),
+ tcu::Format::BitDesc(VK_IMAGE_USAGE_TRANSFER_DESTINATION_BIT, "VK_IMAGE_USAGE_TRANSFER_DESTINATION_BIT"),
+ tcu::Format::BitDesc(VK_IMAGE_USAGE_SAMPLED_BIT, "VK_IMAGE_USAGE_SAMPLED_BIT"),
+ tcu::Format::BitDesc(VK_IMAGE_USAGE_STORAGE_BIT, "VK_IMAGE_USAGE_STORAGE_BIT"),
+ tcu::Format::BitDesc(VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT, "VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT"),
+ tcu::Format::BitDesc(VK_IMAGE_USAGE_DEPTH_STENCIL_BIT, "VK_IMAGE_USAGE_DEPTH_STENCIL_BIT"),
+ tcu::Format::BitDesc(VK_IMAGE_USAGE_TRANSIENT_ATTACHMENT_BIT, "VK_IMAGE_USAGE_TRANSIENT_ATTACHMENT_BIT"),
+ tcu::Format::BitDesc(VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT, "VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT"),
+ };
+ return tcu::Format::Bitfield<32>(value, DE_ARRAY_BEGIN(s_desc), DE_ARRAY_END(s_desc));
+}
+
+tcu::Format::Bitfield<32> getQueueFlagsStr (VkQueueFlags value)
+{
+ static const tcu::Format::BitDesc s_desc[] =
+ {
+ tcu::Format::BitDesc(VK_QUEUE_GRAPHICS_BIT, "VK_QUEUE_GRAPHICS_BIT"),
+ tcu::Format::BitDesc(VK_QUEUE_COMPUTE_BIT, "VK_QUEUE_COMPUTE_BIT"),
+ tcu::Format::BitDesc(VK_QUEUE_DMA_BIT, "VK_QUEUE_DMA_BIT"),
+ tcu::Format::BitDesc(VK_QUEUE_SPARSE_MEMMGR_BIT, "VK_QUEUE_SPARSE_MEMMGR_BIT"),
+ tcu::Format::BitDesc(VK_QUEUE_EXTENDED_BIT, "VK_QUEUE_EXTENDED_BIT"),
+ };
+ return tcu::Format::Bitfield<32>(value, DE_ARRAY_BEGIN(s_desc), DE_ARRAY_END(s_desc));
+}
+
+tcu::Format::Bitfield<32> getMemoryPropertyFlagsStr (VkMemoryPropertyFlags value)
+{
+ static const tcu::Format::BitDesc s_desc[] =
+ {
+ tcu::Format::BitDesc(VK_MEMORY_PROPERTY_DEVICE_ONLY, "VK_MEMORY_PROPERTY_DEVICE_ONLY"),
+ tcu::Format::BitDesc(VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT, "VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT"),
+ tcu::Format::BitDesc(VK_MEMORY_PROPERTY_HOST_NON_COHERENT_BIT, "VK_MEMORY_PROPERTY_HOST_NON_COHERENT_BIT"),
+ tcu::Format::BitDesc(VK_MEMORY_PROPERTY_HOST_UNCACHED_BIT, "VK_MEMORY_PROPERTY_HOST_UNCACHED_BIT"),
+ tcu::Format::BitDesc(VK_MEMORY_PROPERTY_HOST_WRITE_COMBINED_BIT, "VK_MEMORY_PROPERTY_HOST_WRITE_COMBINED_BIT"),
+ tcu::Format::BitDesc(VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT, "VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT"),
+ };
+ return tcu::Format::Bitfield<32>(value, DE_ARRAY_BEGIN(s_desc), DE_ARRAY_END(s_desc));
+}
+
+tcu::Format::Bitfield<32> getMemoryHeapFlagsStr (VkMemoryHeapFlags value)
+{
+ static const tcu::Format::BitDesc s_desc[] =
+ {
+ tcu::Format::BitDesc(VK_MEMORY_HEAP_HOST_LOCAL, "VK_MEMORY_HEAP_HOST_LOCAL"),
+ };
+ return tcu::Format::Bitfield<32>(value, DE_ARRAY_BEGIN(s_desc), DE_ARRAY_END(s_desc));
+}
+
+tcu::Format::Bitfield<32> getDeviceCreateFlagsStr (VkDeviceCreateFlags value)
+{
+ static const tcu::Format::BitDesc s_desc[] =
+ {
+ tcu::Format::BitDesc(VK_DEVICE_CREATE_VALIDATION_BIT, "VK_DEVICE_CREATE_VALIDATION_BIT"),
+ };
+ return tcu::Format::Bitfield<32>(value, DE_ARRAY_BEGIN(s_desc), DE_ARRAY_END(s_desc));
+}
+
+tcu::Format::Bitfield<32> getSparseImageFormatFlagsStr (VkSparseImageFormatFlags value)
+{
+ static const tcu::Format::BitDesc s_desc[] =
+ {
+ tcu::Format::BitDesc(VK_SPARSE_IMAGE_FMT_SINGLE_MIPTAIL_BIT, "VK_SPARSE_IMAGE_FMT_SINGLE_MIPTAIL_BIT"),
+ tcu::Format::BitDesc(VK_SPARSE_IMAGE_FMT_ALIGNED_MIP_SIZE_BIT, "VK_SPARSE_IMAGE_FMT_ALIGNED_MIP_SIZE_BIT"),
+ tcu::Format::BitDesc(VK_SPARSE_IMAGE_FMT_NONSTD_BLOCK_SIZE_BIT, "VK_SPARSE_IMAGE_FMT_NONSTD_BLOCK_SIZE_BIT"),
+ };
+ return tcu::Format::Bitfield<32>(value, DE_ARRAY_BEGIN(s_desc), DE_ARRAY_END(s_desc));
+}
+
+tcu::Format::Bitfield<32> getSparseMemoryBindFlagsStr (VkSparseMemoryBindFlags value)
+{
+ static const tcu::Format::BitDesc s_desc[] =
+ {
+ tcu::Format::BitDesc(VK_SPARSE_MEMORY_BIND_REPLICATE_64KIB_BLOCK_BIT, "VK_SPARSE_MEMORY_BIND_REPLICATE_64KIB_BLOCK_BIT"),
+ };
+ return tcu::Format::Bitfield<32>(value, DE_ARRAY_BEGIN(s_desc), DE_ARRAY_END(s_desc));
+}
+
+tcu::Format::Bitfield<32> getFenceCreateFlagsStr (VkFenceCreateFlags value)
+{
+ static const tcu::Format::BitDesc s_desc[] =
+ {
+ tcu::Format::BitDesc(VK_FENCE_CREATE_SIGNALED_BIT, "VK_FENCE_CREATE_SIGNALED_BIT"),
+ };
+ return tcu::Format::Bitfield<32>(value, DE_ARRAY_BEGIN(s_desc), DE_ARRAY_END(s_desc));
+}
+
+tcu::Format::Bitfield<32> getQueryPipelineStatisticFlagsStr (VkQueryPipelineStatisticFlags value)
+{
+ static const tcu::Format::BitDesc s_desc[] =
+ {
+ tcu::Format::BitDesc(VK_QUERY_PIPELINE_STATISTIC_IA_VERTICES_BIT, "VK_QUERY_PIPELINE_STATISTIC_IA_VERTICES_BIT"),
+ tcu::Format::BitDesc(VK_QUERY_PIPELINE_STATISTIC_IA_PRIMITIVES_BIT, "VK_QUERY_PIPELINE_STATISTIC_IA_PRIMITIVES_BIT"),
+ tcu::Format::BitDesc(VK_QUERY_PIPELINE_STATISTIC_VS_INVOCATIONS_BIT, "VK_QUERY_PIPELINE_STATISTIC_VS_INVOCATIONS_BIT"),
+ tcu::Format::BitDesc(VK_QUERY_PIPELINE_STATISTIC_GS_INVOCATIONS_BIT, "VK_QUERY_PIPELINE_STATISTIC_GS_INVOCATIONS_BIT"),
+ tcu::Format::BitDesc(VK_QUERY_PIPELINE_STATISTIC_GS_PRIMITIVES_BIT, "VK_QUERY_PIPELINE_STATISTIC_GS_PRIMITIVES_BIT"),
+ tcu::Format::BitDesc(VK_QUERY_PIPELINE_STATISTIC_C_INVOCATIONS_BIT, "VK_QUERY_PIPELINE_STATISTIC_C_INVOCATIONS_BIT"),
+ tcu::Format::BitDesc(VK_QUERY_PIPELINE_STATISTIC_C_PRIMITIVES_BIT, "VK_QUERY_PIPELINE_STATISTIC_C_PRIMITIVES_BIT"),
+ tcu::Format::BitDesc(VK_QUERY_PIPELINE_STATISTIC_FS_INVOCATIONS_BIT, "VK_QUERY_PIPELINE_STATISTIC_FS_INVOCATIONS_BIT"),
+ tcu::Format::BitDesc(VK_QUERY_PIPELINE_STATISTIC_TCS_PATCHES_BIT, "VK_QUERY_PIPELINE_STATISTIC_TCS_PATCHES_BIT"),
+ tcu::Format::BitDesc(VK_QUERY_PIPELINE_STATISTIC_TES_INVOCATIONS_BIT, "VK_QUERY_PIPELINE_STATISTIC_TES_INVOCATIONS_BIT"),
+ tcu::Format::BitDesc(VK_QUERY_PIPELINE_STATISTIC_CS_INVOCATIONS_BIT, "VK_QUERY_PIPELINE_STATISTIC_CS_INVOCATIONS_BIT"),
+ };
+ return tcu::Format::Bitfield<32>(value, DE_ARRAY_BEGIN(s_desc), DE_ARRAY_END(s_desc));
+}
+
+tcu::Format::Bitfield<32> getQueryResultFlagsStr (VkQueryResultFlags value)
+{
+ static const tcu::Format::BitDesc s_desc[] =
+ {
+ tcu::Format::BitDesc(VK_QUERY_RESULT_DEFAULT, "VK_QUERY_RESULT_DEFAULT"),
+ tcu::Format::BitDesc(VK_QUERY_RESULT_64_BIT, "VK_QUERY_RESULT_64_BIT"),
+ tcu::Format::BitDesc(VK_QUERY_RESULT_WAIT_BIT, "VK_QUERY_RESULT_WAIT_BIT"),
+ tcu::Format::BitDesc(VK_QUERY_RESULT_WITH_AVAILABILITY_BIT, "VK_QUERY_RESULT_WITH_AVAILABILITY_BIT"),
+ tcu::Format::BitDesc(VK_QUERY_RESULT_PARTIAL_BIT, "VK_QUERY_RESULT_PARTIAL_BIT"),
+ };
+ return tcu::Format::Bitfield<32>(value, DE_ARRAY_BEGIN(s_desc), DE_ARRAY_END(s_desc));
+}
+
+tcu::Format::Bitfield<32> getBufferUsageFlagsStr (VkBufferUsageFlags value)
+{
+ static const tcu::Format::BitDesc s_desc[] =
+ {
+ tcu::Format::BitDesc(VK_BUFFER_USAGE_GENERAL, "VK_BUFFER_USAGE_GENERAL"),
+ tcu::Format::BitDesc(VK_BUFFER_USAGE_TRANSFER_SOURCE_BIT, "VK_BUFFER_USAGE_TRANSFER_SOURCE_BIT"),
+ tcu::Format::BitDesc(VK_BUFFER_USAGE_TRANSFER_DESTINATION_BIT, "VK_BUFFER_USAGE_TRANSFER_DESTINATION_BIT"),
+ tcu::Format::BitDesc(VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT, "VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT"),
+ tcu::Format::BitDesc(VK_BUFFER_USAGE_STORAGE_TEXEL_BUFFER_BIT, "VK_BUFFER_USAGE_STORAGE_TEXEL_BUFFER_BIT"),
+ tcu::Format::BitDesc(VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT, "VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT"),
+ tcu::Format::BitDesc(VK_BUFFER_USAGE_STORAGE_BUFFER_BIT, "VK_BUFFER_USAGE_STORAGE_BUFFER_BIT"),
+ tcu::Format::BitDesc(VK_BUFFER_USAGE_INDEX_BUFFER_BIT, "VK_BUFFER_USAGE_INDEX_BUFFER_BIT"),
+ tcu::Format::BitDesc(VK_BUFFER_USAGE_VERTEX_BUFFER_BIT, "VK_BUFFER_USAGE_VERTEX_BUFFER_BIT"),
+ tcu::Format::BitDesc(VK_BUFFER_USAGE_INDIRECT_BUFFER_BIT, "VK_BUFFER_USAGE_INDIRECT_BUFFER_BIT"),
+ };
+ return tcu::Format::Bitfield<32>(value, DE_ARRAY_BEGIN(s_desc), DE_ARRAY_END(s_desc));
+}
+
+tcu::Format::Bitfield<32> getBufferCreateFlagsStr (VkBufferCreateFlags value)
+{
+ static const tcu::Format::BitDesc s_desc[] =
+ {
+ tcu::Format::BitDesc(VK_BUFFER_CREATE_SPARSE_BIT, "VK_BUFFER_CREATE_SPARSE_BIT"),
+ tcu::Format::BitDesc(VK_BUFFER_CREATE_SPARSE_RESIDENCY_BIT, "VK_BUFFER_CREATE_SPARSE_RESIDENCY_BIT"),
+ tcu::Format::BitDesc(VK_BUFFER_CREATE_SPARSE_ALIASED_BIT, "VK_BUFFER_CREATE_SPARSE_ALIASED_BIT"),
+ };
+ return tcu::Format::Bitfield<32>(value, DE_ARRAY_BEGIN(s_desc), DE_ARRAY_END(s_desc));
+}
+
+tcu::Format::Bitfield<32> getImageCreateFlagsStr (VkImageCreateFlags value)
+{
+ static const tcu::Format::BitDesc s_desc[] =
+ {
+ tcu::Format::BitDesc(VK_IMAGE_CREATE_SPARSE_BIT, "VK_IMAGE_CREATE_SPARSE_BIT"),
+ tcu::Format::BitDesc(VK_IMAGE_CREATE_SPARSE_RESIDENCY_BIT, "VK_IMAGE_CREATE_SPARSE_RESIDENCY_BIT"),
+ tcu::Format::BitDesc(VK_IMAGE_CREATE_SPARSE_ALIASED_BIT, "VK_IMAGE_CREATE_SPARSE_ALIASED_BIT"),
+ tcu::Format::BitDesc(VK_IMAGE_CREATE_INVARIANT_DATA_BIT, "VK_IMAGE_CREATE_INVARIANT_DATA_BIT"),
+ tcu::Format::BitDesc(VK_IMAGE_CREATE_MUTABLE_FORMAT_BIT, "VK_IMAGE_CREATE_MUTABLE_FORMAT_BIT"),
+ tcu::Format::BitDesc(VK_IMAGE_CREATE_CUBE_COMPATIBLE_BIT, "VK_IMAGE_CREATE_CUBE_COMPATIBLE_BIT"),
+ };
+ return tcu::Format::Bitfield<32>(value, DE_ARRAY_BEGIN(s_desc), DE_ARRAY_END(s_desc));
+}
+
+tcu::Format::Bitfield<32> getAttachmentViewCreateFlagsStr (VkAttachmentViewCreateFlags value)
+{
+ static const tcu::Format::BitDesc s_desc[] =
+ {
+ tcu::Format::BitDesc(VK_ATTACHMENT_VIEW_CREATE_READ_ONLY_DEPTH_BIT, "VK_ATTACHMENT_VIEW_CREATE_READ_ONLY_DEPTH_BIT"),
+ tcu::Format::BitDesc(VK_ATTACHMENT_VIEW_CREATE_READ_ONLY_STENCIL_BIT, "VK_ATTACHMENT_VIEW_CREATE_READ_ONLY_STENCIL_BIT"),
+ };
+ return tcu::Format::Bitfield<32>(value, DE_ARRAY_BEGIN(s_desc), DE_ARRAY_END(s_desc));
+}
+
+tcu::Format::Bitfield<32> getChannelFlagsStr (VkChannelFlags value)
+{
+ static const tcu::Format::BitDesc s_desc[] =
+ {
+ tcu::Format::BitDesc(VK_CHANNEL_R_BIT, "VK_CHANNEL_R_BIT"),
+ tcu::Format::BitDesc(VK_CHANNEL_G_BIT, "VK_CHANNEL_G_BIT"),
+ tcu::Format::BitDesc(VK_CHANNEL_B_BIT, "VK_CHANNEL_B_BIT"),
+ tcu::Format::BitDesc(VK_CHANNEL_A_BIT, "VK_CHANNEL_A_BIT"),
+ };
+ return tcu::Format::Bitfield<32>(value, DE_ARRAY_BEGIN(s_desc), DE_ARRAY_END(s_desc));
+}
+
+tcu::Format::Bitfield<32> getPipelineCreateFlagsStr (VkPipelineCreateFlags value)
+{
+ static const tcu::Format::BitDesc s_desc[] =
+ {
+ tcu::Format::BitDesc(VK_PIPELINE_CREATE_DISABLE_OPTIMIZATION_BIT, "VK_PIPELINE_CREATE_DISABLE_OPTIMIZATION_BIT"),
+ tcu::Format::BitDesc(VK_PIPELINE_CREATE_ALLOW_DERIVATIVES_BIT, "VK_PIPELINE_CREATE_ALLOW_DERIVATIVES_BIT"),
+ tcu::Format::BitDesc(VK_PIPELINE_CREATE_DERIVATIVE_BIT, "VK_PIPELINE_CREATE_DERIVATIVE_BIT"),
+ };
+ return tcu::Format::Bitfield<32>(value, DE_ARRAY_BEGIN(s_desc), DE_ARRAY_END(s_desc));
+}
+
+tcu::Format::Bitfield<32> getShaderStageFlagsStr (VkShaderStageFlags value)
+{
+ static const tcu::Format::BitDesc s_desc[] =
+ {
+ tcu::Format::BitDesc(VK_SHADER_STAGE_VERTEX_BIT, "VK_SHADER_STAGE_VERTEX_BIT"),
+ tcu::Format::BitDesc(VK_SHADER_STAGE_TESS_CONTROL_BIT, "VK_SHADER_STAGE_TESS_CONTROL_BIT"),
+ tcu::Format::BitDesc(VK_SHADER_STAGE_TESS_EVALUATION_BIT, "VK_SHADER_STAGE_TESS_EVALUATION_BIT"),
+ tcu::Format::BitDesc(VK_SHADER_STAGE_GEOMETRY_BIT, "VK_SHADER_STAGE_GEOMETRY_BIT"),
+ tcu::Format::BitDesc(VK_SHADER_STAGE_FRAGMENT_BIT, "VK_SHADER_STAGE_FRAGMENT_BIT"),
+ tcu::Format::BitDesc(VK_SHADER_STAGE_COMPUTE_BIT, "VK_SHADER_STAGE_COMPUTE_BIT"),
+ tcu::Format::BitDesc(VK_SHADER_STAGE_ALL, "VK_SHADER_STAGE_ALL"),
+ };
+ return tcu::Format::Bitfield<32>(value, DE_ARRAY_BEGIN(s_desc), DE_ARRAY_END(s_desc));
+}
+
+tcu::Format::Bitfield<32> getSubpassDescriptionFlagsStr (VkSubpassDescriptionFlags value)
+{
+ static const tcu::Format::BitDesc s_desc[] =
+ {
+ tcu::Format::BitDesc(VK_SUBPASS_DESCRIPTION_NO_OVERDRAW_BIT, "VK_SUBPASS_DESCRIPTION_NO_OVERDRAW_BIT"),
+ };
+ return tcu::Format::Bitfield<32>(value, DE_ARRAY_BEGIN(s_desc), DE_ARRAY_END(s_desc));
+}
+
+tcu::Format::Bitfield<32> getPipelineStageFlagsStr (VkPipelineStageFlags value)
+{
+ static const tcu::Format::BitDesc s_desc[] =
+ {
+ tcu::Format::BitDesc(VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, "VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT"),
+ tcu::Format::BitDesc(VK_PIPELINE_STAGE_DRAW_INDIRECT_BIT, "VK_PIPELINE_STAGE_DRAW_INDIRECT_BIT"),
+ tcu::Format::BitDesc(VK_PIPELINE_STAGE_VERTEX_INPUT_BIT, "VK_PIPELINE_STAGE_VERTEX_INPUT_BIT"),
+ tcu::Format::BitDesc(VK_PIPELINE_STAGE_VERTEX_SHADER_BIT, "VK_PIPELINE_STAGE_VERTEX_SHADER_BIT"),
+ tcu::Format::BitDesc(VK_PIPELINE_STAGE_TESS_CONTROL_SHADER_BIT, "VK_PIPELINE_STAGE_TESS_CONTROL_SHADER_BIT"),
+ tcu::Format::BitDesc(VK_PIPELINE_STAGE_TESS_EVALUATION_SHADER_BIT, "VK_PIPELINE_STAGE_TESS_EVALUATION_SHADER_BIT"),
+ tcu::Format::BitDesc(VK_PIPELINE_STAGE_GEOMETRY_SHADER_BIT, "VK_PIPELINE_STAGE_GEOMETRY_SHADER_BIT"),
+ tcu::Format::BitDesc(VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, "VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT"),
+ tcu::Format::BitDesc(VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT, "VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT"),
+ tcu::Format::BitDesc(VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT, "VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT"),
+ tcu::Format::BitDesc(VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, "VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT"),
+ tcu::Format::BitDesc(VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, "VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT"),
+ tcu::Format::BitDesc(VK_PIPELINE_STAGE_TRANSFER_BIT, "VK_PIPELINE_STAGE_TRANSFER_BIT"),
+ tcu::Format::BitDesc(VK_PIPELINE_STAGE_TRANSITION_BIT, "VK_PIPELINE_STAGE_TRANSITION_BIT"),
+ tcu::Format::BitDesc(VK_PIPELINE_STAGE_HOST_BIT, "VK_PIPELINE_STAGE_HOST_BIT"),
+ tcu::Format::BitDesc(VK_PIPELINE_STAGE_ALL_GRAPHICS, "VK_PIPELINE_STAGE_ALL_GRAPHICS"),
+ tcu::Format::BitDesc(VK_PIPELINE_STAGE_ALL_GPU_COMMANDS, "VK_PIPELINE_STAGE_ALL_GPU_COMMANDS"),
+ };
+ return tcu::Format::Bitfield<32>(value, DE_ARRAY_BEGIN(s_desc), DE_ARRAY_END(s_desc));
+}
+
+tcu::Format::Bitfield<32> getMemoryOutputFlagsStr (VkMemoryOutputFlags value)
+{
+ static const tcu::Format::BitDesc s_desc[] =
+ {
+ tcu::Format::BitDesc(VK_MEMORY_OUTPUT_HOST_WRITE_BIT, "VK_MEMORY_OUTPUT_HOST_WRITE_BIT"),
+ tcu::Format::BitDesc(VK_MEMORY_OUTPUT_SHADER_WRITE_BIT, "VK_MEMORY_OUTPUT_SHADER_WRITE_BIT"),
+ tcu::Format::BitDesc(VK_MEMORY_OUTPUT_COLOR_ATTACHMENT_BIT, "VK_MEMORY_OUTPUT_COLOR_ATTACHMENT_BIT"),
+ tcu::Format::BitDesc(VK_MEMORY_OUTPUT_DEPTH_STENCIL_ATTACHMENT_BIT, "VK_MEMORY_OUTPUT_DEPTH_STENCIL_ATTACHMENT_BIT"),
+ tcu::Format::BitDesc(VK_MEMORY_OUTPUT_TRANSFER_BIT, "VK_MEMORY_OUTPUT_TRANSFER_BIT"),
+ };
+ return tcu::Format::Bitfield<32>(value, DE_ARRAY_BEGIN(s_desc), DE_ARRAY_END(s_desc));
+}
+
+tcu::Format::Bitfield<32> getMemoryInputFlagsStr (VkMemoryInputFlags value)
+{
+ static const tcu::Format::BitDesc s_desc[] =
+ {
+ tcu::Format::BitDesc(VK_MEMORY_INPUT_HOST_READ_BIT, "VK_MEMORY_INPUT_HOST_READ_BIT"),
+ tcu::Format::BitDesc(VK_MEMORY_INPUT_INDIRECT_COMMAND_BIT, "VK_MEMORY_INPUT_INDIRECT_COMMAND_BIT"),
+ tcu::Format::BitDesc(VK_MEMORY_INPUT_INDEX_FETCH_BIT, "VK_MEMORY_INPUT_INDEX_FETCH_BIT"),
+ tcu::Format::BitDesc(VK_MEMORY_INPUT_VERTEX_ATTRIBUTE_FETCH_BIT, "VK_MEMORY_INPUT_VERTEX_ATTRIBUTE_FETCH_BIT"),
+ tcu::Format::BitDesc(VK_MEMORY_INPUT_UNIFORM_READ_BIT, "VK_MEMORY_INPUT_UNIFORM_READ_BIT"),
+ tcu::Format::BitDesc(VK_MEMORY_INPUT_SHADER_READ_BIT, "VK_MEMORY_INPUT_SHADER_READ_BIT"),
+ tcu::Format::BitDesc(VK_MEMORY_INPUT_COLOR_ATTACHMENT_BIT, "VK_MEMORY_INPUT_COLOR_ATTACHMENT_BIT"),
+ tcu::Format::BitDesc(VK_MEMORY_INPUT_DEPTH_STENCIL_ATTACHMENT_BIT, "VK_MEMORY_INPUT_DEPTH_STENCIL_ATTACHMENT_BIT"),
+ tcu::Format::BitDesc(VK_MEMORY_INPUT_INPUT_ATTACHMENT_BIT, "VK_MEMORY_INPUT_INPUT_ATTACHMENT_BIT"),
+ tcu::Format::BitDesc(VK_MEMORY_INPUT_TRANSFER_BIT, "VK_MEMORY_INPUT_TRANSFER_BIT"),
+ };
+ return tcu::Format::Bitfield<32>(value, DE_ARRAY_BEGIN(s_desc), DE_ARRAY_END(s_desc));
+}
+
+tcu::Format::Bitfield<32> getCmdPoolCreateFlagsStr (VkCmdPoolCreateFlags value)
+{
+ static const tcu::Format::BitDesc s_desc[] =
+ {
+ tcu::Format::BitDesc(VK_CMD_POOL_CREATE_TRANSIENT_BIT, "VK_CMD_POOL_CREATE_TRANSIENT_BIT"),
+ tcu::Format::BitDesc(VK_CMD_POOL_CREATE_RESET_COMMAND_BUFFER_BIT, "VK_CMD_POOL_CREATE_RESET_COMMAND_BUFFER_BIT"),
+ };
+ return tcu::Format::Bitfield<32>(value, DE_ARRAY_BEGIN(s_desc), DE_ARRAY_END(s_desc));
+}
+
+tcu::Format::Bitfield<32> getCmdPoolResetFlagsStr (VkCmdPoolResetFlags value)
+{
+ static const tcu::Format::BitDesc s_desc[] =
+ {
+ tcu::Format::BitDesc(VK_CMD_POOL_RESET_RELEASE_RESOURCES, "VK_CMD_POOL_RESET_RELEASE_RESOURCES"),
+ };
+ return tcu::Format::Bitfield<32>(value, DE_ARRAY_BEGIN(s_desc), DE_ARRAY_END(s_desc));
+}
+
+tcu::Format::Bitfield<32> getCmdBufferOptimizeFlagsStr (VkCmdBufferOptimizeFlags value)
+{
+ static const tcu::Format::BitDesc s_desc[] =
+ {
+ tcu::Format::BitDesc(VK_CMD_BUFFER_OPTIMIZE_SMALL_BATCH_BIT, "VK_CMD_BUFFER_OPTIMIZE_SMALL_BATCH_BIT"),
+ tcu::Format::BitDesc(VK_CMD_BUFFER_OPTIMIZE_PIPELINE_SWITCH_BIT, "VK_CMD_BUFFER_OPTIMIZE_PIPELINE_SWITCH_BIT"),
+ tcu::Format::BitDesc(VK_CMD_BUFFER_OPTIMIZE_ONE_TIME_SUBMIT_BIT, "VK_CMD_BUFFER_OPTIMIZE_ONE_TIME_SUBMIT_BIT"),
+ tcu::Format::BitDesc(VK_CMD_BUFFER_OPTIMIZE_DESCRIPTOR_SET_SWITCH_BIT, "VK_CMD_BUFFER_OPTIMIZE_DESCRIPTOR_SET_SWITCH_BIT"),
+ tcu::Format::BitDesc(VK_CMD_BUFFER_OPTIMIZE_NO_SIMULTANEOUS_USE_BIT, "VK_CMD_BUFFER_OPTIMIZE_NO_SIMULTANEOUS_USE_BIT"),
+ };
+ return tcu::Format::Bitfield<32>(value, DE_ARRAY_BEGIN(s_desc), DE_ARRAY_END(s_desc));
+}
+
+tcu::Format::Bitfield<32> getCmdBufferResetFlagsStr (VkCmdBufferResetFlags value)
+{
+ static const tcu::Format::BitDesc s_desc[] =
+ {
+ tcu::Format::BitDesc(VK_CMD_BUFFER_RESET_RELEASE_RESOURCES, "VK_CMD_BUFFER_RESET_RELEASE_RESOURCES"),
+ };
+ return tcu::Format::Bitfield<32>(value, DE_ARRAY_BEGIN(s_desc), DE_ARRAY_END(s_desc));
+}
+
+tcu::Format::Bitfield<32> getImageAspectFlagsStr (VkImageAspectFlags value)
+{
+ static const tcu::Format::BitDesc s_desc[] =
+ {
+ tcu::Format::BitDesc(VK_IMAGE_ASPECT_COLOR_BIT, "VK_IMAGE_ASPECT_COLOR_BIT"),
+ tcu::Format::BitDesc(VK_IMAGE_ASPECT_DEPTH_BIT, "VK_IMAGE_ASPECT_DEPTH_BIT"),
+ tcu::Format::BitDesc(VK_IMAGE_ASPECT_STENCIL_BIT, "VK_IMAGE_ASPECT_STENCIL_BIT"),
+ tcu::Format::BitDesc(VK_IMAGE_ASPECT_METADATA_BIT, "VK_IMAGE_ASPECT_METADATA_BIT"),
+ };
+ return tcu::Format::Bitfield<32>(value, DE_ARRAY_BEGIN(s_desc), DE_ARRAY_END(s_desc));
+}
+
+tcu::Format::Bitfield<32> getQueryControlFlagsStr (VkQueryControlFlags value)
+{
+ static const tcu::Format::BitDesc s_desc[] =
+ {
+ tcu::Format::BitDesc(VK_QUERY_CONTROL_CONSERVATIVE_BIT, "VK_QUERY_CONTROL_CONSERVATIVE_BIT"),
+ };
+ return tcu::Format::Bitfield<32>(value, DE_ARRAY_BEGIN(s_desc), DE_ARRAY_END(s_desc));
+}
+
+// std::ostream pretty-printers: each operator<< emits "TypeName = { ... }" with
+// one tab-indented "field = value" line per struct member, in declaration order.
+// Flag members go through the matching getXxxFlagsStr decoder; plain members use
+// the default stream formatting. NOTE(review): repetitive structure suggests this
+// is generator output — keep field order in sync with the struct definitions.
+std::ostream& operator<< (std::ostream& s, const VkApplicationInfo& value)
+{
+ s << "VkApplicationInfo = {\n";
+ s << "\tsType = " << value.sType << '\n';
+ s << "\tpNext = " << value.pNext << '\n';
+ s << "\tpAppName = " << getCharPtrStr(value.pAppName) << '\n';
+ s << "\tappVersion = " << value.appVersion << '\n';
+ s << "\tpEngineName = " << getCharPtrStr(value.pEngineName) << '\n';
+ s << "\tengineVersion = " << value.engineVersion << '\n';
+ s << "\tapiVersion = " << value.apiVersion << '\n';
+ s << '}';
+ return s;
+}
+
+// Prints callback/user-data members as raw pointers (addresses only).
+std::ostream& operator<< (std::ostream& s, const VkAllocCallbacks& value)
+{
+ s << "VkAllocCallbacks = {\n";
+ s << "\tpUserData = " << value.pUserData << '\n';
+ s << "\tpfnAlloc = " << value.pfnAlloc << '\n';
+ s << "\tpfnFree = " << value.pfnFree << '\n';
+ s << '}';
+ return s;
+}
+
+// Layer/extension name arrays are printed as pointer values, not dereferenced.
+std::ostream& operator<< (std::ostream& s, const VkInstanceCreateInfo& value)
+{
+ s << "VkInstanceCreateInfo = {\n";
+ s << "\tsType = " << value.sType << '\n';
+ s << "\tpNext = " << value.pNext << '\n';
+ s << "\tpAppInfo = " << value.pAppInfo << '\n';
+ s << "\tpAllocCb = " << value.pAllocCb << '\n';
+ s << "\tlayerCount = " << value.layerCount << '\n';
+ s << "\tppEnabledLayerNames = " << value.ppEnabledLayerNames << '\n';
+ s << "\textensionCount = " << value.extensionCount << '\n';
+ s << "\tppEnabledExtensionNames = " << value.ppEnabledExtensionNames << '\n';
+ s << '}';
+ return s;
+}
+
+// Dumps every feature toggle of VkPhysicalDeviceFeatures, one per line.
+std::ostream& operator<< (std::ostream& s, const VkPhysicalDeviceFeatures& value)
+{
+ s << "VkPhysicalDeviceFeatures = {\n";
+ s << "\trobustBufferAccess = " << value.robustBufferAccess << '\n';
+ s << "\tfullDrawIndexUint32 = " << value.fullDrawIndexUint32 << '\n';
+ s << "\timageCubeArray = " << value.imageCubeArray << '\n';
+ s << "\tindependentBlend = " << value.independentBlend << '\n';
+ s << "\tgeometryShader = " << value.geometryShader << '\n';
+ s << "\ttessellationShader = " << value.tessellationShader << '\n';
+ s << "\tsampleRateShading = " << value.sampleRateShading << '\n';
+ s << "\tdualSourceBlend = " << value.dualSourceBlend << '\n';
+ s << "\tlogicOp = " << value.logicOp << '\n';
+ s << "\tinstancedDrawIndirect = " << value.instancedDrawIndirect << '\n';
+ s << "\tdepthClip = " << value.depthClip << '\n';
+ s << "\tdepthBiasClamp = " << value.depthBiasClamp << '\n';
+ s << "\tfillModeNonSolid = " << value.fillModeNonSolid << '\n';
+ s << "\tdepthBounds = " << value.depthBounds << '\n';
+ s << "\twideLines = " << value.wideLines << '\n';
+ s << "\tlargePoints = " << value.largePoints << '\n';
+ s << "\ttextureCompressionETC2 = " << value.textureCompressionETC2 << '\n';
+ s << "\ttextureCompressionASTC_LDR = " << value.textureCompressionASTC_LDR << '\n';
+ s << "\ttextureCompressionBC = " << value.textureCompressionBC << '\n';
+ s << "\tpipelineStatisticsQuery = " << value.pipelineStatisticsQuery << '\n';
+ s << "\tvertexSideEffects = " << value.vertexSideEffects << '\n';
+ s << "\ttessellationSideEffects = " << value.tessellationSideEffects << '\n';
+ s << "\tgeometrySideEffects = " << value.geometrySideEffects << '\n';
+ s << "\tfragmentSideEffects = " << value.fragmentSideEffects << '\n';
+ s << "\tshaderTessellationPointSize = " << value.shaderTessellationPointSize << '\n';
+ s << "\tshaderGeometryPointSize = " << value.shaderGeometryPointSize << '\n';
+ s << "\tshaderTextureGatherExtended = " << value.shaderTextureGatherExtended << '\n';
+ s << "\tshaderStorageImageExtendedFormats = " << value.shaderStorageImageExtendedFormats << '\n';
+ s << "\tshaderStorageImageMultisample = " << value.shaderStorageImageMultisample << '\n';
+ s << "\tshaderStorageBufferArrayConstantIndexing = " << value.shaderStorageBufferArrayConstantIndexing << '\n';
+ s << "\tshaderStorageImageArrayConstantIndexing = " << value.shaderStorageImageArrayConstantIndexing << '\n';
+ s << "\tshaderUniformBufferArrayDynamicIndexing = " << value.shaderUniformBufferArrayDynamicIndexing << '\n';
+ s << "\tshaderSampledImageArrayDynamicIndexing = " << value.shaderSampledImageArrayDynamicIndexing << '\n';
+ s << "\tshaderStorageBufferArrayDynamicIndexing = " << value.shaderStorageBufferArrayDynamicIndexing << '\n';
+ s << "\tshaderStorageImageArrayDynamicIndexing = " << value.shaderStorageImageArrayDynamicIndexing << '\n';
+ s << "\tshaderClipDistance = " << value.shaderClipDistance << '\n';
+ s << "\tshaderCullDistance = " << value.shaderCullDistance << '\n';
+ s << "\tshaderFloat64 = " << value.shaderFloat64 << '\n';
+ s << "\tshaderInt64 = " << value.shaderInt64 << '\n';
+ s << "\tshaderFloat16 = " << value.shaderFloat16 << '\n';
+ s << "\tshaderInt16 = " << value.shaderInt16 << '\n';
+ s << "\tshaderResourceResidency = " << value.shaderResourceResidency << '\n';
+ s << "\tshaderResourceMinLOD = " << value.shaderResourceMinLOD << '\n';
+ s << "\tsparse = " << value.sparse << '\n';
+ s << "\tsparseResidencyBuffer = " << value.sparseResidencyBuffer << '\n';
+ s << "\tsparseResidencyImage2D = " << value.sparseResidencyImage2D << '\n';
+ s << "\tsparseResidencyImage3D = " << value.sparseResidencyImage3D << '\n';
+ s << "\tsparseResidency2Samples = " << value.sparseResidency2Samples << '\n';
+ s << "\tsparseResidency4Samples = " << value.sparseResidency4Samples << '\n';
+ s << "\tsparseResidency8Samples = " << value.sparseResidency8Samples << '\n';
+ s << "\tsparseResidency16Samples = " << value.sparseResidency16Samples << '\n';
+ s << "\tsparseResidencyStandard2DBlockShape = " << value.sparseResidencyStandard2DBlockShape << '\n';
+ s << "\tsparseResidencyStandard2DMSBlockShape = " << value.sparseResidencyStandard2DMSBlockShape << '\n';
+ s << "\tsparseResidencyStandard3DBlockShape = " << value.sparseResidencyStandard3DBlockShape << '\n';
+ s << "\tsparseResidencyAlignedMipSize = " << value.sparseResidencyAlignedMipSize << '\n';
+ s << "\tsparseResidencyNonResident = " << value.sparseResidencyNonResident << '\n';
+ s << "\tsparseResidencyNonResidentStrict = " << value.sparseResidencyNonResidentStrict << '\n';
+ s << "\tsparseResidencyAliased = " << value.sparseResidencyAliased << '\n';
+ s << '}';
+ return s;
+}
+
+// Tiling-feature flags are decoded by getFormatFeatureFlagsStr.
+std::ostream& operator<< (std::ostream& s, const VkFormatProperties& value)
+{
+ s << "VkFormatProperties = {\n";
+ s << "\tlinearTilingFeatures = " << getFormatFeatureFlagsStr(value.linearTilingFeatures) << '\n';
+ s << "\toptimalTilingFeatures = " << getFormatFeatureFlagsStr(value.optimalTilingFeatures) << '\n';
+ s << '}';
+ return s;
+}
+
+// Prints the two queryable image-format limits.
+std::ostream& operator<< (std::ostream& s, const VkImageFormatProperties& value)
+{
+ s << "VkImageFormatProperties = {\n";
+ s << "\tmaxResourceSize = " << value.maxResourceSize << '\n';
+ s << "\tmaxSamples = " << value.maxSamples << '\n';
+ s << '}';
+ return s;
+}
+
+// Dumps every VkPhysicalDeviceLimits member, one per line. Fixed-size array
+// members (work-group counts/sizes, viewport dimensions/bounds, point/line
+// ranges) are expanded element-by-element via tcu::formatArray on a new line.
+std::ostream& operator<< (std::ostream& s, const VkPhysicalDeviceLimits& value)
+{
+ s << "VkPhysicalDeviceLimits = {\n";
+ s << "\tmaxImageDimension1D = " << value.maxImageDimension1D << '\n';
+ s << "\tmaxImageDimension2D = " << value.maxImageDimension2D << '\n';
+ s << "\tmaxImageDimension3D = " << value.maxImageDimension3D << '\n';
+ s << "\tmaxImageDimensionCube = " << value.maxImageDimensionCube << '\n';
+ s << "\tmaxImageArrayLayers = " << value.maxImageArrayLayers << '\n';
+ s << "\tmaxTexelBufferSize = " << value.maxTexelBufferSize << '\n';
+ s << "\tmaxUniformBufferSize = " << value.maxUniformBufferSize << '\n';
+ s << "\tmaxStorageBufferSize = " << value.maxStorageBufferSize << '\n';
+ s << "\tmaxPushConstantsSize = " << value.maxPushConstantsSize << '\n';
+ s << "\tmaxMemoryAllocationCount = " << value.maxMemoryAllocationCount << '\n';
+ s << "\tbufferImageGranularity = " << value.bufferImageGranularity << '\n';
+ s << "\tmaxBoundDescriptorSets = " << value.maxBoundDescriptorSets << '\n';
+ s << "\tmaxDescriptorSets = " << value.maxDescriptorSets << '\n';
+ s << "\tmaxPerStageDescriptorSamplers = " << value.maxPerStageDescriptorSamplers << '\n';
+ s << "\tmaxPerStageDescriptorUniformBuffers = " << value.maxPerStageDescriptorUniformBuffers << '\n';
+ s << "\tmaxPerStageDescriptorStorageBuffers = " << value.maxPerStageDescriptorStorageBuffers << '\n';
+ s << "\tmaxPerStageDescriptorSampledImages = " << value.maxPerStageDescriptorSampledImages << '\n';
+ s << "\tmaxPerStageDescriptorStorageImages = " << value.maxPerStageDescriptorStorageImages << '\n';
+ s << "\tmaxDescriptorSetSamplers = " << value.maxDescriptorSetSamplers << '\n';
+ s << "\tmaxDescriptorSetUniformBuffers = " << value.maxDescriptorSetUniformBuffers << '\n';
+ s << "\tmaxDescriptorSetStorageBuffers = " << value.maxDescriptorSetStorageBuffers << '\n';
+ s << "\tmaxDescriptorSetSampledImages = " << value.maxDescriptorSetSampledImages << '\n';
+ s << "\tmaxDescriptorSetStorageImages = " << value.maxDescriptorSetStorageImages << '\n';
+ s << "\tmaxVertexInputAttributes = " << value.maxVertexInputAttributes << '\n';
+ s << "\tmaxVertexInputAttributeOffset = " << value.maxVertexInputAttributeOffset << '\n';
+ s << "\tmaxVertexInputBindingStride = " << value.maxVertexInputBindingStride << '\n';
+ s << "\tmaxVertexOutputComponents = " << value.maxVertexOutputComponents << '\n';
+ s << "\tmaxTessGenLevel = " << value.maxTessGenLevel << '\n';
+ s << "\tmaxTessPatchSize = " << value.maxTessPatchSize << '\n';
+ s << "\tmaxTessControlPerVertexInputComponents = " << value.maxTessControlPerVertexInputComponents << '\n';
+ s << "\tmaxTessControlPerVertexOutputComponents = " << value.maxTessControlPerVertexOutputComponents << '\n';
+ s << "\tmaxTessControlPerPatchOutputComponents = " << value.maxTessControlPerPatchOutputComponents << '\n';
+ s << "\tmaxTessControlTotalOutputComponents = " << value.maxTessControlTotalOutputComponents << '\n';
+ s << "\tmaxTessEvaluationInputComponents = " << value.maxTessEvaluationInputComponents << '\n';
+ s << "\tmaxTessEvaluationOutputComponents = " << value.maxTessEvaluationOutputComponents << '\n';
+ s << "\tmaxGeometryShaderInvocations = " << value.maxGeometryShaderInvocations << '\n';
+ s << "\tmaxGeometryInputComponents = " << value.maxGeometryInputComponents << '\n';
+ s << "\tmaxGeometryOutputComponents = " << value.maxGeometryOutputComponents << '\n';
+ s << "\tmaxGeometryOutputVertices = " << value.maxGeometryOutputVertices << '\n';
+ s << "\tmaxGeometryTotalOutputComponents = " << value.maxGeometryTotalOutputComponents << '\n';
+ s << "\tmaxFragmentInputComponents = " << value.maxFragmentInputComponents << '\n';
+ s << "\tmaxFragmentOutputBuffers = " << value.maxFragmentOutputBuffers << '\n';
+ s << "\tmaxFragmentDualSourceBuffers = " << value.maxFragmentDualSourceBuffers << '\n';
+ s << "\tmaxFragmentCombinedOutputResources = " << value.maxFragmentCombinedOutputResources << '\n';
+ s << "\tmaxComputeSharedMemorySize = " << value.maxComputeSharedMemorySize << '\n';
+ s << "\tmaxComputeWorkGroupCount = " << '\n' << tcu::formatArray(DE_ARRAY_BEGIN(value.maxComputeWorkGroupCount), DE_ARRAY_END(value.maxComputeWorkGroupCount)) << '\n';
+ s << "\tmaxComputeWorkGroupInvocations = " << value.maxComputeWorkGroupInvocations << '\n';
+ s << "\tmaxComputeWorkGroupSize = " << '\n' << tcu::formatArray(DE_ARRAY_BEGIN(value.maxComputeWorkGroupSize), DE_ARRAY_END(value.maxComputeWorkGroupSize)) << '\n';
+ s << "\tsubPixelPrecisionBits = " << value.subPixelPrecisionBits << '\n';
+ s << "\tsubTexelPrecisionBits = " << value.subTexelPrecisionBits << '\n';
+ s << "\tmipmapPrecisionBits = " << value.mipmapPrecisionBits << '\n';
+ s << "\tmaxDrawIndexedIndexValue = " << value.maxDrawIndexedIndexValue << '\n';
+ s << "\tmaxDrawIndirectInstanceCount = " << value.maxDrawIndirectInstanceCount << '\n';
+ s << "\tprimitiveRestartForPatches = " << value.primitiveRestartForPatches << '\n';
+ s << "\tmaxSamplerLodBias = " << value.maxSamplerLodBias << '\n';
+ s << "\tmaxSamplerAnisotropy = " << value.maxSamplerAnisotropy << '\n';
+ s << "\tmaxViewports = " << value.maxViewports << '\n';
+ s << "\tmaxDynamicViewportStates = " << value.maxDynamicViewportStates << '\n';
+ s << "\tmaxViewportDimensions = " << '\n' << tcu::formatArray(DE_ARRAY_BEGIN(value.maxViewportDimensions), DE_ARRAY_END(value.maxViewportDimensions)) << '\n';
+ s << "\tviewportBoundsRange = " << '\n' << tcu::formatArray(DE_ARRAY_BEGIN(value.viewportBoundsRange), DE_ARRAY_END(value.viewportBoundsRange)) << '\n';
+ s << "\tviewportSubPixelBits = " << value.viewportSubPixelBits << '\n';
+ s << "\tminMemoryMapAlignment = " << value.minMemoryMapAlignment << '\n';
+ s << "\tminTexelBufferOffsetAlignment = " << value.minTexelBufferOffsetAlignment << '\n';
+ s << "\tminUniformBufferOffsetAlignment = " << value.minUniformBufferOffsetAlignment << '\n';
+ s << "\tminStorageBufferOffsetAlignment = " << value.minStorageBufferOffsetAlignment << '\n';
+ s << "\tminTexelOffset = " << value.minTexelOffset << '\n';
+ s << "\tmaxTexelOffset = " << value.maxTexelOffset << '\n';
+ s << "\tminTexelGatherOffset = " << value.minTexelGatherOffset << '\n';
+ s << "\tmaxTexelGatherOffset = " << value.maxTexelGatherOffset << '\n';
+ s << "\tminInterpolationOffset = " << value.minInterpolationOffset << '\n';
+ s << "\tmaxInterpolationOffset = " << value.maxInterpolationOffset << '\n';
+ s << "\tsubPixelInterpolationOffsetBits = " << value.subPixelInterpolationOffsetBits << '\n';
+ s << "\tmaxFramebufferWidth = " << value.maxFramebufferWidth << '\n';
+ s << "\tmaxFramebufferHeight = " << value.maxFramebufferHeight << '\n';
+ s << "\tmaxFramebufferLayers = " << value.maxFramebufferLayers << '\n';
+ s << "\tmaxFramebufferColorSamples = " << value.maxFramebufferColorSamples << '\n';
+ s << "\tmaxFramebufferDepthSamples = " << value.maxFramebufferDepthSamples << '\n';
+ s << "\tmaxFramebufferStencilSamples = " << value.maxFramebufferStencilSamples << '\n';
+ s << "\tmaxColorAttachments = " << value.maxColorAttachments << '\n';
+ s << "\tmaxSampledImageColorSamples = " << value.maxSampledImageColorSamples << '\n';
+ s << "\tmaxSampledImageDepthSamples = " << value.maxSampledImageDepthSamples << '\n';
+ s << "\tmaxSampledImageIntegerSamples = " << value.maxSampledImageIntegerSamples << '\n';
+ s << "\tmaxStorageImageSamples = " << value.maxStorageImageSamples << '\n';
+ s << "\tmaxSampleMaskWords = " << value.maxSampleMaskWords << '\n';
+ s << "\ttimestampFrequency = " << value.timestampFrequency << '\n';
+ s << "\tmaxClipDistances = " << value.maxClipDistances << '\n';
+ s << "\tmaxCullDistances = " << value.maxCullDistances << '\n';
+ s << "\tmaxCombinedClipAndCullDistances = " << value.maxCombinedClipAndCullDistances << '\n';
+ s << "\tpointSizeRange = " << '\n' << tcu::formatArray(DE_ARRAY_BEGIN(value.pointSizeRange), DE_ARRAY_END(value.pointSizeRange)) << '\n';
+ s << "\tlineWidthRange = " << '\n' << tcu::formatArray(DE_ARRAY_BEGIN(value.lineWidthRange), DE_ARRAY_END(value.lineWidthRange)) << '\n';
+ s << "\tpointSizeGranularity = " << value.pointSizeGranularity << '\n';
+ s << "\tlineWidthGranularity = " << value.lineWidthGranularity << '\n';
+ s << '}';
+ return s;
+}
+
+std::ostream& operator<< (std::ostream& s, const VkPhysicalDeviceProperties& value)
+{
+ s << "VkPhysicalDeviceProperties = {\n";
+ s << "\tapiVersion = " << value.apiVersion << '\n';
+ s << "\tdriverVersion = " << value.driverVersion << '\n';
+ s << "\tvendorId = " << value.vendorId << '\n';
+ s << "\tdeviceId = " << value.deviceId << '\n';
+ s << "\tdeviceType = " << value.deviceType << '\n';
+ s << "\tdeviceName = " << (const char*)value.deviceName << '\n';
+ s << "\tpipelineCacheUUID = " << '\n' << tcu::formatArray(DE_ARRAY_BEGIN(value.pipelineCacheUUID), DE_ARRAY_END(value.pipelineCacheUUID)) << '\n';
+ s << '}';
+ return s;
+}
+
+std::ostream& operator<< (std::ostream& s, const VkPhysicalDeviceQueueProperties& value)
+{
+ s << "VkPhysicalDeviceQueueProperties = {\n";
+ s << "\tqueueFlags = " << getQueueFlagsStr(value.queueFlags) << '\n';
+ s << "\tqueueCount = " << value.queueCount << '\n';
+ s << "\tsupportsTimestamps = " << value.supportsTimestamps << '\n';
+ s << '}';
+ return s;
+}
+
+std::ostream& operator<< (std::ostream& s, const VkMemoryType& value)
+{
+ s << "VkMemoryType = {\n";
+ s << "\tpropertyFlags = " << getMemoryPropertyFlagsStr(value.propertyFlags) << '\n';
+ s << "\theapIndex = " << value.heapIndex << '\n';
+ s << '}';
+ return s;
+}
+
+std::ostream& operator<< (std::ostream& s, const VkMemoryHeap& value)
+{
+ s << "VkMemoryHeap = {\n";
+ s << "\tsize = " << value.size << '\n';
+ s << "\tflags = " << getMemoryHeapFlagsStr(value.flags) << '\n';
+ s << '}';
+ return s;
+}
+
+std::ostream& operator<< (std::ostream& s, const VkPhysicalDeviceMemoryProperties& value)
+{
+ s << "VkPhysicalDeviceMemoryProperties = {\n";
+ s << "\tmemoryTypeCount = " << value.memoryTypeCount << '\n';
+ s << "\tmemoryTypes = " << '\n' << tcu::formatArray(DE_ARRAY_BEGIN(value.memoryTypes), DE_ARRAY_END(value.memoryTypes)) << '\n';
+ s << "\tmemoryHeapCount = " << value.memoryHeapCount << '\n';
+ s << "\tmemoryHeaps = " << '\n' << tcu::formatArray(DE_ARRAY_BEGIN(value.memoryHeaps), DE_ARRAY_END(value.memoryHeaps)) << '\n';
+ s << '}';
+ return s;
+}
+
+std::ostream& operator<< (std::ostream& s, const VkDeviceQueueCreateInfo& value)
+{
+ s << "VkDeviceQueueCreateInfo = {\n";
+ s << "\tqueueFamilyIndex = " << value.queueFamilyIndex << '\n';
+ s << "\tqueueCount = " << value.queueCount << '\n';
+ s << '}';
+ return s;
+}
+
+std::ostream& operator<< (std::ostream& s, const VkDeviceCreateInfo& value)
+{
+ s << "VkDeviceCreateInfo = {\n";
+ s << "\tsType = " << value.sType << '\n';
+ s << "\tpNext = " << value.pNext << '\n';
+ s << "\tqueueRecordCount = " << value.queueRecordCount << '\n';
+ s << "\tpRequestedQueues = " << value.pRequestedQueues << '\n';
+ s << "\tlayerCount = " << value.layerCount << '\n';
+ s << "\tppEnabledLayerNames = " << value.ppEnabledLayerNames << '\n';
+ s << "\textensionCount = " << value.extensionCount << '\n';
+ s << "\tppEnabledExtensionNames = " << value.ppEnabledExtensionNames << '\n';
+ s << "\tpEnabledFeatures = " << value.pEnabledFeatures << '\n';
+ s << "\tflags = " << getDeviceCreateFlagsStr(value.flags) << '\n';
+ s << '}';
+ return s;
+}
+
+std::ostream& operator<< (std::ostream& s, const VkExtensionProperties& value)
+{
+ s << "VkExtensionProperties = {\n";
+ s << "\textName = " << (const char*)value.extName << '\n';
+ s << "\tspecVersion = " << value.specVersion << '\n';
+ s << '}';
+ return s;
+}
+
+std::ostream& operator<< (std::ostream& s, const VkLayerProperties& value)
+{
+ s << "VkLayerProperties = {\n";
+ s << "\tlayerName = " << '\n' << tcu::formatArray(DE_ARRAY_BEGIN(value.layerName), DE_ARRAY_END(value.layerName)) << '\n';
+ s << "\tspecVersion = " << value.specVersion << '\n';
+ s << "\timplVersion = " << value.implVersion << '\n';
+ s << "\tdescription = " << '\n' << tcu::formatArray(DE_ARRAY_BEGIN(value.description), DE_ARRAY_END(value.description)) << '\n';
+ s << '}';
+ return s;
+}
+
+std::ostream& operator<< (std::ostream& s, const VkMemoryAllocInfo& value)
+{
+ s << "VkMemoryAllocInfo = {\n";
+ s << "\tsType = " << value.sType << '\n';
+ s << "\tpNext = " << value.pNext << '\n';
+ s << "\tallocationSize = " << value.allocationSize << '\n';
+ s << "\tmemoryTypeIndex = " << value.memoryTypeIndex << '\n';
+ s << '}';
+ return s;
+}
+
+std::ostream& operator<< (std::ostream& s, const VkMappedMemoryRange& value)
+{
+ s << "VkMappedMemoryRange = {\n";
+ s << "\tsType = " << value.sType << '\n';
+ s << "\tpNext = " << value.pNext << '\n';
+ s << "\tmem = " << value.mem << '\n';
+ s << "\toffset = " << value.offset << '\n';
+ s << "\tsize = " << value.size << '\n';
+ s << '}';
+ return s;
+}
+
+// Debug formatters for memory-requirement, extent and sparse-resource structures.
+// NOTE(review): these printers look machine-generated from the Vulkan API registry
+// (API 0.138.0 per the test plan) -- confirm and regenerate rather than hand-edit.
+// Each operator<< emits "TypeName = {" then one tab-indented "field = value" line
+// per member, closing with '}'; flag members go through get*FlagsStr helpers.
+std::ostream& operator<< (std::ostream& s, const VkMemoryRequirements& value)
+{
+	s << "VkMemoryRequirements = {\n";
+	s << "\tsize = " << value.size << '\n';
+	s << "\talignment = " << value.alignment << '\n';
+	s << "\tmemoryTypeBits = " << value.memoryTypeBits << '\n';
+	s << '}';
+	return s;
+}
+
+std::ostream& operator<< (std::ostream& s, const VkExtent3D& value)
+{
+	s << "VkExtent3D = {\n";
+	s << "\twidth = " << value.width << '\n';
+	s << "\theight = " << value.height << '\n';
+	s << "\tdepth = " << value.depth << '\n';
+	s << '}';
+	return s;
+}
+
+std::ostream& operator<< (std::ostream& s, const VkSparseImageFormatProperties& value)
+{
+	s << "VkSparseImageFormatProperties = {\n";
+	s << "\taspect = " << value.aspect << '\n';
+	s << "\timageGranularity = " << value.imageGranularity << '\n';
+	s << "\tflags = " << getSparseImageFormatFlagsStr(value.flags) << '\n';
+	s << '}';
+	return s;
+}
+
+std::ostream& operator<< (std::ostream& s, const VkSparseImageMemoryRequirements& value)
+{
+	s << "VkSparseImageMemoryRequirements = {\n";
+	s << "\tformatProps = " << value.formatProps << '\n';
+	s << "\timageMipTailStartLOD = " << value.imageMipTailStartLOD << '\n';
+	s << "\timageMipTailSize = " << value.imageMipTailSize << '\n';
+	s << "\timageMipTailOffset = " << value.imageMipTailOffset << '\n';
+	s << "\timageMipTailStride = " << value.imageMipTailStride << '\n';
+	s << '}';
+	return s;
+}
+
+std::ostream& operator<< (std::ostream& s, const VkSparseMemoryBindInfo& value)
+{
+	s << "VkSparseMemoryBindInfo = {\n";
+	s << "\trangeOffset = " << value.rangeOffset << '\n';
+	s << "\trangeSize = " << value.rangeSize << '\n';
+	s << "\tmemOffset = " << value.memOffset << '\n';
+	s << "\tmem = " << value.mem << '\n';
+	s << "\tflags = " << getSparseMemoryBindFlagsStr(value.flags) << '\n';
+	s << '}';
+	return s;
+}
+
+std::ostream& operator<< (std::ostream& s, const VkImageSubresource& value)
+{
+	s << "VkImageSubresource = {\n";
+	s << "\taspect = " << value.aspect << '\n';
+	s << "\tmipLevel = " << value.mipLevel << '\n';
+	s << "\tarraySlice = " << value.arraySlice << '\n';
+	s << '}';
+	return s;
+}
+
+std::ostream& operator<< (std::ostream& s, const VkOffset3D& value)
+{
+	s << "VkOffset3D = {\n";
+	s << "\tx = " << value.x << '\n';
+	s << "\ty = " << value.y << '\n';
+	s << "\tz = " << value.z << '\n';
+	s << '}';
+	return s;
+}
+
+std::ostream& operator<< (std::ostream& s, const VkSparseImageMemoryBindInfo& value)
+{
+	s << "VkSparseImageMemoryBindInfo = {\n";
+	s << "\tsubresource = " << value.subresource << '\n';
+	s << "\toffset = " << value.offset << '\n';
+	s << "\textent = " << value.extent << '\n';
+	s << "\tmemOffset = " << value.memOffset << '\n';
+	s << "\tmem = " << value.mem << '\n';
+	s << "\tflags = " << getSparseMemoryBindFlagsStr(value.flags) << '\n';
+	s << '}';
+	return s;
+}
+
+// Debug formatters for synchronization-object, query, buffer and image
+// create-info structures. NOTE(review): generated-looking code (one printer per
+// Vulkan struct, fields in declaration order) -- prefer regenerating over
+// editing by hand. sType/pNext are printed raw; pointer members print as
+// addresses, not dereferenced contents.
+std::ostream& operator<< (std::ostream& s, const VkFenceCreateInfo& value)
+{
+	s << "VkFenceCreateInfo = {\n";
+	s << "\tsType = " << value.sType << '\n';
+	s << "\tpNext = " << value.pNext << '\n';
+	s << "\tflags = " << getFenceCreateFlagsStr(value.flags) << '\n';
+	s << '}';
+	return s;
+}
+
+std::ostream& operator<< (std::ostream& s, const VkSemaphoreCreateInfo& value)
+{
+	s << "VkSemaphoreCreateInfo = {\n";
+	s << "\tsType = " << value.sType << '\n';
+	s << "\tpNext = " << value.pNext << '\n';
+	s << "\tflags = " << value.flags << '\n';
+	s << '}';
+	return s;
+}
+
+std::ostream& operator<< (std::ostream& s, const VkEventCreateInfo& value)
+{
+	s << "VkEventCreateInfo = {\n";
+	s << "\tsType = " << value.sType << '\n';
+	s << "\tpNext = " << value.pNext << '\n';
+	s << "\tflags = " << value.flags << '\n';
+	s << '}';
+	return s;
+}
+
+std::ostream& operator<< (std::ostream& s, const VkQueryPoolCreateInfo& value)
+{
+	s << "VkQueryPoolCreateInfo = {\n";
+	s << "\tsType = " << value.sType << '\n';
+	s << "\tpNext = " << value.pNext << '\n';
+	s << "\tqueryType = " << value.queryType << '\n';
+	s << "\tslots = " << value.slots << '\n';
+	s << "\tpipelineStatistics = " << getQueryPipelineStatisticFlagsStr(value.pipelineStatistics) << '\n';
+	s << '}';
+	return s;
+}
+
+std::ostream& operator<< (std::ostream& s, const VkBufferCreateInfo& value)
+{
+	s << "VkBufferCreateInfo = {\n";
+	s << "\tsType = " << value.sType << '\n';
+	s << "\tpNext = " << value.pNext << '\n';
+	s << "\tsize = " << value.size << '\n';
+	s << "\tusage = " << getBufferUsageFlagsStr(value.usage) << '\n';
+	s << "\tflags = " << getBufferCreateFlagsStr(value.flags) << '\n';
+	s << "\tsharingMode = " << value.sharingMode << '\n';
+	s << "\tqueueFamilyCount = " << value.queueFamilyCount << '\n';
+	s << "\tpQueueFamilyIndices = " << value.pQueueFamilyIndices << '\n';
+	s << '}';
+	return s;
+}
+
+std::ostream& operator<< (std::ostream& s, const VkBufferViewCreateInfo& value)
+{
+	s << "VkBufferViewCreateInfo = {\n";
+	s << "\tsType = " << value.sType << '\n';
+	s << "\tpNext = " << value.pNext << '\n';
+	s << "\tbuffer = " << value.buffer << '\n';
+	s << "\tviewType = " << value.viewType << '\n';
+	s << "\tformat = " << value.format << '\n';
+	s << "\toffset = " << value.offset << '\n';
+	s << "\trange = " << value.range << '\n';
+	s << '}';
+	return s;
+}
+
+std::ostream& operator<< (std::ostream& s, const VkImageCreateInfo& value)
+{
+	s << "VkImageCreateInfo = {\n";
+	s << "\tsType = " << value.sType << '\n';
+	s << "\tpNext = " << value.pNext << '\n';
+	s << "\timageType = " << value.imageType << '\n';
+	s << "\tformat = " << value.format << '\n';
+	s << "\textent = " << value.extent << '\n';
+	s << "\tmipLevels = " << value.mipLevels << '\n';
+	s << "\tarraySize = " << value.arraySize << '\n';
+	s << "\tsamples = " << value.samples << '\n';
+	s << "\ttiling = " << value.tiling << '\n';
+	s << "\tusage = " << getImageUsageFlagsStr(value.usage) << '\n';
+	s << "\tflags = " << getImageCreateFlagsStr(value.flags) << '\n';
+	s << "\tsharingMode = " << value.sharingMode << '\n';
+	s << "\tqueueFamilyCount = " << value.queueFamilyCount << '\n';
+	s << "\tpQueueFamilyIndices = " << value.pQueueFamilyIndices << '\n';
+	s << '}';
+	return s;
+}
+
+std::ostream& operator<< (std::ostream& s, const VkSubresourceLayout& value)
+{
+	s << "VkSubresourceLayout = {\n";
+	s << "\toffset = " << value.offset << '\n';
+	s << "\tsize = " << value.size << '\n';
+	s << "\trowPitch = " << value.rowPitch << '\n';
+	s << "\tdepthPitch = " << value.depthPitch << '\n';
+	s << '}';
+	return s;
+}
+
+std::ostream& operator<< (std::ostream& s, const VkChannelMapping& value)
+{
+	s << "VkChannelMapping = {\n";
+	s << "\tr = " << value.r << '\n';
+	s << "\tg = " << value.g << '\n';
+	s << "\tb = " << value.b << '\n';
+	s << "\ta = " << value.a << '\n';
+	s << '}';
+	return s;
+}
+
+std::ostream& operator<< (std::ostream& s, const VkImageSubresourceRange& value)
+{
+	s << "VkImageSubresourceRange = {\n";
+	s << "\taspect = " << value.aspect << '\n';
+	s << "\tbaseMipLevel = " << value.baseMipLevel << '\n';
+	s << "\tmipLevels = " << value.mipLevels << '\n';
+	s << "\tbaseArraySlice = " << value.baseArraySlice << '\n';
+	s << "\tarraySize = " << value.arraySize << '\n';
+	s << '}';
+	return s;
+}
+
+std::ostream& operator<< (std::ostream& s, const VkImageViewCreateInfo& value)
+{
+	s << "VkImageViewCreateInfo = {\n";
+	s << "\tsType = " << value.sType << '\n';
+	s << "\tpNext = " << value.pNext << '\n';
+	s << "\timage = " << value.image << '\n';
+	s << "\tviewType = " << value.viewType << '\n';
+	s << "\tformat = " << value.format << '\n';
+	s << "\tchannels = " << value.channels << '\n';
+	s << "\tsubresourceRange = " << value.subresourceRange << '\n';
+	s << '}';
+	return s;
+}
+
+std::ostream& operator<< (std::ostream& s, const VkAttachmentViewCreateInfo& value)
+{
+	s << "VkAttachmentViewCreateInfo = {\n";
+	s << "\tsType = " << value.sType << '\n';
+	s << "\tpNext = " << value.pNext << '\n';
+	s << "\timage = " << value.image << '\n';
+	s << "\tformat = " << value.format << '\n';
+	s << "\tmipLevel = " << value.mipLevel << '\n';
+	s << "\tbaseArraySlice = " << value.baseArraySlice << '\n';
+	s << "\tarraySize = " << value.arraySize << '\n';
+	s << "\tflags = " << getAttachmentViewCreateFlagsStr(value.flags) << '\n';
+	s << '}';
+	return s;
+}
+
+// Debug formatters for shader, pipeline-cache, specialization and fixed-function
+// pipeline-state structures. NOTE(review): generated-looking code -- regenerate
+// rather than hand-edit. pName goes through getCharPtrStr (safe C-string print);
+// other pointers (pCode, pMap, pData, description arrays) print as raw addresses.
+std::ostream& operator<< (std::ostream& s, const VkShaderModuleCreateInfo& value)
+{
+	s << "VkShaderModuleCreateInfo = {\n";
+	s << "\tsType = " << value.sType << '\n';
+	s << "\tpNext = " << value.pNext << '\n';
+	s << "\tcodeSize = " << value.codeSize << '\n';
+	s << "\tpCode = " << value.pCode << '\n';
+	s << "\tflags = " << value.flags << '\n';
+	s << '}';
+	return s;
+}
+
+std::ostream& operator<< (std::ostream& s, const VkShaderCreateInfo& value)
+{
+	s << "VkShaderCreateInfo = {\n";
+	s << "\tsType = " << value.sType << '\n';
+	s << "\tpNext = " << value.pNext << '\n';
+	s << "\tmodule = " << value.module << '\n';
+	s << "\tpName = " << getCharPtrStr(value.pName) << '\n';
+	s << "\tflags = " << value.flags << '\n';
+	s << '}';
+	return s;
+}
+
+std::ostream& operator<< (std::ostream& s, const VkPipelineCacheCreateInfo& value)
+{
+	s << "VkPipelineCacheCreateInfo = {\n";
+	s << "\tsType = " << value.sType << '\n';
+	s << "\tpNext = " << value.pNext << '\n';
+	s << "\tinitialSize = " << value.initialSize << '\n';
+	s << "\tinitialData = " << value.initialData << '\n';
+	s << "\tmaxSize = " << value.maxSize << '\n';
+	s << '}';
+	return s;
+}
+
+std::ostream& operator<< (std::ostream& s, const VkSpecializationMapEntry& value)
+{
+	s << "VkSpecializationMapEntry = {\n";
+	s << "\tconstantId = " << value.constantId << '\n';
+	s << "\tsize = " << value.size << '\n';
+	s << "\toffset = " << value.offset << '\n';
+	s << '}';
+	return s;
+}
+
+std::ostream& operator<< (std::ostream& s, const VkSpecializationInfo& value)
+{
+	s << "VkSpecializationInfo = {\n";
+	s << "\tmapEntryCount = " << value.mapEntryCount << '\n';
+	s << "\tpMap = " << value.pMap << '\n';
+	s << "\tdataSize = " << value.dataSize << '\n';
+	s << "\tpData = " << value.pData << '\n';
+	s << '}';
+	return s;
+}
+
+std::ostream& operator<< (std::ostream& s, const VkPipelineShaderStageCreateInfo& value)
+{
+	s << "VkPipelineShaderStageCreateInfo = {\n";
+	s << "\tsType = " << value.sType << '\n';
+	s << "\tpNext = " << value.pNext << '\n';
+	s << "\tstage = " << value.stage << '\n';
+	s << "\tshader = " << value.shader << '\n';
+	s << "\tpSpecializationInfo = " << value.pSpecializationInfo << '\n';
+	s << '}';
+	return s;
+}
+
+std::ostream& operator<< (std::ostream& s, const VkVertexInputBindingDescription& value)
+{
+	s << "VkVertexInputBindingDescription = {\n";
+	s << "\tbinding = " << value.binding << '\n';
+	s << "\tstrideInBytes = " << value.strideInBytes << '\n';
+	s << "\tstepRate = " << value.stepRate << '\n';
+	s << '}';
+	return s;
+}
+
+std::ostream& operator<< (std::ostream& s, const VkVertexInputAttributeDescription& value)
+{
+	s << "VkVertexInputAttributeDescription = {\n";
+	s << "\tlocation = " << value.location << '\n';
+	s << "\tbinding = " << value.binding << '\n';
+	s << "\tformat = " << value.format << '\n';
+	s << "\toffsetInBytes = " << value.offsetInBytes << '\n';
+	s << '}';
+	return s;
+}
+
+std::ostream& operator<< (std::ostream& s, const VkPipelineVertexInputStateCreateInfo& value)
+{
+	s << "VkPipelineVertexInputStateCreateInfo = {\n";
+	s << "\tsType = " << value.sType << '\n';
+	s << "\tpNext = " << value.pNext << '\n';
+	s << "\tbindingCount = " << value.bindingCount << '\n';
+	s << "\tpVertexBindingDescriptions = " << value.pVertexBindingDescriptions << '\n';
+	s << "\tattributeCount = " << value.attributeCount << '\n';
+	s << "\tpVertexAttributeDescriptions = " << value.pVertexAttributeDescriptions << '\n';
+	s << '}';
+	return s;
+}
+
+std::ostream& operator<< (std::ostream& s, const VkPipelineInputAssemblyStateCreateInfo& value)
+{
+	s << "VkPipelineInputAssemblyStateCreateInfo = {\n";
+	s << "\tsType = " << value.sType << '\n';
+	s << "\tpNext = " << value.pNext << '\n';
+	s << "\ttopology = " << value.topology << '\n';
+	s << "\tprimitiveRestartEnable = " << value.primitiveRestartEnable << '\n';
+	s << '}';
+	return s;
+}
+
+std::ostream& operator<< (std::ostream& s, const VkPipelineTessellationStateCreateInfo& value)
+{
+	s << "VkPipelineTessellationStateCreateInfo = {\n";
+	s << "\tsType = " << value.sType << '\n';
+	s << "\tpNext = " << value.pNext << '\n';
+	s << "\tpatchControlPoints = " << value.patchControlPoints << '\n';
+	s << '}';
+	return s;
+}
+
+std::ostream& operator<< (std::ostream& s, const VkPipelineViewportStateCreateInfo& value)
+{
+	s << "VkPipelineViewportStateCreateInfo = {\n";
+	s << "\tsType = " << value.sType << '\n';
+	s << "\tpNext = " << value.pNext << '\n';
+	s << "\tviewportCount = " << value.viewportCount << '\n';
+	s << '}';
+	return s;
+}
+
+std::ostream& operator<< (std::ostream& s, const VkPipelineRasterStateCreateInfo& value)
+{
+	s << "VkPipelineRasterStateCreateInfo = {\n";
+	s << "\tsType = " << value.sType << '\n';
+	s << "\tpNext = " << value.pNext << '\n';
+	s << "\tdepthClipEnable = " << value.depthClipEnable << '\n';
+	s << "\trasterizerDiscardEnable = " << value.rasterizerDiscardEnable << '\n';
+	s << "\tfillMode = " << value.fillMode << '\n';
+	s << "\tcullMode = " << value.cullMode << '\n';
+	s << "\tfrontFace = " << value.frontFace << '\n';
+	s << '}';
+	return s;
+}
+
+std::ostream& operator<< (std::ostream& s, const VkPipelineMultisampleStateCreateInfo& value)
+{
+	s << "VkPipelineMultisampleStateCreateInfo = {\n";
+	s << "\tsType = " << value.sType << '\n';
+	s << "\tpNext = " << value.pNext << '\n';
+	s << "\trasterSamples = " << value.rasterSamples << '\n';
+	s << "\tsampleShadingEnable = " << value.sampleShadingEnable << '\n';
+	s << "\tminSampleShading = " << value.minSampleShading << '\n';
+	s << "\tsampleMask = " << value.sampleMask << '\n';
+	s << '}';
+	return s;
+}
+
+// Debug formatters for stencil/depth/blend state and full pipeline / layout
+// create-info structures. NOTE(review): generated-looking code -- regenerate
+// rather than hand-edit. Nested structs (front/back stencil state) recurse into
+// their own operator<<; sub-state pointers of the graphics pipeline are printed
+// as addresses only, not expanded.
+std::ostream& operator<< (std::ostream& s, const VkStencilOpState& value)
+{
+	s << "VkStencilOpState = {\n";
+	s << "\tstencilFailOp = " << value.stencilFailOp << '\n';
+	s << "\tstencilPassOp = " << value.stencilPassOp << '\n';
+	s << "\tstencilDepthFailOp = " << value.stencilDepthFailOp << '\n';
+	s << "\tstencilCompareOp = " << value.stencilCompareOp << '\n';
+	s << '}';
+	return s;
+}
+
+std::ostream& operator<< (std::ostream& s, const VkPipelineDepthStencilStateCreateInfo& value)
+{
+	s << "VkPipelineDepthStencilStateCreateInfo = {\n";
+	s << "\tsType = " << value.sType << '\n';
+	s << "\tpNext = " << value.pNext << '\n';
+	s << "\tdepthTestEnable = " << value.depthTestEnable << '\n';
+	s << "\tdepthWriteEnable = " << value.depthWriteEnable << '\n';
+	s << "\tdepthCompareOp = " << value.depthCompareOp << '\n';
+	s << "\tdepthBoundsEnable = " << value.depthBoundsEnable << '\n';
+	s << "\tstencilTestEnable = " << value.stencilTestEnable << '\n';
+	s << "\tfront = " << value.front << '\n';
+	s << "\tback = " << value.back << '\n';
+	s << '}';
+	return s;
+}
+
+std::ostream& operator<< (std::ostream& s, const VkPipelineColorBlendAttachmentState& value)
+{
+	s << "VkPipelineColorBlendAttachmentState = {\n";
+	s << "\tblendEnable = " << value.blendEnable << '\n';
+	s << "\tsrcBlendColor = " << value.srcBlendColor << '\n';
+	s << "\tdestBlendColor = " << value.destBlendColor << '\n';
+	s << "\tblendOpColor = " << value.blendOpColor << '\n';
+	s << "\tsrcBlendAlpha = " << value.srcBlendAlpha << '\n';
+	s << "\tdestBlendAlpha = " << value.destBlendAlpha << '\n';
+	s << "\tblendOpAlpha = " << value.blendOpAlpha << '\n';
+	s << "\tchannelWriteMask = " << getChannelFlagsStr(value.channelWriteMask) << '\n';
+	s << '}';
+	return s;
+}
+
+std::ostream& operator<< (std::ostream& s, const VkPipelineColorBlendStateCreateInfo& value)
+{
+	s << "VkPipelineColorBlendStateCreateInfo = {\n";
+	s << "\tsType = " << value.sType << '\n';
+	s << "\tpNext = " << value.pNext << '\n';
+	s << "\talphaToCoverageEnable = " << value.alphaToCoverageEnable << '\n';
+	s << "\tlogicOpEnable = " << value.logicOpEnable << '\n';
+	s << "\tlogicOp = " << value.logicOp << '\n';
+	s << "\tattachmentCount = " << value.attachmentCount << '\n';
+	s << "\tpAttachments = " << value.pAttachments << '\n';
+	s << '}';
+	return s;
+}
+
+std::ostream& operator<< (std::ostream& s, const VkGraphicsPipelineCreateInfo& value)
+{
+	s << "VkGraphicsPipelineCreateInfo = {\n";
+	s << "\tsType = " << value.sType << '\n';
+	s << "\tpNext = " << value.pNext << '\n';
+	s << "\tstageCount = " << value.stageCount << '\n';
+	s << "\tpStages = " << value.pStages << '\n';
+	s << "\tpVertexInputState = " << value.pVertexInputState << '\n';
+	s << "\tpInputAssemblyState = " << value.pInputAssemblyState << '\n';
+	s << "\tpTessellationState = " << value.pTessellationState << '\n';
+	s << "\tpViewportState = " << value.pViewportState << '\n';
+	s << "\tpRasterState = " << value.pRasterState << '\n';
+	s << "\tpMultisampleState = " << value.pMultisampleState << '\n';
+	s << "\tpDepthStencilState = " << value.pDepthStencilState << '\n';
+	s << "\tpColorBlendState = " << value.pColorBlendState << '\n';
+	s << "\tflags = " << getPipelineCreateFlagsStr(value.flags) << '\n';
+	s << "\tlayout = " << value.layout << '\n';
+	s << "\trenderPass = " << value.renderPass << '\n';
+	s << "\tsubpass = " << value.subpass << '\n';
+	s << "\tbasePipelineHandle = " << value.basePipelineHandle << '\n';
+	s << "\tbasePipelineIndex = " << value.basePipelineIndex << '\n';
+	s << '}';
+	return s;
+}
+
+std::ostream& operator<< (std::ostream& s, const VkComputePipelineCreateInfo& value)
+{
+	s << "VkComputePipelineCreateInfo = {\n";
+	s << "\tsType = " << value.sType << '\n';
+	s << "\tpNext = " << value.pNext << '\n';
+	s << "\tcs = " << value.cs << '\n';
+	s << "\tflags = " << getPipelineCreateFlagsStr(value.flags) << '\n';
+	s << "\tlayout = " << value.layout << '\n';
+	s << "\tbasePipelineHandle = " << value.basePipelineHandle << '\n';
+	s << "\tbasePipelineIndex = " << value.basePipelineIndex << '\n';
+	s << '}';
+	return s;
+}
+
+std::ostream& operator<< (std::ostream& s, const VkPushConstantRange& value)
+{
+	s << "VkPushConstantRange = {\n";
+	s << "\tstageFlags = " << getShaderStageFlagsStr(value.stageFlags) << '\n';
+	s << "\tstart = " << value.start << '\n';
+	s << "\tlength = " << value.length << '\n';
+	s << '}';
+	return s;
+}
+
+std::ostream& operator<< (std::ostream& s, const VkPipelineLayoutCreateInfo& value)
+{
+	s << "VkPipelineLayoutCreateInfo = {\n";
+	s << "\tsType = " << value.sType << '\n';
+	s << "\tpNext = " << value.pNext << '\n';
+	s << "\tdescriptorSetCount = " << value.descriptorSetCount << '\n';
+	s << "\tpSetLayouts = " << value.pSetLayouts << '\n';
+	s << "\tpushConstantRangeCount = " << value.pushConstantRangeCount << '\n';
+	s << "\tpPushConstantRanges = " << value.pPushConstantRanges << '\n';
+	s << '}';
+	return s;
+}
+
+// Debug formatters for sampler, descriptor-set, viewport/scissor geometry and
+// dynamic-state structures. NOTE(review): generated-looking code -- regenerate
+// rather than hand-edit. Array members (e.g. blendConst) are expanded via
+// tcu::formatArray; VkDescriptorInfo prints every union-style member, so only
+// the field selected by descriptorType is meaningful.
+std::ostream& operator<< (std::ostream& s, const VkSamplerCreateInfo& value)
+{
+	s << "VkSamplerCreateInfo = {\n";
+	s << "\tsType = " << value.sType << '\n';
+	s << "\tpNext = " << value.pNext << '\n';
+	s << "\tmagFilter = " << value.magFilter << '\n';
+	s << "\tminFilter = " << value.minFilter << '\n';
+	s << "\tmipMode = " << value.mipMode << '\n';
+	s << "\taddressU = " << value.addressU << '\n';
+	s << "\taddressV = " << value.addressV << '\n';
+	s << "\taddressW = " << value.addressW << '\n';
+	s << "\tmipLodBias = " << value.mipLodBias << '\n';
+	s << "\tmaxAnisotropy = " << value.maxAnisotropy << '\n';
+	s << "\tcompareEnable = " << value.compareEnable << '\n';
+	s << "\tcompareOp = " << value.compareOp << '\n';
+	s << "\tminLod = " << value.minLod << '\n';
+	s << "\tmaxLod = " << value.maxLod << '\n';
+	s << "\tborderColor = " << value.borderColor << '\n';
+	s << '}';
+	return s;
+}
+
+std::ostream& operator<< (std::ostream& s, const VkDescriptorSetLayoutBinding& value)
+{
+	s << "VkDescriptorSetLayoutBinding = {\n";
+	s << "\tdescriptorType = " << value.descriptorType << '\n';
+	s << "\tarraySize = " << value.arraySize << '\n';
+	s << "\tstageFlags = " << getShaderStageFlagsStr(value.stageFlags) << '\n';
+	s << "\tpImmutableSamplers = " << value.pImmutableSamplers << '\n';
+	s << '}';
+	return s;
+}
+
+std::ostream& operator<< (std::ostream& s, const VkDescriptorSetLayoutCreateInfo& value)
+{
+	s << "VkDescriptorSetLayoutCreateInfo = {\n";
+	s << "\tsType = " << value.sType << '\n';
+	s << "\tpNext = " << value.pNext << '\n';
+	s << "\tcount = " << value.count << '\n';
+	s << "\tpBinding = " << value.pBinding << '\n';
+	s << '}';
+	return s;
+}
+
+std::ostream& operator<< (std::ostream& s, const VkDescriptorTypeCount& value)
+{
+	s << "VkDescriptorTypeCount = {\n";
+	s << "\ttype = " << value.type << '\n';
+	s << "\tcount = " << value.count << '\n';
+	s << '}';
+	return s;
+}
+
+std::ostream& operator<< (std::ostream& s, const VkDescriptorPoolCreateInfo& value)
+{
+	s << "VkDescriptorPoolCreateInfo = {\n";
+	s << "\tsType = " << value.sType << '\n';
+	s << "\tpNext = " << value.pNext << '\n';
+	s << "\tcount = " << value.count << '\n';
+	s << "\tpTypeCount = " << value.pTypeCount << '\n';
+	s << '}';
+	return s;
+}
+
+std::ostream& operator<< (std::ostream& s, const VkDescriptorInfo& value)
+{
+	s << "VkDescriptorInfo = {\n";
+	s << "\tbufferView = " << value.bufferView << '\n';
+	s << "\tsampler = " << value.sampler << '\n';
+	s << "\timageView = " << value.imageView << '\n';
+	s << "\tattachmentView = " << value.attachmentView << '\n';
+	s << "\timageLayout = " << value.imageLayout << '\n';
+	s << '}';
+	return s;
+}
+
+std::ostream& operator<< (std::ostream& s, const VkWriteDescriptorSet& value)
+{
+	s << "VkWriteDescriptorSet = {\n";
+	s << "\tsType = " << value.sType << '\n';
+	s << "\tpNext = " << value.pNext << '\n';
+	s << "\tdestSet = " << value.destSet << '\n';
+	s << "\tdestBinding = " << value.destBinding << '\n';
+	s << "\tdestArrayElement = " << value.destArrayElement << '\n';
+	s << "\tcount = " << value.count << '\n';
+	s << "\tdescriptorType = " << value.descriptorType << '\n';
+	s << "\tpDescriptors = " << value.pDescriptors << '\n';
+	s << '}';
+	return s;
+}
+
+std::ostream& operator<< (std::ostream& s, const VkCopyDescriptorSet& value)
+{
+	s << "VkCopyDescriptorSet = {\n";
+	s << "\tsType = " << value.sType << '\n';
+	s << "\tpNext = " << value.pNext << '\n';
+	s << "\tsrcSet = " << value.srcSet << '\n';
+	s << "\tsrcBinding = " << value.srcBinding << '\n';
+	s << "\tsrcArrayElement = " << value.srcArrayElement << '\n';
+	s << "\tdestSet = " << value.destSet << '\n';
+	s << "\tdestBinding = " << value.destBinding << '\n';
+	s << "\tdestArrayElement = " << value.destArrayElement << '\n';
+	s << "\tcount = " << value.count << '\n';
+	s << '}';
+	return s;
+}
+
+std::ostream& operator<< (std::ostream& s, const VkViewport& value)
+{
+	s << "VkViewport = {\n";
+	s << "\toriginX = " << value.originX << '\n';
+	s << "\toriginY = " << value.originY << '\n';
+	s << "\twidth = " << value.width << '\n';
+	s << "\theight = " << value.height << '\n';
+	s << "\tminDepth = " << value.minDepth << '\n';
+	s << "\tmaxDepth = " << value.maxDepth << '\n';
+	s << '}';
+	return s;
+}
+
+std::ostream& operator<< (std::ostream& s, const VkOffset2D& value)
+{
+	s << "VkOffset2D = {\n";
+	s << "\tx = " << value.x << '\n';
+	s << "\ty = " << value.y << '\n';
+	s << '}';
+	return s;
+}
+
+std::ostream& operator<< (std::ostream& s, const VkExtent2D& value)
+{
+	s << "VkExtent2D = {\n";
+	s << "\twidth = " << value.width << '\n';
+	s << "\theight = " << value.height << '\n';
+	s << '}';
+	return s;
+}
+
+std::ostream& operator<< (std::ostream& s, const VkRect2D& value)
+{
+	s << "VkRect2D = {\n";
+	s << "\toffset = " << value.offset << '\n';
+	s << "\textent = " << value.extent << '\n';
+	s << '}';
+	return s;
+}
+
+std::ostream& operator<< (std::ostream& s, const VkDynamicViewportStateCreateInfo& value)
+{
+	s << "VkDynamicViewportStateCreateInfo = {\n";
+	s << "\tsType = " << value.sType << '\n';
+	s << "\tpNext = " << value.pNext << '\n';
+	s << "\tviewportAndScissorCount = " << value.viewportAndScissorCount << '\n';
+	s << "\tpViewports = " << value.pViewports << '\n';
+	s << "\tpScissors = " << value.pScissors << '\n';
+	s << '}';
+	return s;
+}
+
+std::ostream& operator<< (std::ostream& s, const VkDynamicRasterStateCreateInfo& value)
+{
+	s << "VkDynamicRasterStateCreateInfo = {\n";
+	s << "\tsType = " << value.sType << '\n';
+	s << "\tpNext = " << value.pNext << '\n';
+	s << "\tdepthBias = " << value.depthBias << '\n';
+	s << "\tdepthBiasClamp = " << value.depthBiasClamp << '\n';
+	s << "\tslopeScaledDepthBias = " << value.slopeScaledDepthBias << '\n';
+	s << "\tlineWidth = " << value.lineWidth << '\n';
+	s << '}';
+	return s;
+}
+
+std::ostream& operator<< (std::ostream& s, const VkDynamicColorBlendStateCreateInfo& value)
+{
+	s << "VkDynamicColorBlendStateCreateInfo = {\n";
+	s << "\tsType = " << value.sType << '\n';
+	s << "\tpNext = " << value.pNext << '\n';
+	s << "\tblendConst = " << '\n' << tcu::formatArray(DE_ARRAY_BEGIN(value.blendConst), DE_ARRAY_END(value.blendConst)) << '\n';
+	s << '}';
+	return s;
+}
+
+// Debug formatters for dynamic depth/stencil state, framebuffer, render-pass
+// and command-buffer structures. NOTE(review): generated-looking code --
+// regenerate rather than hand-edit. Subpass attachment arrays are printed as
+// pointers only; stage/memory masks go through their get*FlagsStr helpers.
+std::ostream& operator<< (std::ostream& s, const VkDynamicDepthStencilStateCreateInfo& value)
+{
+	s << "VkDynamicDepthStencilStateCreateInfo = {\n";
+	s << "\tsType = " << value.sType << '\n';
+	s << "\tpNext = " << value.pNext << '\n';
+	s << "\tminDepthBounds = " << value.minDepthBounds << '\n';
+	s << "\tmaxDepthBounds = " << value.maxDepthBounds << '\n';
+	s << "\tstencilReadMask = " << value.stencilReadMask << '\n';
+	s << "\tstencilWriteMask = " << value.stencilWriteMask << '\n';
+	s << "\tstencilFrontRef = " << value.stencilFrontRef << '\n';
+	s << "\tstencilBackRef = " << value.stencilBackRef << '\n';
+	s << '}';
+	return s;
+}
+
+std::ostream& operator<< (std::ostream& s, const VkAttachmentBindInfo& value)
+{
+	s << "VkAttachmentBindInfo = {\n";
+	s << "\tview = " << value.view << '\n';
+	s << "\tlayout = " << value.layout << '\n';
+	s << '}';
+	return s;
+}
+
+std::ostream& operator<< (std::ostream& s, const VkFramebufferCreateInfo& value)
+{
+	s << "VkFramebufferCreateInfo = {\n";
+	s << "\tsType = " << value.sType << '\n';
+	s << "\tpNext = " << value.pNext << '\n';
+	s << "\trenderPass = " << value.renderPass << '\n';
+	s << "\tattachmentCount = " << value.attachmentCount << '\n';
+	s << "\tpAttachments = " << value.pAttachments << '\n';
+	s << "\twidth = " << value.width << '\n';
+	s << "\theight = " << value.height << '\n';
+	s << "\tlayers = " << value.layers << '\n';
+	s << '}';
+	return s;
+}
+
+std::ostream& operator<< (std::ostream& s, const VkAttachmentDescription& value)
+{
+	s << "VkAttachmentDescription = {\n";
+	s << "\tsType = " << value.sType << '\n';
+	s << "\tpNext = " << value.pNext << '\n';
+	s << "\tformat = " << value.format << '\n';
+	s << "\tsamples = " << value.samples << '\n';
+	s << "\tloadOp = " << value.loadOp << '\n';
+	s << "\tstoreOp = " << value.storeOp << '\n';
+	s << "\tstencilLoadOp = " << value.stencilLoadOp << '\n';
+	s << "\tstencilStoreOp = " << value.stencilStoreOp << '\n';
+	s << "\tinitialLayout = " << value.initialLayout << '\n';
+	s << "\tfinalLayout = " << value.finalLayout << '\n';
+	s << '}';
+	return s;
+}
+
+std::ostream& operator<< (std::ostream& s, const VkAttachmentReference& value)
+{
+	s << "VkAttachmentReference = {\n";
+	s << "\tattachment = " << value.attachment << '\n';
+	s << "\tlayout = " << value.layout << '\n';
+	s << '}';
+	return s;
+}
+
+std::ostream& operator<< (std::ostream& s, const VkSubpassDescription& value)
+{
+	s << "VkSubpassDescription = {\n";
+	s << "\tsType = " << value.sType << '\n';
+	s << "\tpNext = " << value.pNext << '\n';
+	s << "\tpipelineBindPoint = " << value.pipelineBindPoint << '\n';
+	s << "\tflags = " << getSubpassDescriptionFlagsStr(value.flags) << '\n';
+	s << "\tinputCount = " << value.inputCount << '\n';
+	s << "\tinputAttachments = " << value.inputAttachments << '\n';
+	s << "\tcolorCount = " << value.colorCount << '\n';
+	s << "\tcolorAttachments = " << value.colorAttachments << '\n';
+	s << "\tresolveAttachments = " << value.resolveAttachments << '\n';
+	s << "\tdepthStencilAttachment = " << value.depthStencilAttachment << '\n';
+	s << "\tpreserveCount = " << value.preserveCount << '\n';
+	s << "\tpreserveAttachments = " << value.preserveAttachments << '\n';
+	s << '}';
+	return s;
+}
+
+std::ostream& operator<< (std::ostream& s, const VkSubpassDependency& value)
+{
+	s << "VkSubpassDependency = {\n";
+	s << "\tsType = " << value.sType << '\n';
+	s << "\tpNext = " << value.pNext << '\n';
+	s << "\tsrcSubpass = " << value.srcSubpass << '\n';
+	s << "\tdestSubpass = " << value.destSubpass << '\n';
+	s << "\tsrcStageMask = " << getPipelineStageFlagsStr(value.srcStageMask) << '\n';
+	s << "\tdestStageMask = " << getPipelineStageFlagsStr(value.destStageMask) << '\n';
+	s << "\toutputMask = " << getMemoryOutputFlagsStr(value.outputMask) << '\n';
+	s << "\tinputMask = " << getMemoryInputFlagsStr(value.inputMask) << '\n';
+	s << "\tbyRegion = " << value.byRegion << '\n';
+	s << '}';
+	return s;
+}
+
+std::ostream& operator<< (std::ostream& s, const VkRenderPassCreateInfo& value)
+{
+	s << "VkRenderPassCreateInfo = {\n";
+	s << "\tsType = " << value.sType << '\n';
+	s << "\tpNext = " << value.pNext << '\n';
+	s << "\tattachmentCount = " << value.attachmentCount << '\n';
+	s << "\tpAttachments = " << value.pAttachments << '\n';
+	s << "\tsubpassCount = " << value.subpassCount << '\n';
+	s << "\tpSubpasses = " << value.pSubpasses << '\n';
+	s << "\tdependencyCount = " << value.dependencyCount << '\n';
+	s << "\tpDependencies = " << value.pDependencies << '\n';
+	s << '}';
+	return s;
+}
+
+std::ostream& operator<< (std::ostream& s, const VkCmdPoolCreateInfo& value)
+{
+	s << "VkCmdPoolCreateInfo = {\n";
+	s << "\tsType = " << value.sType << '\n';
+	s << "\tpNext = " << value.pNext << '\n';
+	s << "\tqueueFamilyIndex = " << value.queueFamilyIndex << '\n';
+	s << "\tflags = " << getCmdPoolCreateFlagsStr(value.flags) << '\n';
+	s << '}';
+	return s;
+}
+
+std::ostream& operator<< (std::ostream& s, const VkCmdBufferCreateInfo& value)
+{
+	s << "VkCmdBufferCreateInfo = {\n";
+	s << "\tsType = " << value.sType << '\n';
+	s << "\tpNext = " << value.pNext << '\n';
+	s << "\tcmdPool = " << value.cmdPool << '\n';
+	s << "\tlevel = " << value.level << '\n';
+	s << "\tflags = " << value.flags << '\n';
+	s << '}';
+	return s;
+}
+
+std::ostream& operator<< (std::ostream& s, const VkCmdBufferBeginInfo& value)
+{
+	s << "VkCmdBufferBeginInfo = {\n";
+	s << "\tsType = " << value.sType << '\n';
+	s << "\tpNext = " << value.pNext << '\n';
+	s << "\tflags = " << getCmdBufferOptimizeFlagsStr(value.flags) << '\n';
+	s << "\trenderPass = " << value.renderPass << '\n';
+	s << "\tframebuffer = " << value.framebuffer << '\n';
+	s << '}';
+	return s;
+}
+
+// Debug formatters for copy/blit/resolve regions, clear values and barrier /
+// indirect-command structures. NOTE(review): generated-looking code --
+// regenerate rather than hand-edit. VkClearColorValue and VkClearValue are
+// union-like: every member view is printed, so only the one matching the
+// attachment format is meaningful.
+std::ostream& operator<< (std::ostream& s, const VkBufferCopy& value)
+{
+	s << "VkBufferCopy = {\n";
+	s << "\tsrcOffset = " << value.srcOffset << '\n';
+	s << "\tdestOffset = " << value.destOffset << '\n';
+	s << "\tcopySize = " << value.copySize << '\n';
+	s << '}';
+	return s;
+}
+
+std::ostream& operator<< (std::ostream& s, const VkImageCopy& value)
+{
+	s << "VkImageCopy = {\n";
+	s << "\tsrcSubresource = " << value.srcSubresource << '\n';
+	s << "\tsrcOffset = " << value.srcOffset << '\n';
+	s << "\tdestSubresource = " << value.destSubresource << '\n';
+	s << "\tdestOffset = " << value.destOffset << '\n';
+	s << "\textent = " << value.extent << '\n';
+	s << '}';
+	return s;
+}
+
+std::ostream& operator<< (std::ostream& s, const VkImageBlit& value)
+{
+	s << "VkImageBlit = {\n";
+	s << "\tsrcSubresource = " << value.srcSubresource << '\n';
+	s << "\tsrcOffset = " << value.srcOffset << '\n';
+	s << "\tsrcExtent = " << value.srcExtent << '\n';
+	s << "\tdestSubresource = " << value.destSubresource << '\n';
+	s << "\tdestOffset = " << value.destOffset << '\n';
+	s << "\tdestExtent = " << value.destExtent << '\n';
+	s << '}';
+	return s;
+}
+
+std::ostream& operator<< (std::ostream& s, const VkBufferImageCopy& value)
+{
+	s << "VkBufferImageCopy = {\n";
+	s << "\tbufferOffset = " << value.bufferOffset << '\n';
+	s << "\tbufferRowLength = " << value.bufferRowLength << '\n';
+	s << "\tbufferImageHeight = " << value.bufferImageHeight << '\n';
+	s << "\timageSubresource = " << value.imageSubresource << '\n';
+	s << "\timageOffset = " << value.imageOffset << '\n';
+	s << "\timageExtent = " << value.imageExtent << '\n';
+	s << '}';
+	return s;
+}
+
+std::ostream& operator<< (std::ostream& s, const VkClearColorValue& value)
+{
+	s << "VkClearColorValue = {\n";
+	s << "\tf32 = " << '\n' << tcu::formatArray(DE_ARRAY_BEGIN(value.f32), DE_ARRAY_END(value.f32)) << '\n';
+	s << "\ts32 = " << '\n' << tcu::formatArray(DE_ARRAY_BEGIN(value.s32), DE_ARRAY_END(value.s32)) << '\n';
+	s << "\tu32 = " << '\n' << tcu::formatArray(DE_ARRAY_BEGIN(value.u32), DE_ARRAY_END(value.u32)) << '\n';
+	s << '}';
+	return s;
+}
+
+std::ostream& operator<< (std::ostream& s, const VkRect3D& value)
+{
+	s << "VkRect3D = {\n";
+	s << "\toffset = " << value.offset << '\n';
+	s << "\textent = " << value.extent << '\n';
+	s << '}';
+	return s;
+}
+
+std::ostream& operator<< (std::ostream& s, const VkImageResolve& value)
+{
+	s << "VkImageResolve = {\n";
+	s << "\tsrcSubresource = " << value.srcSubresource << '\n';
+	s << "\tsrcOffset = " << value.srcOffset << '\n';
+	s << "\tdestSubresource = " << value.destSubresource << '\n';
+	s << "\tdestOffset = " << value.destOffset << '\n';
+	s << "\textent = " << value.extent << '\n';
+	s << '}';
+	return s;
+}
+
+std::ostream& operator<< (std::ostream& s, const VkClearDepthStencilValue& value)
+{
+	s << "VkClearDepthStencilValue = {\n";
+	s << "\tdepth = " << value.depth << '\n';
+	s << "\tstencil = " << value.stencil << '\n';
+	s << '}';
+	return s;
+}
+
+std::ostream& operator<< (std::ostream& s, const VkClearValue& value)
+{
+	s << "VkClearValue = {\n";
+	s << "\tcolor = " << value.color << '\n';
+	s << "\tds = " << value.ds << '\n';
+	s << '}';
+	return s;
+}
+
+std::ostream& operator<< (std::ostream& s, const VkRenderPassBeginInfo& value)
+{
+	s << "VkRenderPassBeginInfo = {\n";
+	s << "\tsType = " << value.sType << '\n';
+	s << "\tpNext = " << value.pNext << '\n';
+	s << "\trenderPass = " << value.renderPass << '\n';
+	s << "\tframebuffer = " << value.framebuffer << '\n';
+	s << "\trenderArea = " << value.renderArea << '\n';
+	s << "\tattachmentCount = " << value.attachmentCount << '\n';
+	s << "\tpAttachmentClearValues = " << value.pAttachmentClearValues << '\n';
+	s << '}';
+	return s;
+}
+
+std::ostream& operator<< (std::ostream& s, const VkBufferMemoryBarrier& value)
+{
+	s << "VkBufferMemoryBarrier = {\n";
+	s << "\tsType = " << value.sType << '\n';
+	s << "\tpNext = " << value.pNext << '\n';
+	s << "\toutputMask = " << getMemoryOutputFlagsStr(value.outputMask) << '\n';
+	s << "\tinputMask = " << getMemoryInputFlagsStr(value.inputMask) << '\n';
+	s << "\tsrcQueueFamilyIndex = " << value.srcQueueFamilyIndex << '\n';
+	s << "\tdestQueueFamilyIndex = " << value.destQueueFamilyIndex << '\n';
+	s << "\tbuffer = " << value.buffer << '\n';
+	s << "\toffset = " << value.offset << '\n';
+	s << "\tsize = " << value.size << '\n';
+	s << '}';
+	return s;
+}
+
+std::ostream& operator<< (std::ostream& s, const VkDispatchIndirectCmd& value)
+{
+	s << "VkDispatchIndirectCmd = {\n";
+	s << "\tx = " << value.x << '\n';
+	s << "\ty = " << value.y << '\n';
+	s << "\tz = " << value.z << '\n';
+	s << '}';
+	return s;
+}
+
+std::ostream& operator<< (std::ostream& s, const VkDrawIndexedIndirectCmd& value)
+{
+ s << "VkDrawIndexedIndirectCmd = {\n";
+ s << "\tindexCount = " << value.indexCount << '\n';
+ s << "\tinstanceCount = " << value.instanceCount << '\n';
+ s << "\tfirstIndex = " << value.firstIndex << '\n';
+ s << "\tvertexOffset = " << value.vertexOffset << '\n';
+ s << "\tfirstInstance = " << value.firstInstance << '\n';
+ s << '}';
+ return s;
+}
+
+std::ostream& operator<< (std::ostream& s, const VkDrawIndirectCmd& value)
+{
+ s << "VkDrawIndirectCmd = {\n";
+ s << "\tvertexCount = " << value.vertexCount << '\n';
+ s << "\tinstanceCount = " << value.instanceCount << '\n';
+ s << "\tfirstVertex = " << value.firstVertex << '\n';
+ s << "\tfirstInstance = " << value.firstInstance << '\n';
+ s << '}';
+ return s;
+}
+
+std::ostream& operator<< (std::ostream& s, const VkImageMemoryBarrier& value)
+{
+ s << "VkImageMemoryBarrier = {\n";
+ s << "\tsType = " << value.sType << '\n';
+ s << "\tpNext = " << value.pNext << '\n';
+ s << "\toutputMask = " << getMemoryOutputFlagsStr(value.outputMask) << '\n';
+ s << "\tinputMask = " << getMemoryInputFlagsStr(value.inputMask) << '\n';
+ s << "\toldLayout = " << value.oldLayout << '\n';
+ s << "\tnewLayout = " << value.newLayout << '\n';
+ s << "\tsrcQueueFamilyIndex = " << value.srcQueueFamilyIndex << '\n';
+ s << "\tdestQueueFamilyIndex = " << value.destQueueFamilyIndex << '\n';
+ s << "\timage = " << value.image << '\n';
+ s << "\tsubresourceRange = " << value.subresourceRange << '\n';
+ s << '}';
+ return s;
+}
+
+std::ostream& operator<< (std::ostream& s, const VkMemoryBarrier& value)
+{
+ s << "VkMemoryBarrier = {\n";
+ s << "\tsType = " << value.sType << '\n';
+ s << "\tpNext = " << value.pNext << '\n';
+ s << "\toutputMask = " << getMemoryOutputFlagsStr(value.outputMask) << '\n';
+ s << "\tinputMask = " << getMemoryInputFlagsStr(value.inputMask) << '\n';
+ s << '}';
+ return s;
+}
--- /dev/null
+/* WARNING: This is auto-generated file. Do not modify, since changes will
+ * be lost! Modify the generating script instead.
+ */
+struct VkApplicationInfo
+{
+ VkStructureType sType;
+ const void* pNext;
+ const char* pAppName;
+ deUint32 appVersion;
+ const char* pEngineName;
+ deUint32 engineVersion;
+ deUint32 apiVersion;
+};
+
+struct VkAllocCallbacks
+{
+ void* pUserData;
+ PFN_vkAllocFunction pfnAlloc;
+ PFN_vkFreeFunction pfnFree;
+};
+
+struct VkInstanceCreateInfo
+{
+ VkStructureType sType;
+ const void* pNext;
+ const VkApplicationInfo* pAppInfo;
+ const VkAllocCallbacks* pAllocCb;
+ deUint32 layerCount;
+ const char*const* ppEnabledLayerNames;
+ deUint32 extensionCount;
+ const char*const* ppEnabledExtensionNames;
+};
+
+struct VkPhysicalDeviceFeatures
+{
+ VkBool32 robustBufferAccess;
+ VkBool32 fullDrawIndexUint32;
+ VkBool32 imageCubeArray;
+ VkBool32 independentBlend;
+ VkBool32 geometryShader;
+ VkBool32 tessellationShader;
+ VkBool32 sampleRateShading;
+ VkBool32 dualSourceBlend;
+ VkBool32 logicOp;
+ VkBool32 instancedDrawIndirect;
+ VkBool32 depthClip;
+ VkBool32 depthBiasClamp;
+ VkBool32 fillModeNonSolid;
+ VkBool32 depthBounds;
+ VkBool32 wideLines;
+ VkBool32 largePoints;
+ VkBool32 textureCompressionETC2;
+ VkBool32 textureCompressionASTC_LDR;
+ VkBool32 textureCompressionBC;
+ VkBool32 pipelineStatisticsQuery;
+ VkBool32 vertexSideEffects;
+ VkBool32 tessellationSideEffects;
+ VkBool32 geometrySideEffects;
+ VkBool32 fragmentSideEffects;
+ VkBool32 shaderTessellationPointSize;
+ VkBool32 shaderGeometryPointSize;
+ VkBool32 shaderTextureGatherExtended;
+ VkBool32 shaderStorageImageExtendedFormats;
+ VkBool32 shaderStorageImageMultisample;
+ VkBool32 shaderStorageBufferArrayConstantIndexing;
+ VkBool32 shaderStorageImageArrayConstantIndexing;
+ VkBool32 shaderUniformBufferArrayDynamicIndexing;
+ VkBool32 shaderSampledImageArrayDynamicIndexing;
+ VkBool32 shaderStorageBufferArrayDynamicIndexing;
+ VkBool32 shaderStorageImageArrayDynamicIndexing;
+ VkBool32 shaderClipDistance;
+ VkBool32 shaderCullDistance;
+ VkBool32 shaderFloat64;
+ VkBool32 shaderInt64;
+ VkBool32 shaderFloat16;
+ VkBool32 shaderInt16;
+ VkBool32 shaderResourceResidency;
+ VkBool32 shaderResourceMinLOD;
+ VkBool32 sparse;
+ VkBool32 sparseResidencyBuffer;
+ VkBool32 sparseResidencyImage2D;
+ VkBool32 sparseResidencyImage3D;
+ VkBool32 sparseResidency2Samples;
+ VkBool32 sparseResidency4Samples;
+ VkBool32 sparseResidency8Samples;
+ VkBool32 sparseResidency16Samples;
+ VkBool32 sparseResidencyStandard2DBlockShape;
+ VkBool32 sparseResidencyStandard2DMSBlockShape;
+ VkBool32 sparseResidencyStandard3DBlockShape;
+ VkBool32 sparseResidencyAlignedMipSize;
+ VkBool32 sparseResidencyNonResident;
+ VkBool32 sparseResidencyNonResidentStrict;
+ VkBool32 sparseResidencyAliased;
+};
+
+struct VkFormatProperties
+{
+ VkFormatFeatureFlags linearTilingFeatures;
+ VkFormatFeatureFlags optimalTilingFeatures;
+};
+
+struct VkImageFormatProperties
+{
+ deUint64 maxResourceSize;
+ deUint32 maxSamples;
+};
+
+struct VkPhysicalDeviceLimits
+{
+ deUint32 maxImageDimension1D;
+ deUint32 maxImageDimension2D;
+ deUint32 maxImageDimension3D;
+ deUint32 maxImageDimensionCube;
+ deUint32 maxImageArrayLayers;
+ deUint32 maxTexelBufferSize;
+ deUint32 maxUniformBufferSize;
+ deUint32 maxStorageBufferSize;
+ deUint32 maxPushConstantsSize;
+ deUint32 maxMemoryAllocationCount;
+ VkDeviceSize bufferImageGranularity;
+ deUint32 maxBoundDescriptorSets;
+ deUint32 maxDescriptorSets;
+ deUint32 maxPerStageDescriptorSamplers;
+ deUint32 maxPerStageDescriptorUniformBuffers;
+ deUint32 maxPerStageDescriptorStorageBuffers;
+ deUint32 maxPerStageDescriptorSampledImages;
+ deUint32 maxPerStageDescriptorStorageImages;
+ deUint32 maxDescriptorSetSamplers;
+ deUint32 maxDescriptorSetUniformBuffers;
+ deUint32 maxDescriptorSetStorageBuffers;
+ deUint32 maxDescriptorSetSampledImages;
+ deUint32 maxDescriptorSetStorageImages;
+ deUint32 maxVertexInputAttributes;
+ deUint32 maxVertexInputAttributeOffset;
+ deUint32 maxVertexInputBindingStride;
+ deUint32 maxVertexOutputComponents;
+ deUint32 maxTessGenLevel;
+ deUint32 maxTessPatchSize;
+ deUint32 maxTessControlPerVertexInputComponents;
+ deUint32 maxTessControlPerVertexOutputComponents;
+ deUint32 maxTessControlPerPatchOutputComponents;
+ deUint32 maxTessControlTotalOutputComponents;
+ deUint32 maxTessEvaluationInputComponents;
+ deUint32 maxTessEvaluationOutputComponents;
+ deUint32 maxGeometryShaderInvocations;
+ deUint32 maxGeometryInputComponents;
+ deUint32 maxGeometryOutputComponents;
+ deUint32 maxGeometryOutputVertices;
+ deUint32 maxGeometryTotalOutputComponents;
+ deUint32 maxFragmentInputComponents;
+ deUint32 maxFragmentOutputBuffers;
+ deUint32 maxFragmentDualSourceBuffers;
+ deUint32 maxFragmentCombinedOutputResources;
+ deUint32 maxComputeSharedMemorySize;
+ deUint32 maxComputeWorkGroupCount[3];
+ deUint32 maxComputeWorkGroupInvocations;
+ deUint32 maxComputeWorkGroupSize[3];
+ deUint32 subPixelPrecisionBits;
+ deUint32 subTexelPrecisionBits;
+ deUint32 mipmapPrecisionBits;
+ deUint32 maxDrawIndexedIndexValue;
+ deUint32 maxDrawIndirectInstanceCount;
+ VkBool32 primitiveRestartForPatches;
+ float maxSamplerLodBias;
+ float maxSamplerAnisotropy;
+ deUint32 maxViewports;
+ deUint32 maxDynamicViewportStates;
+ deUint32 maxViewportDimensions[2];
+ float viewportBoundsRange[2];
+ deUint32 viewportSubPixelBits;
+ deUint32 minMemoryMapAlignment;
+ deUint32 minTexelBufferOffsetAlignment;
+ deUint32 minUniformBufferOffsetAlignment;
+ deUint32 minStorageBufferOffsetAlignment;
+ deUint32 minTexelOffset;
+ deUint32 maxTexelOffset;
+ deUint32 minTexelGatherOffset;
+ deUint32 maxTexelGatherOffset;
+ float minInterpolationOffset;
+ float maxInterpolationOffset;
+ deUint32 subPixelInterpolationOffsetBits;
+ deUint32 maxFramebufferWidth;
+ deUint32 maxFramebufferHeight;
+ deUint32 maxFramebufferLayers;
+ deUint32 maxFramebufferColorSamples;
+ deUint32 maxFramebufferDepthSamples;
+ deUint32 maxFramebufferStencilSamples;
+ deUint32 maxColorAttachments;
+ deUint32 maxSampledImageColorSamples;
+ deUint32 maxSampledImageDepthSamples;
+ deUint32 maxSampledImageIntegerSamples;
+ deUint32 maxStorageImageSamples;
+ deUint32 maxSampleMaskWords;
+ deUint64 timestampFrequency;
+ deUint32 maxClipDistances;
+ deUint32 maxCullDistances;
+ deUint32 maxCombinedClipAndCullDistances;
+ float pointSizeRange[2];
+ float lineWidthRange[2];
+ float pointSizeGranularity;
+ float lineWidthGranularity;
+};
+
+struct VkPhysicalDeviceProperties
+{
+ deUint32 apiVersion;
+ deUint32 driverVersion;
+ deUint32 vendorId;
+ deUint32 deviceId;
+ VkPhysicalDeviceType deviceType;
+ char deviceName[VK_MAX_PHYSICAL_DEVICE_NAME];
+ deUint8 pipelineCacheUUID[VK_UUID_LENGTH];
+};
+
+struct VkPhysicalDeviceQueueProperties
+{
+ VkQueueFlags queueFlags;
+ deUint32 queueCount;
+ VkBool32 supportsTimestamps;
+};
+
+struct VkMemoryType
+{
+ VkMemoryPropertyFlags propertyFlags;
+ deUint32 heapIndex;
+};
+
+struct VkMemoryHeap
+{
+ VkDeviceSize size;
+ VkMemoryHeapFlags flags;
+};
+
+struct VkPhysicalDeviceMemoryProperties
+{
+ deUint32 memoryTypeCount;
+ VkMemoryType memoryTypes[VK_MAX_MEMORY_TYPES];
+ deUint32 memoryHeapCount;
+ VkMemoryHeap memoryHeaps[VK_MAX_MEMORY_HEAPS];
+};
+
+struct VkDeviceQueueCreateInfo
+{
+ deUint32 queueFamilyIndex;
+ deUint32 queueCount;
+};
+
+struct VkDeviceCreateInfo
+{
+ VkStructureType sType;
+ const void* pNext;
+ deUint32 queueRecordCount;
+ const VkDeviceQueueCreateInfo* pRequestedQueues;
+ deUint32 layerCount;
+ const char*const* ppEnabledLayerNames;
+ deUint32 extensionCount;
+ const char*const* ppEnabledExtensionNames;
+ const VkPhysicalDeviceFeatures* pEnabledFeatures;
+ VkDeviceCreateFlags flags;
+};
+
+struct VkExtensionProperties
+{
+ char extName[VK_MAX_EXTENSION_NAME];
+ deUint32 specVersion;
+};
+
+struct VkLayerProperties
+{
+ char layerName[VK_MAX_EXTENSION_NAME];
+ deUint32 specVersion;
+ deUint32 implVersion;
+ char description[VK_MAX_DESCRIPTION];
+};
+
+struct VkMemoryAllocInfo
+{
+ VkStructureType sType;
+ const void* pNext;
+ VkDeviceSize allocationSize;
+ deUint32 memoryTypeIndex;
+};
+
+struct VkMappedMemoryRange
+{
+ VkStructureType sType;
+ const void* pNext;
+ VkDeviceMemory mem;
+ VkDeviceSize offset;
+ VkDeviceSize size;
+};
+
+struct VkMemoryRequirements
+{
+ VkDeviceSize size;
+ VkDeviceSize alignment;
+ deUint32 memoryTypeBits;
+};
+
+struct VkExtent3D
+{
+ deInt32 width;
+ deInt32 height;
+ deInt32 depth;
+};
+
+struct VkSparseImageFormatProperties
+{
+ VkImageAspect aspect;
+ VkExtent3D imageGranularity;
+ VkSparseImageFormatFlags flags;
+};
+
+struct VkSparseImageMemoryRequirements
+{
+ VkSparseImageFormatProperties formatProps;
+ deUint32 imageMipTailStartLOD;
+ VkDeviceSize imageMipTailSize;
+ VkDeviceSize imageMipTailOffset;
+ VkDeviceSize imageMipTailStride;
+};
+
+struct VkSparseMemoryBindInfo
+{
+ VkDeviceSize rangeOffset;
+ VkDeviceSize rangeSize;
+ VkDeviceSize memOffset;
+ VkDeviceMemory mem;
+ VkSparseMemoryBindFlags flags;
+};
+
+struct VkImageSubresource
+{
+ VkImageAspect aspect;
+ deUint32 mipLevel;
+ deUint32 arraySlice;
+};
+
+struct VkOffset3D
+{
+ deInt32 x;
+ deInt32 y;
+ deInt32 z;
+};
+
+struct VkSparseImageMemoryBindInfo
+{
+ VkImageSubresource subresource;
+ VkOffset3D offset;
+ VkExtent3D extent;
+ VkDeviceSize memOffset;
+ VkDeviceMemory mem;
+ VkSparseMemoryBindFlags flags;
+};
+
+struct VkFenceCreateInfo
+{
+ VkStructureType sType;
+ const void* pNext;
+ VkFenceCreateFlags flags;
+};
+
+struct VkSemaphoreCreateInfo
+{
+ VkStructureType sType;
+ const void* pNext;
+ VkSemaphoreCreateFlags flags;
+};
+
+struct VkEventCreateInfo
+{
+ VkStructureType sType;
+ const void* pNext;
+ VkEventCreateFlags flags;
+};
+
+struct VkQueryPoolCreateInfo
+{
+ VkStructureType sType;
+ const void* pNext;
+ VkQueryType queryType;
+ deUint32 slots;
+ VkQueryPipelineStatisticFlags pipelineStatistics;
+};
+
+struct VkBufferCreateInfo
+{
+ VkStructureType sType;
+ const void* pNext;
+ VkDeviceSize size;
+ VkBufferUsageFlags usage;
+ VkBufferCreateFlags flags;
+ VkSharingMode sharingMode;
+ deUint32 queueFamilyCount;
+ const deUint32* pQueueFamilyIndices;
+};
+
+struct VkBufferViewCreateInfo
+{
+ VkStructureType sType;
+ const void* pNext;
+ VkBuffer buffer;
+ VkBufferViewType viewType;
+ VkFormat format;
+ VkDeviceSize offset;
+ VkDeviceSize range;
+};
+
+struct VkImageCreateInfo
+{
+ VkStructureType sType;
+ const void* pNext;
+ VkImageType imageType;
+ VkFormat format;
+ VkExtent3D extent;
+ deUint32 mipLevels;
+ deUint32 arraySize;
+ deUint32 samples;
+ VkImageTiling tiling;
+ VkImageUsageFlags usage;
+ VkImageCreateFlags flags;
+ VkSharingMode sharingMode;
+ deUint32 queueFamilyCount;
+ const deUint32* pQueueFamilyIndices;
+};
+
+struct VkSubresourceLayout
+{
+ VkDeviceSize offset;
+ VkDeviceSize size;
+ VkDeviceSize rowPitch;
+ VkDeviceSize depthPitch;
+};
+
+struct VkChannelMapping
+{
+ VkChannelSwizzle r;
+ VkChannelSwizzle g;
+ VkChannelSwizzle b;
+ VkChannelSwizzle a;
+};
+
+struct VkImageSubresourceRange
+{
+ VkImageAspect aspect;
+ deUint32 baseMipLevel;
+ deUint32 mipLevels;
+ deUint32 baseArraySlice;
+ deUint32 arraySize;
+};
+
+struct VkImageViewCreateInfo
+{
+ VkStructureType sType;
+ const void* pNext;
+ VkImage image;
+ VkImageViewType viewType;
+ VkFormat format;
+ VkChannelMapping channels;
+ VkImageSubresourceRange subresourceRange;
+};
+
+struct VkAttachmentViewCreateInfo
+{
+ VkStructureType sType;
+ const void* pNext;
+ VkImage image;
+ VkFormat format;
+ deUint32 mipLevel;
+ deUint32 baseArraySlice;
+ deUint32 arraySize;
+ VkAttachmentViewCreateFlags flags;
+};
+
+struct VkShaderModuleCreateInfo
+{
+ VkStructureType sType;
+ const void* pNext;
+ deUintptr codeSize;
+ const void* pCode;
+ VkShaderModuleCreateFlags flags;
+};
+
+struct VkShaderCreateInfo
+{
+ VkStructureType sType;
+ const void* pNext;
+ VkShaderModule module;
+ const char* pName;
+ VkShaderCreateFlags flags;
+};
+
+struct VkPipelineCacheCreateInfo
+{
+ VkStructureType sType;
+ const void* pNext;
+ deUintptr initialSize;
+ const void* initialData;
+ deUintptr maxSize;
+};
+
+struct VkSpecializationMapEntry
+{
+ deUint32 constantId;
+ deUintptr size;
+ deUint32 offset;
+};
+
+struct VkSpecializationInfo
+{
+ deUint32 mapEntryCount;
+ const VkSpecializationMapEntry* pMap;
+ deUintptr dataSize;
+ const void* pData;
+};
+
+struct VkPipelineShaderStageCreateInfo
+{
+ VkStructureType sType;
+ const void* pNext;
+ VkShaderStage stage;
+ VkShader shader;
+ const VkSpecializationInfo* pSpecializationInfo;
+};
+
+struct VkVertexInputBindingDescription
+{
+ deUint32 binding;
+ deUint32 strideInBytes;
+ VkVertexInputStepRate stepRate;
+};
+
+struct VkVertexInputAttributeDescription
+{
+ deUint32 location;
+ deUint32 binding;
+ VkFormat format;
+ deUint32 offsetInBytes;
+};
+
+struct VkPipelineVertexInputStateCreateInfo
+{
+ VkStructureType sType;
+ const void* pNext;
+ deUint32 bindingCount;
+ const VkVertexInputBindingDescription* pVertexBindingDescriptions;
+ deUint32 attributeCount;
+ const VkVertexInputAttributeDescription* pVertexAttributeDescriptions;
+};
+
+struct VkPipelineInputAssemblyStateCreateInfo
+{
+ VkStructureType sType;
+ const void* pNext;
+ VkPrimitiveTopology topology;
+ VkBool32 primitiveRestartEnable;
+};
+
+struct VkPipelineTessellationStateCreateInfo
+{
+ VkStructureType sType;
+ const void* pNext;
+ deUint32 patchControlPoints;
+};
+
+struct VkPipelineViewportStateCreateInfo
+{
+ VkStructureType sType;
+ const void* pNext;
+ deUint32 viewportCount;
+};
+
+struct VkPipelineRasterStateCreateInfo
+{
+ VkStructureType sType;
+ const void* pNext;
+ VkBool32 depthClipEnable;
+ VkBool32 rasterizerDiscardEnable;
+ VkFillMode fillMode;
+ VkCullMode cullMode;
+ VkFrontFace frontFace;
+};
+
+struct VkPipelineMultisampleStateCreateInfo
+{
+ VkStructureType sType;
+ const void* pNext;
+ deUint32 rasterSamples;
+ VkBool32 sampleShadingEnable;
+ float minSampleShading;
+ VkSampleMask sampleMask;
+};
+
+struct VkStencilOpState
+{
+ VkStencilOp stencilFailOp;
+ VkStencilOp stencilPassOp;
+ VkStencilOp stencilDepthFailOp;
+ VkCompareOp stencilCompareOp;
+};
+
+struct VkPipelineDepthStencilStateCreateInfo
+{
+ VkStructureType sType;
+ const void* pNext;
+ VkBool32 depthTestEnable;
+ VkBool32 depthWriteEnable;
+ VkCompareOp depthCompareOp;
+ VkBool32 depthBoundsEnable;
+ VkBool32 stencilTestEnable;
+ VkStencilOpState front;
+ VkStencilOpState back;
+};
+
+struct VkPipelineColorBlendAttachmentState
+{
+ VkBool32 blendEnable;
+ VkBlend srcBlendColor;
+ VkBlend destBlendColor;
+ VkBlendOp blendOpColor;
+ VkBlend srcBlendAlpha;
+ VkBlend destBlendAlpha;
+ VkBlendOp blendOpAlpha;
+ VkChannelFlags channelWriteMask;
+};
+
+struct VkPipelineColorBlendStateCreateInfo
+{
+ VkStructureType sType;
+ const void* pNext;
+ VkBool32 alphaToCoverageEnable;
+ VkBool32 logicOpEnable;
+ VkLogicOp logicOp;
+ deUint32 attachmentCount;
+ const VkPipelineColorBlendAttachmentState* pAttachments;
+};
+
+struct VkGraphicsPipelineCreateInfo
+{
+ VkStructureType sType;
+ const void* pNext;
+ deUint32 stageCount;
+ const VkPipelineShaderStageCreateInfo* pStages;
+ const VkPipelineVertexInputStateCreateInfo* pVertexInputState;
+ const VkPipelineInputAssemblyStateCreateInfo* pInputAssemblyState;
+ const VkPipelineTessellationStateCreateInfo* pTessellationState;
+ const VkPipelineViewportStateCreateInfo* pViewportState;
+ const VkPipelineRasterStateCreateInfo* pRasterState;
+ const VkPipelineMultisampleStateCreateInfo* pMultisampleState;
+ const VkPipelineDepthStencilStateCreateInfo* pDepthStencilState;
+ const VkPipelineColorBlendStateCreateInfo* pColorBlendState;
+ VkPipelineCreateFlags flags;
+ VkPipelineLayout layout;
+ VkRenderPass renderPass;
+ deUint32 subpass;
+ VkPipeline basePipelineHandle;
+ deInt32 basePipelineIndex;
+};
+
+struct VkComputePipelineCreateInfo
+{
+ VkStructureType sType;
+ const void* pNext;
+ VkPipelineShaderStageCreateInfo cs;
+ VkPipelineCreateFlags flags;
+ VkPipelineLayout layout;
+ VkPipeline basePipelineHandle;
+ deInt32 basePipelineIndex;
+};
+
+struct VkPushConstantRange
+{
+ VkShaderStageFlags stageFlags;
+ deUint32 start;
+ deUint32 length;
+};
+
+struct VkPipelineLayoutCreateInfo
+{
+ VkStructureType sType;
+ const void* pNext;
+ deUint32 descriptorSetCount;
+ const VkDescriptorSetLayout* pSetLayouts;
+ deUint32 pushConstantRangeCount;
+ const VkPushConstantRange* pPushConstantRanges;
+};
+
+struct VkSamplerCreateInfo
+{
+ VkStructureType sType;
+ const void* pNext;
+ VkTexFilter magFilter;
+ VkTexFilter minFilter;
+ VkTexMipmapMode mipMode;
+ VkTexAddress addressU;
+ VkTexAddress addressV;
+ VkTexAddress addressW;
+ float mipLodBias;
+ float maxAnisotropy;
+ VkBool32 compareEnable;
+ VkCompareOp compareOp;
+ float minLod;
+ float maxLod;
+ VkBorderColor borderColor;
+};
+
+struct VkDescriptorSetLayoutBinding
+{
+ VkDescriptorType descriptorType;
+ deUint32 arraySize;
+ VkShaderStageFlags stageFlags;
+ const VkSampler* pImmutableSamplers;
+};
+
+struct VkDescriptorSetLayoutCreateInfo
+{
+ VkStructureType sType;
+ const void* pNext;
+ deUint32 count;
+ const VkDescriptorSetLayoutBinding* pBinding;
+};
+
+struct VkDescriptorTypeCount
+{
+ VkDescriptorType type;
+ deUint32 count;
+};
+
+struct VkDescriptorPoolCreateInfo
+{
+ VkStructureType sType;
+ const void* pNext;
+ deUint32 count;
+ const VkDescriptorTypeCount* pTypeCount;
+};
+
+struct VkDescriptorInfo
+{
+ VkBufferView bufferView;
+ VkSampler sampler;
+ VkImageView imageView;
+ VkAttachmentView attachmentView;
+ VkImageLayout imageLayout;
+};
+
+struct VkWriteDescriptorSet
+{
+ VkStructureType sType;
+ const void* pNext;
+ VkDescriptorSet destSet;
+ deUint32 destBinding;
+ deUint32 destArrayElement;
+ deUint32 count;
+ VkDescriptorType descriptorType;
+ const VkDescriptorInfo* pDescriptors;
+};
+
+struct VkCopyDescriptorSet
+{
+ VkStructureType sType;
+ const void* pNext;
+ VkDescriptorSet srcSet;
+ deUint32 srcBinding;
+ deUint32 srcArrayElement;
+ VkDescriptorSet destSet;
+ deUint32 destBinding;
+ deUint32 destArrayElement;
+ deUint32 count;
+};
+
+struct VkViewport
+{
+ float originX;
+ float originY;
+ float width;
+ float height;
+ float minDepth;
+ float maxDepth;
+};
+
+struct VkOffset2D
+{
+ deInt32 x;
+ deInt32 y;
+};
+
+struct VkExtent2D
+{
+ deInt32 width;
+ deInt32 height;
+};
+
+struct VkRect2D
+{
+ VkOffset2D offset;
+ VkExtent2D extent;
+};
+
+struct VkDynamicViewportStateCreateInfo
+{
+ VkStructureType sType;
+ const void* pNext;
+ deUint32 viewportAndScissorCount;
+ const VkViewport* pViewports;
+ const VkRect2D* pScissors;
+};
+
+struct VkDynamicRasterStateCreateInfo
+{
+ VkStructureType sType;
+ const void* pNext;
+ float depthBias;
+ float depthBiasClamp;
+ float slopeScaledDepthBias;
+ float lineWidth;
+};
+
+struct VkDynamicColorBlendStateCreateInfo
+{
+ VkStructureType sType;
+ const void* pNext;
+ float blendConst[4];
+};
+
+struct VkDynamicDepthStencilStateCreateInfo
+{
+ VkStructureType sType;
+ const void* pNext;
+ float minDepthBounds;
+ float maxDepthBounds;
+ deUint32 stencilReadMask;
+ deUint32 stencilWriteMask;
+ deUint32 stencilFrontRef;
+ deUint32 stencilBackRef;
+};
+
+struct VkAttachmentBindInfo
+{
+ VkAttachmentView view;
+ VkImageLayout layout;
+};
+
+struct VkFramebufferCreateInfo
+{
+ VkStructureType sType;
+ const void* pNext;
+ VkRenderPass renderPass;
+ deUint32 attachmentCount;
+ const VkAttachmentBindInfo* pAttachments;
+ deUint32 width;
+ deUint32 height;
+ deUint32 layers;
+};
+
+struct VkAttachmentDescription
+{
+ VkStructureType sType;
+ const void* pNext;
+ VkFormat format;
+ deUint32 samples;
+ VkAttachmentLoadOp loadOp;
+ VkAttachmentStoreOp storeOp;
+ VkAttachmentLoadOp stencilLoadOp;
+ VkAttachmentStoreOp stencilStoreOp;
+ VkImageLayout initialLayout;
+ VkImageLayout finalLayout;
+};
+
+struct VkAttachmentReference
+{
+ deUint32 attachment;
+ VkImageLayout layout;
+};
+
+struct VkSubpassDescription
+{
+ VkStructureType sType;
+ const void* pNext;
+ VkPipelineBindPoint pipelineBindPoint;
+ VkSubpassDescriptionFlags flags;
+ deUint32 inputCount;
+ const VkAttachmentReference* inputAttachments;
+ deUint32 colorCount;
+ const VkAttachmentReference* colorAttachments;
+ const VkAttachmentReference* resolveAttachments;
+ VkAttachmentReference depthStencilAttachment;
+ deUint32 preserveCount;
+ const VkAttachmentReference* preserveAttachments;
+};
+
+struct VkSubpassDependency
+{
+ VkStructureType sType;
+ const void* pNext;
+ deUint32 srcSubpass;
+ deUint32 destSubpass;
+ VkPipelineStageFlags srcStageMask;
+ VkPipelineStageFlags destStageMask;
+ VkMemoryOutputFlags outputMask;
+ VkMemoryInputFlags inputMask;
+ VkBool32 byRegion;
+};
+
+struct VkRenderPassCreateInfo
+{
+ VkStructureType sType;
+ const void* pNext;
+ deUint32 attachmentCount;
+ const VkAttachmentDescription* pAttachments;
+ deUint32 subpassCount;
+ const VkSubpassDescription* pSubpasses;
+ deUint32 dependencyCount;
+ const VkSubpassDependency* pDependencies;
+};
+
+struct VkCmdPoolCreateInfo
+{
+ VkStructureType sType;
+ const void* pNext;
+ deUint32 queueFamilyIndex;
+ VkCmdPoolCreateFlags flags;
+};
+
+struct VkCmdBufferCreateInfo
+{
+ VkStructureType sType;
+ const void* pNext;
+ VkCmdPool cmdPool;
+ VkCmdBufferLevel level;
+ VkCmdBufferCreateFlags flags;
+};
+
+struct VkCmdBufferBeginInfo
+{
+ VkStructureType sType;
+ const void* pNext;
+ VkCmdBufferOptimizeFlags flags;
+ VkRenderPass renderPass;
+ VkFramebuffer framebuffer;
+};
+
+struct VkBufferCopy
+{
+ VkDeviceSize srcOffset;
+ VkDeviceSize destOffset;
+ VkDeviceSize copySize;
+};
+
+struct VkImageCopy
+{
+ VkImageSubresource srcSubresource;
+ VkOffset3D srcOffset;
+ VkImageSubresource destSubresource;
+ VkOffset3D destOffset;
+ VkExtent3D extent;
+};
+
+struct VkImageBlit
+{
+ VkImageSubresource srcSubresource;
+ VkOffset3D srcOffset;
+ VkExtent3D srcExtent;
+ VkImageSubresource destSubresource;
+ VkOffset3D destOffset;
+ VkExtent3D destExtent;
+};
+
+struct VkBufferImageCopy
+{
+ VkDeviceSize bufferOffset;
+ deUint32 bufferRowLength;
+ deUint32 bufferImageHeight;
+ VkImageSubresource imageSubresource;
+ VkOffset3D imageOffset;
+ VkExtent3D imageExtent;
+};
+
+union VkClearColorValue
+{
+ float f32[4];
+ deInt32 s32[4];
+ deUint32 u32[4];
+};
+
+struct VkRect3D
+{
+ VkOffset3D offset;
+ VkExtent3D extent;
+};
+
+struct VkImageResolve
+{
+ VkImageSubresource srcSubresource;
+ VkOffset3D srcOffset;
+ VkImageSubresource destSubresource;
+ VkOffset3D destOffset;
+ VkExtent3D extent;
+};
+
+struct VkClearDepthStencilValue
+{
+ float depth;
+ deUint32 stencil;
+};
+
+union VkClearValue
+{
+ VkClearColorValue color;
+ VkClearDepthStencilValue ds;
+};
+
+struct VkRenderPassBeginInfo
+{
+ VkStructureType sType;
+ const void* pNext;
+ VkRenderPass renderPass;
+ VkFramebuffer framebuffer;
+ VkRect2D renderArea;
+ deUint32 attachmentCount;
+ const VkClearValue* pAttachmentClearValues;
+};
+
+struct VkBufferMemoryBarrier
+{
+ VkStructureType sType;
+ const void* pNext;
+ VkMemoryOutputFlags outputMask;
+ VkMemoryInputFlags inputMask;
+ deUint32 srcQueueFamilyIndex;
+ deUint32 destQueueFamilyIndex;
+ VkBuffer buffer;
+ VkDeviceSize offset;
+ VkDeviceSize size;
+};
+
+struct VkDispatchIndirectCmd
+{
+ deUint32 x;
+ deUint32 y;
+ deUint32 z;
+};
+
+struct VkDrawIndexedIndirectCmd
+{
+ deUint32 indexCount;
+ deUint32 instanceCount;
+ deUint32 firstIndex;
+ deInt32 vertexOffset;
+ deUint32 firstInstance;
+};
+
+struct VkDrawIndirectCmd
+{
+ deUint32 vertexCount;
+ deUint32 instanceCount;
+ deUint32 firstVertex;
+ deUint32 firstInstance;
+};
+
+struct VkImageMemoryBarrier
+{
+ VkStructureType sType;
+ const void* pNext;
+ VkMemoryOutputFlags outputMask;
+ VkMemoryInputFlags inputMask;
+ VkImageLayout oldLayout;
+ VkImageLayout newLayout;
+ deUint32 srcQueueFamilyIndex;
+ deUint32 destQueueFamilyIndex;
+ VkImage image;
+ VkImageSubresourceRange subresourceRange;
+};
+
+struct VkMemoryBarrier
+{
+ VkStructureType sType;
+ const void* pNext;
+ VkMemoryOutputFlags outputMask;
+ VkMemoryInputFlags inputMask;
+};
+
--- /dev/null
+/* WARNING: This is auto-generated file. Do not modify, since changes will
+ * be lost! Modify the generating script instead.
+ */
+virtual VkResult destroyDevice (VkDevice device) const = 0;
+virtual VkResult getGlobalExtensionProperties (const char* pLayerName, deUint32* pCount, VkExtensionProperties* pProperties) const = 0;
+virtual VkResult getPhysicalDeviceExtensionProperties (VkPhysicalDevice physicalDevice, const char* pLayerName, deUint32* pCount, VkExtensionProperties* pProperties) const = 0;
+virtual VkResult getGlobalLayerProperties (deUint32* pCount, VkLayerProperties* pProperties) const = 0;
+virtual VkResult getPhysicalDeviceLayerProperties (VkPhysicalDevice physicalDevice, deUint32* pCount, VkLayerProperties* pProperties) const = 0;
+virtual VkResult getDeviceQueue (VkDevice device, deUint32 queueFamilyIndex, deUint32 queueIndex, VkQueue* pQueue) const = 0;
+virtual VkResult queueSubmit (VkQueue queue, deUint32 cmdBufferCount, const VkCmdBuffer* pCmdBuffers, VkFence fence) const = 0;
+virtual VkResult queueWaitIdle (VkQueue queue) const = 0;
+virtual VkResult deviceWaitIdle (VkDevice device) const = 0;
+virtual VkResult allocMemory (VkDevice device, const VkMemoryAllocInfo* pAllocInfo, VkDeviceMemory* pMem) const = 0;
+virtual VkResult freeMemory (VkDevice device, VkDeviceMemory mem) const = 0;
+virtual VkResult mapMemory (VkDevice device, VkDeviceMemory mem, VkDeviceSize offset, VkDeviceSize size, VkMemoryMapFlags flags, void** ppData) const = 0;
+virtual VkResult unmapMemory (VkDevice device, VkDeviceMemory mem) const = 0;
+virtual VkResult flushMappedMemoryRanges (VkDevice device, deUint32 memRangeCount, const VkMappedMemoryRange* pMemRanges) const = 0;
+virtual VkResult invalidateMappedMemoryRanges (VkDevice device, deUint32 memRangeCount, const VkMappedMemoryRange* pMemRanges) const = 0;
+virtual VkResult getDeviceMemoryCommitment (VkDevice device, VkDeviceMemory memory, VkDeviceSize* pCommittedMemoryInBytes) const = 0;
+virtual VkResult bindBufferMemory (VkDevice device, VkBuffer buffer, VkDeviceMemory mem, VkDeviceSize memOffset) const = 0;
+virtual VkResult bindImageMemory (VkDevice device, VkImage image, VkDeviceMemory mem, VkDeviceSize memOffset) const = 0;
+virtual VkResult getBufferMemoryRequirements (VkDevice device, VkBuffer buffer, VkMemoryRequirements* pMemoryRequirements) const = 0;
+virtual VkResult getImageMemoryRequirements (VkDevice device, VkImage image, VkMemoryRequirements* pMemoryRequirements) const = 0;
+virtual VkResult getImageSparseMemoryRequirements (VkDevice device, VkImage image, deUint32* pNumRequirements, VkSparseImageMemoryRequirements* pSparseMemoryRequirements) const = 0;
+virtual VkResult getPhysicalDeviceSparseImageFormatProperties (VkPhysicalDevice physicalDevice, VkFormat format, VkImageType type, deUint32 samples, VkImageUsageFlags usage, VkImageTiling tiling, deUint32* pNumProperties, VkSparseImageFormatProperties* pProperties) const = 0;
+virtual VkResult queueBindSparseBufferMemory (VkQueue queue, VkBuffer buffer, deUint32 numBindings, const VkSparseMemoryBindInfo* pBindInfo) const = 0;
+virtual VkResult queueBindSparseImageOpaqueMemory (VkQueue queue, VkImage image, deUint32 numBindings, const VkSparseMemoryBindInfo* pBindInfo) const = 0;
+virtual VkResult queueBindSparseImageMemory (VkQueue queue, VkImage image, deUint32 numBindings, const VkSparseImageMemoryBindInfo* pBindInfo) const = 0;
+virtual VkResult createFence (VkDevice device, const VkFenceCreateInfo* pCreateInfo, VkFence* pFence) const = 0;
+virtual VkResult destroyFence (VkDevice device, VkFence fence) const = 0;
+virtual VkResult resetFences (VkDevice device, deUint32 fenceCount, const VkFence* pFences) const = 0;
+virtual VkResult getFenceStatus (VkDevice device, VkFence fence) const = 0;
+virtual VkResult waitForFences (VkDevice device, deUint32 fenceCount, const VkFence* pFences, VkBool32 waitAll, deUint64 timeout) const = 0;
+virtual VkResult createSemaphore (VkDevice device, const VkSemaphoreCreateInfo* pCreateInfo, VkSemaphore* pSemaphore) const = 0;
+virtual VkResult destroySemaphore (VkDevice device, VkSemaphore semaphore) const = 0;
+virtual VkResult queueSignalSemaphore (VkQueue queue, VkSemaphore semaphore) const = 0;
+virtual VkResult queueWaitSemaphore (VkQueue queue, VkSemaphore semaphore) const = 0;
+virtual VkResult createEvent (VkDevice device, const VkEventCreateInfo* pCreateInfo, VkEvent* pEvent) const = 0;
+virtual VkResult destroyEvent (VkDevice device, VkEvent event) const = 0;
+virtual VkResult getEventStatus (VkDevice device, VkEvent event) const = 0;
+virtual VkResult setEvent (VkDevice device, VkEvent event) const = 0;
+virtual VkResult resetEvent (VkDevice device, VkEvent event) const = 0;
+virtual VkResult createQueryPool (VkDevice device, const VkQueryPoolCreateInfo* pCreateInfo, VkQueryPool* pQueryPool) const = 0;
+virtual VkResult destroyQueryPool (VkDevice device, VkQueryPool queryPool) const = 0;
+virtual VkResult getQueryPoolResults (VkDevice device, VkQueryPool queryPool, deUint32 startQuery, deUint32 queryCount, deUintptr* pDataSize, void* pData, VkQueryResultFlags flags) const = 0;
+virtual VkResult createBuffer (VkDevice device, const VkBufferCreateInfo* pCreateInfo, VkBuffer* pBuffer) const = 0;
+virtual VkResult destroyBuffer (VkDevice device, VkBuffer buffer) const = 0;
+virtual VkResult createBufferView (VkDevice device, const VkBufferViewCreateInfo* pCreateInfo, VkBufferView* pView) const = 0;
+virtual VkResult destroyBufferView (VkDevice device, VkBufferView bufferView) const = 0;
+virtual VkResult createImage (VkDevice device, const VkImageCreateInfo* pCreateInfo, VkImage* pImage) const = 0;
+virtual VkResult destroyImage (VkDevice device, VkImage image) const = 0;
+virtual VkResult getImageSubresourceLayout (VkDevice device, VkImage image, const VkImageSubresource* pSubresource, VkSubresourceLayout* pLayout) const = 0;
+virtual VkResult createImageView (VkDevice device, const VkImageViewCreateInfo* pCreateInfo, VkImageView* pView) const = 0;
+virtual VkResult destroyImageView (VkDevice device, VkImageView imageView) const = 0;
+virtual VkResult createAttachmentView (VkDevice device, const VkAttachmentViewCreateInfo* pCreateInfo, VkAttachmentView* pView) const = 0;
+virtual VkResult destroyAttachmentView (VkDevice device, VkAttachmentView attachmentView) const = 0;
+virtual VkResult createShaderModule (VkDevice device, const VkShaderModuleCreateInfo* pCreateInfo, VkShaderModule* pShaderModule) const = 0;
+virtual VkResult destroyShaderModule (VkDevice device, VkShaderModule shaderModule) const = 0;
+virtual VkResult createShader (VkDevice device, const VkShaderCreateInfo* pCreateInfo, VkShader* pShader) const = 0;
+virtual VkResult destroyShader (VkDevice device, VkShader shader) const = 0;
+virtual VkResult createPipelineCache (VkDevice device, const VkPipelineCacheCreateInfo* pCreateInfo, VkPipelineCache* pPipelineCache) const = 0;
+virtual VkResult destroyPipelineCache (VkDevice device, VkPipelineCache pipelineCache) const = 0;
+virtual deUintptr getPipelineCacheSize (VkDevice device, VkPipelineCache pipelineCache) const = 0;
+virtual VkResult getPipelineCacheData (VkDevice device, VkPipelineCache pipelineCache, void* pData) const = 0;
+virtual VkResult mergePipelineCaches (VkDevice device, VkPipelineCache destCache, deUint32 srcCacheCount, const VkPipelineCache* pSrcCaches) const = 0;
+virtual VkResult createGraphicsPipelines (VkDevice device, VkPipelineCache pipelineCache, deUint32 count, const VkGraphicsPipelineCreateInfo* pCreateInfos, VkPipeline* pPipelines) const = 0;
+virtual VkResult createComputePipelines (VkDevice device, VkPipelineCache pipelineCache, deUint32 count, const VkComputePipelineCreateInfo* pCreateInfos, VkPipeline* pPipelines) const = 0;
+virtual VkResult destroyPipeline (VkDevice device, VkPipeline pipeline) const = 0;
+virtual VkResult createPipelineLayout (VkDevice device, const VkPipelineLayoutCreateInfo* pCreateInfo, VkPipelineLayout* pPipelineLayout) const = 0;
+virtual VkResult destroyPipelineLayout (VkDevice device, VkPipelineLayout pipelineLayout) const = 0;
+virtual VkResult createSampler (VkDevice device, const VkSamplerCreateInfo* pCreateInfo, VkSampler* pSampler) const = 0;
+virtual VkResult destroySampler (VkDevice device, VkSampler sampler) const = 0;
+virtual VkResult createDescriptorSetLayout (VkDevice device, const VkDescriptorSetLayoutCreateInfo* pCreateInfo, VkDescriptorSetLayout* pSetLayout) const = 0;
+virtual VkResult destroyDescriptorSetLayout (VkDevice device, VkDescriptorSetLayout descriptorSetLayout) const = 0;
+virtual VkResult createDescriptorPool (VkDevice device, VkDescriptorPoolUsage poolUsage, deUint32 maxSets, const VkDescriptorPoolCreateInfo* pCreateInfo, VkDescriptorPool* pDescriptorPool) const = 0;
+virtual VkResult destroyDescriptorPool (VkDevice device, VkDescriptorPool descriptorPool) const = 0;
+virtual VkResult resetDescriptorPool (VkDevice device, VkDescriptorPool descriptorPool) const = 0;
+virtual VkResult allocDescriptorSets (VkDevice device, VkDescriptorPool descriptorPool, VkDescriptorSetUsage setUsage, deUint32 count, const VkDescriptorSetLayout* pSetLayouts, VkDescriptorSet* pDescriptorSets, deUint32* pCount) const = 0;
+virtual VkResult freeDescriptorSets (VkDevice device, VkDescriptorPool descriptorPool, deUint32 count, const VkDescriptorSet* pDescriptorSets) const = 0;
+virtual VkResult updateDescriptorSets (VkDevice device, deUint32 writeCount, const VkWriteDescriptorSet* pDescriptorWrites, deUint32 copyCount, const VkCopyDescriptorSet* pDescriptorCopies) const = 0;
+virtual VkResult createDynamicViewportState (VkDevice device, const VkDynamicViewportStateCreateInfo* pCreateInfo, VkDynamicViewportState* pState) const = 0;
+virtual VkResult destroyDynamicViewportState (VkDevice device, VkDynamicViewportState dynamicViewportState) const = 0;
+virtual VkResult createDynamicRasterState (VkDevice device, const VkDynamicRasterStateCreateInfo* pCreateInfo, VkDynamicRasterState* pState) const = 0;
+virtual VkResult destroyDynamicRasterState (VkDevice device, VkDynamicRasterState dynamicRasterState) const = 0;
+virtual VkResult createDynamicColorBlendState (VkDevice device, const VkDynamicColorBlendStateCreateInfo* pCreateInfo, VkDynamicColorBlendState* pState) const = 0;
+virtual VkResult destroyDynamicColorBlendState (VkDevice device, VkDynamicColorBlendState dynamicColorBlendState) const = 0;
+virtual VkResult createDynamicDepthStencilState (VkDevice device, const VkDynamicDepthStencilStateCreateInfo* pCreateInfo, VkDynamicDepthStencilState* pState) const = 0;
+virtual VkResult destroyDynamicDepthStencilState (VkDevice device, VkDynamicDepthStencilState dynamicDepthStencilState) const = 0;
+virtual VkResult createFramebuffer (VkDevice device, const VkFramebufferCreateInfo* pCreateInfo, VkFramebuffer* pFramebuffer) const = 0;
+virtual VkResult destroyFramebuffer (VkDevice device, VkFramebuffer framebuffer) const = 0;
+virtual VkResult createRenderPass (VkDevice device, const VkRenderPassCreateInfo* pCreateInfo, VkRenderPass* pRenderPass) const = 0;
+virtual VkResult destroyRenderPass (VkDevice device, VkRenderPass renderPass) const = 0;
+virtual VkResult getRenderAreaGranularity (VkDevice device, VkRenderPass renderPass, VkExtent2D* pGranularity) const = 0;
+virtual VkResult createCommandPool (VkDevice device, const VkCmdPoolCreateInfo* pCreateInfo, VkCmdPool* pCmdPool) const = 0;
+virtual VkResult destroyCommandPool (VkDevice device, VkCmdPool cmdPool) const = 0;
+virtual VkResult resetCommandPool (VkDevice device, VkCmdPool cmdPool, VkCmdPoolResetFlags flags) const = 0;
+virtual VkResult createCommandBuffer (VkDevice device, const VkCmdBufferCreateInfo* pCreateInfo, VkCmdBuffer* pCmdBuffer) const = 0;
+virtual VkResult destroyCommandBuffer (VkDevice device, VkCmdBuffer commandBuffer) const = 0;
+virtual VkResult beginCommandBuffer (VkCmdBuffer cmdBuffer, const VkCmdBufferBeginInfo* pBeginInfo) const = 0;
+virtual VkResult endCommandBuffer (VkCmdBuffer cmdBuffer) const = 0;
+virtual VkResult resetCommandBuffer (VkCmdBuffer cmdBuffer, VkCmdBufferResetFlags flags) const = 0;
+virtual void cmdBindPipeline (VkCmdBuffer cmdBuffer, VkPipelineBindPoint pipelineBindPoint, VkPipeline pipeline) const = 0;
+virtual void cmdBindDynamicViewportState (VkCmdBuffer cmdBuffer, VkDynamicViewportState dynamicViewportState) const = 0;
+virtual void cmdBindDynamicRasterState (VkCmdBuffer cmdBuffer, VkDynamicRasterState dynamicRasterState) const = 0;
+virtual void cmdBindDynamicColorBlendState (VkCmdBuffer cmdBuffer, VkDynamicColorBlendState dynamicColorBlendState) const = 0;
+virtual void cmdBindDynamicDepthStencilState (VkCmdBuffer cmdBuffer, VkDynamicDepthStencilState dynamicDepthStencilState) const = 0;
+virtual void cmdBindDescriptorSets (VkCmdBuffer cmdBuffer, VkPipelineBindPoint pipelineBindPoint, VkPipelineLayout layout, deUint32 firstSet, deUint32 setCount, const VkDescriptorSet* pDescriptorSets, deUint32 dynamicOffsetCount, const deUint32* pDynamicOffsets) const = 0;
+virtual void cmdBindIndexBuffer (VkCmdBuffer cmdBuffer, VkBuffer buffer, VkDeviceSize offset, VkIndexType indexType) const = 0;
+virtual void cmdBindVertexBuffers (VkCmdBuffer cmdBuffer, deUint32 startBinding, deUint32 bindingCount, const VkBuffer* pBuffers, const VkDeviceSize* pOffsets) const = 0;
+virtual void cmdDraw (VkCmdBuffer cmdBuffer, deUint32 firstVertex, deUint32 vertexCount, deUint32 firstInstance, deUint32 instanceCount) const = 0;
+virtual void cmdDrawIndexed (VkCmdBuffer cmdBuffer, deUint32 firstIndex, deUint32 indexCount, deInt32 vertexOffset, deUint32 firstInstance, deUint32 instanceCount) const = 0;
+virtual void cmdDrawIndirect (VkCmdBuffer cmdBuffer, VkBuffer buffer, VkDeviceSize offset, deUint32 count, deUint32 stride) const = 0;
+virtual void cmdDrawIndexedIndirect (VkCmdBuffer cmdBuffer, VkBuffer buffer, VkDeviceSize offset, deUint32 count, deUint32 stride) const = 0;
+virtual void cmdDispatch (VkCmdBuffer cmdBuffer, deUint32 x, deUint32 y, deUint32 z) const = 0;
+virtual void cmdDispatchIndirect (VkCmdBuffer cmdBuffer, VkBuffer buffer, VkDeviceSize offset) const = 0;
+virtual void cmdCopyBuffer (VkCmdBuffer cmdBuffer, VkBuffer srcBuffer, VkBuffer destBuffer, deUint32 regionCount, const VkBufferCopy* pRegions) const = 0;
+virtual void cmdCopyImage (VkCmdBuffer cmdBuffer, VkImage srcImage, VkImageLayout srcImageLayout, VkImage destImage, VkImageLayout destImageLayout, deUint32 regionCount, const VkImageCopy* pRegions) const = 0;
+virtual void cmdBlitImage (VkCmdBuffer cmdBuffer, VkImage srcImage, VkImageLayout srcImageLayout, VkImage destImage, VkImageLayout destImageLayout, deUint32 regionCount, const VkImageBlit* pRegions, VkTexFilter filter) const = 0;
+virtual void cmdCopyBufferToImage (VkCmdBuffer cmdBuffer, VkBuffer srcBuffer, VkImage destImage, VkImageLayout destImageLayout, deUint32 regionCount, const VkBufferImageCopy* pRegions) const = 0;
+virtual void cmdCopyImageToBuffer (VkCmdBuffer cmdBuffer, VkImage srcImage, VkImageLayout srcImageLayout, VkBuffer destBuffer, deUint32 regionCount, const VkBufferImageCopy* pRegions) const = 0;
+virtual void cmdUpdateBuffer (VkCmdBuffer cmdBuffer, VkBuffer destBuffer, VkDeviceSize destOffset, VkDeviceSize dataSize, const deUint32* pData) const = 0;
+virtual void cmdFillBuffer (VkCmdBuffer cmdBuffer, VkBuffer destBuffer, VkDeviceSize destOffset, VkDeviceSize fillSize, deUint32 data) const = 0;
+virtual void cmdClearColorImage (VkCmdBuffer cmdBuffer, VkImage image, VkImageLayout imageLayout, const VkClearColorValue* pColor, deUint32 rangeCount, const VkImageSubresourceRange* pRanges) const = 0;
+virtual void cmdClearDepthStencilImage (VkCmdBuffer cmdBuffer, VkImage image, VkImageLayout imageLayout, float depth, deUint32 stencil, deUint32 rangeCount, const VkImageSubresourceRange* pRanges) const = 0;
+virtual void cmdClearColorAttachment (VkCmdBuffer cmdBuffer, deUint32 colorAttachment, VkImageLayout imageLayout, const VkClearColorValue* pColor, deUint32 rectCount, const VkRect3D* pRects) const = 0;
+virtual void cmdClearDepthStencilAttachment (VkCmdBuffer cmdBuffer, VkImageAspectFlags imageAspectMask, VkImageLayout imageLayout, float depth, deUint32 stencil, deUint32 rectCount, const VkRect3D* pRects) const = 0;
+virtual void cmdResolveImage (VkCmdBuffer cmdBuffer, VkImage srcImage, VkImageLayout srcImageLayout, VkImage destImage, VkImageLayout destImageLayout, deUint32 regionCount, const VkImageResolve* pRegions) const = 0;
+virtual void cmdSetEvent (VkCmdBuffer cmdBuffer, VkEvent event, VkPipelineStageFlags stageMask) const = 0;
+virtual void cmdResetEvent (VkCmdBuffer cmdBuffer, VkEvent event, VkPipelineStageFlags stageMask) const = 0;
+virtual void cmdWaitEvents (VkCmdBuffer cmdBuffer, deUint32 eventCount, const VkEvent* pEvents, VkPipelineStageFlags srcStageMask, VkPipelineStageFlags destStageMask, deUint32 memBarrierCount, const void* const* ppMemBarriers) const = 0;
+virtual void cmdPipelineBarrier (VkCmdBuffer cmdBuffer, VkPipelineStageFlags srcStageMask, VkPipelineStageFlags destStageMask, VkBool32 byRegion, deUint32 memBarrierCount, const void* const* ppMemBarriers) const = 0;
+virtual void cmdBeginQuery (VkCmdBuffer cmdBuffer, VkQueryPool queryPool, deUint32 slot, VkQueryControlFlags flags) const = 0;
+virtual void cmdEndQuery (VkCmdBuffer cmdBuffer, VkQueryPool queryPool, deUint32 slot) const = 0;
+virtual void cmdResetQueryPool (VkCmdBuffer cmdBuffer, VkQueryPool queryPool, deUint32 startQuery, deUint32 queryCount) const = 0;
+virtual void cmdWriteTimestamp (VkCmdBuffer cmdBuffer, VkTimestampType timestampType, VkBuffer destBuffer, VkDeviceSize destOffset) const = 0;
+virtual void cmdCopyQueryPoolResults (VkCmdBuffer cmdBuffer, VkQueryPool queryPool, deUint32 startQuery, deUint32 queryCount, VkBuffer destBuffer, VkDeviceSize destOffset, VkDeviceSize destStride, VkQueryResultFlags flags) const = 0;
+virtual void cmdPushConstants (VkCmdBuffer cmdBuffer, VkPipelineLayout layout, VkShaderStageFlags stageFlags, deUint32 start, deUint32 length, const void* values) const = 0;
+virtual void cmdBeginRenderPass (VkCmdBuffer cmdBuffer, const VkRenderPassBeginInfo* pRenderPassBegin, VkRenderPassContents contents) const = 0;
+virtual void cmdNextSubpass (VkCmdBuffer cmdBuffer, VkRenderPassContents contents) const = 0;
+virtual void cmdEndRenderPass (VkCmdBuffer cmdBuffer) const = 0;
+virtual void cmdExecuteCommands (VkCmdBuffer cmdBuffer, deUint32 cmdBuffersCount, const VkCmdBuffer* pCmdBuffers) const = 0;
--- /dev/null
+/* WARNING: This is auto-generated file. Do not modify, since changes will
+ * be lost! Modify the generating script instead.
+ */
+virtual VkResult destroyInstance (VkInstance instance) const = 0;
+virtual VkResult enumeratePhysicalDevices (VkInstance instance, deUint32* pPhysicalDeviceCount, VkPhysicalDevice* pPhysicalDevices) const = 0;
+virtual VkResult getPhysicalDeviceFeatures (VkPhysicalDevice physicalDevice, VkPhysicalDeviceFeatures* pFeatures) const = 0;
+virtual VkResult getPhysicalDeviceFormatProperties (VkPhysicalDevice physicalDevice, VkFormat format, VkFormatProperties* pFormatProperties) const = 0;
+virtual VkResult getPhysicalDeviceImageFormatProperties (VkPhysicalDevice physicalDevice, VkFormat format, VkImageType type, VkImageTiling tiling, VkImageUsageFlags usage, VkImageFormatProperties* pImageFormatProperties) const = 0;
+virtual VkResult getPhysicalDeviceLimits (VkPhysicalDevice physicalDevice, VkPhysicalDeviceLimits* pLimits) const = 0;
+virtual VkResult getPhysicalDeviceProperties (VkPhysicalDevice physicalDevice, VkPhysicalDeviceProperties* pProperties) const = 0;
+virtual VkResult getPhysicalDeviceQueueCount (VkPhysicalDevice physicalDevice, deUint32* pCount) const = 0;
+virtual VkResult getPhysicalDeviceQueueProperties (VkPhysicalDevice physicalDevice, deUint32 count, VkPhysicalDeviceQueueProperties* pQueueProperties) const = 0;
+virtual VkResult getPhysicalDeviceMemoryProperties (VkPhysicalDevice physicalDevice, VkPhysicalDeviceMemoryProperties* pMemoryProperties) const = 0;
+virtual PFN_vkVoidFunction getDeviceProcAddr (VkDevice device, const char* pName) const = 0;
+virtual VkResult createDevice (VkPhysicalDevice physicalDevice, const VkDeviceCreateInfo* pCreateInfo, VkDevice* pDevice) const = 0;
--- /dev/null
+/* WARNING: This is auto-generated file. Do not modify, since changes will
+ * be lost! Modify the generating script instead.
+ */
+virtual VkResult createInstance (const VkInstanceCreateInfo* pCreateInfo, VkInstance* pInstance) const = 0;
+virtual PFN_vkVoidFunction getInstanceProcAddr (VkInstance instance, const char* pName) const = 0;
--- /dev/null
+# -*- coding: utf-8 -*-
+
+#-------------------------------------------------------------------------
+# Vulkan CTS
+# ----------
+#
+# Copyright (c) 2015 Google Inc.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and/or associated documentation files (the
+# "Materials"), to deal in the Materials without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish,
+# distribute, sublicense, and/or sell copies of the Materials, and to
+# permit persons to whom the Materials are furnished to do so, subject to
+# the following conditions:
+#
+# The above copyright notice(s) and this permission notice shall be
+# included in all copies or substantial portions of the Materials.
+#
+# The Materials are Confidential Information as defined by the
+# Khronos Membership Agreement until designated non-confidential by
+# Khronos, at which point this condition clause shall be removed.
+#
+# THE MATERIALS ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+# MATERIALS OR THE USE OR OTHER DEALINGS IN THE MATERIALS.
+#
+#-------------------------------------------------------------------------
+
+import os
+import re
+import sys
+
+sys.path.append(os.path.join(os.path.dirname(__file__), "..", "..", "scripts"))
+
+from build.common import DEQP_DIR
+from khr_util.format import indentLines, writeInlFile
+
# Output directory for the generated .inl files (consumed by the C++ framework).
VULKAN_DIR = os.path.join(os.path.dirname(__file__), "framework", "vulkan")

# Header stamped at the top of every generated file. The trailing backslashes
# keep the triple-quoted string free of leading/trailing newlines.
INL_HEADER = """\
/* WARNING: This is auto-generated file. Do not modify, since changes will
 * be lost! Modify the generating script instead.
 */\
"""
+
# Entry points not bound to any dispatchable object; resolved from the
# platform loader (see Function.getType()).
PLATFORM_FUNCTIONS = [
	"vkCreateInstance",
	"vkGetInstanceProcAddr"
]
# Entry points dispatched on VkInstance / VkPhysicalDevice. Any parsed
# function in neither list is classified as device-level.
INSTANCE_FUNCTIONS = [
	"vkDestroyInstance",
	"vkEnumeratePhysicalDevices",
	"vkGetPhysicalDeviceFeatures",
	"vkGetPhysicalDeviceFormatProperties",
	"vkGetPhysicalDeviceImageFormatProperties",
	"vkGetPhysicalDeviceLimits",
	"vkGetPhysicalDeviceProperties",
	"vkGetPhysicalDeviceQueueCount",
	"vkGetPhysicalDeviceQueueProperties",
	"vkGetPhysicalDeviceMemoryProperties",
	"vkCreateDevice",
	"vkGetDeviceProcAddr"
]

# Preprocessor constants whose values are extracted verbatim from vulkan.h
# (see parsePreprocDefinedValue()).
DEFINITIONS = [
	"VK_API_VERSION",
	"VK_MAX_PHYSICAL_DEVICE_NAME",
	"VK_MAX_EXTENSION_NAME",
	"VK_UUID_LENGTH",
	"VK_MAX_MEMORY_TYPES",
	"VK_MAX_MEMORY_HEAPS",
	"VK_MAX_DESCRIPTION",
	"VK_FALSE",
	"VK_TRUE",
	"VK_ATTACHMENT_UNUSED"
]
+
class Handle:
	"""A Vulkan handle type parsed from VK_DEFINE*_HANDLE declarations."""

	# Handle categories: dispatchable vs. non-dispatchable.
	TYPE_DISP = 0
	TYPE_NONDISP = 1

	def __init__ (self, type, name):
		self.type, self.name = type, name

	def getHandleType (self):
		# "VkCmdBuffer" -> "_Vk_Cmd_Buffer" -> drop "_Vk_" -> "HANDLE_TYPE_CMD_BUFFER"
		underscored = re.sub(r'([A-Z])', r'_\1', self.name)
		return "HANDLE_TYPE_%s" % underscored[4:].upper()
+
class Enum:
	"""An enum type: a name plus its (valueName, valueString) pairs."""
	def __init__ (self, name, values):
		self.name, self.values = name, values
+
class Bitfield:
	"""A VkFlags-backed bitfield: a name plus its bit (name, value) pairs."""
	def __init__ (self, name, values):
		self.name, self.values = name, values
+
class Variable:
	"""A typed name: struct member or function parameter."""
	def __init__ (self, type, name):
		self.type, self.name = type, name
+
class CompositeType:
	"""A parsed struct or union with its member Variables."""

	CLASS_STRUCT = 0
	CLASS_UNION = 1

	def __init__ (self, typeClass, name, members):
		self.typeClass, self.name, self.members = typeClass, name, members

	def getClassName (self):
		# C keyword used when emitting the declaration.
		keywords = {
			CompositeType.CLASS_STRUCT: 'struct',
			CompositeType.CLASS_UNION: 'union',
		}
		return keywords[self.typeClass]
+
class Function:
	"""A parsed Vulkan entry point (name, return type, argument Variables)."""

	TYPE_PLATFORM = 0 # Not bound to anything
	TYPE_INSTANCE = 1 # Bound to VkInstance
	TYPE_DEVICE = 2 # Bound to VkDevice

	def __init__ (self, name, returnType, arguments):
		self.name, self.returnType, self.arguments = name, returnType, arguments

	def getType (self):
		# Classification is driven by the module-level function lists;
		# anything not listed there is treated as device-level.
		if self.name in PLATFORM_FUNCTIONS:
			return Function.TYPE_PLATFORM
		if self.name in INSTANCE_FUNCTIONS:
			return Function.TYPE_INSTANCE
		return Function.TYPE_DEVICE
+
class API:
	"""Aggregate of everything parsed out of vulkan.h."""
	def __init__ (self, definitions, handles, enums, bitfields, compositeTypes, functions):
		self.definitions = definitions
		self.handles = handles
		self.enums = enums
		self.bitfields = bitfields
		self.compositeTypes = compositeTypes
		self.functions = functions
+
def readFile (filename):
	"""Read a file's entire contents in binary mode (byte-exact on all platforms)."""
	f = open(filename, 'rb')
	try:
		return f.read()
	finally:
		f.close()
+
IDENT_PTRN = r'[a-zA-Z_][a-zA-Z0-9_]*' # matches a single C identifier
TYPE_PTRN = r'[a-zA-Z_][a-zA-Z0-9_ \t*]*' # matches a C type expression (identifier chars, whitespace, '*')
+
def endswith (s, postfix):
	"""Return True if 's' ends with 'postfix'.

	Thin compatibility wrapper kept for existing callers; delegates to the
	built-in str.endswith instead of re-implementing it with slicing.
	"""
	return s.endswith(postfix)
+
def fixupEnumValues (values):
	"""Filter out *_BEGIN_RANGE / *_END_RANGE helper entries from (name, value) pairs.

	These entries are implementation details of the header's range checking,
	not real enum values, so they are dropped from the generated output.
	Uses str.endswith directly instead of the hand-rolled module helper.
	"""
	return [(name, value) for name, value in values
			if not name.endswith("_BEGIN_RANGE") and not name.endswith("_END_RANGE")]
+
def fixupType (type):
	"""Rewrite stdint/stddef type names in a C type string to dEQP equivalents.

	NOTE: replacement order matters — "uint8_t" must be rewritten before
	"int8_t", which is a substring of it (likewise for the wider types).
	"""
	mapping = (
		("uint8_t", "deUint8"),
		("uint16_t", "deUint16"),
		("uint32_t", "deUint32"),
		("uint64_t", "deUint64"),
		("int8_t", "deInt8"),
		("int16_t", "deInt16"),
		("int32_t", "deInt32"),
		("int64_t", "deInt64"),
		("bool32_t", "deUint32"),
		("size_t", "deUintptr"),
	)
	result = type
	for old, new in mapping:
		result = result.replace(old, new)
	return result
+
def fixupFunction (function):
	"""Return a copy of 'function' with every argument type and the return type fixed up."""
	args = []
	for arg in function.arguments:
		args.append(Variable(fixupType(arg.type), arg.name))
	return Function(function.name, fixupType(function.returnType), args)
+
def getInterfaceName (function):
	"""Map an entry-point name to its C++ interface method name: "vkCreateDevice" -> "createDevice"."""
	assert function.name.startswith("vk")
	stripped = function.name[2:]
	return stripped[0].lower() + stripped[1:]
+
def getFunctionTypeName (function):
	"""Map an entry-point name to its pointer typedef name: "vkCreateDevice" -> "CreateDeviceFunc"."""
	assert function.name.startswith("vk")
	return "%sFunc" % function.name[2:]
+
def getBitEnumNameForBitfield (bitfieldName):
	"""Bitfield typedef name -> bit enum name: "VkQueryControlFlags" -> "VkQueryControlFlagBits"."""
	assert bitfieldName.endswith("s")
	return "%sBits" % bitfieldName[:-1]
+
def getBitfieldNameForBitEnum (bitEnumName):
	"""Bit enum name -> bitfield typedef name: "VkQueryControlFlagBits" -> "VkQueryControlFlags"."""
	assert bitEnumName.endswith("Bits")
	return "%ss" % bitEnumName[:-4]
+
def parsePreprocDefinedValue (src, name):
	"""Extract the value of a '#define <name> <value>' line from header source.

	Raises Exception when the definition is not present. UINT32_MAX is
	rewritten to a portable literal for the generated headers.
	"""
	match = re.search(r'#\s*define\s+' + name + r'\s+([^\n]+)\n', src)
	if match is None:
		raise Exception("No such definition: %s" % name)
	value = match.group(1).strip()
	return "(~0u)" if value == "UINT32_MAX" else value
+
def parseEnum (name, src):
	"""Parse 'NAME = value' entries from an enum body into an Enum object."""
	entryPtrn = '(' + IDENT_PTRN + r')\s*=\s*([^\s,}]+)\s*[,}]'
	entries = re.findall(entryPtrn, src)
	return Enum(name, fixupEnumValues(entries))
+
# \note Parses raw enums; some are later re-classified as bitfields (see parseAPI)
def parseEnums (src):
	"""Find every 'typedef enum {...} Name;' declaration in 'src'."""
	declPtrn = r'typedef enum\s*{([^}]*)}\s*(' + IDENT_PTRN + r')\s*;'
	return [parseEnum(name, contents) for contents, name in re.findall(declPtrn, src)]
+
def parseCompositeType (type, name, src):
	"""Parse a struct/union body into a CompositeType with fixed-up member types."""
	# \todo [pyry] Array support is currently a hack (size coupled with name)
	memberPtrn = r'(' + TYPE_PTRN + ')(\s' + IDENT_PTRN + r'(\[[^\]]+\])*)\s*;'
	members = []
	for declType, declName, arraySize in re.findall(memberPtrn, src):
		members.append(Variable(fixupType(declType.strip()), declName.strip()))
	return CompositeType(type, name, members)
+
def parseCompositeTypes (src):
	"""Find every 'typedef struct/union {...} Name;' declaration in 'src'."""
	classMap = { 'struct': CompositeType.CLASS_STRUCT, 'union': CompositeType.CLASS_UNION }
	declPtrn = r'typedef (struct|union)\s*{([^}]*)}\s*(' + IDENT_PTRN + r')\s*;'
	return [parseCompositeType(classMap[keyword], name, body)
			for keyword, body, name in re.findall(declPtrn, src)]
+
def parseHandles (src):
	"""Collect VK_DEFINE_HANDLE / VK_DEFINE_NONDISP_HANDLE declarations as Handle objects."""
	kindMap = {'': Handle.TYPE_DISP, '_NONDISP': Handle.TYPE_NONDISP}
	declPtrn = r'VK_DEFINE(_NONDISP|)_HANDLE\((' + IDENT_PTRN + r')\)[ \t]*[\n\r]'
	return [Handle(kindMap[kind], name) for kind, name in re.findall(declPtrn, src)]
+
def parseArgList (src):
	"""Split a C parameter list on commas and extract (type, name) Variables.

	NOTE(review): assumes no parameter itself contains a comma (e.g. a
	function-pointer parameter would break this) — appears to hold for
	the vulkan.h entry points this script targets; confirm on header updates.
	"""
	argPtrn = r'(' + TYPE_PTRN + ')(\s' + IDENT_PTRN + r')'
	matches = (re.search(argPtrn, rawArg) for rawArg in src.split(','))
	return [Variable(m.group(1).strip(), m.group(2).strip()) for m in matches]
+
def parseFunctions (src):
	"""Parse all 'ret VKAPI vkName(args);' prototypes, with types fixed up."""
	protoPtrn = r'(' + TYPE_PTRN + ')VKAPI\s+(' + IDENT_PTRN + r')\s*\(([^)]*)\)\s*;'
	return [fixupFunction(Function(name.strip(), returnType.strip(), parseArgList(argList)))
			for returnType, name, argList in re.findall(protoPtrn, src)]
+
def parseBitfieldNames (src):
	"""Names declared as 'typedef VkFlags XxxFlags;' bitfield typedefs."""
	return re.findall(r'typedef\s+VkFlags\s(' + IDENT_PTRN + r')\s*;', src)
+
def parseAPI (src):
	# Parse the complete vulkan.h source into a single API object.
	definitions = [(name, parsePreprocDefinedValue(src, name)) for name in DEFINITIONS]
	rawEnums = parseEnums(src)
	bitfieldNames = parseBitfieldNames(src)
	enums = []
	bitfields = []
	# A raw enum named "<X>Bits" that pairs with a "typedef VkFlags <X>s;"
	# declaration is really the bit definition of that bitfield.
	bitfieldEnums = set([getBitEnumNameForBitfield(n) for n in bitfieldNames])

	for enum in rawEnums:
		if enum.name in bitfieldEnums:
			# Store under the VkFlags typedef name, keeping the bit values.
			bitfields.append(Bitfield(getBitfieldNameForBitEnum(enum.name), enum.values))
		else:
			enums.append(enum)

	return API(
		definitions = definitions,
		handles = parseHandles(src),
		enums = enums,
		bitfields = bitfields,
		compositeTypes = parseCompositeTypes(src),
		functions = parseFunctions(src))
+
def writeHandleType (api, filename):
	"""Generate the HandleType enumeration used by the framework's handle wrappers.

	The first entry is pinned to 0 and HANDLE_TYPE_LAST terminates the enum.
	"""
	def gen ():
		yield "enum HandleType"
		yield "{"
		# The original indexed api.handles[0] unconditionally and raised
		# IndexError when no handles were parsed; enumerate() emits the
		# same output for non-empty input and degrades gracefully otherwise.
		for ndx, handle in enumerate(api.handles):
			yield "\t%s%s," % (handle.getHandleType(), " = 0" if ndx == 0 else "")
		yield "\tHANDLE_TYPE_LAST"
		yield "};"
		yield ""

	writeInlFile(filename, INL_HEADER, gen())
+
def getEnumValuePrefix (enum):
	"""CamelCase enum name -> SHOUTY prefix: "VkImageLayout" -> "VK_IMAGE_LAYOUT"."""
	chunks = [enum.name[0]]
	for ch in enum.name[1:]:
		if ch.isupper():
			chunks.append("_")
		chunks.append(ch.upper())
	return "".join(chunks)
+
def areEnumValuesLinear (enum):
	"""True when enum values are exactly 0..N-1 in declaration order.

	Linear enums get a generated *_LAST sentinel appended (see genEnumSrc).
	"""
	return all(int(value) == ndx for ndx, (name, value) in enumerate(enum.values))
+
def genEnumSrc (enum):
	# Yield the C++ source lines for one enum declaration.
	yield "enum %s" % enum.name
	yield "{"

	# indentLines aligns the tab-separated columns across all entries.
	for line in indentLines(["\t%s\t= %s," % v for v in enum.values]):
		yield line

	# Only 0..N-1 enums get a *_LAST sentinel; sparse enums would break it.
	if areEnumValuesLinear(enum):
		yield ""
		yield "\t%s_LAST" % getEnumValuePrefix(enum)

	yield "};"
+
def genBitfieldSrc (bitfield):
	# Yield the C++ source lines for one bitfield: the *Bits enum plus the
	# deUint32 typedef carrying the combined-flags type.
	yield "enum %s" % getBitEnumNameForBitfield(bitfield.name)
	yield "{"
	for line in indentLines(["\t%s\t= %s," % v for v in bitfield.values]):
		yield line
	yield "};"
	yield "typedef deUint32 %s;" % bitfield.name
+
def genCompositeTypeSrc (type):
	# Yield the C++ source lines for one struct/union declaration.
	yield "%s %s" % (type.getClassName(), type.name)
	yield "{"
	for line in indentLines(["\t%s\t%s;" % (m.type, m.name) for m in type.members]):
		yield line
	yield "};"
+
def genHandlesSrc (handles):
	# Yield aligned VK_DEFINE_HANDLE / VK_DEFINE_NONDISP_HANDLE lines; the
	# framework-side macros take the handle name plus its HandleType tag.
	def genLines (handles):
		for handle in handles:
			if handle.type == Handle.TYPE_DISP:
				yield "VK_DEFINE_HANDLE\t(%s,\t%s);" % (handle.name, handle.getHandleType())
			elif handle.type == Handle.TYPE_NONDISP:
				yield "VK_DEFINE_NONDISP_HANDLE\t(%s,\t%s);" % (handle.name, handle.getHandleType())

	for line in indentLines(genLines(handles)):
		yield line
+
def writeBasicTypes (api, filename):
	# Generate the basic-types .inl: preprocessor constants as enums, handle
	# declarations, plain enums, and bitfields — in that order.
	def gen ():
		for line in indentLines(["enum { %s\t= %s\t};" % define for define in api.definitions]):
			yield line
		yield ""
		for line in genHandlesSrc(api.handles):
			yield line
		yield ""
		for enum in api.enums:
			for line in genEnumSrc(enum):
				yield line
			yield ""
		for bitfield in api.bitfields:
			for line in genBitfieldSrc(bitfield):
				yield line
			yield ""

	writeInlFile(filename, INL_HEADER, gen())
+
def writeCompositeTypes (api, filename):
	# Generate the struct/union declarations .inl, one blank line between types.
	def gen ():
		for type in api.compositeTypes:
			for line in genCompositeTypeSrc(type):
				yield line
			yield ""

	writeInlFile(filename, INL_HEADER, gen())
+
def argListToStr (args):
	"""Format a list of Variables as a C-style parameter list string."""
	formatted = ["%s %s" % (arg.type, arg.name) for arg in args]
	return ", ".join(formatted)
+
def writeInterfaceDecl (api, filename, functionTypes, concrete):
	# Generate virtual method declarations for the functions whose dispatch
	# level is in 'functionTypes'. 'concrete' drops the "= 0" pure-virtual
	# suffix (methods stay 'virtual' either way).
	def genProtos ():
		postfix = "" if concrete else " = 0"
		for function in api.functions:
			if function.getType() in functionTypes:
				yield "virtual %s\t%s\t(%s) const%s;" % (function.returnType, getInterfaceName(function), argListToStr(function.arguments), postfix)

	writeInlFile(filename, INL_HEADER, indentLines(genProtos()))
+
def writeFunctionPtrTypes (api, filename):
	# Generate 'typedef ... (VK_APIENTRY* NameFunc)(...);' declarations for
	# every parsed entry point, aligned with indentLines.
	def genTypes ():
		for function in api.functions:
			yield "typedef VK_APICALL %s\t(VK_APIENTRY* %s)\t(%s);" % (function.returnType, getFunctionTypeName(function), argListToStr(function.arguments))

	writeInlFile(filename, INL_HEADER, indentLines(genTypes()))
+
def writeFunctionPointers (api, filename, functionTypes):
	# Generate the member declarations of a function-pointer table ("m_vk")
	# for the selected dispatch levels.
	writeInlFile(filename, INL_HEADER, indentLines(["%s\t%s;" % (getFunctionTypeName(function), getInterfaceName(function)) for function in api.functions if function.getType() in functionTypes]))
+
def writeInitFunctionPointers (api, filename, functionTypes):
	# Generate the loader body: assign each table entry from GET_PROC_ADDR,
	# which the including .cpp is expected to #define appropriately.
	def makeInitFunctionPointers ():
		for function in api.functions:
			if function.getType() in functionTypes:
				yield "m_vk.%s\t= (%s)\tGET_PROC_ADDR(\"%s\");" % (getInterfaceName(function), getFunctionTypeName(function), function.name)

	writeInlFile(filename, INL_HEADER, indentLines(makeInitFunctionPointers()))
+
def writeFuncPtrInterfaceImpl (api, filename, functionTypes, className):
	# Generate the concrete wrapper methods of 'className': each forwards its
	# arguments to the corresponding m_vk function pointer, returning the
	# result unless the entry point is void.
	def makeFuncPtrInterfaceImpl ():
		for function in api.functions:
			if function.getType() in functionTypes:
				yield ""
				yield "%s %s::%s (%s) const" % (function.returnType, className, getInterfaceName(function), argListToStr(function.arguments))
				yield "{"
				# NOTE(review): the body line below is indented with a literal
				# whitespace character while other generators use explicit
				# "\t" escapes — confirm the intended generated indentation.
				yield " %sm_vk.%s(%s);" % ("return " if function.returnType != "void" else "", getInterfaceName(function), ", ".join(a.name for a in function.arguments))
				yield "}"

	writeInlFile(filename, INL_HEADER, makeFuncPtrInterfaceImpl())
+
def writeStrUtilProto (api, filename):
	# Generate the string-utility prototypes: get*Name() for enums, inline
	# get*Str() wrappers, operator<< overloads, bitfield get*Str(), and
	# operator<< declarations for composite types. The [2:] slices strip
	# the "Vk" prefix from type names.
	def makeStrUtilProto ():
		for line in indentLines(["const char*\tget%sName\t(%s value);" % (enum.name[2:], enum.name) for enum in api.enums]):
			yield line
		yield ""
		for line in indentLines(["inline tcu::Format::Enum<%s>\tget%sStr\t(%s value)\t{ return tcu::Format::Enum<%s>(get%sName, value);\t}" % (e.name, e.name[2:], e.name, e.name, e.name[2:]) for e in api.enums]):
			yield line
		yield ""
		for line in indentLines(["inline std::ostream&\toperator<<\t(std::ostream& s, %s value)\t{ return s << get%sStr(value);\t}" % (e.name, e.name[2:]) for e in api.enums]):
			yield line
		yield ""
		for line in indentLines(["tcu::Format::Bitfield<32>\tget%sStr\t(%s value);" % (bitfield.name[2:], bitfield.name) for bitfield in api.bitfields]):
			yield line
		yield ""
		for line in indentLines(["std::ostream&\toperator<<\t(std::ostream& s, const %s& value);" % (s.name) for s in api.compositeTypes]):
			yield line

	writeInlFile(filename, INL_HEADER, makeStrUtilProto())
+
def writeStrUtilImpl (api, filename):
	# Generate the string-utility implementations declared by
	# writeStrUtilProto: getTypeName specializations for handles, get*Name
	# switches for enums, bitfield-description tables, and operator<< for
	# composite types.
	def makeStrUtilImpl ():
		for line in indentLines(["template<> const char*\tgetTypeName<%s>\t(void) { return \"%s\";\t}" % (handle.name, handle.name) for handle in api.handles]):
			yield line

		# One switch per enum; unknown values map to DE_NULL.
		for enum in api.enums:
			yield ""
			yield "const char* get%sName (%s value)" % (enum.name[2:], enum.name)
			yield "{"
			yield "\tswitch (value)"
			yield "\t{"
			for line in indentLines(["\t\tcase %s:\treturn \"%s\";" % (n, n) for n, v in enum.values] + ["\t\tdefault:\treturn DE_NULL;"]):
				yield line
			yield "\t}"
			yield "}"

		# One static BitDesc table per bitfield, rendered via tcu::Format.
		for bitfield in api.bitfields:
			yield ""
			yield "tcu::Format::Bitfield<32> get%sStr (%s value)" % (bitfield.name[2:], bitfield.name)
			yield "{"
			yield "\tstatic const tcu::Format::BitDesc s_desc[] ="
			yield "\t{"
			for line in indentLines(["\t\ttcu::Format::BitDesc(%s,\t\"%s\")," % (n, n) for n, v in bitfield.values]):
				yield line
			yield "\t};"
			yield "\treturn tcu::Format::Bitfield<32>(value, DE_ARRAY_BEGIN(s_desc), DE_ARRAY_END(s_desc));"
			yield "}"

		bitfieldTypeNames = set([bitfield.name for bitfield in api.bitfields])

		# operator<< per composite type; member formatting is chosen by type:
		# bitfields use their get*Str helper, char pointers are quoted,
		# char-array names known to hold strings are cast to const char*,
		# other arrays use tcu::formatArray, everything else streams directly.
		for type in api.compositeTypes:
			yield ""
			yield "std::ostream& operator<< (std::ostream& s, const %s& value)" % type.name
			yield "{"
			yield "\ts << \"%s = {\\n\";" % type.name
			for member in type.members:
				memberName = member.name
				valFmt = None
				newLine = ""
				if member.type in bitfieldTypeNames:
					valFmt = "get%sStr(value.%s)" % (member.type[2:], member.name)
				elif member.type == "const char*" or member.type == "char*":
					valFmt = "getCharPtrStr(value.%s)" % member.name
				elif '[' in member.name:
					# \todo-style hack: array size is part of the name (see
					# parseCompositeType), so strip it off here.
					baseName = member.name[:member.name.find('[')]
					if baseName == "extName" or baseName == "deviceName":
						valFmt = "(const char*)value.%s" % baseName
					else:
						newLine = "'\\n' << "
						valFmt = "tcu::formatArray(DE_ARRAY_BEGIN(value.%s), DE_ARRAY_END(value.%s))" % (baseName, baseName)
					memberName = baseName
				else:
					valFmt = "value.%s" % member.name
				yield ("\ts << \"\\t%s = \" << " % memberName) + newLine + valFmt + " << '\\n';"
			yield "\ts << '}';"
			yield "\treturn s;"
			yield "}"


	writeInlFile(filename, INL_HEADER, makeStrUtilImpl())
+
+class ConstructorFunction:
+	def __init__ (self, type, name, objectType, iface, arguments):
+		"""Description of a vkCreate*/vkAllocMemory wrapper to generate.
+
+		type       -- Function.TYPE_* classification of the source function
+		name       -- interface-level (camelCase, no "vk" prefix) function name
+		objectType -- Vulkan handle type produced (output parameter's base type)
+		iface      -- Variable for the interface argument (Platform/Instance/DeviceInterface)
+		arguments  -- source function arguments excluding the trailing output pointer
+		"""
+		self.type		= type
+		self.name		= name
+		self.objectType	= objectType
+		self.iface		= iface
+		self.arguments	= arguments
+
+def getConstructorFunctions (api):
+	# Collect the API functions that construct a single object
+	# (vkCreate* and vkAllocMemory), skipping batch constructors that take
+	# a "count" argument, and describe each as a ConstructorFunction.
+	funcs = []
+	for function in api.functions:
+		if (function.name[:8] == "vkCreate" or function.name == "vkAllocMemory") and not "count" in [a.name for a in function.arguments]:
+			# \todo [pyry] Rather hacky
+			# Pick the interface wrapper type matching the function's dispatch level.
+			iface = None
+			if function.getType() == Function.TYPE_PLATFORM:
+				iface = Variable("const PlatformInterface&", "vk")
+			elif function.getType() == Function.TYPE_INSTANCE:
+				iface = Variable("const InstanceInterface&", "vk")
+			else:
+				iface = Variable("const DeviceInterface&", "vk")
+			# Last argument is the output pointer; its pointee is the created type.
+			objectType	= function.arguments[-1].type.replace("*", "").strip()
+			arguments	= function.arguments[:-1]
+			funcs.append(ConstructorFunction(function.getType(), getInterfaceName(function), objectType, iface, arguments))
+	return funcs
+
+def writeRefUtilProto (api, filename):
+ functions = getConstructorFunctions(api)
+
+ def makeRefUtilProto ():
+ unindented = []
+ for line in indentLines(["Move<%s>\t%s\t(%s);" % (function.objectType, function.name, argListToStr([function.iface] + function.arguments)) for function in functions]):
+ yield line
+
+ writeInlFile(filename, INL_HEADER, makeRefUtilProto())
+
+def writeRefUtilImpl (api, filename):
+	# Generate vkRefUtilImpl.inl: Deleter<T> specializations for destroyable
+	# device objects plus the Move<T>-returning constructor wrapper bodies.
+	functions = getConstructorFunctions(api)
+
+	def makeRefUtilImpl ():
+		yield "namespace refdetails"
+		yield "{"
+		yield ""
+
+		# Deleter<T>::operator() for every device-level destroy/free function,
+		# except vkDestroyDevice which is handled separately.
+		for function in api.functions:
+			if function.getType() == Function.TYPE_DEVICE \
+			   and (function.name[:9] == "vkDestroy" or function.name == "vkFreeMemory") \
+			   and not function.name == "vkDestroyDevice":
+				objectType = function.arguments[-1].type
+				yield "template<>"
+				yield "void Deleter<%s>::operator() (%s obj) const" % (objectType, objectType)
+				yield "{"
+				yield "\tDE_TEST_ASSERT(m_deviceIface->%s(m_device, obj) == VK_SUCCESS);" % (getInterfaceName(function))
+				yield "}"
+				yield ""
+
+		yield "} // refdetails"
+		yield ""
+
+		# Constructor wrappers: call the create function, check the result and
+		# wrap the new handle in a Move<T> owning reference.
+		for function in functions:
+			# Device-level objects are destroyed against the device; others
+			# (e.g. instances) are destroyed against the object itself.
+			dtorObj = "device" if function.type == Function.TYPE_DEVICE else "object"
+
+			yield "Move<%s> %s (%s)" % (function.objectType, function.name, argListToStr([function.iface] + function.arguments))
+			yield "{"
+			yield "\t%s object = 0;" % function.objectType
+			yield "\tVK_CHECK(vk.%s(%s));" % (function.name, ", ".join([a.name for a in function.arguments] + ["&object"]))
+			yield "\treturn Move<%s>(check<%s>(object), Deleter<%s>(vk, %s));" % (function.objectType, function.objectType, function.objectType, dtorObj)
+			yield "}"
+			yield ""
+
+	writeInlFile(filename, INL_HEADER, makeRefUtilImpl())
+
+def writeNullDriverImpl (api, filename):
+	# Generate vkNullDriverImpl.inl: a stub "null driver" implementation of
+	# the whole API, used for testing without a real Vulkan implementation.
+	def genNullDriverImpl ():
+		# Functions with hand-written implementations elsewhere; excluded
+		# from all generated categories below.
+		specialFuncNames	= [
+				"vkCreateGraphicsPipelines",
+				"vkCreateComputePipelines",
+				"vkGetInstanceProcAddr",
+				"vkGetDeviceProcAddr",
+				"vkEnumeratePhysicalDevices",
+				"vkGetPhysicalDeviceProperties",
+				"vkGetPhysicalDeviceQueueCount",
+				"vkGetPhysicalDeviceQueueProperties",
+				"vkGetPhysicalDeviceMemoryProperties",
+				"vkGetBufferMemoryRequirements",
+				"vkGetImageMemoryRequirements",
+				"vkMapMemory",
+				"vkAllocDescriptorSets",
+				"vkFreeDescriptorSets",
+			]
+		specialFuncs		= [f for f in api.functions if f.name in specialFuncNames]
+		createFuncs			= [f for f in api.functions if (f.name[:8] == "vkCreate" or f.name == "vkAllocMemory") and not f in specialFuncs]
+		destroyFuncs		= [f for f in api.functions if (f.name[:9] == "vkDestroy" or f.name == "vkFreeMemory") and not f in specialFuncs]
+		# Everything else gets a no-op body that just returns VK_SUCCESS.
+		dummyFuncs			= [f for f in api.functions if f not in specialFuncs + createFuncs + destroyFuncs]
+
+		def getHandle (name):
+			# Look up a handle description by type name; fails loudly so a
+			# header/generator mismatch is caught at generation time.
+			for handle in api.handles:
+				if handle.name == name:
+					return handle
+			raise Exception("No such handle: %s" % name)
+
+		# Create functions: allocate the internal object and store it in the
+		# output handle. Non-dispatchable handles are 64-bit integers, so the
+		# pointer is packed via deUint64; dispatchable ones are cast directly.
+		for function in createFuncs:
+			objectType	= function.arguments[-1].type.replace("*", "").strip()
+			argsStr		= ", ".join([a.name for a in function.arguments[:-1]])
+
+			yield "%s %s (%s)" % (function.returnType, getInterfaceName(function), argListToStr(function.arguments))
+			yield "{"
+
+			if getHandle(objectType).type == Handle.TYPE_NONDISP:
+				yield "\tVK_NULL_RETURN(*%s = %s((deUint64)(deUintptr)new %s(%s)));" % (function.arguments[-1].name, objectType, objectType[2:], argsStr)
+			else:
+				yield "\tVK_NULL_RETURN(*%s = reinterpret_cast<%s>(new %s(%s)));" % (function.arguments[-1].name, objectType, objectType[2:], argsStr)
+
+			yield "}"
+			yield ""
+
+		# Destroy functions: unpack the handle back to the internal object
+		# pointer and delete it; all other arguments are ignored.
+		for function in destroyFuncs:
+			yield "%s %s (%s)" % (function.returnType, getInterfaceName(function), argListToStr(function.arguments))
+			yield "{"
+			for arg in function.arguments[:-1]:
+				yield "\tDE_UNREF(%s);" % arg.name
+
+			if getHandle(function.arguments[-1].type).type == Handle.TYPE_NONDISP:
+				yield "\tVK_NULL_RETURN(delete reinterpret_cast<%s*>((deUintptr)%s.getInternal()));" % (function.arguments[-1].type[2:], function.arguments[-1].name)
+			else:
+				yield "\tVK_NULL_RETURN(delete reinterpret_cast<%s*>(%s));" % (function.arguments[-1].type[2:], function.arguments[-1].name)
+
+			yield "}"
+			yield ""
+
+		# Remaining functions: ignore arguments, report success.
+		for function in dummyFuncs:
+			yield "%s %s (%s)" % (function.returnType, getInterfaceName(function), argListToStr(function.arguments))
+			yield "{"
+			for arg in function.arguments:
+				yield "\tDE_UNREF(%s);" % arg.name
+			if function.returnType != "void":
+				yield "\treturn VK_SUCCESS;"
+			yield "}"
+			yield ""
+
+		def genFuncEntryTable (type, name):
+			# Emit a StaticFunctionLibrary entry table mapping API names to
+			# the generated implementations for one dispatch level.
+			funcs = [f for f in api.functions if f.getType() == type]
+
+			yield "static const tcu::StaticFunctionLibrary::Entry %s[] =" % name
+			yield "{"
+			for line in indentLines(["\tVK_NULL_FUNC_ENTRY(%s,\t%s)," % (function.name, getInterfaceName(function)) for function in funcs]):
+				yield line
+			yield "};"
+			yield ""
+
+		# Func tables
+		for line in genFuncEntryTable(Function.TYPE_PLATFORM, "s_platformFunctions"):
+			yield line
+
+		for line in genFuncEntryTable(Function.TYPE_INSTANCE, "s_instanceFunctions"):
+			yield line
+
+		for line in genFuncEntryTable(Function.TYPE_DEVICE, "s_deviceFunctions"):
+			yield line
+
+
+	writeInlFile(filename, INL_HEADER, genNullDriverImpl())
+
+if __name__ == "__main__":
+	# Entry point: parse the Vulkan header given as argv[1] and regenerate
+	# all .inl files consumed by the framework under VULKAN_DIR.
+	src				= readFile(sys.argv[1])
+	api				= parseAPI(src)
+	# Per-dispatch-level function type sets, passed to the writers below.
+	platformFuncs	= set([Function.TYPE_PLATFORM])
+	instanceFuncs	= set([Function.TYPE_INSTANCE])
+	deviceFuncs		= set([Function.TYPE_DEVICE])
+
+	writeHandleType				(api, os.path.join(VULKAN_DIR, "vkHandleType.inl"))
+	writeBasicTypes				(api, os.path.join(VULKAN_DIR, "vkBasicTypes.inl"))
+	writeCompositeTypes			(api, os.path.join(VULKAN_DIR, "vkStructTypes.inl"))
+	# Abstract (virtual) and concrete interface declarations per level.
+	writeInterfaceDecl			(api, os.path.join(VULKAN_DIR, "vkVirtualPlatformInterface.inl"),		functionTypes = platformFuncs,	concrete = False)
+	writeInterfaceDecl			(api, os.path.join(VULKAN_DIR, "vkVirtualInstanceInterface.inl"),		functionTypes = instanceFuncs,	concrete = False)
+	writeInterfaceDecl			(api, os.path.join(VULKAN_DIR, "vkVirtualDeviceInterface.inl"),			functionTypes = deviceFuncs,	concrete = False)
+	writeInterfaceDecl			(api, os.path.join(VULKAN_DIR, "vkConcretePlatformInterface.inl"),		functionTypes = platformFuncs,	concrete = True)
+	writeInterfaceDecl			(api, os.path.join(VULKAN_DIR, "vkConcreteInstanceInterface.inl"),		functionTypes = instanceFuncs,	concrete = True)
+	writeInterfaceDecl			(api, os.path.join(VULKAN_DIR, "vkConcreteDeviceInterface.inl"),		functionTypes = deviceFuncs,	concrete = True)
+	# Function pointer typedefs, member declarations and loaders.
+	writeFunctionPtrTypes		(api, os.path.join(VULKAN_DIR, "vkFunctionPointerTypes.inl"))
+	writeFunctionPointers		(api, os.path.join(VULKAN_DIR, "vkPlatformFunctionPointers.inl"),		functionTypes = platformFuncs)
+	writeFunctionPointers		(api, os.path.join(VULKAN_DIR, "vkInstanceFunctionPointers.inl"),		functionTypes = instanceFuncs)
+	writeFunctionPointers		(api, os.path.join(VULKAN_DIR, "vkDeviceFunctionPointers.inl"),			functionTypes = deviceFuncs)
+	writeInitFunctionPointers	(api, os.path.join(VULKAN_DIR, "vkInitPlatformFunctionPointers.inl"),	functionTypes = platformFuncs)
+	writeInitFunctionPointers	(api, os.path.join(VULKAN_DIR, "vkInitInstanceFunctionPointers.inl"),	functionTypes = instanceFuncs)
+	writeInitFunctionPointers	(api, os.path.join(VULKAN_DIR, "vkInitDeviceFunctionPointers.inl"),		functionTypes = deviceFuncs)
+	# Driver classes forwarding each call through the loaded pointers.
+	writeFuncPtrInterfaceImpl	(api, os.path.join(VULKAN_DIR, "vkPlatformDriverImpl.inl"),				functionTypes = platformFuncs,	className = "PlatformDriver")
+	writeFuncPtrInterfaceImpl	(api, os.path.join(VULKAN_DIR, "vkInstanceDriverImpl.inl"),				functionTypes = instanceFuncs,	className = "InstanceDriver")
+	writeFuncPtrInterfaceImpl	(api, os.path.join(VULKAN_DIR, "vkDeviceDriverImpl.inl"),				functionTypes = deviceFuncs,	className = "DeviceDriver")
+	# Pretty-printing, reference-counting and null-driver helpers.
+	writeStrUtilProto			(api, os.path.join(VULKAN_DIR, "vkStrUtil.inl"))
+	writeStrUtilImpl			(api, os.path.join(VULKAN_DIR, "vkStrUtilImpl.inl"))
+	writeRefUtilProto			(api, os.path.join(VULKAN_DIR, "vkRefUtil.inl"))
+	writeRefUtilImpl			(api, os.path.join(VULKAN_DIR, "vkRefUtilImpl.inl"))
+	writeNullDriverImpl			(api, os.path.join(VULKAN_DIR, "vkNullDriverImpl.inl"))
--- /dev/null
+# dEQP-VK: Vulkan conformance test module.
+
+# Test group subdirectories; each builds a static library linked below.
+add_subdirectory(api)
+add_subdirectory(pipeline)
+add_subdirectory(binding_model)
+
+# Directory-scoped includes follow existing dEQP module convention.
+include_directories(
+	api
+	pipeline
+	binding_model
+	)
+
+# Sources shared by the test module and the offline program builder.
+set(DEQP_VK_COMMON_SRCS
+	vktTestCase.cpp
+	vktTestCase.hpp
+	vktTestCaseUtil.cpp
+	vktTestCaseUtil.hpp
+	vktTestPackage.cpp
+	vktTestPackage.hpp
+	vktInfo.cpp
+	vktInfo.hpp
+	)
+
+set(DEQP_VK_COMMON_LIBS
+	tcutil
+	vkutil
+	glutil
+	deqp-vk-api
+	deqp-vk-pipeline
+	deqp-vk-binding-model
+	)
+
+# On desktop platforms build the common code as a separate static library
+# so the vk-build-programs tool can reuse it; elsewhere (e.g. Android)
+# compile the sources directly into the module.
+if (DE_OS_IS_WIN32 OR DE_OS_IS_UNIX OR DE_OS_IS_OSX)
+	add_library(deqp-vk-common STATIC ${DEQP_VK_COMMON_SRCS})
+	target_link_libraries(deqp-vk-common ${DEQP_VK_COMMON_LIBS})
+
+	# Offline shader program builder utility.
+	add_executable(vk-build-programs vktBuildPrograms.cpp)
+	target_link_libraries(vk-build-programs deqp-vk-common)
+
+	set(DEQP_VK_SRCS )
+	set(DEQP_VK_LIBS deqp-vk-common)
+
+else ()
+	set(DEQP_VK_SRCS ${DEQP_VK_COMMON_SRCS})
+	set(DEQP_VK_LIBS ${DEQP_VK_COMMON_LIBS})
+
+endif ()
+
+add_deqp_module(deqp-vk "${DEQP_VK_SRCS}" "${DEQP_VK_LIBS}" vktTestPackageEntry.cpp)
+
+# Ship the vulkan data directory with the module.
+add_data_dir(deqp-vk ../../data/vulkan vulkan)
--- /dev/null
+# API layer tests: static library with the dEQP-VK "api" test group.
+
+# Parent module directory for shared vkt* headers.
+include_directories(..)
+
+set(DEQP_VK_API_SRCS
+	vktApiTests.cpp
+	vktApiTests.hpp
+	vktApiSmokeTests.cpp
+	vktApiSmokeTests.hpp
+	vktApiDeviceInitializationTests.cpp
+	vktApiDeviceInitializationTests.hpp
+	)
+
+set(DEQP_VK_API_LIBS
+	tcutil
+	vkutil
+	)
+
+add_library(deqp-vk-api STATIC ${DEQP_VK_API_SRCS})
+target_link_libraries(deqp-vk-api ${DEQP_VK_API_LIBS})
--- /dev/null
+/*-------------------------------------------------------------------------
+ * Vulkan Conformance Tests
+ * ------------------------
+ *
+ * Copyright (c) 2015 Google Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and/or associated documentation files (the
+ * "Materials"), to deal in the Materials without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Materials, and to
+ * permit persons to whom the Materials are furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice(s) and this permission notice shall be
+ * included in all copies or substantial portions of the Materials.
+ *
+ * The Materials are Confidential Information as defined by the
+ * Khronos Membership Agreement until designated non-confidential by
+ * Khronos, at which point this condition clause shall be removed.
+ *
+ * THE MATERIALS ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * MATERIALS OR THE USE OR OTHER DEALINGS IN THE MATERIALS.
+ *
+ *//*!
+ * \file
+ * \brief Device Initialization Tests
+ *//*--------------------------------------------------------------------*/
+
+#include "vktApiDeviceInitializationTests.hpp"
+#include "vktTestCaseUtil.hpp"
+
+#include "vkDefs.hpp"
+#include "vkPlatform.hpp"
+#include "vkStrUtil.hpp"
+#include "vkRef.hpp"
+#include "vkRefUtil.hpp"
+#include "vkQueryUtil.hpp"
+#include "vkMemUtil.hpp"
+#include "vkDeviceUtil.hpp"
+#include "vkApiVersion.hpp"
+
+#include "tcuTestLog.hpp"
+#include "tcuResultCollector.hpp"
+
+#include "deUniquePtr.hpp"
+#include "deStringUtil.hpp"
+
+#include <vector>
+
+namespace vkt
+{
+namespace api
+{
+
+namespace
+{
+
+using namespace vk;
+using namespace std;
+using std::vector;
+using tcu::TestLog;
+
+// Exercise vkCreateInstance with a series of VkApplicationInfo variations:
+// unusual appName/engineName strings (DE_NULL, empty, punctuation, embedded
+// newlines) and boundary appVersion/engineVersion values. All combinations
+// are expected to succeed; any vk::Error is recorded as a failure.
+tcu::TestStatus createInstanceTest (Context& context)
+{
+	tcu::TestLog&				log						= context.getTestContext().getLog();
+	tcu::ResultCollector		resultCollector			(log);
+	const char*					appNames[]				= { "appName", DE_NULL, "", "app, name", "app(\"name\"", "app~!@#$%^&*()_+name", "app\nName", "app\r\nName" };
+	const char*					engineNames[]			= { "engineName", DE_NULL, "", "engine. name", "engine\"(name)", "eng~!@#$%^&*()_+name", "engine\nName", "engine\r\nName" };
+	// (deUint32)-1 probes the maximum representable version value.
+	const deUint32				appVersions[]			= { 0, 1, (deUint32)-1 };
+	const deUint32				engineVersions[]		= { 0, 1, (deUint32)-1 };
+	const PlatformInterface&	platformInterface		= context.getPlatformInterface();
+	vector<VkApplicationInfo>	appInfos;
+
+	// test over appName
+	for (int appNameNdx = 0; appNameNdx < DE_LENGTH_OF_ARRAY(appNames); appNameNdx++)
+	{
+		const VkApplicationInfo appInfo =
+		{
+			VK_STRUCTURE_TYPE_APPLICATION_INFO,		// VkStructureType	sType;
+			DE_NULL,								// const void*		pNext;
+			appNames[appNameNdx],					// const char*		pAppName;
+			0u,										// deUint32			appVersion;
+			"engineName",							// const char*		pEngineName;
+			0u,										// deUint32			engineVersion;
+			VK_API_VERSION,							// deUint32			apiVersion;
+		};
+
+		appInfos.push_back(appInfo);
+	}
+
+	// test over engineName
+	for (int engineNameNdx = 0; engineNameNdx < DE_LENGTH_OF_ARRAY(engineNames); engineNameNdx++)
+	{
+		const VkApplicationInfo appInfo =
+		{
+			VK_STRUCTURE_TYPE_APPLICATION_INFO,		// VkStructureType	sType;
+			DE_NULL,								// const void*		pNext;
+			"appName",								// const char*		pAppName;
+			0u,										// deUint32			appVersion;
+			engineNames[engineNameNdx],				// const char*		pEngineName;
+			0u,										// deUint32			engineVersion;
+			VK_API_VERSION,							// deUint32			apiVersion;
+		};
+
+		appInfos.push_back(appInfo);
+	}
+
+	// test over appVersion
+	for (int appVersionNdx = 0; appVersionNdx < DE_LENGTH_OF_ARRAY(appVersions); appVersionNdx++)
+	{
+		const VkApplicationInfo appInfo =
+		{
+			VK_STRUCTURE_TYPE_APPLICATION_INFO,		// VkStructureType	sType;
+			DE_NULL,								// const void*		pNext;
+			"appName",								// const char*		pAppName;
+			appVersions[appVersionNdx],				// deUint32			appVersion;
+			"engineName",							// const char*		pEngineName;
+			0u,										// deUint32			engineVersion;
+			VK_API_VERSION,							// deUint32			apiVersion;
+		};
+
+		appInfos.push_back(appInfo);
+	}
+
+	// test over engineVersion
+	for (int engineVersionNdx = 0; engineVersionNdx < DE_LENGTH_OF_ARRAY(engineVersions); engineVersionNdx++)
+	{
+		const VkApplicationInfo appInfo =
+		{
+			VK_STRUCTURE_TYPE_APPLICATION_INFO,		// VkStructureType	sType;
+			DE_NULL,								// const void*		pNext;
+			"appName",								// const char*		pAppName;
+			0u,										// deUint32			appVersion;
+			"engineName",							// const char*		pEngineName;
+			engineVersions[engineVersionNdx],		// deUint32			engineVersion;
+			VK_API_VERSION,							// deUint32			apiVersion;
+		};
+
+		appInfos.push_back(appInfo);
+	}
+
+	// run the tests!
+	for (size_t appInfoNdx = 0; appInfoNdx < appInfos.size(); ++appInfoNdx)
+	{
+		const VkApplicationInfo&		appInfo					= appInfos[appInfoNdx];
+		const VkInstanceCreateInfo		instanceCreateInfo		=
+		{
+			VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO,	// VkStructureType			sType;
+			DE_NULL,								// const void*				pNext;
+			&appInfo,								// const VkApplicationInfo*	pAppInfo;
+			DE_NULL,								// const VkAllocCallbacks*	pAllocCb;
+			0u,										// deUint32					layerCount;
+			DE_NULL,								// const char*const*		ppEnabledLayernames;
+			0u,										// deUint32					extensionCount;
+			DE_NULL,								// const char*const*		ppEnabledExtensionNames;
+		};
+
+		log << TestLog::Message << "Creating instance with appInfo: " << appInfo << TestLog::EndMessage;
+
+		try
+		{
+			// Unique<> destroys the instance automatically at end of scope.
+			const Unique<VkInstance> instance(createInstance(platformInterface, &instanceCreateInfo));
+			log << TestLog::Message << "Succeeded" << TestLog::EndMessage;
+		}
+		catch (const vk::Error& err)
+		{
+			resultCollector.fail("Failed, Error code: " + de::toString(err.getMessage()));
+		}
+	}
+
+	return tcu::TestStatus(resultCollector.getResult(), resultCollector.getMessage());
+}
+
+// Verify that vkCreateInstance rejects VkApplicationInfo::apiVersion values
+// where one of the packed version fields is set to its maximum encodable
+// value (10-bit major/minor, 12-bit patch) — presumably outside any
+// supported range. A NotSupportedError is the expected (pass) outcome.
+tcu::TestStatus createInstanceWithInvalidApiVersionTest (Context& context)
+{
+	tcu::TestLog&				log						= context.getTestContext().getLog();
+	tcu::ResultCollector		resultCollector			(log);
+	const PlatformInterface&	platformInterface		= context.getPlatformInterface();
+	const ApiVersion			apiVersion				= unpackVersion(VK_API_VERSION);
+	const deUint32				invalidMajorVersion		= (1 << 10) - 1;
+	const deUint32				invalidMinorVersion		= (1 << 10) - 1;
+	const deUint32				invalidPatchNum			= (1 << 12) - 1;
+	vector<ApiVersion>			invalidApiVersions;
+
+	// One invalid field at a time; the other fields keep the current values.
+	invalidApiVersions.push_back(ApiVersion(invalidMajorVersion, apiVersion.minorNum, apiVersion.patchNum));
+	invalidApiVersions.push_back(ApiVersion(apiVersion.majorNum, invalidMinorVersion, apiVersion.patchNum));
+	invalidApiVersions.push_back(ApiVersion(apiVersion.majorNum, apiVersion.minorNum, invalidPatchNum));
+
+	for (size_t apiVersionNdx = 0; apiVersionNdx < invalidApiVersions.size(); apiVersionNdx++)
+	{
+		const VkApplicationInfo appInfo =
+		{
+			VK_STRUCTURE_TYPE_APPLICATION_INFO,			// VkStructureType	sType;
+			DE_NULL,									// const void*		pNext;
+			"appName",									// const char*		pAppName;
+			0u,											// deUint32			appVersion;
+			"engineName",								// const char*		pEngineName;
+			0u,											// deUint32			engineVersion;
+			pack(invalidApiVersions[apiVersionNdx]),	// deUint32			apiVersion;
+		};
+		const VkInstanceCreateInfo instanceCreateInfo =
+		{
+			VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO,		// VkStructureType			sType;
+			DE_NULL,									// const void*				pNext;
+			&appInfo,									// const VkApplicationInfo*	pAppInfo;
+			DE_NULL,									// const VkAllocCallbacks*	pAllocCb;
+			0u,											// deUint32					layerCount;
+			DE_NULL,									// const char*const*		ppEnabledLayernames;
+			0u,											// deUint32					extensionCount;
+			DE_NULL,									// const char*const*		ppEnabledExtensionNames;
+		};
+
+
+		log << TestLog::Message
+			<<"VK_API_VERSION defined in vulkan.h: " << apiVersion
+			<< ", api version used to create instance: " << invalidApiVersions[apiVersionNdx]
+			<< TestLog::EndMessage;
+
+		try
+		{
+			// Success here means the invalid version was accepted — a failure.
+			const Unique<VkInstance> instance(createInstance(platformInterface, &instanceCreateInfo));
+
+			resultCollector.fail("Fail, instance creation with invalid apiVersion is not rejected");
+		}
+		catch (const tcu::NotSupportedError&)
+		{
+			log << TestLog::Message << "Pass, instance creation with invalid apiVersion is rejected" << TestLog::EndMessage;
+		}
+	}
+
+	return tcu::TestStatus(resultCollector.getResult(), resultCollector.getMessage());
+}
+
+// Verify that vkCreateInstance rejects requests enabling extensions that
+// cannot exist. Expected outcome is a tcu::NotSupportedError; successful
+// creation fails the test.
+tcu::TestStatus createInstanceWithUnsupportedExtensionsTest (Context& context)
+{
+	tcu::TestLog&				log						= context.getTestContext().getLog();
+	const PlatformInterface&	platformInterface		= context.getPlatformInterface();
+	const char*					enabledExtensions[]		= {"VK_UNSUPPORTED_EXTENSION", "THIS_IS_NOT_AN_EXTENSION"};
+	const VkApplicationInfo		appInfo					=
+	{
+		VK_STRUCTURE_TYPE_APPLICATION_INFO,			// VkStructureType	sType;
+		DE_NULL,									// const void*		pNext;
+		"appName",									// const char*		pAppName;
+		0u,											// deUint32			appVersion;
+		"engineName",								// const char*		pEngineName;
+		0u,											// deUint32			engineVersion;
+		VK_API_VERSION,								// deUint32			apiVersion;
+	};
+	const VkInstanceCreateInfo	instanceCreateInfo		=
+	{
+		VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO,		// VkStructureType			sType;
+		DE_NULL,									// const void*				pNext;
+		&appInfo,									// const VkApplicationInfo*	pAppInfo;
+		DE_NULL,									// const VkAllocCallbacks*	pAllocCb;
+		0u,											// deUint32					layerCount;
+		DE_NULL,									// const char*const*		ppEnabledLayernames;
+		DE_LENGTH_OF_ARRAY(enabledExtensions),		// deUint32					extensionCount;
+		enabledExtensions,							// const char*const*		ppEnabledExtensionNames;
+	};
+
+	try
+	{
+		Unique<VkInstance> instance(createInstance(platformInterface, &instanceCreateInfo));
+
+		// Creation succeeded: log what was requested, then fail.
+		log << TestLog::Message << "Enabled extensions are: " << TestLog::EndMessage;
+
+		for (int ndx = 0; ndx < DE_LENGTH_OF_ARRAY(enabledExtensions); ndx++)
+			log << TestLog::Message << enabledExtensions[ndx] << TestLog::EndMessage;
+
+		return tcu::TestStatus::fail("Fail, creating instance with unsupported extensions succeeded.");
+	}
+	catch (const tcu::NotSupportedError&)
+	{
+		return tcu::TestStatus::pass("Pass, creating instance with unsupported extension was rejected.");
+	}
+}
+
+// Basic device creation smoke test: create a device with one queue on the
+// default physical device, fetch the queue and wait for it to go idle.
+tcu::TestStatus createDeviceTest (Context& context)
+{
+	const PlatformInterface&	platformInterface	= context.getPlatformInterface();
+	const Unique<VkInstance>	instance			(createDefaultInstance(platformInterface));
+	const InstanceDriver		instanceDriver		(platformInterface, instance.get());
+	const VkPhysicalDevice		physicalDevice		= chooseDevice(instanceDriver, instance.get(), context.getTestContext().getCommandLine());
+	const deUint32				queueFamilyIndex	= 0;
+	const deUint32				queueCount			= 1;
+	const deUint32				queueIndex			= 0;
+	// NOTE(review): aggregate init fills only the first two members of
+	// VkDeviceQueueCreateInfo — assumes the struct has exactly
+	// queueFamilyIndex and queueCount at this API revision (0.138.0).
+	VkDeviceQueueCreateInfo		deviceQueueCreateInfo	=
+	{
+		queueFamilyIndex,						//queueFamilyIndex;
+		queueCount								//queueCount;
+	};
+	VkDeviceCreateInfo			deviceCreateInfo	=
+	{
+		VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO,	//sType;
+		DE_NULL,								//pNext;
+		1,										//queueRecordCount;
+		&deviceQueueCreateInfo,					//pRequestedQueues;
+		0,										//layerCount;
+		DE_NULL,								//ppEnabledLayerNames;
+		0,										//extensionCount;
+		DE_NULL,								//ppEnabledExtensionNames;
+		DE_NULL,								//pEnabledFeatures;
+		VK_DEVICE_CREATE_VALIDATION_BIT,		//flags;
+	};
+
+	const Unique<VkDevice>		device				(createDevice(instanceDriver, physicalDevice, &deviceCreateInfo));
+	const DeviceDriver			deviceDriver		(instanceDriver, device.get());
+	VkQueue						queue;
+
+	// Fetch the single requested queue and verify it can reach idle state.
+	VK_CHECK(deviceDriver.getDeviceQueue(device.get(), queueFamilyIndex, queueIndex, &queue));
+	VK_CHECK(deviceDriver.queueWaitIdle(queue));
+
+	return tcu::TestStatus::pass("Pass");
+}
+
+// Create several logical devices from the same physical device, verify a
+// queue can be obtained and idled on each, then destroy them all in
+// reverse order. Devices are destroyed manually (no Unique<>) so partial
+// failures can be cleaned up explicitly.
+tcu::TestStatus createMultipleDevicesTest (Context& context)
+{
+	tcu::TestLog&										log						= context.getTestContext().getLog();
+	tcu::ResultCollector								resultCollector			(log);
+	const int											numDevices				= 5;
+	const PlatformInterface&							platformInterface		= context.getPlatformInterface();
+	const Unique<VkInstance>							instance				(createDefaultInstance(platformInterface));
+	const InstanceDriver								instanceDriver			(platformInterface, instance.get());
+	const VkPhysicalDevice								physicalDevice			= chooseDevice(instanceDriver, instance.get(), context.getTestContext().getCommandLine());
+	const deUint32										queueFamilyIndex		= 0;
+	const deUint32										queueCount				= 1;
+	const deUint32										queueIndex				= 0;
+	const VkDeviceQueueCreateInfo						deviceQueueCreateInfo	=
+	{
+		queueFamilyIndex,						//queueFamilyIndex;
+		queueCount								//queueCount;
+	};
+	const VkDeviceCreateInfo							deviceCreateInfo		=
+	{
+		VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO,	//sType;
+		DE_NULL,								//pNext;
+		1,										//queueRecordCount;
+		&deviceQueueCreateInfo,					//pRequestedQueues;
+		0,										//layerCount;
+		DE_NULL,								//ppEnabledLayerNames;
+		0,										//extensionCount;
+		DE_NULL,								//ppEnabledExtensionNames;
+		DE_NULL,								//pEnabledFeatures;
+		VK_DEVICE_CREATE_VALIDATION_BIT,		//flags;
+	};
+	VkResult											result;
+	// DE_NULL entries mark never-created devices for the cleanup loops below.
+	vector<VkDevice>									devices(numDevices, (VkDevice)DE_NULL);
+
+	try
+	{
+		for (int deviceNdx = 0; deviceNdx < numDevices; deviceNdx++)
+		{
+			result = instanceDriver.createDevice(physicalDevice, &deviceCreateInfo, &devices[deviceNdx]);
+
+			if (result != VK_SUCCESS)
+			{
+				resultCollector.fail("Failed to create Device No." + de::toString(deviceNdx) + ", Error Code: " + de::toString(result));
+				break;
+			}
+
+			{
+				// Sanity-check the new device: its queue must exist and idle.
+				const DeviceDriver	deviceDriver(instanceDriver, devices[deviceNdx]);
+				VkQueue				queue;
+
+				DE_ASSERT(queueIndex < queueCount);
+				VK_CHECK(deviceDriver.getDeviceQueue(devices[deviceNdx], queueFamilyIndex, queueIndex, &queue));
+				VK_CHECK(deviceDriver.queueWaitIdle(queue));
+			}
+		}
+	}
+	catch (const vk::Error& error)
+	{
+		resultCollector.fail(de::toString(error.getError()));
+	}
+	catch (...)
+	{
+		// Unexpected exception: destroy whatever was created, then rethrow.
+		for (int deviceNdx = (int)devices.size()-1; deviceNdx >= 0; deviceNdx--)
+		{
+			if (devices[deviceNdx] != (VkDevice)DE_NULL)
+			{
+				DeviceDriver deviceDriver(instanceDriver, devices[deviceNdx]);
+				result = deviceDriver.destroyDevice(devices[deviceNdx]);
+			}
+		}
+
+		throw;
+	}
+
+	// Normal cleanup path: destroy in reverse order, recording any failure.
+	for (int deviceNdx = (int)devices.size()-1; deviceNdx >= 0; deviceNdx--)
+	{
+		if (devices[deviceNdx] != (VkDevice)DE_NULL)
+		{
+			DeviceDriver deviceDriver(instanceDriver, devices[deviceNdx]);
+			result = deviceDriver.destroyDevice(devices[deviceNdx]);
+
+			if (result != VK_SUCCESS)
+				resultCollector.fail("Failed to destroy Device No. " + de::toString(deviceNdx) + ", Error Code: " + de::toString(result));
+		}
+	}
+
+	return tcu::TestStatus(resultCollector.getResult(), resultCollector.getMessage());
+}
+
+// Verify that vkCreateDevice rejects requests enabling nonexistent
+// extensions. Expected outcome is a tcu::NotSupportedError; successful
+// creation fails the test.
+tcu::TestStatus createDeviceWithUnsupportedExtensionsTest (Context& context)
+{
+	tcu::TestLog&					log						= context.getTestContext().getLog();
+	const PlatformInterface&		platformInterface		= context.getPlatformInterface();
+	const Unique<VkInstance>		instance				(createDefaultInstance(platformInterface));
+	const InstanceDriver			instanceDriver			(platformInterface, instance.get());
+	const char*						enabledExtensions[]		= {"VK_UNSUPPORTED_EXTENSION", "THIS_IS_NOT_AN_EXTENSION", "VK_DONT_SUPPORT_ME"};
+	const VkPhysicalDevice			physicalDevice			= chooseDevice(instanceDriver, instance.get(), context.getTestContext().getCommandLine());
+	const VkDeviceQueueCreateInfo	deviceQueueCreateInfo	=
+	{
+		0,										//queueFamilyIndex;
+		1,										//queueCount;
+	};
+	const VkDeviceCreateInfo		deviceCreateInfo		=
+	{
+		VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO,	//sType;
+		DE_NULL,								//pNext;
+		1,										//queueRecordCount;
+		&deviceQueueCreateInfo,					//pRequestedQueues;
+		0,										//layerCount;
+		DE_NULL,								//ppEnabledLayerNames;
+		DE_LENGTH_OF_ARRAY(enabledExtensions),	//extensionCount;
+		enabledExtensions,						//ppEnabledExtensionNames;
+		DE_NULL,								//pEnabledFeatures;
+		VK_DEVICE_CREATE_VALIDATION_BIT,		//flags;
+	};
+
+	try
+	{
+		Unique<VkDevice> device(createDevice(instanceDriver, physicalDevice, &deviceCreateInfo));
+
+		// Creation succeeded: log what was requested, then fail.
+		log << TestLog::Message << "Enabled extensions are: " << TestLog::EndMessage;
+
+		for (int ndx = 0; ndx < DE_LENGTH_OF_ARRAY(enabledExtensions); ndx++)
+			log << TestLog::Message << enabledExtensions[ndx] << TestLog::EndMessage;
+
+		return tcu::TestStatus::fail("Fail, create device with unsupported extension but succeed.");
+	}
+	catch (const tcu::NotSupportedError&)
+	{
+		return tcu::TestStatus::pass("Pass, create device with unsupported extension is rejected.");
+	}
+}
+
+// For every queue family reported by the physical device, create a device
+// requesting each queue count from 1 up to the family's maximum, then
+// fetch and idle every requested queue. Any getDeviceQueue/queueWaitIdle
+// failure fails the whole test immediately.
+tcu::TestStatus createDeviceWithVariousQueueCountsTest (Context& context)
+{
+	tcu::TestLog&									log							= context.getTestContext().getLog();
+	const int										queueCountDiff				= 1;	// step between tested queue counts
+	const PlatformInterface&						platformInterface			= context.getPlatformInterface();
+	const Unique<VkInstance>						instance					(createDefaultInstance(platformInterface));
+	const InstanceDriver							instanceDriver				(platformInterface, instance.get());
+	const VkPhysicalDevice							physicalDevice				= chooseDevice(instanceDriver, instance.get(), context.getTestContext().getCommandLine());
+	const vector<VkPhysicalDeviceQueueProperties>	physicalDeviceQueueProperties	= getPhysicalDeviceQueueProperties(instanceDriver, physicalDevice);
+	vector<VkDeviceQueueCreateInfo>					deviceQueueCreateInfos;
+	VkResult										result;
+
+	// Build one create-info per (family, queueCount) combination to test.
+	for (deUint32 queueFamilyNdx = 0; queueFamilyNdx < (deUint32)physicalDeviceQueueProperties.size(); queueFamilyNdx++)
+	{
+		const deUint32 maxQueueCount = physicalDeviceQueueProperties[queueFamilyNdx].queueCount;
+
+		for (deUint32 queueCount = 1; queueCount <= maxQueueCount; queueCount += queueCountDiff)
+		{
+			const VkDeviceQueueCreateInfo queueCreateInfo =
+			{
+				queueFamilyNdx,
+				queueCount
+			};
+
+			deviceQueueCreateInfos.push_back(queueCreateInfo);
+		}
+	}
+
+	// One device per combination; destroyed automatically by Unique<>.
+	for (size_t testNdx = 0; testNdx < deviceQueueCreateInfos.size(); testNdx++)
+	{
+		const VkDeviceQueueCreateInfo&	queueCreateInfo		= deviceQueueCreateInfos[testNdx];
+		const VkDeviceCreateInfo		deviceCreateInfo	=
+		{
+			VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO,	//sType;
+			DE_NULL,								//pNext;
+			1,										//queueRecordCount;
+			&queueCreateInfo,						//pRequestedQueues;
+			0,										//layerCount;
+			DE_NULL,								//ppEnabledLayerNames;
+			0,										//extensionCount;
+			DE_NULL,								//ppEnabledExtensionNames;
+			DE_NULL,								//pEnabledFeatures;
+			VK_DEVICE_CREATE_VALIDATION_BIT,		//flags;
+		};
+		const Unique<VkDevice>			device				(createDevice(instanceDriver, physicalDevice, &deviceCreateInfo));
+		const DeviceDriver				deviceDriver		(instanceDriver, device.get());
+		const deUint32					queueFamilyIndex	= deviceCreateInfo.pRequestedQueues->queueFamilyIndex;
+		const deUint32					queueCount			= deviceCreateInfo.pRequestedQueues->queueCount;
+
+		// Every requested queue must be retrievable and able to go idle.
+		for (deUint32 queueIndex = 0; queueIndex < queueCount; queueIndex++)
+		{
+			VkQueue queue;
+			result = deviceDriver.getDeviceQueue(device.get(), queueFamilyIndex, queueIndex, &queue);
+
+			if (result != VK_SUCCESS)
+			{
+				log << TestLog::Message
+					<< "Fail to getDeviceQueue"
+					<< ", queueIndex = " << queueIndex
+					<< ", queueCreateInfo " << queueCreateInfo
+					<< ", Error Code: " << result
+					<< TestLog::EndMessage;
+				return tcu::TestStatus::fail("Fail");
+			}
+
+			result = deviceDriver.queueWaitIdle(queue);
+			if (result != VK_SUCCESS)
+			{
+				log << TestLog::Message
+					<< "vkQueueWaitIdle failed"
+					<< ", queueIndex = " << queueIndex
+					<< ", queueCreateInfo " << queueCreateInfo
+					<< ", Error Code: " << result
+					<< TestLog::EndMessage;
+				return tcu::TestStatus::fail("Fail");
+			}
+		}
+	}
+	return tcu::TestStatus::pass("Pass");
+}
+
+} // anonymous
+
+// Build the "device_init" test group and register all device/instance
+// initialization test cases defined above. Ownership of the group is
+// transferred to the caller via release().
+tcu::TestCaseGroup* createDeviceInitializationTests (tcu::TestContext& testCtx)
+{
+	de::MovePtr<tcu::TestCaseGroup>	deviceInitializationTests (new tcu::TestCaseGroup(testCtx, "device_init", "Device Initialization Tests"));
+
+	addFunctionCase(deviceInitializationTests.get(), "create_instance_name_version",			"", createInstanceTest);
+	addFunctionCase(deviceInitializationTests.get(), "create_instance_invalid_api_version",		"", createInstanceWithInvalidApiVersionTest);
+	addFunctionCase(deviceInitializationTests.get(), "create_instance_unsupported_extensions",	"", createInstanceWithUnsupportedExtensionsTest);
+	addFunctionCase(deviceInitializationTests.get(), "create_device",							"", createDeviceTest);
+	addFunctionCase(deviceInitializationTests.get(), "create_multiple_devices",					"", createMultipleDevicesTest);
+	addFunctionCase(deviceInitializationTests.get(), "create_device_unsupported_extensions",	"", createDeviceWithUnsupportedExtensionsTest);
+	addFunctionCase(deviceInitializationTests.get(), "create_device_various_queue_counts",		"", createDeviceWithVariousQueueCountsTest);
+
+	return deviceInitializationTests.release();
+}
+
+} // api
+} // vkt
--- /dev/null
+#ifndef _VKTAPIDEVICEINITIALIZATIONTESTS_HPP
+#define _VKTAPIDEVICEINITIALIZATIONTESTS_HPP
+/*-------------------------------------------------------------------------
+ * Vulkan Conformance Tests
+ * ------------------------
+ *
+ * Copyright (c) 2015 Google Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and/or associated documentation files (the
+ * "Materials"), to deal in the Materials without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Materials, and to
+ * permit persons to whom the Materials are furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice(s) and this permission notice shall be
+ * included in all copies or substantial portions of the Materials.
+ *
+ * The Materials are Confidential Information as defined by the
+ * Khronos Membership Agreement until designated non-confidential by
+ * Khronos, at which point this condition clause shall be removed.
+ *
+ * THE MATERIALS ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * MATERIALS OR THE USE OR OTHER DEALINGS IN THE MATERIALS.
+ *
+ *//*!
+ * \file
+ * \brief Device Initialization tests
+ *//*--------------------------------------------------------------------*/
+
+#include "tcuDefs.hpp"
+#include "tcuTestCase.hpp"
+
+namespace vkt
+{
+namespace api
+{
+
+// Builds the "device_init" test group containing instance and device
+// creation tests (implemented in the corresponding .cpp file).
+tcu::TestCaseGroup* createDeviceInitializationTests (tcu::TestContext& testCtx);
+
+} // api
+} // vkt
+
+#endif // _VKTAPIDEVICEINITIALIZATIONTESTS_HPP
--- /dev/null
+/*-------------------------------------------------------------------------
+ * Vulkan Conformance Tests
+ * ------------------------
+ *
+ * Copyright (c) 2015 Google Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and/or associated documentation files (the
+ * "Materials"), to deal in the Materials without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Materials, and to
+ * permit persons to whom the Materials are furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice(s) and this permission notice shall be
+ * included in all copies or substantial portions of the Materials.
+ *
+ * The Materials are Confidential Information as defined by the
+ * Khronos Membership Agreement until designated non-confidential by
+ * Khronos, at which point this condition clause shall be removed.
+ *
+ * THE MATERIALS ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * MATERIALS OR THE USE OR OTHER DEALINGS IN THE MATERIALS.
+ *
+ *//*!
+ * \file
+ * \brief Simple Smoke Tests
+ *//*--------------------------------------------------------------------*/
+
+#include "vktApiTests.hpp"
+
+#include "vktTestCaseUtil.hpp"
+
+#include "vkDefs.hpp"
+#include "vkPlatform.hpp"
+#include "vkStrUtil.hpp"
+#include "vkRef.hpp"
+#include "vkRefUtil.hpp"
+#include "vkQueryUtil.hpp"
+#include "vkMemUtil.hpp"
+#include "vkDeviceUtil.hpp"
+#include "vkPrograms.hpp"
+
+#include "tcuTestLog.hpp"
+#include "tcuFormatUtil.hpp"
+
+#include "deUniquePtr.hpp"
+
+namespace vkt
+{
+namespace api
+{
+
+namespace
+{
+
+using namespace vk;
+using std::vector;
+using tcu::TestLog;
+using de::UniquePtr;
+
+// Creates a VkSampler with nearest filtering and clamp addressing, and
+// exercises Move<>/Unique<> handle ownership transfer. Only object creation
+// is tested; the sampler is never used for actual sampling.
+tcu::TestStatus createSamplerTest (Context& context)
+{
+	const VkDevice			vkDevice	= context.getDevice();
+	const DeviceInterface&	vk			= context.getDeviceInterface();
+
+	{
+		const struct VkSamplerCreateInfo		samplerInfo	=
+		{
+			VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO,		// VkStructureType	sType;
+			DE_NULL,									// const void*		pNext;
+			VK_TEX_FILTER_NEAREST,						// VkTexFilter		magFilter;
+			VK_TEX_FILTER_NEAREST,						// VkTexFilter		minFilter;
+			VK_TEX_MIPMAP_MODE_BASE,					// VkTexMipmapMode	mipMode;
+			VK_TEX_ADDRESS_CLAMP,						// VkTexAddress		addressU;
+			VK_TEX_ADDRESS_CLAMP,						// VkTexAddress		addressV;
+			VK_TEX_ADDRESS_CLAMP,						// VkTexAddress		addressW;
+			0.0f,										// float			mipLodBias;
+			0.0f,										// float			maxAnisotropy;
+			DE_FALSE,									// VkBool32			compareEnable;
+			VK_COMPARE_OP_ALWAYS,						// VkCompareOp		compareOp;
+			0.0f,										// float			minLod;
+			0.0f,										// float			maxLod;
+			VK_BORDER_COLOR_FLOAT_TRANSPARENT_BLACK,	// VkBorderColor	borderColor;
+		};
+
+		// Exercise handle wrapper semantics: create into a Move<>, move-assign
+		// into a second Move<>, then hand ownership to a Unique<> which
+		// destroys the sampler when this scope ends.
+		Move<VkSampler>			tmpSampler	= createSampler(vk, vkDevice, &samplerInfo);
+		Move<VkSampler>			tmp2Sampler;
+
+		tmp2Sampler = tmpSampler;
+
+		const Unique<VkSampler>	sampler		(tmp2Sampler);
+	}
+
+	return tcu::TestStatus::pass("Creating sampler succeeded");
+}
+
+// Registers the minimal GLSL ES 3.0 vertex shader ("test") used by the
+// shader-module creation smoke test.
+void createShaderProgs (SourceCollections& programCollection)
+{
+	programCollection.glslSources.add("test") << glu::VertexSource(
+		"#version 300 es\n"
+		"in highp vec4 a_position;\n"
+		"void main (void) { gl_Position = a_position; }\n");
+}
+
+// Building a shader module from the precompiled "test" binary must succeed;
+// the module itself is never attached to a pipeline.
+tcu::TestStatus createShaderModuleTest (Context& context)
+{
+	const DeviceInterface&			vkd		= context.getDeviceInterface();
+	const VkDevice					device	= context.getDevice();
+	const Unique<VkShaderModule>	module	(createShaderModule(vkd, device, context.getBinaryCollection().get("test"), 0));
+
+	return tcu::TestStatus::pass("Creating shader module succeeded");
+}
+
+// Hand-written SPIR-V assembly for the "asm_triangle" smoke test: a
+// pass-through vertex shader and a constant-color fragment shader,
+// functionally equivalent to the GLSL programs in createTriangleProgs.
+void createTriangleAsmProgs (SourceCollections& dst)
+{
+	// Vertex shader: copies a_position (Location 0) to gl_Position.
+	dst.spirvAsmSources.add("vert") <<
+		"		OpSource ESSL 300\n"
+		"		OpCapability Shader\n"
+		"%1 =	OpExtInstImport \"GLSL.std.450\"\n"
+		"		OpMemoryModel Logical GLSL450\n"
+		"		OpEntryPoint Vertex %4 \"main\"\n"
+		"		OpName %4 \"main\"\n"
+		"		OpName %10 \"gl_Position\"\n"
+		"		OpName %12 \"a_position\"\n"
+		"		OpName %16 \"gl_VertexID\"\n"
+		"		OpName %17 \"gl_InstanceID\"\n"
+		"		OpDecorate %10 BuiltIn Position\n"
+		"		OpDecorate %12 Location 0\n"
+		"		OpDecorate %16 BuiltIn VertexId\n"
+		"		OpDecorate %16 NoStaticUse\n"
+		"		OpDecorate %17 BuiltIn InstanceId\n"
+		"		OpDecorate %17 NoStaticUse\n"
+		"%2 =	OpTypeVoid\n"
+		"%3 =	OpTypeFunction %2\n"
+		"%7 =	OpTypeFloat 32\n"
+		"%8 =	OpTypeVector %7 4\n"
+		"%9 =	OpTypePointer Output %8\n"
+		"%10 =	OpVariable %9 Output\n"
+		"%11 =	OpTypePointer Input %8\n"
+		"%12 =	OpVariable %11 Input\n"
+		"%14 =	OpTypeInt 32 1\n"
+		"%15 =	OpTypePointer Input %14\n"
+		"%16 =	OpVariable %15 Input\n"
+		"%17 =	OpVariable %15 Input\n"
+		"%4 =	OpFunction %2 None %3\n"
+		"%5 =	OpLabel\n"
+		"%13 =	OpLoad %8 %12\n"
+		"		OpStore %10 %13\n"
+		"		OpBranch %6\n"
+		"%6 =	OpLabel\n"
+		"		OpReturn\n"
+		"		OpFunctionEnd\n";
+	// Fragment shader: writes a constant color to o_color (Location 0).
+	// NOTE(review): "%11 = OpConstant %7 1065353216" encodes float 1.0 as raw
+	// IEEE-754 bits (0x3F800000); %12 is 0.0 — together they form the
+	// (1, 0, 1, 1) magenta written by the GLSL variant.
+	dst.spirvAsmSources.add("frag") <<
+		"		OpSource ESSL 300\n"
+		"		OpCapability Shader\n"
+		"%1 =	OpExtInstImport \"GLSL.std.450\"\n"
+		"		OpMemoryModel Logical GLSL450\n"
+		"		OpEntryPoint Fragment %4 \"main\"\n"
+		"		OpExecutionMode %4 OriginLowerLeft\n"
+		"		OpName %4 \"main\"\n"
+		"		OpName %10 \"o_color\"\n"
+		"		OpDecorate %10 RelaxedPrecision\n"
+		"		OpDecorate %10 Location 0\n"
+		"%2 =	OpTypeVoid\n"
+		"%3 =	OpTypeFunction %2\n"
+		"%7 =	OpTypeFloat 32\n"
+		"%8 =	OpTypeVector %7 4\n"
+		"%9 =	OpTypePointer Output %8\n"
+		"%10 =	OpVariable %9 Output\n"
+		"%11 =	OpConstant %7 1065353216\n"
+		"%12 =	OpConstant %7 0\n"
+		"%13 =	OpConstantComposite %8 %11 %12 %11 %11\n"
+		"%4 =	OpFunction %2 None %3\n"
+		"%5 =	OpLabel\n"
+		"		OpStore %10 %13\n"
+		"		OpBranch %6\n"
+		"%6 =	OpLabel\n"
+		"		OpReturn\n"
+		"		OpFunctionEnd\n";
+}
+
+// Registers the GLSL ES 3.0 programs for the "triangle" smoke test: a
+// pass-through vertex shader and a fragment shader writing constant magenta.
+void createTriangleProgs (SourceCollections& programCollection)
+{
+	programCollection.glslSources.add("vert") << glu::VertexSource(
+		"#version 300 es\n"
+		"layout(location = 0) in highp vec4 a_position;\n"
+		"void main (void) { gl_Position = a_position; }\n");
+	programCollection.glslSources.add("frag") << glu::FragmentSource(
+		"#version 300 es\n"
+		"layout(location = 0) out lowp vec4 o_color;\n"
+		"void main (void) { o_color = vec4(1.0, 0.0, 1.0, 1.0); }\n");
+}
+
+// Renders a single triangle into a 256x256 RGBA8 offscreen image and reads
+// the result back into a host-visible buffer, logging it to the test log.
+// Exercises the full basic path: buffer/image creation and binding, render
+// pass, shader modules, graphics pipeline, dynamic state, command recording,
+// pipeline barriers, submission, fence wait and host readback.
+// NOTE(review): the readback image is only logged, not compared against a
+// reference; the test passes whenever execution completes without error.
+tcu::TestStatus renderTriangleTest (Context& context)
+{
+	const VkDevice							vkDevice				= context.getDevice();
+	const DeviceInterface&					vk						= context.getDeviceInterface();
+	const VkQueue							queue					= context.getUniversalQueue();
+	const deUint32							queueFamilyIndex		= context.getUniversalQueueFamilyIndex();
+	SimpleAllocator							memAlloc				(vk, vkDevice, getPhysicalDeviceMemoryProperties(context.getInstanceInterface(), context.getPhysicalDevice()));
+	const tcu::IVec2						renderSize				(256, 256);
+
+	const tcu::Vec4							vertices[]	=
+	{
+		tcu::Vec4(-0.5f, -0.5f, 0.0f, 1.0f),
+		tcu::Vec4(+0.5f, -0.5f, 0.0f, 1.0f),
+		tcu::Vec4( 0.0f, +0.5f, 0.0f, 1.0f)
+	};
+
+	// Host-visible vertex buffer; filled by the host after command recording.
+	const VkBufferCreateInfo				vertexBufferParams		=
+	{
+		VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO,	// VkStructureType		sType;
+		DE_NULL,								// const void*			pNext;
+		(VkDeviceSize)sizeof(vertices),			// VkDeviceSize			size;
+		VK_BUFFER_USAGE_VERTEX_BUFFER_BIT,		// VkBufferUsageFlags	usage;
+		0u,										// VkBufferCreateFlags	flags;
+		VK_SHARING_MODE_EXCLUSIVE,				// VkSharingMode		sharingMode;
+		1u,										// deUint32				queueFamilyCount;
+		&queueFamilyIndex,						// const deUint32*		pQueueFamilyIndices;
+	};
+	const Unique<VkBuffer>					vertexBuffer			(createBuffer(vk, vkDevice, &vertexBufferParams));
+	const UniquePtr<Allocation>				vertexBufferMemory		(memAlloc.allocate(getBufferMemoryRequirements(vk, vkDevice, *vertexBuffer), MemoryRequirement::HostVisible));
+
+	VK_CHECK(vk.bindBufferMemory(vkDevice, *vertexBuffer, vertexBufferMemory->getMemory(), vertexBufferMemory->getOffset()));
+
+	// Host-visible buffer used to read the rendered image back.
+	const VkDeviceSize						imageSizeBytes			= (VkDeviceSize)(sizeof(deUint32)*renderSize.x()*renderSize.y());
+	const VkBufferCreateInfo				readImageBufferParams	=
+	{
+		VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO,		// VkStructureType		sType;
+		DE_NULL,									// const void*			pNext;
+		imageSizeBytes,								// VkDeviceSize			size;
+		VK_BUFFER_USAGE_TRANSFER_DESTINATION_BIT,	// VkBufferUsageFlags	usage;
+		0u,											// VkBufferCreateFlags	flags;
+		VK_SHARING_MODE_EXCLUSIVE,					// VkSharingMode		sharingMode;
+		1u,											// deUint32				queueFamilyCount;
+		&queueFamilyIndex,							// const deUint32*		pQueueFamilyIndices;
+	};
+	const Unique<VkBuffer>					readImageBuffer			(createBuffer(vk, vkDevice, &readImageBufferParams));
+	const UniquePtr<Allocation>				readImageBufferMemory	(memAlloc.allocate(getBufferMemoryRequirements(vk, vkDevice, *readImageBuffer), MemoryRequirement::HostVisible));
+
+	VK_CHECK(vk.bindBufferMemory(vkDevice, *readImageBuffer, readImageBufferMemory->getMemory(), readImageBufferMemory->getOffset()));
+
+	// Color render target (also used as transfer source for the readback).
+	const VkImageCreateInfo					imageParams				=
+	{
+		VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO,									// VkStructureType		sType;
+		DE_NULL,																// const void*			pNext;
+		VK_IMAGE_TYPE_2D,														// VkImageType			imageType;
+		VK_FORMAT_R8G8B8A8_UNORM,												// VkFormat				format;
+		{ renderSize.x(), renderSize.y(), 1 },									// VkExtent3D			extent;
+		1u,																		// deUint32				mipLevels;
+		1u,																		// deUint32				arraySize;
+		1u,																		// deUint32				samples;
+		VK_IMAGE_TILING_OPTIMAL,												// VkImageTiling		tiling;
+		VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT|VK_IMAGE_USAGE_TRANSFER_SOURCE_BIT,	// VkImageUsageFlags	usage;
+		0u,																		// VkImageCreateFlags	flags;
+		VK_SHARING_MODE_EXCLUSIVE,												// VkSharingMode		sharingMode;
+		1u,																		// deUint32				queueFamilyCount;
+		&queueFamilyIndex,														// const deUint32*		pQueueFamilyIndices;
+	};
+
+	const Unique<VkImage>					image					(createImage(vk, vkDevice, &imageParams));
+	const UniquePtr<Allocation>				imageMemory				(memAlloc.allocate(getImageMemoryRequirements(vk, vkDevice, *image), MemoryRequirement::Any));
+
+	VK_CHECK(vk.bindImageMemory(vkDevice, *image, imageMemory->getMemory(), imageMemory->getOffset()));
+
+	// Render pass with a single color attachment and no depth/stencil.
+	const VkAttachmentDescription			colorAttDesc			=
+	{
+		VK_STRUCTURE_TYPE_ATTACHMENT_DESCRIPTION,	// VkStructureType		sType;
+		DE_NULL,									// const void*			pNext;
+		VK_FORMAT_R8G8B8A8_UNORM,					// VkFormat				format;
+		1u,											// deUint32				samples;
+		VK_ATTACHMENT_LOAD_OP_CLEAR,				// VkAttachmentLoadOp	loadOp;
+		VK_ATTACHMENT_STORE_OP_STORE,				// VkAttachmentStoreOp	storeOp;
+		VK_ATTACHMENT_LOAD_OP_DONT_CARE,			// VkAttachmentLoadOp	stencilLoadOp;
+		VK_ATTACHMENT_STORE_OP_DONT_CARE,			// VkAttachmentStoreOp	stencilStoreOp;
+		VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL,	// VkImageLayout		initialLayout;
+		VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL,	// VkImageLayout		finalLayout;
+	};
+	const VkAttachmentReference				colorAttRef				=
+	{
+		0u,											// deUint32			attachment;
+		VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL,	// VkImageLayout	layout;
+	};
+	const VkSubpassDescription				subpassDesc				=
+	{
+		VK_STRUCTURE_TYPE_SUBPASS_DESCRIPTION,			// VkStructureType				sType;
+		DE_NULL,										// const void*					pNext;
+		VK_PIPELINE_BIND_POINT_GRAPHICS,				// VkPipelineBindPoint			pipelineBindPoint;
+		0u,												// VkSubpassDescriptionFlags	flags;
+		0u,												// deUint32						inputCount;
+		DE_NULL,										// const VkAttachmentReference*	inputAttachments;
+		1u,												// deUint32						colorCount;
+		&colorAttRef,									// const VkAttachmentReference*	colorAttachments;
+		DE_NULL,										// const VkAttachmentReference*	resolveAttachments;
+		{ VK_NO_ATTACHMENT, VK_IMAGE_LAYOUT_GENERAL },	// VkAttachmentReference		depthStencilAttachment;
+		0u,												// deUint32						preserveCount;
+		DE_NULL,										// const VkAttachmentReference*	preserveAttachments;
+
+	};
+	const VkRenderPassCreateInfo			renderPassParams		=
+	{
+		VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO,	// VkStructureType					sType;
+		DE_NULL,									// const void*						pNext;
+		1u,											// deUint32							attachmentCount;
+		&colorAttDesc,								// const VkAttachmentDescription*	pAttachments;
+		1u,											// deUint32							subpassCount;
+		&subpassDesc,								// const VkSubpassDescription*		pSubpasses;
+		0u,											// deUint32							dependencyCount;
+		DE_NULL,									// const VkSubpassDependency*		pDependencies;
+	};
+	const Unique<VkRenderPass>				renderPass				(createRenderPass(vk, vkDevice, &renderPassParams));
+
+	// View of the color image for use as the framebuffer attachment.
+	const VkAttachmentViewCreateInfo		colorAttViewParams		=
+	{
+		VK_STRUCTURE_TYPE_ATTACHMENT_VIEW_CREATE_INFO,	// VkStructureType				sType;
+		DE_NULL,										// const void*					pNext;
+		*image,											// VkImage						image;
+		VK_FORMAT_R8G8B8A8_UNORM,						// VkFormat						format;
+		0u,												// deUint32						mipLevel;
+		0u,												// deUint32						baseArraySlice;
+		1u,												// deUint32						arraySize;
+		0u,												// VkAttachmentViewCreateFlags	flags;
+	};
+	const Unique<VkAttachmentView>			colorAttView			(createAttachmentView(vk, vkDevice, &colorAttViewParams));
+
+	// Shader modules and shader objects for the "vert" / "frag" programs.
+	const Unique<VkShaderModule>			vertShaderModule		(createShaderModule(vk, vkDevice, context.getBinaryCollection().get("vert"), 0));
+	const VkShaderCreateInfo				vertShaderParams		=
+	{
+		VK_STRUCTURE_TYPE_SHADER_CREATE_INFO,	// VkStructureType		sType;
+		DE_NULL,								// const void*			pNext;
+		*vertShaderModule,						// VkShaderModule		module;
+		"main",									// const char*			pName;
+		0u,										// VkShaderCreateFlags	flags;
+	};
+	const Unique<VkShader>					vertShader				(createShader(vk, vkDevice, &vertShaderParams));
+	const Unique<VkShaderModule>			fragShaderModule		(createShaderModule(vk, vkDevice, context.getBinaryCollection().get("frag"), 0));
+	const VkShaderCreateInfo				fragShaderParams		=
+	{
+		VK_STRUCTURE_TYPE_SHADER_CREATE_INFO,	// VkStructureType		sType;
+		DE_NULL,								// const void*			pNext;
+		*fragShaderModule,						// VkShaderModule		module;
+		"main",									// const char*			pName;
+		0u,										// VkShaderCreateFlags	flags;
+	};
+	const Unique<VkShader>					fragShader				(createShader(vk, vkDevice, &fragShaderParams));
+
+	// Pipeline layout (no descriptor sets or push constants).
+	const VkPipelineLayoutCreateInfo		pipelineLayoutParams	=
+	{
+		VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO,	// VkStructureType				sType;
+		DE_NULL,										// const void*					pNext;
+		0u,												// deUint32						descriptorSetCount;
+		DE_NULL,										// const VkDescriptorSetLayout*	pSetLayouts;
+		0u,												// deUint32						pushConstantRangeCount;
+		DE_NULL,										// const VkPushConstantRange*	pPushConstantRanges;
+	};
+	const Unique<VkPipelineLayout>			pipelineLayout			(createPipelineLayout(vk, vkDevice, &pipelineLayoutParams));
+
+	// Pipeline
+	const VkSpecializationInfo				emptyShaderSpecParams	=
+	{
+		0u,											// deUint32							mapEntryCount;
+		DE_NULL,									// const VkSpecializationMapEntry*	pMap;
+		0,											// const deUintptr					dataSize;
+		DE_NULL,									// const void*						pData;
+	};
+	const VkPipelineShaderStageCreateInfo	shaderStageParams[]	=
+	{
+		{
+			VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO,	// VkStructureType				sType;
+			DE_NULL,												// const void*					pNext;
+			VK_SHADER_STAGE_VERTEX,									// VkShaderStage				stage;
+			*vertShader,											// VkShader						shader;
+			&emptyShaderSpecParams,									// const VkSpecializationInfo*	pSpecializationInfo;
+		},
+		{
+			VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO,	// VkStructureType				sType;
+			DE_NULL,												// const void*					pNext;
+			VK_SHADER_STAGE_FRAGMENT,								// VkShaderStage				stage;
+			*fragShader,											// VkShader						shader;
+			&emptyShaderSpecParams,									// const VkSpecializationInfo*	pSpecializationInfo;
+		}
+	};
+	const VkPipelineDepthStencilStateCreateInfo	depthStencilParams	=
+	{
+		VK_STRUCTURE_TYPE_PIPELINE_DEPTH_STENCIL_STATE_CREATE_INFO,	// VkStructureType	sType;
+		DE_NULL,													// const void*		pNext;
+		DE_FALSE,													// deUint32			depthTestEnable;
+		DE_FALSE,													// deUint32			depthWriteEnable;
+		VK_COMPARE_OP_ALWAYS,										// VkCompareOp		depthCompareOp;
+		DE_FALSE,													// deUint32			depthBoundsEnable;
+		DE_FALSE,													// deUint32			stencilTestEnable;
+		{
+			VK_STENCIL_OP_KEEP,											// VkStencilOp	stencilFailOp;
+			VK_STENCIL_OP_KEEP,											// VkStencilOp	stencilPassOp;
+			VK_STENCIL_OP_KEEP,											// VkStencilOp	stencilDepthFailOp;
+			VK_COMPARE_OP_ALWAYS,										// VkCompareOp	stencilCompareOp;
+		},															// VkStencilOpState	front;
+		{
+			VK_STENCIL_OP_KEEP,											// VkStencilOp	stencilFailOp;
+			VK_STENCIL_OP_KEEP,											// VkStencilOp	stencilPassOp;
+			VK_STENCIL_OP_KEEP,											// VkStencilOp	stencilDepthFailOp;
+			VK_COMPARE_OP_ALWAYS,										// VkCompareOp	stencilCompareOp;
+		}															// VkStencilOpState	back;
+	};
+	const VkPipelineViewportStateCreateInfo		viewportParams		=
+	{
+		VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_STATE_CREATE_INFO,	// VkStructureType	sType;
+		DE_NULL,												// const void*		pNext;
+		1u,														// deUint32			viewportCount;
+	};
+	const VkPipelineMultisampleStateCreateInfo	multisampleParams	=
+	{
+		VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO,	// VkStructureType	sType;
+		DE_NULL,													// const void*		pNext;
+		1u,															// deUint32			rasterSamples;
+		DE_FALSE,													// deUint32			sampleShadingEnable;
+		0.0f,														// float			minSampleShading;
+		~0u,														// VkSampleMask		sampleMask;
+	};
+	const VkPipelineRasterStateCreateInfo		rasterParams		=
+	{
+		VK_STRUCTURE_TYPE_PIPELINE_RASTER_STATE_CREATE_INFO,	// VkStructureType	sType;
+		DE_NULL,												// const void*		pNext;
+		DE_TRUE,												// deUint32			depthClipEnable;
+		DE_FALSE,												// deUint32			rasterizerDiscardEnable;
+		VK_FILL_MODE_SOLID,										// VkFillMode		fillMode;
+		VK_CULL_MODE_NONE,										// VkCullMode		cullMode;
+		VK_FRONT_FACE_CCW,										// VkFrontFace		frontFace;
+	};
+	const VkPipelineInputAssemblyStateCreateInfo	inputAssemblyParams	=
+	{
+		VK_STRUCTURE_TYPE_PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO,	// VkStructureType		sType;
+		DE_NULL,														// const void*			pNext;
+		VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST,							// VkPrimitiveTopology	topology;
+		DE_FALSE,														// deUint32				primitiveRestartEnable;
+	};
+	const VkVertexInputBindingDescription		vertexBinding0		=
+	{
+		0u,									// deUint32					binding;
+		(deUint32)sizeof(tcu::Vec4),		// deUint32					strideInBytes;
+		VK_VERTEX_INPUT_STEP_RATE_VERTEX,	// VkVertexInputStepRate	stepRate;
+	};
+	const VkVertexInputAttributeDescription		vertexAttrib0		=
+	{
+		0u,									// deUint32	location;
+		0u,									// deUint32	binding;
+		VK_FORMAT_R32G32B32A32_SFLOAT,		// VkFormat	format;
+		0u,									// deUint32	offsetInBytes;
+	};
+	const VkPipelineVertexInputStateCreateInfo	vertexInputStateParams	=
+	{
+		VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO,	// VkStructureType							sType;
+		DE_NULL,													// const void*								pNext;
+		1u,															// deUint32									bindingCount;
+		&vertexBinding0,											// const VkVertexInputBindingDescription*	pVertexBindingDescriptions;
+		1u,															// deUint32									attributeCount;
+		&vertexAttrib0,												// const VkVertexInputAttributeDescription*	pVertexAttributeDescriptions;
+	};
+	const VkPipelineColorBlendAttachmentState	attBlendParams		=
+	{
+		DE_FALSE,															// deUint32			blendEnable;
+		VK_BLEND_ONE,														// VkBlend			srcBlendColor;
+		VK_BLEND_ZERO,														// VkBlend			destBlendColor;
+		VK_BLEND_OP_ADD,													// VkBlendOp		blendOpColor;
+		VK_BLEND_ONE,														// VkBlend			srcBlendAlpha;
+		VK_BLEND_ZERO,														// VkBlend			destBlendAlpha;
+		VK_BLEND_OP_ADD,													// VkBlendOp		blendOpAlpha;
+		VK_CHANNEL_R_BIT|VK_CHANNEL_G_BIT|VK_CHANNEL_B_BIT|VK_CHANNEL_A_BIT,	// VkChannelFlags	channelWriteMask;
+	};
+	const VkPipelineColorBlendStateCreateInfo	blendParams			=
+	{
+		VK_STRUCTURE_TYPE_PIPELINE_COLOR_BLEND_STATE_CREATE_INFO,	// VkStructureType								sType;
+		DE_NULL,													// const void*									pNext;
+		DE_FALSE,													// VkBool32										alphaToCoverageEnable;
+		DE_FALSE,													// VkBool32										logicOpEnable;
+		VK_LOGIC_OP_COPY,											// VkLogicOp									logicOp;
+		1u,															// deUint32										attachmentCount;
+		&attBlendParams,											// const VkPipelineColorBlendAttachmentState*	pAttachments;
+	};
+	const VkGraphicsPipelineCreateInfo			pipelineParams		=
+	{
+		VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO,	// VkStructureType									sType;
+		DE_NULL,											// const void*										pNext;
+		(deUint32)DE_LENGTH_OF_ARRAY(shaderStageParams),	// deUint32											stageCount;
+		shaderStageParams,									// const VkPipelineShaderStageCreateInfo*			pStages;
+		&vertexInputStateParams,							// const VkPipelineVertexInputStateCreateInfo*		pVertexInputState;
+		&inputAssemblyParams,								// const VkPipelineInputAssemblyStateCreateInfo*	pInputAssemblyState;
+		DE_NULL,											// const VkPipelineTessellationStateCreateInfo*		pTessellationState;
+		&viewportParams,									// const VkPipelineViewportStateCreateInfo*			pViewportState;
+		&rasterParams,										// const VkPipelineRasterStateCreateInfo*			pRasterState;
+		&multisampleParams,									// const VkPipelineMultisampleStateCreateInfo*		pMultisampleState;
+		&depthStencilParams,								// const VkPipelineDepthStencilStateCreateInfo*		pDepthStencilState;
+		&blendParams,										// const VkPipelineColorBlendStateCreateInfo*		pColorBlendState;
+		0u,													// VkPipelineCreateFlags							flags;
+		*pipelineLayout,									// VkPipelineLayout									layout;
+		*renderPass,										// VkRenderPass										renderPass;
+		0u,													// deUint32											subpass;
+		DE_NULL,											// VkPipeline										basePipelineHandle;
+		0u,													// deInt32											basePipelineIndex;
+	};
+
+	const Unique<VkPipeline>				pipeline				(createGraphicsPipeline(vk, vkDevice, DE_NULL, &pipelineParams));
+
+	// Framebuffer
+	const VkAttachmentBindInfo				colorBinding0			=
+	{
+		*colorAttView,								// VkColorAttachmentView	view;
+		VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL,	// VkImageLayout			layout;
+	};
+	const VkFramebufferCreateInfo			framebufferParams		=
+	{
+		VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO,	// VkStructureType				sType;
+		DE_NULL,									// const void*					pNext;
+		*renderPass,								// VkRenderPass					renderPass;
+		1u,											// deUint32						attachmentCount;
+		&colorBinding0,								// const VkAttachmentBindInfo*	pAttachments;
+		(deUint32)renderSize.x(),					// deUint32						width;
+		(deUint32)renderSize.y(),					// deUint32						height;
+		1u,											// deUint32						layers;
+	};
+	const Unique<VkFramebuffer>				framebuffer				(createFramebuffer(vk, vkDevice, &framebufferParams));
+
+	// Viewport state
+	const VkViewport						viewport0				=
+	{
+		0.0f,						// float	originX;
+		0.0f,						// float	originY;
+		(float)renderSize.x(),		// float	width;
+		(float)renderSize.y(),		// float	height;
+		0.0f,						// float	minDepth;
+		1.0f,						// float	maxDepth;
+	};
+	const VkRect2D							scissor0				=
+	{
+		{
+			0u,							// deInt32	x;
+			0u,							// deInt32	y;
+		},							// VkOffset2D	offset;
+		{
+			renderSize.x(),				// deInt32	width;
+			renderSize.y(),				// deInt32	height;
+		},							// VkExtent2D	extent;
+	};
+	const VkDynamicViewportStateCreateInfo	dynViewportStateParams	=
+	{
+		VK_STRUCTURE_TYPE_DYNAMIC_VIEWPORT_STATE_CREATE_INFO,	// VkStructureType		sType;
+		DE_NULL,												// const void*			pNext;
+		1u,														// deUint32				viewportAndScissorCount;
+		&viewport0,												// const VkViewport*	pViewports;
+		&scissor0,												// const VkRect*		pScissors;
+	};
+	const Unique<VkDynamicViewportState>	dynViewportState		(createDynamicViewportState(vk, vkDevice, &dynViewportStateParams));
+
+	const VkDynamicRasterStateCreateInfo	dynRasterStateParams	=
+	{
+		VK_STRUCTURE_TYPE_DYNAMIC_RASTER_STATE_CREATE_INFO,	// VkStructureType	sType;
+		DE_NULL,											// const void*		pNext;
+		0.0f,												// float			depthBias;
+		0.0f,												// float			depthBiasClamp;
+		0.0f,												// float			slopeScaledDepthBias;
+		1.0f,												// float			lineWidth;
+	};
+	const Unique<VkDynamicRasterState>		dynRasterState			(createDynamicRasterState(vk, vkDevice, &dynRasterStateParams));
+
+	const VkDynamicDepthStencilStateCreateInfo	dynDepthStencilParams	=
+	{
+		VK_STRUCTURE_TYPE_DYNAMIC_DEPTH_STENCIL_STATE_CREATE_INFO,	// VkStructureType	sType;
+		DE_NULL,													// const void*		pNext;
+		0.0f,														// float			minDepthBounds;
+		1.0f,														// float			maxDepthBounds;
+		0u,															// deUint32			stencilReadMask;
+		0u,															// deUint32			stencilWriteMask;
+		0u,															// deUint32			stencilFrontRef;
+		0u,															// deUint32			stencilBackRef;
+	};
+	const Unique<VkDynamicDepthStencilState>	dynDepthStencilState	(createDynamicDepthStencilState(vk, vkDevice, &dynDepthStencilParams));
+
+	const VkCmdPoolCreateInfo				cmdPoolParams			=
+	{
+		VK_STRUCTURE_TYPE_CMD_POOL_CREATE_INFO,			// VkStructureType		sType;
+		DE_NULL,										// const void*			pNext;
+		queueFamilyIndex,								// deUint32				queueFamilyIndex;
+		VK_CMD_POOL_CREATE_RESET_COMMAND_BUFFER_BIT		// VkCmdPoolCreateFlags	flags;
+	};
+	const Unique<VkCmdPool>					cmdPool					(createCommandPool(vk, vkDevice, &cmdPoolParams));
+
+	// Command buffer
+	const VkCmdBufferCreateInfo				cmdBufParams			=
+	{
+		VK_STRUCTURE_TYPE_CMD_BUFFER_CREATE_INFO,	// VkStructureType			sType;
+		DE_NULL,									// const void*				pNext;
+		*cmdPool,									// VkCmdPool				pool;
+		VK_CMD_BUFFER_LEVEL_PRIMARY,				// VkCmdBufferLevel			level;
+		0u,											// VkCmdBufferCreateFlags	flags;
+	};
+	const Unique<VkCmdBuffer>				cmdBuf					(createCommandBuffer(vk, vkDevice, &cmdBufParams));
+
+	const VkCmdBufferBeginInfo				cmdBufBeginParams		=
+	{
+		VK_STRUCTURE_TYPE_CMD_BUFFER_BEGIN_INFO,	// VkStructureType			sType;
+		DE_NULL,									// const void*				pNext;
+		0u,											// VkCmdBufferOptimizeFlags	flags;
+		DE_NULL,									// VkRenderPass				renderPass;
+		DE_NULL,									// VkFramebuffer			framebuffer;
+	};
+
+	// Record commands
+	VK_CHECK(vk.beginCommandBuffer(*cmdBuf, &cmdBufBeginParams));
+
+	{
+		// Make host-written vertex data visible to vertex fetch and move the
+		// color image from UNDEFINED to COLOR_ATTACHMENT_OPTIMAL.
+		const VkMemoryBarrier		vertFlushBarrier	=
+		{
+			VK_STRUCTURE_TYPE_MEMORY_BARRIER,			// VkStructureType		sType;
+			DE_NULL,									// const void*			pNext;
+			VK_MEMORY_OUTPUT_HOST_WRITE_BIT,			// VkMemoryOutputFlags	outputMask;
+			VK_MEMORY_INPUT_VERTEX_ATTRIBUTE_FETCH_BIT,	// VkMemoryInputFlags	inputMask;
+		};
+		const VkImageMemoryBarrier	colorAttBarrier		=
+		{
+			VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,		// VkStructureType			sType;
+			DE_NULL,									// const void*				pNext;
+			0u,											// VkMemoryOutputFlags		outputMask;
+			VK_MEMORY_INPUT_COLOR_ATTACHMENT_BIT,		// VkMemoryInputFlags		inputMask;
+			VK_IMAGE_LAYOUT_UNDEFINED,					// VkImageLayout			oldLayout;
+			VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL,	// VkImageLayout			newLayout;
+			queueFamilyIndex,							// deUint32					srcQueueFamilyIndex;
+			queueFamilyIndex,							// deUint32					destQueueFamilyIndex;
+			*image,										// VkImage					image;
+			{
+				VK_IMAGE_ASPECT_COLOR,						// VkImageAspect	aspect;
+				0u,											// deUint32			baseMipLevel;
+				1u,											// deUint32			mipLevels;
+				0u,											// deUint32			baseArraySlice;
+				1u,											// deUint32			arraySize;
+			}											// VkImageSubresourceRange	subresourceRange;
+		};
+		const void*				barriers[]				= { &vertFlushBarrier, &colorAttBarrier };
+		vk.cmdPipelineBarrier(*cmdBuf, VK_PIPELINE_STAGE_HOST_BIT, VK_PIPELINE_STAGE_ALL_GPU_COMMANDS, DE_FALSE, (deUint32)DE_LENGTH_OF_ARRAY(barriers), barriers);
+	}
+
+	{
+		const VkClearValue			clearValue		= clearValueColorF32(0.125f, 0.25f, 0.75f, 1.0f);
+		const VkRenderPassBeginInfo	passBeginParams	=
+		{
+			VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO,			// VkStructureType		sType;
+			DE_NULL,											// const void*			pNext;
+			*renderPass,										// VkRenderPass			renderPass;
+			*framebuffer,										// VkFramebuffer		framebuffer;
+			{ { 0, 0 }, { renderSize.x(), renderSize.y() } },	// VkRect2D				renderArea;
+			1u,													// deUint32				attachmentCount;
+			&clearValue,										// const VkClearValue*	pAttachmentClearValues;
+		};
+		vk.cmdBeginRenderPass(*cmdBuf, &passBeginParams, VK_RENDER_PASS_CONTENTS_INLINE);
+	}
+
+	vk.cmdBindDynamicViewportState(*cmdBuf, *dynViewportState);
+	vk.cmdBindDynamicRasterState(*cmdBuf, *dynRasterState);
+	vk.cmdBindDynamicDepthStencilState(*cmdBuf, *dynDepthStencilState);
+	vk.cmdBindPipeline(*cmdBuf, VK_PIPELINE_BIND_POINT_GRAPHICS, *pipeline);
+	{
+		const VkDeviceSize bindingOffset = 0;
+		vk.cmdBindVertexBuffers(*cmdBuf, 0u, 1u, &vertexBuffer.get(), &bindingOffset);
+	}
+	vk.cmdDraw(*cmdBuf, 0u, 3u, 0u, 1u);
+	vk.cmdEndRenderPass(*cmdBuf);
+
+	{
+		// Transition the rendered image to a transfer-source layout before
+		// copying it into the readback buffer.
+		const VkImageMemoryBarrier	renderFinishBarrier	=
+		{
+			VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,		// VkStructureType			sType;
+			DE_NULL,									// const void*				pNext;
+			VK_MEMORY_OUTPUT_COLOR_ATTACHMENT_BIT,		// VkMemoryOutputFlags		outputMask;
+			VK_MEMORY_INPUT_TRANSFER_BIT,				// VkMemoryInputFlags		inputMask;
+			VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL,	// VkImageLayout			oldLayout;
+			VK_IMAGE_LAYOUT_TRANSFER_SOURCE_OPTIMAL,	// VkImageLayout			newLayout;
+			queueFamilyIndex,							// deUint32					srcQueueFamilyIndex;
+			queueFamilyIndex,							// deUint32					destQueueFamilyIndex;
+			*image,										// VkImage					image;
+			{
+				VK_IMAGE_ASPECT_COLOR,						// VkImageAspect	aspect;
+				0u,											// deUint32			baseMipLevel;
+				1u,											// deUint32			mipLevels;
+				0u,											// deUint32			baseArraySlice;
+				1u,											// deUint32			arraySize;
+			}											// VkImageSubresourceRange	subresourceRange;
+		};
+		const void*				barriers[]				= { &renderFinishBarrier };
+		vk.cmdPipelineBarrier(*cmdBuf, VK_PIPELINE_STAGE_ALL_GRAPHICS, VK_PIPELINE_STAGE_TRANSFER_BIT, DE_FALSE, (deUint32)DE_LENGTH_OF_ARRAY(barriers), barriers);
+	}
+
+	{
+		// Copy the whole color image into the readback buffer.
+		const VkBufferImageCopy	copyParams	=
+		{
+			(VkDeviceSize)0u,						// VkDeviceSize		bufferOffset;
+			(deUint32)renderSize.x(),				// deUint32			bufferRowLength;
+			(deUint32)renderSize.y(),				// deUint32			bufferImageHeight;
+			{
+				VK_IMAGE_ASPECT_COLOR,					// VkImageAspect	aspect;
+				0u,										// deUint32			mipLevel;
+				0u,										// deUint32			arraySlice;
+			},										// VkImageSubresource	imageSubresource;
+			{ 0u, 0u, 0u },							// VkOffset3D		imageOffset;
+			{ renderSize.x(), renderSize.y(), 1u }	// VkExtent3D		imageExtent;
+		};
+		// FIX(review): original patch had "©Params" — mojibake of "&copyParams"
+		// ("&copy" was decoded as an HTML entity somewhere in transit).
+		vk.cmdCopyImageToBuffer(*cmdBuf, *image, VK_IMAGE_LAYOUT_TRANSFER_SOURCE_OPTIMAL, *readImageBuffer, 1u, &copyParams);
+	}
+
+	{
+		// Make the transfer result visible to host reads.
+		const VkBufferMemoryBarrier	copyFinishBarrier	=
+		{
+			VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER,	// VkStructureType		sType;
+			DE_NULL,									// const void*			pNext;
+			VK_MEMORY_OUTPUT_TRANSFER_BIT,				// VkMemoryOutputFlags	outputMask;
+			VK_MEMORY_INPUT_HOST_READ_BIT,				// VkMemoryInputFlags	inputMask;
+			queueFamilyIndex,							// deUint32				srcQueueFamilyIndex;
+			queueFamilyIndex,							// deUint32				destQueueFamilyIndex;
+			*readImageBuffer,							// VkBuffer				buffer;
+			0u,											// VkDeviceSize			offset;
+			imageSizeBytes								// VkDeviceSize			size;
+		};
+		// FIX(review): original patch had "©FinishBarrier" — mojibake of
+		// "&copyFinishBarrier" (same HTML-entity corruption as above).
+		const void*				barriers[]				= { &copyFinishBarrier };
+		vk.cmdPipelineBarrier(*cmdBuf, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_HOST_BIT, DE_FALSE, (deUint32)DE_LENGTH_OF_ARRAY(barriers), barriers);
+	}
+
+	VK_CHECK(vk.endCommandBuffer(*cmdBuf));
+
+	// Upload vertex data
+	{
+		const VkMappedMemoryRange	range			=
+		{
+			VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE,	// VkStructureType	sType;
+			DE_NULL,								// const void*		pNext;
+			vertexBufferMemory->getMemory(),		// VkDeviceMemory	mem;
+			0,										// VkDeviceSize		offset;
+			(VkDeviceSize)sizeof(vertices),			// VkDeviceSize		size;
+		};
+		void*						vertexBufPtr	= vertexBufferMemory->getHostPtr();
+
+		deMemcpy(vertexBufPtr, &vertices[0], sizeof(vertices));
+		VK_CHECK(vk.flushMappedMemoryRanges(vkDevice, 1u, &range));
+	}
+
+	// Submit & wait for completion
+	{
+		const VkFenceCreateInfo	fenceParams	=
+		{
+			VK_STRUCTURE_TYPE_FENCE_CREATE_INFO,	// VkStructureType		sType;
+			DE_NULL,								// const void*			pNext;
+			0u,										// VkFenceCreateFlags	flags;
+		};
+		const Unique<VkFence>	fence		(createFence(vk, vkDevice, &fenceParams));
+
+		VK_CHECK(vk.queueSubmit(queue, 1u, &cmdBuf.get(), *fence));
+		VK_CHECK(vk.waitForFences(vkDevice, 1u, &fence.get(), DE_TRUE, ~0ull));
+	}
+
+	// Log image
+	{
+		const VkMappedMemoryRange	range		=
+		{
+			VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE,	// VkStructureType	sType;
+			DE_NULL,								// const void*		pNext;
+			readImageBufferMemory->getMemory(),		// VkDeviceMemory	mem;
+			0,										// VkDeviceSize		offset;
+			imageSizeBytes,							// VkDeviceSize		size;
+		};
+		void*						imagePtr	= readImageBufferMemory->getHostPtr();
+
+		VK_CHECK(vk.invalidateMappedMemoryRanges(vkDevice, 1u, &range));
+		context.getTestContext().getLog() << TestLog::Image("Result", "Result", tcu::ConstPixelBufferAccess(tcu::TextureFormat(tcu::TextureFormat::RGBA, tcu::TextureFormat::UNORM_INT8), renderSize.x(), renderSize.y(), 1, imagePtr));
+	}
+
+	return tcu::TestStatus::pass("Rendering succeeded");
+}
+
+} // anonymous
+
+tcu::TestCaseGroup* createSmokeTests (tcu::TestContext& testCtx)
+{
+	// Basic object-creation and rendering smoke tests, grouped under "smoke".
+	de::MovePtr<tcu::TestCaseGroup>	group	(new tcu::TestCaseGroup(testCtx, "smoke", "Smoke Tests"));
+
+	addFunctionCase				(group.get(), "create_sampler",	"",	createSamplerTest);
+	addFunctionCaseWithPrograms	(group.get(), "create_shader",	"",	createShaderProgs,		createShaderModuleTest);
+	addFunctionCaseWithPrograms	(group.get(), "triangle",		"",	createTriangleProgs,	renderTriangleTest);
+	addFunctionCaseWithPrograms	(group.get(), "asm_triangle",	"",	createTriangleAsmProgs,	renderTriangleTest);
+
+	return group.release();
+}
+
+} // api
+} // vkt
--- /dev/null
+#ifndef _VKTAPISMOKETESTS_HPP
+#define _VKTAPISMOKETESTS_HPP
+/*-------------------------------------------------------------------------
+ * Vulkan Conformance Tests
+ * ------------------------
+ *
+ * Copyright (c) 2015 Google Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and/or associated documentation files (the
+ * "Materials"), to deal in the Materials without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Materials, and to
+ * permit persons to whom the Materials are furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice(s) and this permission notice shall be
+ * included in all copies or substantial portions of the Materials.
+ *
+ * The Materials are Confidential Information as defined by the
+ * Khronos Membership Agreement until designated non-confidential by
+ * Khronos, at which point this condition clause shall be removed.
+ *
+ * THE MATERIALS ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * MATERIALS OR THE USE OR OTHER DEALINGS IN THE MATERIALS.
+ *
+ *//*!
+ * \file
+ * \brief Simple Smoke Tests
+ *//*--------------------------------------------------------------------*/
+
+#include "tcuDefs.hpp"
+#include "tcuTestCase.hpp"
+
+namespace vkt
+{
+namespace api
+{
+
+tcu::TestCaseGroup* createSmokeTests (tcu::TestContext& testCtx);
+
+} // api
+} // vkt
+
+#endif // _VKTAPISMOKETESTS_HPP
--- /dev/null
+/*-------------------------------------------------------------------------
+ * Vulkan Conformance Tests
+ * ------------------------
+ *
+ * Copyright (c) 2015 Google Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and/or associated documentation files (the
+ * "Materials"), to deal in the Materials without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Materials, and to
+ * permit persons to whom the Materials are furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice(s) and this permission notice shall be
+ * included in all copies or substantial portions of the Materials.
+ *
+ * The Materials are Confidential Information as defined by the
+ * Khronos Membership Agreement until designated non-confidential by
+ * Khronos, at which point this condition clause shall be removed.
+ *
+ * THE MATERIALS ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * MATERIALS OR THE USE OR OTHER DEALINGS IN THE MATERIALS.
+ *
+ *//*!
+ * \file
+ * \brief API Tests
+ *//*--------------------------------------------------------------------*/
+
+#include "vktApiTests.hpp"
+
+#include "deUniquePtr.hpp"
+
+#include "vktApiSmokeTests.hpp"
+#include "vktApiDeviceInitializationTests.hpp"
+
+namespace vkt
+{
+namespace api
+{
+
+// Build the top-level "api" test group, aggregating the smoke tests and
+// the device initialization tests. Caller takes ownership of the group.
+tcu::TestCaseGroup* createTests (tcu::TestContext& testCtx)
+{
+ de::MovePtr<tcu::TestCaseGroup> apiTests (new tcu::TestCaseGroup(testCtx, "api", "API Tests"));
+
+ apiTests->addChild(createSmokeTests (testCtx));
+ apiTests->addChild(createDeviceInitializationTests (testCtx));
+
+ return apiTests.release();
+}
+
+} // api
+} // vkt
--- /dev/null
+#ifndef _VKTAPITESTS_HPP
+#define _VKTAPITESTS_HPP
+/*-------------------------------------------------------------------------
+ * Vulkan Conformance Tests
+ * ------------------------
+ *
+ * Copyright (c) 2015 Google Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and/or associated documentation files (the
+ * "Materials"), to deal in the Materials without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Materials, and to
+ * permit persons to whom the Materials are furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice(s) and this permission notice shall be
+ * included in all copies or substantial portions of the Materials.
+ *
+ * The Materials are Confidential Information as defined by the
+ * Khronos Membership Agreement until designated non-confidential by
+ * Khronos, at which point this condition clause shall be removed.
+ *
+ * THE MATERIALS ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * MATERIALS OR THE USE OR OTHER DEALINGS IN THE MATERIALS.
+ *
+ *//*!
+ * \file
+ * \brief API tests
+ *//*--------------------------------------------------------------------*/
+
+#include "tcuDefs.hpp"
+#include "tcuTestCase.hpp"
+
+namespace vkt
+{
+namespace api
+{
+
+tcu::TestCaseGroup* createTests (tcu::TestContext& testCtx);
+
+} // api
+} // vkt
+
+#endif // _VKTAPITESTS_HPP
--- /dev/null
+# dEQP-VK.binding_model
+#
+# Builds the static library containing the resource binding model tests.
+# Follows the legacy dEQP CMake conventions (directory-scoped
+# include_directories, source lists in variables) used across this project.
+
+include_directories(..)
+
+# Test group entry point and shader resource access tests.
+set(DEQP_VK_BINDING_MODEL_SRCS
+ vktBindingModelTests.cpp
+ vktBindingModelTests.hpp
+ vktBindingShaderAccessTests.cpp
+ vktBindingShaderAccessTests.hpp
+ )
+
+# tcutil = test framework utilities, vkutil = Vulkan framework helpers.
+set(DEQP_VK_BINDING_MODEL_LIBS
+ tcutil
+ vkutil
+ )
+
+add_library(deqp-vk-binding-model STATIC ${DEQP_VK_BINDING_MODEL_SRCS})
+target_link_libraries(deqp-vk-binding-model ${DEQP_VK_BINDING_MODEL_LIBS})
--- /dev/null
+/*-------------------------------------------------------------------------
+ * Vulkan Conformance Tests
+ * ------------------------
+ *
+ * Copyright (c) 2015 Google Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and/or associated documentation files (the
+ * "Materials"), to deal in the Materials without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Materials, and to
+ * permit persons to whom the Materials are furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice(s) and this permission notice shall be
+ * included in all copies or substantial portions of the Materials.
+ *
+ * The Materials are Confidential Information as defined by the
+ * Khronos Membership Agreement until designated non-confidential by
+ * Khronos, at which point this condition clause shall be removed.
+ *
+ * THE MATERIALS ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * MATERIALS OR THE USE OR OTHER DEALINGS IN THE MATERIALS.
+ *
+ *//*!
+ * \file
+ * \brief Binding Model tests
+ *//*--------------------------------------------------------------------*/
+
+#include "vktBindingModelTests.hpp"
+
+#include "vktBindingShaderAccessTests.hpp"
+
+#include "deUniquePtr.hpp"
+
+namespace vkt
+{
+namespace BindingModel
+{
+
+// Build the "binding_model" test group. Currently contains only the shader
+// resource access tests; further sub-groups are tracked in the \todo notes.
+// Caller takes ownership of the returned group.
+tcu::TestCaseGroup* createTests (tcu::TestContext& testCtx)
+{
+ de::MovePtr<tcu::TestCaseGroup> group(new tcu::TestCaseGroup(testCtx, "binding_model", "Resource binding tests"));
+
+ group->addChild(createShaderAccessTests(testCtx));
+
+ // \todo [2015-07-30 jarkko] .change_binding.{between_renderpasses, within_pass}
+ // \todo [2015-07-30 jarkko] .descriptor_set_chain
+ // \todo [2015-07-30 jarkko] .update_descriptor_set
+
+ return group.release();
+}
+
+} // BindingModel
+} // vkt
--- /dev/null
+#ifndef _VKTBINDINGMODELTESTS_HPP
+#define _VKTBINDINGMODELTESTS_HPP
+/*-------------------------------------------------------------------------
+ * Vulkan Conformance Tests
+ * ------------------------
+ *
+ * Copyright (c) 2015 Google Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and/or associated documentation files (the
+ * "Materials"), to deal in the Materials without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Materials, and to
+ * permit persons to whom the Materials are furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice(s) and this permission notice shall be
+ * included in all copies or substantial portions of the Materials.
+ *
+ * The Materials are Confidential Information as defined by the
+ * Khronos Membership Agreement until designated non-confidential by
+ * Khronos, at which point this condition clause shall be removed.
+ *
+ * THE MATERIALS ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * MATERIALS OR THE USE OR OTHER DEALINGS IN THE MATERIALS.
+ *
+ *//*!
+ * \file
+ * \brief Binding Model tests
+ *//*--------------------------------------------------------------------*/
+
+#include "tcuDefs.hpp"
+#include "tcuTestCase.hpp"
+
+namespace vkt
+{
+namespace BindingModel
+{
+
+tcu::TestCaseGroup* createTests (tcu::TestContext& testCtx);
+
+} // BindingModel
+} // vkt
+
+#endif // _VKTBINDINGMODELTESTS_HPP
--- /dev/null
+/*-------------------------------------------------------------------------
+ * Vulkan Conformance Tests
+ * ------------------------
+ *
+ * Copyright (c) 2015 Google Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and/or associated documentation files (the
+ * "Materials"), to deal in the Materials without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Materials, and to
+ * permit persons to whom the Materials are furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice(s) and this permission notice shall be
+ * included in all copies or substantial portions of the Materials.
+ *
+ * The Materials are Confidential Information as defined by the
+ * Khronos Membership Agreement until designated non-confidential by
+ * Khronos, at which point this condition clause shall be removed.
+ *
+ * THE MATERIALS ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * MATERIALS OR THE USE OR OTHER DEALINGS IN THE MATERIALS.
+ *
+ *//*!
+ * \file
+ * \brief Binding shader access tests
+ *//*--------------------------------------------------------------------*/
+
+#include "vktBindingShaderAccessTests.hpp"
+
+#include "vktTestCase.hpp"
+
+#include "vkDefs.hpp"
+#include "vkRef.hpp"
+#include "vkRefUtil.hpp"
+#include "vkPlatform.hpp"
+#include "vkPrograms.hpp"
+#include "vkMemUtil.hpp"
+#include "vkBuilderUtil.hpp"
+#include "vkQueryUtil.hpp"
+
+#include "tcuVector.hpp"
+#include "tcuVectorUtil.hpp"
+#include "tcuTexture.hpp"
+#include "tcuTextureUtil.hpp"
+#include "tcuResultCollector.hpp"
+#include "tcuTestLog.hpp"
+#include "tcuRGBA.hpp"
+#include "tcuSurface.hpp"
+#include "tcuImageCompare.hpp"
+
+#include "deUniquePtr.hpp"
+#include "deSharedPtr.hpp"
+#include "deStringUtil.hpp"
+#include "deArrayUtil.hpp"
+
+#include "qpInfo.h"
+
+namespace vkt
+{
+namespace BindingModel
+{
+namespace
+{
+
+// Bitfield flags that select optional resource properties in the tests below.
+enum ResourceFlag
+{
+ RESOURCE_FLAG_IMMUTABLE_SAMPLER = (1u << 0u),
+
+ RESOURCE_FLAG_LAST = (1u << 1u)
+};
+
+// GLSL snippet shared by the vertex shaders: derives vertex positions for a
+// full-viewport 2x2 grid of quads from gl_VertexID alone (6 vertices, i.e.
+// two triangles, per quad) and records which quadrant the vertex belongs to.
+// NOTE(review): assumes quadrant_id and result_position are declared by the
+// shader that includes this snippet — confirm against the shader generators.
+static const char* const s_quadrantGenVertexPosSource = " highp int quadPhase = gl_VertexID % 6;\n"
+ " highp int quadXcoord = int(quadPhase == 1 || quadPhase == 4 || quadPhase == 5);\n"
+ " highp int quadYcoord = int(quadPhase == 2 || quadPhase == 3 || quadPhase == 5);\n"
+ " highp int quadOriginX = (gl_VertexID / 6) % 2;\n"
+ " highp int quadOriginY = (gl_VertexID / 6) / 2;\n"
+ " quadrant_id = gl_VertexID / 6;\n"
+ " result_position = vec4(float(quadOriginX + quadXcoord - 1), float(quadOriginY + quadYcoord - 1), 0.0, 1.0);\n";
+
+// True for descriptor types backed by uniform (read-only) buffer resources:
+// plain, dynamic-offset, and texel uniform buffers.
+bool isUniformDescriptorType (vk::VkDescriptorType type)
+{
+ return type == vk::VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER ||
+ type == vk::VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC ||
+ type == vk::VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER;
+}
+
+// True for the dynamic-offset buffer descriptor types (uniform or storage).
+bool isDynamicDescriptorType (vk::VkDescriptorType type)
+{
+ return type == vk::VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC || type == vk::VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC;
+}
+
+// Map a tcu::TextureFormat to the corresponding VkFormat. Only RGBA8 UNORM
+// is supported; any other format aborts via DE_FATAL.
+vk::VkFormat mapToVkTextureFormat (const tcu::TextureFormat& format)
+{
+ if (format == tcu::TextureFormat(tcu::TextureFormat::RGBA, tcu::TextureFormat::UNORM_INT8))
+ return vk::VK_FORMAT_R8G8B8A8_UNORM;
+
+ DE_FATAL("Not implemented");
+ return vk::VK_FORMAT_UNDEFINED;
+}
+
+// Select the VkImageType needed to back a given image view type.
+// Array views map to their base dimensionality; cube (and cube array) views
+// are backed by 2D images.
+vk::VkImageType viewTypeToImageType (vk::VkImageViewType type)
+{
+ switch (type)
+ {
+ case vk::VK_IMAGE_VIEW_TYPE_1D:
+ case vk::VK_IMAGE_VIEW_TYPE_1D_ARRAY: return vk::VK_IMAGE_TYPE_1D;
+ case vk::VK_IMAGE_VIEW_TYPE_2D:
+ case vk::VK_IMAGE_VIEW_TYPE_2D_ARRAY: return vk::VK_IMAGE_TYPE_2D;
+ case vk::VK_IMAGE_VIEW_TYPE_3D: return vk::VK_IMAGE_TYPE_3D;
+ case vk::VK_IMAGE_VIEW_TYPE_CUBE:
+ case vk::VK_IMAGE_VIEW_TYPE_CUBE_ARRAY: return vk::VK_IMAGE_TYPE_2D;
+
+ default:
+ DE_FATAL("Impossible");
+ return (vk::VkImageType)0;
+ }
+}
+
+// Map a tcu magnification filter to VkTexFilter. Only NEAREST/LINEAR are
+// valid for magnification; anything else aborts.
+vk::VkTexFilter mapMagFilterToVkTexFilter (tcu::Sampler::FilterMode mode)
+{
+ switch (mode)
+ {
+ case tcu::Sampler::NEAREST: return vk::VK_TEX_FILTER_NEAREST;
+ case tcu::Sampler::LINEAR: return vk::VK_TEX_FILTER_LINEAR;
+
+ default:
+ DE_FATAL("Illegal filter mode");
+ return (vk::VkTexFilter)0;
+ }
+}
+
+// Map a tcu minification filter to VkTexFilter, keeping only the base
+// (intra-level) filter; the mipmap component is handled separately by
+// mapMinFilterToVkTexMipmapMode below.
+vk::VkTexFilter mapMinFilterToVkTexFilter (tcu::Sampler::FilterMode mode)
+{
+ switch (mode)
+ {
+ case tcu::Sampler::NEAREST: return vk::VK_TEX_FILTER_NEAREST;
+ case tcu::Sampler::LINEAR: return vk::VK_TEX_FILTER_LINEAR;
+ case tcu::Sampler::NEAREST_MIPMAP_NEAREST: return vk::VK_TEX_FILTER_NEAREST;
+ case tcu::Sampler::LINEAR_MIPMAP_NEAREST: return vk::VK_TEX_FILTER_LINEAR;
+ case tcu::Sampler::NEAREST_MIPMAP_LINEAR: return vk::VK_TEX_FILTER_NEAREST;
+ case tcu::Sampler::LINEAR_MIPMAP_LINEAR: return vk::VK_TEX_FILTER_LINEAR;
+
+ default:
+ DE_FATAL("Illegal filter mode");
+ return (vk::VkTexFilter)0;
+ }
+}
+
+// Map a tcu minification filter to VkTexMipmapMode, keeping only the
+// between-levels component; non-mipmapped modes map to BASE.
+vk::VkTexMipmapMode mapMinFilterToVkTexMipmapMode (tcu::Sampler::FilterMode mode)
+{
+ switch (mode)
+ {
+ case tcu::Sampler::NEAREST: return vk::VK_TEX_MIPMAP_MODE_BASE;
+ case tcu::Sampler::LINEAR: return vk::VK_TEX_MIPMAP_MODE_BASE;
+ case tcu::Sampler::NEAREST_MIPMAP_NEAREST: return vk::VK_TEX_MIPMAP_MODE_NEAREST;
+ case tcu::Sampler::LINEAR_MIPMAP_NEAREST: return vk::VK_TEX_MIPMAP_MODE_NEAREST;
+ case tcu::Sampler::NEAREST_MIPMAP_LINEAR: return vk::VK_TEX_MIPMAP_MODE_LINEAR;
+ case tcu::Sampler::LINEAR_MIPMAP_LINEAR: return vk::VK_TEX_MIPMAP_MODE_LINEAR;
+
+ default:
+ DE_FATAL("Illegal filter mode");
+ return (vk::VkTexMipmapMode)0;
+ }
+}
+
+// Map a tcu wrap (addressing) mode to VkTexAddress; unsupported modes abort.
+vk::VkTexAddress mapToVkTexAddress (tcu::Sampler::WrapMode mode)
+{
+ switch (mode)
+ {
+ case tcu::Sampler::CLAMP_TO_EDGE: return vk::VK_TEX_ADDRESS_CLAMP;
+ case tcu::Sampler::CLAMP_TO_BORDER: return vk::VK_TEX_ADDRESS_CLAMP_BORDER;
+ case tcu::Sampler::REPEAT_GL: return vk::VK_TEX_ADDRESS_WRAP;
+ case tcu::Sampler::MIRRORED_REPEAT_GL: return vk::VK_TEX_ADDRESS_MIRROR;
+
+ default:
+ DE_FATAL("Illegal wrap mode");
+ return (vk::VkTexAddress)0;
+ }
+}
+
+// Map a tcu shadow-compare mode to the equivalent VkCompareOp.
+vk::VkCompareOp mapToVkCompareOp (tcu::Sampler::CompareMode mode)
+{
+ switch (mode)
+ {
+ case tcu::Sampler::COMPAREMODE_LESS: return vk::VK_COMPARE_OP_LESS;
+ case tcu::Sampler::COMPAREMODE_LESS_OR_EQUAL: return vk::VK_COMPARE_OP_LESS_EQUAL;
+ case tcu::Sampler::COMPAREMODE_GREATER: return vk::VK_COMPARE_OP_GREATER;
+ case tcu::Sampler::COMPAREMODE_GREATER_OR_EQUAL: return vk::VK_COMPARE_OP_GREATER_EQUAL;
+ case tcu::Sampler::COMPAREMODE_EQUAL: return vk::VK_COMPARE_OP_EQUAL;
+ case tcu::Sampler::COMPAREMODE_NOT_EQUAL: return vk::VK_COMPARE_OP_NOT_EQUAL;
+ case tcu::Sampler::COMPAREMODE_ALWAYS: return vk::VK_COMPARE_OP_ALWAYS;
+ case tcu::Sampler::COMPAREMODE_NEVER: return vk::VK_COMPARE_OP_NEVER;
+
+ default:
+ DE_FATAL("Illegal compare mode");
+ return (vk::VkCompareOp)0;
+ }
+}
+
+// Total byte size needed to store every level of the pyramid tightly packed
+// (width * height * depth * pixelSize summed over levels). Asserts that each
+// level really is tightly packed (pixel pitch equals pixel size).
+deUint32 getTextureLevelPyramidDataSize (const tcu::TextureLevelPyramid& srcImage)
+{
+ deUint32 dataSize = 0;
+ for (int level = 0; level < srcImage.getNumLevels(); ++level)
+ {
+ const tcu::ConstPixelBufferAccess srcAccess = srcImage.getLevel(level);
+
+ // tightly packed
+ DE_ASSERT(srcAccess.getFormat().getPixelSize() == srcAccess.getPixelPitch());
+
+ dataSize += srcAccess.getWidth() * srcAccess.getHeight() * srcAccess.getDepth() * srcAccess.getFormat().getPixelSize();
+ }
+ return dataSize;
+}
+
+// Copy all levels of srcImage tightly packed into dst (of size dstLen, which
+// must equal getTextureLevelPyramidDataSize(srcImage)), and append one
+// VkBufferImageCopy per array slice per level into *copySlices.
+// The "array" dimension is derived from viewType: 1D(-array) views use level-0
+// height, 2D(-array) and cube(-array) views use level-0 depth, and 3D views
+// are treated as a single slice. Cube faces are thus copied face-by-face.
+void writeTextureLevelPyramidData (void* dst, deUint32 dstLen, const tcu::TextureLevelPyramid& srcImage, vk::VkImageViewType viewType, std::vector<vk::VkBufferImageCopy>* copySlices)
+{
+ // \note cube is copied face-by-face
+ const deUint32 arraySize = (viewType == vk::VK_IMAGE_VIEW_TYPE_1D || viewType == vk::VK_IMAGE_VIEW_TYPE_1D_ARRAY) ? (srcImage.getLevel(0).getHeight()) :
+ (viewType == vk::VK_IMAGE_VIEW_TYPE_2D || viewType == vk::VK_IMAGE_VIEW_TYPE_2D_ARRAY) ? (srcImage.getLevel(0).getDepth()) :
+ (viewType == vk::VK_IMAGE_VIEW_TYPE_3D) ? (1) :
+ (viewType == vk::VK_IMAGE_VIEW_TYPE_CUBE || viewType == vk::VK_IMAGE_VIEW_TYPE_CUBE_ARRAY) ? (srcImage.getLevel(0).getDepth()) :
+ ((deUint32)0);
+ deUint32 levelOffset = 0;
+
+ DE_ASSERT(arraySize != 0);
+
+ for (int level = 0; level < srcImage.getNumLevels(); ++level)
+ {
+ const tcu::ConstPixelBufferAccess srcAccess = srcImage.getLevel(level);
+ const tcu::PixelBufferAccess dstAccess (srcAccess.getFormat(), srcAccess.getSize(), srcAccess.getPitch(), (deUint8*)dst + levelOffset);
+ const deUint32 dataSize = srcAccess.getWidth() * srcAccess.getHeight() * srcAccess.getDepth() * srcAccess.getFormat().getPixelSize();
+ const deUint32 sliceDataSize = dataSize / arraySize;
+ const deInt32 sliceHeight = (viewType == vk::VK_IMAGE_VIEW_TYPE_1D || viewType == vk::VK_IMAGE_VIEW_TYPE_1D_ARRAY) ? (1) : (srcAccess.getHeight());
+ const deInt32 sliceDepth = (viewType == vk::VK_IMAGE_VIEW_TYPE_3D) ? (srcAccess.getDepth()) : (1);
+ const tcu::IVec3 sliceSize (srcAccess.getWidth(), sliceHeight, sliceDepth);
+
+ // tightly packed
+ DE_ASSERT(srcAccess.getFormat().getPixelSize() == srcAccess.getPixelPitch());
+
+ // One buffer-to-image copy region per slice; bufferOffset advances by
+ // sliceDataSize within the level, levelOffset across levels.
+ for (int sliceNdx = 0; sliceNdx < (int)arraySize; ++sliceNdx)
+ {
+ const vk::VkBufferImageCopy copySlice =
+ {
+ (vk::VkDeviceSize)levelOffset + sliceNdx * sliceDataSize, // bufferOffset
+ (deUint32)sliceSize.x(), // bufferRowLength
+ (deUint32)sliceSize.y(), // bufferImageHeight
+ {
+ vk::VK_IMAGE_ASPECT_COLOR, // aspect
+ (deUint32)level, // mipLevel
+ (deUint32)sliceNdx // arraySlice
+ }, // imageSubresource
+ {
+ 0,
+ 0,
+ 0,
+ }, // imageOffset
+ {
+ sliceSize.x(),
+ sliceSize.y(),
+ sliceSize.z(),
+ } // imageExtent
+ };
+ copySlices->push_back(copySlice);
+ }
+
+ DE_ASSERT(arraySize * sliceDataSize == dataSize);
+
+ tcu::copy(dstAccess, srcAccess);
+ levelOffset += dataSize;
+ }
+
+ DE_ASSERT(dstLen == levelOffset);
+ DE_UNREF(dstLen);
+}
+
+// Allocate memory satisfying the buffer's requirements (plus the caller's
+// extra requirement, e.g. host-visibility) and bind it. Returned Allocation
+// owns the memory; caller must keep it alive as long as the buffer is bound.
+de::MovePtr<vk::Allocation> allocateAndBindObjectMemory (const vk::DeviceInterface& vki, vk::VkDevice device, vk::Allocator& allocator, vk::VkBuffer buffer, vk::MemoryRequirement requirement)
+{
+ const vk::VkMemoryRequirements requirements = vk::getBufferMemoryRequirements(vki, device, buffer);
+ de::MovePtr<vk::Allocation> allocation = allocator.allocate(requirements, requirement);
+
+ VK_CHECK(vki.bindBufferMemory(device, buffer, allocation->getMemory(), allocation->getOffset()));
+ return allocation;
+}
+
+// Image variant of the above: allocate per the image's memory requirements
+// and bind. The returned Allocation owns the backing memory.
+de::MovePtr<vk::Allocation> allocateAndBindObjectMemory (const vk::DeviceInterface& vki, vk::VkDevice device, vk::Allocator& allocator, vk::VkImage image, vk::MemoryRequirement requirement)
+{
+ const vk::VkMemoryRequirements requirements = vk::getImageMemoryRequirements(vki, device, image);
+ de::MovePtr<vk::Allocation> allocation = allocator.allocate(requirements, requirement);
+
+ VK_CHECK(vki.bindImageMemory(device, image, allocation->getMemory(), allocation->getOffset()));
+ return allocation;
+}
+
+// VkDescriptorInfo referencing only a buffer view (texel buffer descriptors).
+vk::VkDescriptorInfo createDescriptorInfo (vk::VkBufferView bufferView)
+{
+ const vk::VkDescriptorInfo resultInfo =
+ {
+ bufferView, // bufferView
+ 0, // sampler
+ 0, // imageView
+ 0, // attachmentView
+ (vk::VkImageLayout)0, // imageLayout
+ };
+ return resultInfo;
+}
+
+// VkDescriptorInfo referencing only a sampler (sampler descriptors).
+vk::VkDescriptorInfo createDescriptorInfo (vk::VkSampler sampler)
+{
+ const vk::VkDescriptorInfo resultInfo =
+ {
+ 0, // bufferView
+ sampler, // sampler
+ 0, // imageView
+ 0, // attachmentView
+ (vk::VkImageLayout)0, // imageLayout
+ };
+ return resultInfo;
+}
+
+// VkDescriptorInfo referencing an image view in the given layout
+// (sampled/storage image descriptors).
+vk::VkDescriptorInfo createDescriptorInfo (vk::VkImageView imageView, vk::VkImageLayout layout)
+{
+ const vk::VkDescriptorInfo resultInfo =
+ {
+ 0, // bufferView
+ 0, // sampler
+ imageView, // imageView
+ 0, // attachmentView
+ layout, // imageLayout
+ };
+ return resultInfo;
+}
+
+// VkDescriptorInfo for a combined image-sampler: sampler plus image view in
+// the given layout.
+vk::VkDescriptorInfo createDescriptorInfo (vk::VkSampler sampler, vk::VkImageView imageView, vk::VkImageLayout layout)
+{
+ const vk::VkDescriptorInfo resultInfo =
+ {
+ 0, // bufferView
+ sampler, // sampler
+ imageView, // imageView
+ 0, // attachmentView
+ layout, // imageLayout
+ };
+ return resultInfo;
+}
+
+// Build a VkClearValue carrying the given RGBA color as float32 components.
+vk::VkClearValue createClearValueColor (const tcu::Vec4& color)
+{
+ vk::VkClearValue retVal;
+
+ retVal.color.f32[0] = color.x();
+ retVal.color.f32[1] = color.y();
+ retVal.color.f32[2] = color.z();
+ retVal.color.f32[3] = color.w();
+
+ return retVal;
+}
+
+// Fill dst with the reference four-quadrant pattern: c1 in the (0,0) quadrant,
+// c2 in the +x quadrant, c3 in the +y quadrant, c4 in the +x+y quadrant.
+// For odd sizes the far quadrants absorb the remaining row/column.
+void drawQuadrantReferenceResult (const tcu::PixelBufferAccess& dst, const tcu::Vec4& c1, const tcu::Vec4& c2, const tcu::Vec4& c3, const tcu::Vec4& c4)
+{
+ tcu::clear(tcu::getSubregion(dst, 0, 0, dst.getWidth() / 2, dst.getHeight() / 2), c1);
+ tcu::clear(tcu::getSubregion(dst, dst.getWidth() / 2, 0, dst.getWidth() - dst.getWidth() / 2, dst.getHeight() / 2), c2);
+ tcu::clear(tcu::getSubregion(dst, 0, dst.getHeight() / 2, dst.getWidth() / 2, dst.getHeight() - dst.getHeight() / 2), c3);
+ tcu::clear(tcu::getSubregion(dst, dst.getWidth() / 2, dst.getHeight() / 2, dst.getWidth() - dst.getWidth() / 2, dst.getHeight() - dst.getHeight() / 2), c4);
+}
+
+// Base class for test instances that render into a single color attachment.
+// Owns the target image, its view, a single-subpass render pass, the
+// framebuffer and a transient command pool. Subclasses implement the three
+// pure virtuals; iterate() (defined below) drives one render-and-verify pass.
+class SingleTargetRenderInstance : public vkt::TestInstance
+{
+public:
+ SingleTargetRenderInstance (Context& context,
+ const tcu::UVec2& size);
+
+private:
+ static vk::Move<vk::VkImage> createColorAttachment (const vk::DeviceInterface& vki,
+ vk::VkDevice device,
+ vk::Allocator& allocator,
+ const tcu::TextureFormat& format,
+ const tcu::UVec2& size,
+ de::MovePtr<vk::Allocation>* outAllocation);
+
+ static vk::Move<vk::VkAttachmentView> createColorAttachmentView (const vk::DeviceInterface& vki,
+ vk::VkDevice device,
+ const tcu::TextureFormat& format,
+ vk::VkImage image);
+
+ static vk::Move<vk::VkRenderPass> createRenderPass (const vk::DeviceInterface& vki,
+ vk::VkDevice device,
+ const tcu::TextureFormat& format);
+
+ static vk::Move<vk::VkFramebuffer> createFramebuffer (const vk::DeviceInterface& vki,
+ vk::VkDevice device,
+ vk::VkRenderPass renderpass,
+ vk::VkAttachmentView colorAttachmentView,
+ const tcu::UVec2& size);
+
+ static vk::Move<vk::VkCmdPool> createCommandPool (const vk::DeviceInterface& vki,
+ vk::VkDevice device,
+ deUint32 queueFamilyIndex);
+
+ // Subclass hooks: describe the test, draw into the target, and check the
+ // read-back pixels.
+ virtual void logTestPlan (void) const = 0;
+ virtual void renderToTarget (void) = 0;
+ virtual tcu::TestStatus verifyResultImage (const tcu::ConstPixelBufferAccess& result) const = 0;
+
+ void readRenderTarget (tcu::TextureLevel& dst);
+ tcu::TestStatus iterate (void);
+
+protected:
+ const tcu::TextureFormat m_targetFormat;
+ const tcu::UVec2 m_targetSize;
+
+ const vk::DeviceInterface& m_vki;
+ const vk::VkDevice m_device;
+ const vk::VkQueue m_queue;
+ const deUint32 m_queueFamilyIndex;
+ vk::Allocator& m_allocator;
+ // \note m_colorAttachmentMemory must outlive m_colorAttachmentImage,
+ // which it backs; declaration order here controls destruction order.
+ de::MovePtr<vk::Allocation> m_colorAttachmentMemory;
+ const vk::Unique<vk::VkImage> m_colorAttachmentImage;
+ const vk::Unique<vk::VkAttachmentView> m_colorAttachmentView;
+ const vk::Unique<vk::VkRenderPass> m_renderPass;
+ const vk::Unique<vk::VkFramebuffer> m_framebuffer;
+ const vk::Unique<vk::VkCmdPool> m_cmdPool;
+
+ bool m_firstIteration;
+};
+
+// Construct the full render target chain: an RGBA8 UNORM color image of the
+// given size, its attachment view, a clear/store render pass, the framebuffer
+// and a transient command pool on the universal queue family.
+SingleTargetRenderInstance::SingleTargetRenderInstance (Context& context,
+ const tcu::UVec2& size)
+ : vkt::TestInstance (context)
+ , m_targetFormat (tcu::TextureFormat::RGBA, tcu::TextureFormat::UNORM_INT8)
+ , m_targetSize (size)
+ , m_vki (context.getDeviceInterface())
+ , m_device (context.getDevice())
+ , m_queue (context.getUniversalQueue())
+ , m_queueFamilyIndex (context.getUniversalQueueFamilyIndex())
+ , m_allocator (context.getDefaultAllocator())
+ , m_colorAttachmentMemory (DE_NULL)
+ , m_colorAttachmentImage (createColorAttachment(m_vki, m_device, m_allocator, m_targetFormat, m_targetSize, &m_colorAttachmentMemory))
+ , m_colorAttachmentView (createColorAttachmentView(m_vki, m_device, m_targetFormat, *m_colorAttachmentImage))
+ , m_renderPass (createRenderPass(m_vki, m_device, m_targetFormat))
+ , m_framebuffer (createFramebuffer(m_vki, m_device, *m_renderPass, *m_colorAttachmentView, m_targetSize))
+ , m_cmdPool (createCommandPool(m_vki, m_device, context.getUniversalQueueFamilyIndex()))
+ , m_firstIteration (true)
+{
+}
+
+// Create a single-mip, single-slice 2D image usable as a color attachment
+// and as a transfer source (for read-back), with optimal tiling. Device
+// memory is allocated and bound here; ownership of the allocation is
+// returned through *outAllocation and must outlive the image.
+vk::Move<vk::VkImage> SingleTargetRenderInstance::createColorAttachment (const vk::DeviceInterface& vki,
+ vk::VkDevice device,
+ vk::Allocator& allocator,
+ const tcu::TextureFormat& format,
+ const tcu::UVec2& size,
+ de::MovePtr<vk::Allocation>* outAllocation)
+{
+ const vk::VkImageCreateInfo imageInfo =
+ {
+ vk::VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO,
+ DE_NULL,
+ vk::VK_IMAGE_TYPE_2D, // imageType
+ mapToVkTextureFormat(format), // format
+ { (deInt32)size.x(), (deInt32)size.y(), 1 }, // extent
+ 1, // mipLevels
+ 1, // arraySize
+ 1, // samples
+ vk::VK_IMAGE_TILING_OPTIMAL, // tiling
+ vk::VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | vk::VK_IMAGE_USAGE_TRANSFER_SOURCE_BIT, // usage
+ 0, // flags
+ vk::VK_SHARING_MODE_EXCLUSIVE, // sharingMode
+ 0u, // queueFamilyCount
+ DE_NULL, // pQueueFamilyIndices
+ };
+
+ vk::Move<vk::VkImage> image (vk::createImage(vki, device, &imageInfo));
+ de::MovePtr<vk::Allocation> allocation (allocateAndBindObjectMemory(vki, device, allocator, *image, vk::MemoryRequirement::Any));
+
+ *outAllocation = allocation;
+ return image;
+}
+
+// Create an attachment view over mip level 0, array slice 0 of the target
+// image, in the same format as the image.
+vk::Move<vk::VkAttachmentView> SingleTargetRenderInstance::createColorAttachmentView (const vk::DeviceInterface& vki,
+ vk::VkDevice device,
+ const tcu::TextureFormat& format,
+ vk::VkImage image)
+{
+ const vk::VkAttachmentViewCreateInfo createInfo =
+ {
+ vk::VK_STRUCTURE_TYPE_ATTACHMENT_VIEW_CREATE_INFO,
+ DE_NULL,
+ image, // image
+ mapToVkTextureFormat(format), // format
+ 0u, // mipLevel
+ 0u, // baseArraySlice
+ 1u, // arraySize
+ 0u, // flags
+ };
+
+ return vk::createAttachmentView(vki, device, &createInfo);
+}
+
+// Create a minimal render pass: one color attachment (cleared on load,
+// stored on finish, kept in COLOR_ATTACHMENT_OPTIMAL layout throughout),
+// one graphics subpass, no depth/stencil (VK_NO_ATTACHMENT) and no
+// dependencies.
+vk::Move<vk::VkRenderPass> SingleTargetRenderInstance::createRenderPass (const vk::DeviceInterface& vki,
+ vk::VkDevice device,
+ const tcu::TextureFormat& format)
+{
+ const vk::VkAttachmentDescription attachmentDescription =
+ {
+ vk::VK_STRUCTURE_TYPE_ATTACHMENT_DESCRIPTION,
+ DE_NULL,
+ mapToVkTextureFormat(format), // format
+ 1u, // samples
+ vk::VK_ATTACHMENT_LOAD_OP_CLEAR, // loadOp
+ vk::VK_ATTACHMENT_STORE_OP_STORE, // storeOp
+ vk::VK_ATTACHMENT_LOAD_OP_DONT_CARE, // stencilLoadOp
+ vk::VK_ATTACHMENT_STORE_OP_DONT_CARE, // stencilStoreOp
+ vk::VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL, // initialLayout
+ vk::VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL, // finalLayout
+ };
+ const vk::VkAttachmentReference colorAttachment =
+ {
+ 0u, // attachment
+ vk::VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL // layout
+ };
+ const vk::VkAttachmentReference depthStencilAttachment =
+ {
+ vk::VK_NO_ATTACHMENT, // attachment
+ vk::VK_IMAGE_LAYOUT_UNDEFINED // layout
+ };
+ const vk::VkSubpassDescription subpass =
+ {
+ vk::VK_STRUCTURE_TYPE_SUBPASS_DESCRIPTION,
+ DE_NULL,
+ vk::VK_PIPELINE_BIND_POINT_GRAPHICS, // pipelineBindPoint
+ 0u, // flags
+ 0u, // inputCount
+ DE_NULL, // inputAttachments
+ 1u, // colorCount
+ &colorAttachment, // colorAttachments
+ DE_NULL, // resolveAttachments
+ depthStencilAttachment, // depthStencilAttachment
+ 0u, // preserveCount
+ DE_NULL // preserveAttachments
+ };
+ const vk::VkRenderPassCreateInfo renderPassCreateInfo =
+ {
+ vk::VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO,
+ DE_NULL,
+ 1u, // attachmentCount
+ &attachmentDescription, // pAttachments
+ 1u, // subpassCount
+ &subpass, // pSubpasses
+ 0u, // dependencyCount
+ DE_NULL, // pDependencies
+ };
+
+ return vk::createRenderPass(vki, device, &renderPassCreateInfo);
+}
+
+// Create a single-layer framebuffer of the target size binding the color
+// attachment view (in COLOR_ATTACHMENT_OPTIMAL layout) to the render pass.
+vk::Move<vk::VkFramebuffer> SingleTargetRenderInstance::createFramebuffer (const vk::DeviceInterface& vki,
+ vk::VkDevice device,
+ vk::VkRenderPass renderpass,
+ vk::VkAttachmentView colorAttachmentView,
+ const tcu::UVec2& size)
+{
+ const vk::VkAttachmentBindInfo colorAttachment =
+ {
+ colorAttachmentView, // view;
+ vk::VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL, // layout;
+ };
+ const vk::VkFramebufferCreateInfo framebufferCreateInfo =
+ {
+ vk::VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO,
+ DE_NULL,
+ renderpass, // renderPass
+ 1u, // attachmentCount
+ &colorAttachment, // pAttachments
+ size.x(), // width
+ size.y(), // height
+ 1, // layers
+ };
+
+ return vk::createFramebuffer(vki, device, &framebufferCreateInfo);
+}
+
+// Create a command pool for the given queue family. TRANSIENT flags the
+// allocated command buffers as short-lived (recorded, submitted once per
+// iteration).
+vk::Move<vk::VkCmdPool> SingleTargetRenderInstance::createCommandPool (const vk::DeviceInterface& vki,
+ vk::VkDevice device,
+ deUint32 queueFamilyIndex)
+{
+ const vk::VkCmdPoolCreateInfo createInfo =
+ {
+ vk::VK_STRUCTURE_TYPE_CMD_POOL_CREATE_INFO,
+ DE_NULL,
+ queueFamilyIndex, // queueFamilyIndex
+ vk::VK_CMD_POOL_CREATE_TRANSIENT_BIT, // flags
+ };
+ return vk::createCommandPool(vki, device, &createInfo);
+}
+
+// Reads back the rendered color attachment into dst.
+//
+// Transitions the attachment from COLOR_ATTACHMENT_OPTIMAL to TRANSFER_SOURCE_OPTIMAL,
+// copies the image contents into a host-visible staging buffer, waits for the copy to
+// complete and then invalidates the mapped range before copying the pixels out.
+void SingleTargetRenderInstance::readRenderTarget (tcu::TextureLevel& dst)
+{
+	const deUint64							pixelDataSize				= (deUint64)(m_targetSize.x() * m_targetSize.y() * m_targetFormat.getPixelSize());
+	const vk::VkBufferCreateInfo			bufferCreateInfo			=
+	{
+		vk::VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO,
+		DE_NULL,
+		pixelDataSize,									// size
+		vk::VK_BUFFER_USAGE_TRANSFER_DESTINATION_BIT,	// usage
+		0u,												// flags
+		vk::VK_SHARING_MODE_EXCLUSIVE,					// sharingMode
+		0u,												// queueFamilyCount
+		DE_NULL,										// pQueueFamilyIndices
+	};
+	const vk::Unique<vk::VkBuffer>			buffer						(vk::createBuffer(m_vki, m_device, &bufferCreateInfo));
+	const vk::VkImageSubresourceRange		fullSubrange				=
+	{
+		vk::VK_IMAGE_ASPECT_COLOR,						// aspect
+		0u,												// baseMipLevel
+		1u,												// mipLevels
+		0u,												// baseArraySlice
+		1u,												// arraySize
+	};
+	// make color writes visible to the transfer stage and switch layout for the copy
+	const vk::VkImageMemoryBarrier			imageBarrier				=
+	{
+		vk::VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,
+		DE_NULL,
+		vk::VK_MEMORY_OUTPUT_COLOR_ATTACHMENT_BIT,		// outputMask
+		vk::VK_MEMORY_INPUT_TRANSFER_BIT,				// inputMask
+		vk::VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL,	// oldLayout
+		vk::VK_IMAGE_LAYOUT_TRANSFER_SOURCE_OPTIMAL,	// newLayout
+		vk::VK_QUEUE_FAMILY_IGNORED,					// srcQueueFamilyIndex
+		vk::VK_QUEUE_FAMILY_IGNORED,					// destQueueFamilyIndex
+		*m_colorAttachmentImage,						// image
+		fullSubrange,									// subresourceRange
+	};
+	// make transfer writes to the staging buffer visible to host reads
+	const vk::VkBufferMemoryBarrier			memoryBarrier				=
+	{
+		vk::VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER,
+		DE_NULL,
+		vk::VK_MEMORY_OUTPUT_TRANSFER_BIT,				// outputMask
+		vk::VK_MEMORY_INPUT_HOST_READ_BIT,				// inputMask
+		vk::VK_QUEUE_FAMILY_IGNORED,					// srcQueueFamilyIndex
+		vk::VK_QUEUE_FAMILY_IGNORED,					// destQueueFamilyIndex
+		*buffer,										// buffer
+		0u,												// offset
+		(vk::VkDeviceSize)pixelDataSize					// size
+	};
+	const vk::VkCmdBufferCreateInfo			cmdBufCreateInfo			=
+	{
+		vk::VK_STRUCTURE_TYPE_CMD_BUFFER_CREATE_INFO,
+		DE_NULL,
+		*m_cmdPool,										// cmdPool
+		vk::VK_CMD_BUFFER_LEVEL_PRIMARY,				// level
+		0u,												// flags
+	};
+	const vk::VkFenceCreateInfo				fenceCreateInfo				=
+	{
+		vk::VK_STRUCTURE_TYPE_FENCE_CREATE_INFO,
+		DE_NULL,
+		0u,												// flags
+	};
+	const vk::VkCmdBufferBeginInfo			cmdBufBeginInfo				=
+	{
+		vk::VK_STRUCTURE_TYPE_CMD_BUFFER_BEGIN_INFO,
+		DE_NULL,
+		vk::VK_CMD_BUFFER_OPTIMIZE_SMALL_BATCH_BIT | vk::VK_CMD_BUFFER_OPTIMIZE_ONE_TIME_SUBMIT_BIT,	// flags
+		(vk::VkRenderPass)0u,							// renderPass
+		(vk::VkFramebuffer)0u,							// framebuffer
+	};
+	const vk::VkImageSubresource			firstSlice					=
+	{
+		vk::VK_IMAGE_ASPECT_COLOR,						// aspect
+		0,												// mipLevel
+		0,												// arraySlice
+	};
+	const vk::VkBufferImageCopy				copyRegion					=
+	{
+		0u,												// bufferOffset
+		m_targetSize.x(),								// bufferRowLength
+		m_targetSize.y(),								// bufferImageHeight
+		firstSlice,										// imageSubresource
+		{ 0, 0, 0 },									// imageOffset
+		{ (deInt32)m_targetSize.x(), (deInt32)m_targetSize.y(), 1 }		// imageExtent
+	};
+
+	const de::MovePtr<vk::Allocation>		bufferMemory				= allocateAndBindObjectMemory(m_vki, m_device, m_allocator, *buffer, vk::MemoryRequirement::HostVisible);
+
+	const vk::Unique<vk::VkCmdBuffer>		cmd							(vk::createCommandBuffer(m_vki, m_device, &cmdBufCreateInfo));
+	const vk::Unique<vk::VkFence>			cmdCompleteFence			(vk::createFence(m_vki, m_device, &fenceCreateInfo));
+	// barriers are passed to cmdPipelineBarrier through an array of (const void*) pointers
+	const void* const						imageBarrierPtr				= &imageBarrier;
+	const void* const						bufferBarrierPtr			= &memoryBarrier;
+	const deUint64							infiniteTimeout				= ~(deUint64)0u;
+
+	// copy content to buffer
+	VK_CHECK(m_vki.beginCommandBuffer(*cmd, &cmdBufBeginInfo));
+	m_vki.cmdPipelineBarrier(*cmd, vk::VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, vk::VK_PIPELINE_STAGE_TRANSFER_BIT, vk::VK_FALSE, 1, &imageBarrierPtr);
+	m_vki.cmdCopyImageToBuffer(*cmd, *m_colorAttachmentImage, vk::VK_IMAGE_LAYOUT_TRANSFER_SOURCE_OPTIMAL, *buffer, 1, &copyRegion);
+	m_vki.cmdPipelineBarrier(*cmd, vk::VK_PIPELINE_STAGE_TRANSFER_BIT, vk::VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, vk::VK_FALSE, 1, &bufferBarrierPtr);
+	VK_CHECK(m_vki.endCommandBuffer(*cmd));
+
+	// wait for transfer to complete. waitAll = 0 is fine for a single fence.
+	VK_CHECK(m_vki.queueSubmit(m_queue, 1, &cmd.get(), *cmdCompleteFence));
+	VK_CHECK(m_vki.waitForFences(m_device, 1, &cmdCompleteFence.get(), 0u, infiniteTimeout)); // \note: timeout is failure
+
+	dst.setStorage(m_targetFormat, m_targetSize.x(), m_targetSize.y());
+
+	// copy data
+	invalidateMappedMemoryRange(m_vki, m_device, bufferMemory->getMemory(), bufferMemory->getOffset(), pixelDataSize);
+	tcu::copy(dst, tcu::ConstPixelBufferAccess(dst.getFormat(), dst.getSize(), bufferMemory->getHostPtr()));
+}
+
+// Test iteration: logs the plan once, transitions the color target into
+// COLOR_ATTACHMENT_OPTIMAL layout, renders, reads back and verifies the result.
+tcu::TestStatus SingleTargetRenderInstance::iterate (void)
+{
+	tcu::TextureLevel resultImage;
+
+	// log
+	if (m_firstIteration)
+	{
+		logTestPlan();
+		m_firstIteration = false;
+	}
+
+	// render
+	{
+		// transition to VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL
+		const vk::VkImageSubresourceRange		fullSubrange				=
+		{
+			vk::VK_IMAGE_ASPECT_COLOR,						// aspect
+			0u,												// baseMipLevel
+			1u,												// mipLevels
+			0u,												// baseArraySlice
+			1u,												// arraySize
+		};
+		// oldLayout UNDEFINED: previous contents may be discarded, the target is cleared anyway
+		const vk::VkImageMemoryBarrier			imageBarrier				=
+		{
+			vk::VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,
+			DE_NULL,
+			0u,												// outputMask
+			vk::VK_MEMORY_INPUT_COLOR_ATTACHMENT_BIT,		// inputMask
+			vk::VK_IMAGE_LAYOUT_UNDEFINED,					// oldLayout
+			vk::VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL,	// newLayout
+			vk::VK_QUEUE_FAMILY_IGNORED,					// srcQueueFamilyIndex
+			vk::VK_QUEUE_FAMILY_IGNORED,					// destQueueFamilyIndex
+			*m_colorAttachmentImage,						// image
+			fullSubrange,									// subresourceRange
+		};
+		const vk::VkCmdBufferCreateInfo			cmdBufCreateInfo			=
+		{
+			vk::VK_STRUCTURE_TYPE_CMD_BUFFER_CREATE_INFO,
+			DE_NULL,
+			*m_cmdPool,										// cmdPool
+			vk::VK_CMD_BUFFER_LEVEL_PRIMARY,				// level
+			0u,												// flags
+		};
+		const vk::VkCmdBufferBeginInfo			cmdBufBeginInfo				=
+		{
+			vk::VK_STRUCTURE_TYPE_CMD_BUFFER_BEGIN_INFO,
+			DE_NULL,
+			vk::VK_CMD_BUFFER_OPTIMIZE_SMALL_BATCH_BIT | vk::VK_CMD_BUFFER_OPTIMIZE_ONE_TIME_SUBMIT_BIT,	// flags
+			(vk::VkRenderPass)0u,							// renderPass
+			(vk::VkFramebuffer)0u,							// framebuffer
+		};
+
+		const vk::Unique<vk::VkCmdBuffer>		cmd							(vk::createCommandBuffer(m_vki, m_device, &cmdBufCreateInfo));
+		const void* const						imageBarrierPtr				= &imageBarrier;
+
+		VK_CHECK(m_vki.beginCommandBuffer(*cmd, &cmdBufBeginInfo));
+		m_vki.cmdPipelineBarrier(*cmd, vk::VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, vk::VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, vk::VK_FALSE, 1, &imageBarrierPtr);
+		VK_CHECK(m_vki.endCommandBuffer(*cmd));
+		// no fence needed here: renderToTarget() submits to the same queue and waits for
+		// its own fence, which also covers this earlier submission before cmd is destroyed
+		VK_CHECK(m_vki.queueSubmit(m_queue, 1, &cmd.get(), 0));
+
+		// and then render to
+		renderToTarget();
+	}
+
+	// read and verify
+	readRenderTarget(resultImage);
+	return verifyResultImage(resultImage.getAccess());
+}
+
+// Holds the shader modules and shader objects for all stages of a render instance.
+// Stages are loaded by name from the binary collection; stages that are not present
+// in the collection are omitted from the stage info list.
+class RenderInstanceShaders
+{
+public:
+														RenderInstanceShaders		(const vk::DeviceInterface&		vki,
+																					 vk::VkDevice					device,
+																					 const vk::BinaryCollection&	programCollection);
+
+	// true if either tessellation control or evaluation shader was loaded
+	inline bool											hasTessellationStage		(void) const { return *m_tessCtrlShader != 0 || *m_tessEvalShader != 0;	}
+	inline deUint32										getNumStages				(void) const { return (deUint32)m_stageInfos.size();					}
+	inline const vk::VkPipelineShaderStageCreateInfo*	getStages					(void) const { return &m_stageInfos[0];									}
+
+private:
+	void												addStage					(const vk::DeviceInterface&		vki,
+																					 vk::VkDevice					device,
+																					 const vk::BinaryCollection&	programCollection,
+																					 const char*					name,
+																					 vk::VkShaderStage				stage,
+																					 vk::Move<vk::VkShaderModule>*	outModule,
+																					 vk::Move<vk::VkShader>*		outShader);
+
+	vk::VkPipelineShaderStageCreateInfo					getShaderStageCreateInfo	(vk::VkShaderStage stage, vk::VkShader shader) const;
+
+	// modules and shaders are kept alive for the lifetime of this object since
+	// m_stageInfos holds raw handles into them
+	vk::Move<vk::VkShaderModule>						m_vertexShaderModule;
+	vk::Move<vk::VkShader>								m_vertexShader;
+	vk::Move<vk::VkShaderModule>						m_tessCtrlShaderModule;
+	vk::Move<vk::VkShader>								m_tessCtrlShader;
+	vk::Move<vk::VkShaderModule>						m_tessEvalShaderModule;
+	vk::Move<vk::VkShader>								m_tessEvalShader;
+	vk::Move<vk::VkShaderModule>						m_geometryShaderModule;
+	vk::Move<vk::VkShader>								m_geometryShader;
+	vk::Move<vk::VkShaderModule>						m_fragmentShaderModule;
+	vk::Move<vk::VkShader>								m_fragmentShader;
+	std::vector<vk::VkPipelineShaderStageCreateInfo>	m_stageInfos;
+};
+
+// Attempts to load every stage by its well-known program name; missing stages are
+// skipped silently by addStage(). At least one stage must be present.
+RenderInstanceShaders::RenderInstanceShaders (const vk::DeviceInterface&	vki,
+											  vk::VkDevice					device,
+											  const vk::BinaryCollection&	programCollection)
+{
+	addStage(vki, device, programCollection, "vertex",		vk::VK_SHADER_STAGE_VERTEX,				&m_vertexShaderModule,		&m_vertexShader);
+	addStage(vki, device, programCollection, "tess_ctrl",	vk::VK_SHADER_STAGE_TESS_CONTROL,		&m_tessCtrlShaderModule,	&m_tessCtrlShader);
+	addStage(vki, device, programCollection, "tess_eval",	vk::VK_SHADER_STAGE_TESS_EVALUATION,	&m_tessEvalShaderModule,	&m_tessEvalShader);
+	addStage(vki, device, programCollection, "geometry",	vk::VK_SHADER_STAGE_GEOMETRY,			&m_geometryShaderModule,	&m_geometryShader);
+	addStage(vki, device, programCollection, "fragment",	vk::VK_SHADER_STAGE_FRAGMENT,			&m_fragmentShaderModule,	&m_fragmentShader);
+
+	DE_ASSERT(!m_stageInfos.empty());
+}
+
+// Loads the named program binary if it exists in the collection, creates the shader
+// module and shader object (entry point "main"), appends a stage create info, and
+// transfers ownership of the created objects to outModule/outShader.
+void RenderInstanceShaders::addStage (const vk::DeviceInterface&	vki,
+									  vk::VkDevice					device,
+									  const vk::BinaryCollection&	programCollection,
+									  const char*					name,
+									  vk::VkShaderStage				stage,
+									  vk::Move<vk::VkShaderModule>*	outModule,
+									  vk::Move<vk::VkShader>*		outShader)
+{
+	if (programCollection.contains(name))
+	{
+		vk::Move<vk::VkShaderModule>	module		= createShaderModule(vki, device, programCollection.get(name), (vk::VkShaderModuleCreateFlags)0);
+		const vk::VkShaderCreateInfo	createInfo	=
+		{
+			vk::VK_STRUCTURE_TYPE_SHADER_CREATE_INFO,
+			DE_NULL,
+			*module,		// module
+			"main",			// pName
+			0u				// flags
+		};
+		vk::Move<vk::VkShader>			shader		= vk::createShader(vki, device, &createInfo);
+
+		// record stage info before ownership moves out
+		m_stageInfos.push_back(getShaderStageCreateInfo(stage, *shader));
+		*outModule = module;
+		*outShader = shader;
+	}
+}
+
+// Builds a pipeline shader stage create info for the given stage/shader pair,
+// with no specialization constants.
+vk::VkPipelineShaderStageCreateInfo RenderInstanceShaders::getShaderStageCreateInfo (vk::VkShaderStage stage, vk::VkShader shader) const
+{
+	const vk::VkPipelineShaderStageCreateInfo	stageCreateInfo	=
+	{
+		vk::VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO,
+		DE_NULL,
+		stage,			// stage
+		shader,			// shader
+		DE_NULL,		// pSpecializationInfo
+	};
+	return stageCreateInfo;
+}
+
+// Render instance that draws with a single draw command, recorded either directly
+// into a primary command buffer or into a secondary command buffer that is executed
+// from the primary (selected by isPrimaryCmdBuf).
+class SingleCmdRenderInstance : public SingleTargetRenderInstance
+{
+public:
+									SingleCmdRenderInstance	(Context&			context,
+															 bool				isPrimaryCmdBuf,
+															 const tcu::UVec2&	renderSize);
+
+private:
+	vk::Move<vk::VkPipeline>		createPipeline				(vk::VkPipelineLayout pipelineLayout);
+
+	// subclasses provide the pipeline layout and record the actual draw commands
+	virtual vk::VkPipelineLayout	getPipelineLayout			(void) const = 0;
+	virtual void					writeDrawCmdBuffer			(vk::VkCmdBuffer cmd) const = 0;
+
+	void							renderToTarget				(void);
+
+	const bool						m_isPrimaryCmdBuf;
+};
+
+// Stores the command-buffer mode; all rendering resources are owned by the base class.
+SingleCmdRenderInstance::SingleCmdRenderInstance (Context&			context,
+												  bool				isPrimaryCmdBuf,
+												  const tcu::UVec2&	renderSize)
+	: SingleTargetRenderInstance	(context, renderSize)
+	, m_isPrimaryCmdBuf				(isPrimaryCmdBuf)
+{
+}
+
+// Creates the graphics pipeline: no vertex attributes (vertices generated in shader),
+// fill-mode rasterization without culling, no blending, depth/stencil tests disabled.
+// If a tessellation stage is present, PATCH topology with 3 control points is used,
+// otherwise a plain triangle list.
+vk::Move<vk::VkPipeline> SingleCmdRenderInstance::createPipeline (vk::VkPipelineLayout pipelineLayout)
+{
+	const RenderInstanceShaders							shaderStages		(m_vki, m_device, m_context.getBinaryCollection());
+	const vk::VkPrimitiveTopology						topology			= shaderStages.hasTessellationStage() ? vk::VK_PRIMITIVE_TOPOLOGY_PATCH : vk::VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST;
+	const vk::VkPipelineVertexInputStateCreateInfo		vertexInputState	=
+	{
+		vk::VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO,
+		DE_NULL,
+		0u,											// bindingCount
+		DE_NULL,									// pVertexBindingDescriptions
+		0u,											// attributeCount
+		DE_NULL,									// pVertexAttributeDescriptions
+	};
+	const vk::VkPipelineInputAssemblyStateCreateInfo	iaState				=
+	{
+		vk::VK_STRUCTURE_TYPE_PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO,
+		DE_NULL,
+		topology,									// topology
+		vk::VK_FALSE,								// primitiveRestartEnable
+	};
+	// only referenced when a tessellation stage exists (see createInfo below)
+	const vk::VkPipelineTessellationStateCreateInfo		tessState			=
+	{
+		vk::VK_STRUCTURE_TYPE_PIPELINE_TESSELLATION_STATE_CREATE_INFO,
+		DE_NULL,
+		3u,											// patchControlPoints
+	};
+	const vk::VkPipelineViewportStateCreateInfo			vpState				=
+	{
+		vk::VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_STATE_CREATE_INFO,
+		DE_NULL,
+		1u,											// viewportCount
+	};
+	const vk::VkPipelineRasterStateCreateInfo			rsState				=
+	{
+		vk::VK_STRUCTURE_TYPE_PIPELINE_RASTER_STATE_CREATE_INFO,
+		DE_NULL,
+		vk::VK_TRUE,								// depthClipEnable
+		vk::VK_FALSE,								// rasterizerDiscardEnable
+		vk::VK_FILL_MODE_SOLID,						// fillMode
+		vk::VK_CULL_MODE_NONE,						// cullMode
+		vk::VK_FRONT_FACE_CCW,						// frontFace
+	};
+	const vk::VkPipelineMultisampleStateCreateInfo		msState				=
+	{
+		vk::VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO,
+		DE_NULL,
+		1u,											// rasterSamples, no multisampling
+		vk::VK_FALSE,								// sampleShadingEnable
+		0.0f,										// minSampleShading
+		0x01u										// sampleMask
+	};
+	const vk::VkPipelineDepthStencilStateCreateInfo		dsState				=
+	{
+		vk::VK_STRUCTURE_TYPE_PIPELINE_DEPTH_STENCIL_STATE_CREATE_INFO,
+		DE_NULL,
+		vk::VK_FALSE,								// depthTestEnable
+		vk::VK_FALSE,								// depthWriteEnable
+		vk::VK_COMPARE_OP_ALWAYS,					// depthCompareOp
+		vk::VK_FALSE,								// depthBoundsEnable
+		vk::VK_FALSE,								// stencilTestEnable
+		{ vk::VK_STENCIL_OP_KEEP, vk::VK_STENCIL_OP_KEEP, vk::VK_STENCIL_OP_KEEP, vk::VK_COMPARE_OP_ALWAYS },	// front
+		{ vk::VK_STENCIL_OP_KEEP, vk::VK_STENCIL_OP_KEEP, vk::VK_STENCIL_OP_KEEP, vk::VK_COMPARE_OP_ALWAYS },	// back
+	};
+	const vk::VkPipelineColorBlendAttachmentState		cbAttachment		=
+	{
+		vk::VK_FALSE,								// blendEnable
+		vk::VK_BLEND_ZERO,							// srcBlendColor
+		vk::VK_BLEND_ZERO,							// destBlendColor
+		vk::VK_BLEND_OP_ADD,						// blendOpColor
+		vk::VK_BLEND_ZERO,							// srcBlendAlpha
+		vk::VK_BLEND_ZERO,							// destBlendAlpha
+		vk::VK_BLEND_OP_ADD,						// blendOpAlpha
+		vk::VK_CHANNEL_R_BIT | vk::VK_CHANNEL_G_BIT | vk::VK_CHANNEL_B_BIT | vk::VK_CHANNEL_A_BIT,	// channelWriteMask
+	};
+	const vk::VkPipelineColorBlendStateCreateInfo		cbState				=
+	{
+		vk::VK_STRUCTURE_TYPE_PIPELINE_COLOR_BLEND_STATE_CREATE_INFO,
+		DE_NULL,
+		vk::VK_FALSE,								// alphaToCoverageEnable
+		vk::VK_FALSE,								// logicOpEnable
+		vk::VK_LOGIC_OP_CLEAR,						// logicOp
+		1u,											// attachmentCount
+		&cbAttachment,								// pAttachments
+	};
+	const vk::VkGraphicsPipelineCreateInfo createInfo =
+	{
+		vk::VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO,
+		DE_NULL,
+		shaderStages.getNumStages(),									// stageCount
+		shaderStages.getStages(),										// pStages
+		&vertexInputState,												// pVertexInputState
+		&iaState,														// pInputAssemblyState
+		(shaderStages.hasTessellationStage() ? &tessState : DE_NULL),	// pTessellationState
+		&vpState,														// pViewportState
+		&rsState,														// pRasterState
+		&msState,														// pMultisampleState
+		&dsState,														// pDepthStencilState
+		&cbState,														// pColorBlendState
+		0u,																// flags
+		pipelineLayout,													// layout
+		*m_renderPass,													// renderPass
+		0u,																// subpass
+		(vk::VkPipeline)0,												// basePipelineHandle
+		0u,																// basePipelineIndex
+	};
+	return createGraphicsPipeline(m_vki, m_device, (vk::VkPipelineCache)0u, &createInfo);
+}
+
+// Records and submits the render pass. Draw commands are either recorded inline into
+// the primary command buffer, or into a secondary command buffer which is then
+// executed from the primary (m_isPrimaryCmdBuf selects the mode). Waits for the
+// submission to finish before returning so locally owned objects can be destroyed.
+void SingleCmdRenderInstance::renderToTarget (void)
+{
+	const vk::VkViewport							viewport					=
+	{
+		0.0f,							// originX
+		0.0f,							// originY
+		float(m_targetSize.x()),		// width
+		float(m_targetSize.y()),		// height
+		0.0f,							// minDepth
+		1.0f,							// maxDepth
+	};
+	const vk::VkRect2D								renderArea					=
+	{
+		{ 0, 0 },														// offset
+		{ (deInt32)m_targetSize.x(), (deInt32)m_targetSize.y() },		// extent
+	};
+	// dynamic state objects cover the whole target with default raster/blend/ds values
+	const vk::VkDynamicViewportStateCreateInfo		viewportStateCreateInfo		=
+	{
+		vk::VK_STRUCTURE_TYPE_DYNAMIC_VIEWPORT_STATE_CREATE_INFO,
+		DE_NULL,
+		1,								// viewportAndScissorCount
+		&viewport,						// pViewports
+		&renderArea,					// pScissors
+	};
+	const vk::VkDynamicRasterStateCreateInfo		rasterStateCreateInfo		=
+	{
+		vk::VK_STRUCTURE_TYPE_DYNAMIC_RASTER_STATE_CREATE_INFO,
+		DE_NULL,
+		0.0f,							// depthBias
+		0.0f,							// depthBiasClamp
+		0.0f,							// slopeScaledDepthBias
+		1.0f,							// lineWidth
+	};
+	const vk::VkDynamicColorBlendStateCreateInfo	colorBlendStateCreateInfo	=
+	{
+		vk::VK_STRUCTURE_TYPE_DYNAMIC_COLOR_BLEND_STATE_CREATE_INFO,
+		DE_NULL,
+		{ 0.0f, 0.0f, 0.0f, 0.0f },		// blendConst
+	};
+	const vk::VkDynamicDepthStencilStateCreateInfo	depthStencilStateCreateInfo	=
+	{
+		vk::VK_STRUCTURE_TYPE_DYNAMIC_DEPTH_STENCIL_STATE_CREATE_INFO,
+		DE_NULL,
+		0.0f,							// minDepthBounds
+		1.0f,							// maxDepthBounds
+		0u,								// stencilReadMask
+		0u,								// stencilWriteMask
+		0u,								// stencilFrontRef
+		0u,								// stencilBackRef
+	};
+	const vk::VkCmdBufferCreateInfo					mainCmdBufCreateInfo		=
+	{
+		vk::VK_STRUCTURE_TYPE_CMD_BUFFER_CREATE_INFO,
+		DE_NULL,
+		*m_cmdPool,								// cmdPool
+		vk::VK_CMD_BUFFER_LEVEL_PRIMARY,		// level
+		0u,										// flags
+	};
+	const vk::VkCmdBufferBeginInfo					mainCmdBufBeginInfo			=
+	{
+		vk::VK_STRUCTURE_TYPE_CMD_BUFFER_BEGIN_INFO,
+		DE_NULL,
+		vk::VK_CMD_BUFFER_OPTIMIZE_SMALL_BATCH_BIT | vk::VK_CMD_BUFFER_OPTIMIZE_ONE_TIME_SUBMIT_BIT,	// flags
+		(vk::VkRenderPass)0u,					// renderPass
+		(vk::VkFramebuffer)0u,					// framebuffer
+	};
+	const vk::VkCmdBufferCreateInfo					passCmdBufCreateInfo		=
+	{
+		vk::VK_STRUCTURE_TYPE_CMD_BUFFER_CREATE_INFO,
+		DE_NULL,
+		*m_cmdPool,								// cmdPool
+		vk::VK_CMD_BUFFER_LEVEL_SECONDARY,		// level
+		0u,										// flags
+	};
+	// secondary buffer records against the real render pass / framebuffer
+	const vk::VkCmdBufferBeginInfo					passCmdBufBeginInfo			=
+	{
+		vk::VK_STRUCTURE_TYPE_CMD_BUFFER_BEGIN_INFO,
+		DE_NULL,
+		vk::VK_CMD_BUFFER_OPTIMIZE_SMALL_BATCH_BIT | vk::VK_CMD_BUFFER_OPTIMIZE_ONE_TIME_SUBMIT_BIT,	// flags
+		(vk::VkRenderPass)*m_renderPass,		// renderPass
+		(vk::VkFramebuffer)*m_framebuffer,		// framebuffer
+	};
+	const vk::VkFenceCreateInfo						fenceCreateInfo				=
+	{
+		vk::VK_STRUCTURE_TYPE_FENCE_CREATE_INFO,
+		DE_NULL,
+		0u,				// flags
+	};
+	const vk::VkClearValue							clearValue					= createClearValueColor(tcu::Vec4(0.0f, 0.0f, 0.0f, 0.0f));
+	const vk::VkRenderPassBeginInfo					renderPassBeginInfo			=
+	{
+		vk::VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO,
+		DE_NULL,
+		*m_renderPass,		// renderPass
+		*m_framebuffer,		// framebuffer
+		renderArea,			// renderArea
+		1u,					// attachmentCount
+		&clearValue,		// pAttachmentClearValues
+	};
+
+	const vk::VkPipelineLayout						pipelineLayout				(getPipelineLayout());
+	const vk::Unique<vk::VkPipeline>				pipeline					(createPipeline(pipelineLayout));
+	const vk::Unique<vk::VkCmdBuffer>				mainCmd						(vk::createCommandBuffer(m_vki, m_device, &mainCmdBufCreateInfo));
+	// secondary buffer is created only when needed
+	const vk::Unique<vk::VkCmdBuffer>				passCmd						((m_isPrimaryCmdBuf) ? (vk::Move<vk::VkCmdBuffer>()) : (vk::createCommandBuffer(m_vki, m_device, &passCmdBufCreateInfo)));
+	const vk::Unique<vk::VkDynamicViewportState>	dynamicVpState				(vk::createDynamicViewportState(m_vki, m_device, &viewportStateCreateInfo));
+	const vk::Unique<vk::VkDynamicRasterState>		dynamicRsState				(vk::createDynamicRasterState(m_vki, m_device, &rasterStateCreateInfo));
+	const vk::Unique<vk::VkDynamicColorBlendState>	dynamicCbState				(vk::createDynamicColorBlendState(m_vki, m_device, &colorBlendStateCreateInfo));
+	const vk::Unique<vk::VkDynamicDepthStencilState>	dynamicDsState			(vk::createDynamicDepthStencilState(m_vki, m_device, &depthStencilStateCreateInfo));
+	const vk::Unique<vk::VkFence>					fence						(vk::createFence(m_vki, m_device, &fenceCreateInfo));
+	const deUint64									infiniteTimeout				= ~(deUint64)0u;
+	const vk::VkRenderPassContents					passContents				= (m_isPrimaryCmdBuf) ? (vk::VK_RENDER_PASS_CONTENTS_INLINE) : (vk::VK_RENDER_PASS_CONTENTS_SECONDARY_CMD_BUFFERS);
+
+	VK_CHECK(m_vki.beginCommandBuffer(*mainCmd, &mainCmdBufBeginInfo));
+	m_vki.cmdBeginRenderPass(*mainCmd, &renderPassBeginInfo, passContents);
+
+	if (m_isPrimaryCmdBuf)
+	{
+		// record draw state and commands directly into the primary buffer
+		m_vki.cmdBindDynamicViewportState(*mainCmd, *dynamicVpState);
+		m_vki.cmdBindDynamicRasterState(*mainCmd, *dynamicRsState);
+		m_vki.cmdBindDynamicColorBlendState(*mainCmd, *dynamicCbState);
+		m_vki.cmdBindDynamicDepthStencilState(*mainCmd, *dynamicDsState);
+		m_vki.cmdBindPipeline(*mainCmd, vk::VK_PIPELINE_BIND_POINT_GRAPHICS, *pipeline);
+		writeDrawCmdBuffer(*mainCmd);
+	}
+	else
+	{
+		// record into the secondary buffer and execute it from the primary
+		VK_CHECK(m_vki.beginCommandBuffer(*passCmd, &passCmdBufBeginInfo));
+		m_vki.cmdBindDynamicViewportState(*passCmd, *dynamicVpState);
+		m_vki.cmdBindDynamicRasterState(*passCmd, *dynamicRsState);
+		m_vki.cmdBindDynamicColorBlendState(*passCmd, *dynamicCbState);
+		m_vki.cmdBindDynamicDepthStencilState(*passCmd, *dynamicDsState);
+		m_vki.cmdBindPipeline(*passCmd, vk::VK_PIPELINE_BIND_POINT_GRAPHICS, *pipeline);
+		writeDrawCmdBuffer(*passCmd);
+		VK_CHECK(m_vki.endCommandBuffer(*passCmd));
+
+		m_vki.cmdExecuteCommands(*mainCmd, 1, &passCmd.get());
+	}
+
+	m_vki.cmdEndRenderPass(*mainCmd);
+	VK_CHECK(m_vki.endCommandBuffer(*mainCmd));
+
+	// submit and wait for them to finish before exiting scope. (Killing in-flight objects is a no-no).
+	VK_CHECK(m_vki.queueSubmit(m_queue, 1, &mainCmd.get(), *fence));
+	VK_CHECK(m_vki.waitForFences(m_device, 1, &fence.get(), 0u, infiniteTimeout)); // \note: timeout is failure
+}
+
+// How descriptors are declared in the shader interface for a test variant.
+enum ShaderInputInterface
+{
+	SHADER_INPUT_SINGLE_DESCRIPTOR = 0,					//!< one descriptor
+	SHADER_INPUT_MULTIPLE_DESCRIPTORS,					//!< multiple descriptors
+	SHADER_INPUT_DESCRIPTOR_ARRAY,						//!< descriptor array
+
+	SHADER_INPUT_LAST
+};
+
+//! Number of distinct backing resources consumed by the given shader input interface.
+deUint32 getInterfaceNumResources (ShaderInputInterface shaderInterface)
+{
+	if (shaderInterface == SHADER_INPUT_SINGLE_DESCRIPTOR)
+		return 1u;
+
+	if (shaderInterface == SHADER_INPUT_MULTIPLE_DESCRIPTORS ||
+		shaderInterface == SHADER_INPUT_DESCRIPTOR_ARRAY)
+		return 2u;
+
+	DE_FATAL("Impossible");
+	return 0u;
+}
+
+// Renders a 2x2 yellow-green grid whose colors are sourced from one or two buffer
+// descriptors (uniform/storage, optionally dynamic-offset). Source buffers carry
+// configurable static (view) and dynamic offsets so offset handling can be tested.
+class BufferRenderInstance : public SingleCmdRenderInstance
+{
+public:
+													BufferRenderInstance		(Context&					context,
+																				 bool						isPrimaryCmdBuf,
+																				 vk::VkDescriptorType		descriptorType,
+																				 vk::VkShaderStageFlags		stageFlags,
+																				 ShaderInputInterface		shaderInterface,
+																				 bool						viewOffset,
+																				 bool						dynamicOffset,
+																				 bool						dynamicOffsetNonZero);
+
+	static vk::Move<vk::VkBuffer>					createSourceBuffer			(const vk::DeviceInterface&		vki,
+																				 vk::VkDevice					device,
+																				 vk::Allocator&					allocator,
+																				 vk::VkDescriptorType			descriptorType,
+																				 deUint32						offset,
+																				 deUint32						bufferSize,
+																				 de::MovePtr<vk::Allocation>*	outMemory);
+
+	static vk::Move<vk::VkBufferView>				createBufferView			(const vk::DeviceInterface&	vki,
+																				 vk::VkDevice				device,
+																				 vk::VkBuffer				buffer,
+																				 deUint32					offset);
+
+	static vk::Move<vk::VkDescriptorPool>			createDescriptorPool		(const vk::DeviceInterface&	vki,
+																				 vk::VkDevice				device,
+																				 vk::VkDescriptorType		descriptorType,
+																				 ShaderInputInterface		shaderInterface);
+
+	static vk::Move<vk::VkDescriptorSetLayout>		createDescriptorSetLayout	(const vk::DeviceInterface&	vki,
+																				 vk::VkDevice				device,
+																				 vk::VkDescriptorType		descriptorType,
+																				 ShaderInputInterface		shaderInterface,
+																				 vk::VkShaderStageFlags		stageFlags);
+
+	static vk::Move<vk::VkDescriptorSet>			createDescriptorSet			(const vk::DeviceInterface&	vki,
+																				 vk::VkDevice				device,
+																				 vk::VkDescriptorSetLayout	descriptorSetLayout,
+																				 vk::VkDescriptorPool		descriptorPool,
+																				 vk::VkDescriptorType		descriptorType,
+																				 ShaderInputInterface		shaderInterface,
+																				 vk::VkBufferView			bufferViewA,
+																				 vk::VkBufferView			bufferViewB);
+
+	static vk::Move<vk::VkPipelineLayout>			createPipelineLayout		(const vk::DeviceInterface&	vki,
+																				 vk::VkDevice				device,
+																				 vk::VkDescriptorSetLayout	descriptorSetLayout);
+
+	void											logTestPlan					(void) const;
+	vk::VkPipelineLayout							getPipelineLayout			(void) const;
+	void											writeDrawCmdBuffer			(vk::VkCmdBuffer cmd) const;
+	tcu::TestStatus									verifyResultImage			(const tcu::ConstPixelBufferAccess& result) const;
+
+	enum
+	{
+		RENDER_SIZE				= 128,
+		BUFFER_DATA_SIZE		= 8 * sizeof(float),
+		BUFFER_SIZE_A			= 2048, //!< a lot more than required
+		BUFFER_SIZE_B			= 2560, //!< a lot more than required
+
+		STATIC_OFFSET_VALUE_A	= 256,
+		DYNAMIC_OFFSET_VALUE_A	= 512,
+		STATIC_OFFSET_VALUE_B	= 1024,
+		DYNAMIC_OFFSET_VALUE_B	= 768,
+	};
+
+	const vk::VkDescriptorType					m_descriptorType;
+	const ShaderInputInterface					m_shaderInterface;
+	const bool									m_setViewOffset;
+	const bool									m_setDynamicOffset;
+	const bool									m_dynamicOffsetNonZero;
+	const vk::VkShaderStageFlags				m_stageFlags;
+
+	const deUint32								m_viewOffsetA;
+	const deUint32								m_viewOffsetB;
+	const deUint32								m_dynamicOffsetA;
+	const deUint32								m_dynamicOffsetB;
+	const deUint32								m_effectiveOffsetA;
+	const deUint32								m_effectiveOffsetB;
+	const deUint32								m_bufferSizeA;
+	const deUint32								m_bufferSizeB;
+
+	// declaration order drives construction order: memory & buffers before views,
+	// views and pool & layout before the descriptor set that references them
+	de::MovePtr<vk::Allocation>					m_bufferMemoryA;
+	de::MovePtr<vk::Allocation>					m_bufferMemoryB;
+	const vk::Unique<vk::VkBuffer>				m_sourceBufferA;
+	const vk::Unique<vk::VkBuffer>				m_sourceBufferB;
+	const vk::Unique<vk::VkBufferView>			m_bufferViewA;
+	const vk::Unique<vk::VkBufferView>			m_bufferViewB;
+	const vk::Unique<vk::VkDescriptorPool>		m_descriptorPool;
+	const vk::Unique<vk::VkDescriptorSetLayout>	m_descriptorSetLayout;
+	const vk::Unique<vk::VkDescriptorSet>		m_descriptorSet;
+	const vk::Unique<vk::VkPipelineLayout>		m_pipelineLayout;
+};
+
+// Creates all rendering resources for the variant. Effective buffer data offset is
+// the dynamic offset for dynamic descriptor types, the view offset otherwise.
+// Buffer/view B is only created when the shader interface consumes two resources.
+BufferRenderInstance::BufferRenderInstance	(Context&				context,
+											 bool					isPrimaryCmdBuf,
+											 vk::VkDescriptorType	descriptorType,
+											 vk::VkShaderStageFlags	stageFlags,
+											 ShaderInputInterface	shaderInterface,
+											 bool					viewOffset,
+											 bool					dynamicOffset,
+											 bool					dynamicOffsetNonZero)
+	: SingleCmdRenderInstance		(context, isPrimaryCmdBuf, tcu::UVec2(RENDER_SIZE, RENDER_SIZE))
+	, m_descriptorType				(descriptorType)
+	, m_shaderInterface				(shaderInterface)
+	, m_setViewOffset				(viewOffset)
+	, m_setDynamicOffset			(dynamicOffset)
+	, m_dynamicOffsetNonZero		(dynamicOffsetNonZero)
+	, m_stageFlags					(stageFlags)
+	, m_viewOffsetA					((m_setViewOffset) ? ((deUint32)STATIC_OFFSET_VALUE_A) : (0u))
+	, m_viewOffsetB					((m_setViewOffset) ? ((deUint32)STATIC_OFFSET_VALUE_B) : (0u))
+	, m_dynamicOffsetA				((dynamicOffsetNonZero) ? ((deUint32)DYNAMIC_OFFSET_VALUE_A) : (0u))
+	, m_dynamicOffsetB				((dynamicOffsetNonZero) ? ((deUint32)DYNAMIC_OFFSET_VALUE_B) : (0u))
+	, m_effectiveOffsetA			((isDynamicDescriptorType(m_descriptorType)) ? (m_dynamicOffsetA) : (m_viewOffsetA))
+	, m_effectiveOffsetB			((isDynamicDescriptorType(m_descriptorType)) ? (m_dynamicOffsetB) : (m_viewOffsetB))
+	, m_bufferSizeA					(BUFFER_SIZE_A)
+	, m_bufferSizeB					(BUFFER_SIZE_B)
+	, m_bufferMemoryA				(DE_NULL)
+	, m_bufferMemoryB				(DE_NULL)
+	, m_sourceBufferA				(createSourceBuffer(m_vki, m_device, m_allocator, m_descriptorType, m_effectiveOffsetA, m_bufferSizeA, &m_bufferMemoryA))
+	, m_sourceBufferB				((getInterfaceNumResources(m_shaderInterface) == 1u)
+										? vk::Move<vk::VkBuffer>()
+										: createSourceBuffer(m_vki, m_device, m_allocator, m_descriptorType, m_effectiveOffsetB, m_bufferSizeB, &m_bufferMemoryB))
+	, m_bufferViewA					(createBufferView(m_vki, m_device, *m_sourceBufferA, m_viewOffsetA))
+	, m_bufferViewB					((getInterfaceNumResources(m_shaderInterface) == 1u)
+										? vk::Move<vk::VkBufferView>()
+										: createBufferView(m_vki, m_device, *m_sourceBufferB, m_viewOffsetB))
+	, m_descriptorPool				(createDescriptorPool(m_vki, m_device, m_descriptorType, m_shaderInterface))
+	, m_descriptorSetLayout			(createDescriptorSetLayout(m_vki, m_device, m_descriptorType, m_shaderInterface, m_stageFlags))
+	, m_descriptorSet				(createDescriptorSet(m_vki, m_device, *m_descriptorSetLayout, *m_descriptorPool, m_descriptorType, m_shaderInterface, *m_bufferViewA, *m_bufferViewB))
+	, m_pipelineLayout				(createPipelineLayout(m_vki, m_device, *m_descriptorSetLayout))
+{
+	// dynamic offsets only make sense for dynamic descriptor types
+	if (m_setDynamicOffset)
+		DE_ASSERT(isDynamicDescriptorType(m_descriptorType));
+	if (m_dynamicOffsetNonZero)
+		DE_ASSERT(m_setDynamicOffset);
+}
+
+// Creates a host-visible source buffer with the green/yellow color data written at
+// the given offset and guard patterns filling the space before and after the data,
+// so out-of-bounds reads by a broken offset are detectable. Flushes the mapped range
+// and hands the allocation back through outMemory.
+vk::Move<vk::VkBuffer> BufferRenderInstance::createSourceBuffer (const vk::DeviceInterface&		vki,
+																 vk::VkDevice					device,
+																 vk::Allocator&					allocator,
+																 vk::VkDescriptorType			descriptorType,
+																 deUint32						offset,
+																 deUint32						bufferSize,
+																 de::MovePtr<vk::Allocation>*	outMemory)
+{
+	static const float s_colors[] =
+	{
+		0.0f, 1.0f, 0.0f, 1.0f,		// green
+		1.0f, 1.0f, 0.0f, 1.0f,		// yellow
+	};
+	DE_STATIC_ASSERT(sizeof(s_colors) == BUFFER_DATA_SIZE);
+	DE_ASSERT(offset + BUFFER_DATA_SIZE <= bufferSize);
+	DE_ASSERT(offset % sizeof(float) == 0);
+	DE_ASSERT(bufferSize % sizeof(float) == 0);
+
+	const bool						isUniformBuffer		= isUniformDescriptorType(descriptorType);
+	const vk::VkBufferUsageFlags	usageFlags			= (isUniformBuffer) ? (vk::VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT) : (vk::VK_BUFFER_USAGE_STORAGE_BUFFER_BIT);
+	const float						preGuardValue		= 0.5f;
+	const float						postGuardValue		= 0.75f;
+	const vk::VkBufferCreateInfo	bufferCreateInfo	=
+	{
+		vk::VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO,
+		DE_NULL,
+		bufferSize,								// size
+		usageFlags,								// usage
+		0u,										// flags
+		vk::VK_SHARING_MODE_EXCLUSIVE,			// sharingMode
+		0u,										// queueFamilyCount
+		DE_NULL,								// pQueueFamilyIndices
+	};
+	vk::Move<vk::VkBuffer>			buffer				(vk::createBuffer(vki, device, &bufferCreateInfo));
+	de::MovePtr<vk::Allocation>		bufferMemory		= allocateAndBindObjectMemory(vki, device, allocator, *buffer, vk::MemoryRequirement::HostVisible);
+	void* const						mapPtr				= bufferMemory->getHostPtr();
+
+	// guard with interesting values
+	for (size_t preGuardOffset = 0; preGuardOffset + sizeof(float) <= (size_t)offset; preGuardOffset += sizeof(float))
+		deMemcpy((deUint8*)mapPtr + preGuardOffset, &preGuardValue, sizeof(float));
+
+	deMemcpy((deUint8*)mapPtr + offset, s_colors, sizeof(s_colors));
+	for (size_t postGuardOffset = (size_t)offset + sizeof(s_colors); postGuardOffset + sizeof(float) <= (size_t)bufferSize; postGuardOffset += sizeof(float))
+		deMemcpy((deUint8*)mapPtr + postGuardOffset, &postGuardValue, sizeof(float));
+	// NOTE(review): this memset overwrites the postGuardValue floats written by the loop
+	// just above with the 0x5A byte pattern — the two tail fills look redundant; confirm intent.
+	deMemset((deUint8*)mapPtr + offset + sizeof(s_colors), 0x5A, (size_t)bufferSize - (size_t)offset - sizeof(s_colors)); // fill with interesting pattern that produces valid floats
+
+	flushMappedMemoryRange(vki, device, bufferMemory->getMemory(), bufferMemory->getOffset(), bufferSize);
+
+	*outMemory = bufferMemory;
+	return buffer;
+}
+
+// Creates a raw (formatless) buffer view covering exactly the color data range
+// starting at the given offset.
+vk::Move<vk::VkBufferView> BufferRenderInstance::createBufferView (const vk::DeviceInterface&	vki,
+																   vk::VkDevice					device,
+																   vk::VkBuffer					buffer,
+																   deUint32						offset)
+{
+	const vk::VkBufferViewCreateInfo createInfo =
+	{
+		vk::VK_STRUCTURE_TYPE_BUFFER_VIEW_CREATE_INFO,
+		DE_NULL,
+		buffer,									// buffer
+		vk::VK_BUFFER_VIEW_TYPE_RAW,			// viewType
+		vk::VK_FORMAT_UNDEFINED,				// format
+		(vk::VkDeviceSize)offset,				// offset
+		(vk::VkDeviceSize)BUFFER_DATA_SIZE		// range
+	};
+	return vk::createBufferView(vki, device, &createInfo);
+}
+
+// Creates a one-shot descriptor pool sized for exactly one set containing as many
+// descriptors of the given type as the shader interface requires.
+vk::Move<vk::VkDescriptorPool> BufferRenderInstance::createDescriptorPool (const vk::DeviceInterface&	vki,
+																		   vk::VkDevice					device,
+																		   vk::VkDescriptorType			descriptorType,
+																		   ShaderInputInterface			shaderInterface)
+{
+	return vk::DescriptorPoolBuilder()
+		.addType(descriptorType, getInterfaceNumResources(shaderInterface))
+		.build(vki, device, vk::VK_DESCRIPTOR_POOL_USAGE_ONE_SHOT, 1);
+}
+
+// Builds the descriptor set layout matching the shader interface: one binding, two
+// separate bindings, or one binding with an array of two descriptors.
+vk::Move<vk::VkDescriptorSetLayout> BufferRenderInstance::createDescriptorSetLayout (const vk::DeviceInterface&	vki,
+																					 vk::VkDevice				device,
+																					 vk::VkDescriptorType		descriptorType,
+																					 ShaderInputInterface		shaderInterface,
+																					 vk::VkShaderStageFlags		stageFlags)
+{
+	vk::DescriptorSetLayoutBuilder builder;
+
+	switch (shaderInterface)
+	{
+		case SHADER_INPUT_SINGLE_DESCRIPTOR:
+			builder.addSingleBinding(descriptorType, stageFlags);
+			break;
+
+		case SHADER_INPUT_MULTIPLE_DESCRIPTORS:
+			builder.addSingleBinding(descriptorType, stageFlags);
+			builder.addSingleBinding(descriptorType, stageFlags);
+			break;
+
+		case SHADER_INPUT_DESCRIPTOR_ARRAY:
+			builder.addArrayBinding(descriptorType, 2u, stageFlags);
+			break;
+
+		default:
+			DE_FATAL("Impossible");
+	}
+
+	return builder.build(vki, device);
+}
+
+// Allocates a descriptor set from the pool and writes the buffer view(s) into it,
+// using the update pattern that matches the shader interface. viewB may be a null
+// handle for the single-descriptor case; its info entry is then never written.
+vk::Move<vk::VkDescriptorSet> BufferRenderInstance::createDescriptorSet (const vk::DeviceInterface&	vki,
+																		 vk::VkDevice				device,
+																		 vk::VkDescriptorSetLayout	descriptorSetLayout,
+																		 vk::VkDescriptorPool		descriptorPool,
+																		 vk::VkDescriptorType		descriptorType,
+																		 ShaderInputInterface		shaderInterface,
+																		 vk::VkBufferView			viewA,
+																		 vk::VkBufferView			viewB)
+{
+	const vk::VkDescriptorInfo		bufferInfos[2]	=
+	{
+		createDescriptorInfo(viewA),
+		createDescriptorInfo(viewB),
+	};
+
+	vk::Move<vk::VkDescriptorSet>	descriptorSet	= allocDescriptorSet(vki, device, descriptorPool, vk::VK_DESCRIPTOR_SET_USAGE_ONE_SHOT, descriptorSetLayout);
+	vk::DescriptorSetUpdateBuilder	builder;
+
+	switch (shaderInterface)
+	{
+		case SHADER_INPUT_SINGLE_DESCRIPTOR:
+			builder.writeSingle(*descriptorSet, vk::DescriptorSetUpdateBuilder::Location::binding(0u), descriptorType, &bufferInfos[0]);
+			break;
+
+		case SHADER_INPUT_MULTIPLE_DESCRIPTORS:
+			builder.writeSingle(*descriptorSet, vk::DescriptorSetUpdateBuilder::Location::binding(0u), descriptorType, &bufferInfos[0]);
+			builder.writeSingle(*descriptorSet, vk::DescriptorSetUpdateBuilder::Location::binding(1u), descriptorType, &bufferInfos[1]);
+			break;
+
+		case SHADER_INPUT_DESCRIPTOR_ARRAY:
+			builder.writeArray(*descriptorSet, vk::DescriptorSetUpdateBuilder::Location::binding(0u), descriptorType, 2u, bufferInfos);
+			break;
+
+		default:
+			DE_FATAL("Impossible");
+	}
+
+	builder.update(vki, device);
+	return descriptorSet;
+}
+
+// Creates a pipeline layout with a single descriptor set layout and no push constants.
+vk::Move<vk::VkPipelineLayout> BufferRenderInstance::createPipelineLayout (const vk::DeviceInterface&	vki,
+																		   vk::VkDevice					device,
+																		   vk::VkDescriptorSetLayout	descriptorSetLayout)
+{
+	const vk::VkPipelineLayoutCreateInfo createInfo =
+	{
+		vk::VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO,
+		DE_NULL,
+		1,						// descriptorSetCount
+		&descriptorSetLayout,	// pSetLayouts
+		0u,						// pushConstantRangeCount
+		DE_NULL,				// pPushConstantRanges
+	};
+
+	return vk::createPipelineLayout(vki, device, &createInfo);
+}
+
+// Writes a human-readable description of the test configuration (descriptor layout,
+// view/dynamic offsets, active shader stages) to the test log.
+void BufferRenderInstance::logTestPlan (void) const
+{
+	std::ostringstream msg;
+
+	// \note the DE_NULL fallback is unreachable for valid ShaderInputInterface values;
+	//       streaming it would be undefined, so the enum must be one of the three cases.
+	msg << "Rendering 2x2 yellow-green grid.\n"
+		<< "Single descriptor set. Descriptor set contains "
+			<< ((m_shaderInterface == SHADER_INPUT_SINGLE_DESCRIPTOR) ? "single" :
+			    (m_shaderInterface == SHADER_INPUT_MULTIPLE_DESCRIPTORS) ? "two" :
+			    (m_shaderInterface == SHADER_INPUT_DESCRIPTOR_ARRAY) ? "an array (size 2) of" :
+			    (const char*)DE_NULL)
+		<< " descriptor(s) of type " << vk::getDescriptorTypeName(m_descriptorType) << "\n"
+		<< "Buffer view(s) have " << ((m_setViewOffset) ? ("non-") : ("")) << "zero offset.\n";
+
+	if (isDynamicDescriptorType(m_descriptorType))
+	{
+		if (m_setDynamicOffset)
+		{
+			msg << "Source buffer(s) are given a dynamic offset at bind time.\n"
+				<< "The supplied dynamic offset is " << ((m_dynamicOffsetNonZero) ? ("non-") : ("")) << "zero.\n";
+		}
+		else
+		{
+			msg << "Dynamic offset is not supplied at bind time. Expecting bind to offset 0.\n";
+		}
+	}
+
+	if (m_stageFlags == 0u)
+	{
+		msg << "Descriptors are not accessed in any shader stage.\n";
+	}
+	else
+	{
+		// List each stage bit present in m_stageFlags
+		msg << "Descriptors are accessed in {"
+			<< (((m_stageFlags & vk::VK_SHADER_STAGE_VERTEX_BIT) != 0)			? (" vertex")			: (""))
+			<< (((m_stageFlags & vk::VK_SHADER_STAGE_TESS_CONTROL_BIT) != 0)	? (" tess_control")		: (""))
+			<< (((m_stageFlags & vk::VK_SHADER_STAGE_TESS_EVALUATION_BIT) != 0)	? (" tess_evaluation")	: (""))
+			<< (((m_stageFlags & vk::VK_SHADER_STAGE_GEOMETRY_BIT) != 0)		? (" geometry")			: (""))
+			<< (((m_stageFlags & vk::VK_SHADER_STAGE_FRAGMENT_BIT) != 0)		? (" fragment")			: (""))
+			<< " } stages.\n";
+	}
+
+	m_context.getTestContext().getLog()
+		<< tcu::TestLog::Message
+		<< msg.str()
+		<< tcu::TestLog::EndMessage;
+}
+
+// Returns the non-owning handle of the pipeline layout owned by this instance.
+vk::VkPipelineLayout BufferRenderInstance::getPipelineLayout (void) const
+{
+	return *m_pipelineLayout;
+}
+
+// Records the draw-time commands: binds the descriptor set (with dynamic offsets if
+// configured), inserts host-write -> shader-read barriers for the source buffer(s),
+// and issues the draw call.
+void BufferRenderInstance::writeDrawCmdBuffer (vk::VkCmdBuffer cmd) const
+{
+	const bool isUniformBuffer = isUniformDescriptorType(m_descriptorType);
+
+	// \note dynamic offset replaces the view offset, i.e. it is not offset relative to the view offset
+	const deUint32 dynamicOffsets[] =
+	{
+		m_dynamicOffsetA,
+		m_dynamicOffsetB,
+	};
+	// Supply one offset per source resource only when the test exercises dynamic offsets
+	const deUint32			numOffsets			= (!m_setDynamicOffset) ? (0u) : (getInterfaceNumResources(m_shaderInterface));
+	const deUint32* const	dynamicOffsetPtr	= (!m_setDynamicOffset) ? (DE_NULL) : (dynamicOffsets);
+
+	// make host writes device-visible
+	const vk::VkMemoryInputFlags inputBit = (isUniformBuffer) ? (vk::VK_MEMORY_INPUT_UNIFORM_READ_BIT) : (vk::VK_MEMORY_INPUT_SHADER_READ_BIT);
+	const vk::VkBufferMemoryBarrier memoryBarrierA =
+	{
+		vk::VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER,
+		DE_NULL,
+		vk::VK_MEMORY_OUTPUT_HOST_WRITE_BIT,	// outputMask
+		inputBit,								// inputMask
+		vk::VK_QUEUE_FAMILY_IGNORED,			// srcQueueFamilyIndex
+		vk::VK_QUEUE_FAMILY_IGNORED,			// destQueueFamilyIndex
+		*m_sourceBufferA,						// buffer
+		0u,										// offset
+		(vk::VkDeviceSize)m_bufferSizeA,		// size
+	};
+	const vk::VkBufferMemoryBarrier memoryBarrierB =
+	{
+		vk::VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER,
+		DE_NULL,
+		vk::VK_MEMORY_OUTPUT_HOST_WRITE_BIT,	// outputMask
+		inputBit,								// inputMask
+		vk::VK_QUEUE_FAMILY_IGNORED,			// srcQueueFamilyIndex
+		vk::VK_QUEUE_FAMILY_IGNORED,			// destQueueFamilyIndex
+		*m_sourceBufferB,						// buffer
+		0u,										// offset
+		(vk::VkDeviceSize)m_bufferSizeB,		// size
+	};
+	const void* const memoryBarriers[2] =
+	{
+		&memoryBarrierA,
+		&memoryBarrierB,
+	};
+	// Only the first barrier is submitted in the single-resource case
+	const deUint32 numMemoryBarriers = getInterfaceNumResources(m_shaderInterface);
+
+	m_vki.cmdBindDescriptorSets(cmd, vk::VK_PIPELINE_BIND_POINT_GRAPHICS, getPipelineLayout(), 0, 1, &m_descriptorSet.get(), numOffsets, dynamicOffsetPtr);
+	m_vki.cmdPipelineBarrier(cmd, 0u, vk::VK_PIPELINE_STAGE_ALL_GRAPHICS, vk::VK_FALSE, numMemoryBarriers, memoryBarriers);
+	m_vki.cmdDraw(cmd, 0, 6 * 4, 0, 1); // render four quads (two triangles each, 6 vertices per quad)
+}
+
+// Compares the rendered image against the expected 2x2 quadrant pattern
+// (yellow in quadrants 1 and 4, green in quadrants 2 and 3).
+tcu::TestStatus BufferRenderInstance::verifyResultImage (const tcu::ConstPixelBufferAccess& result) const
+{
+	const tcu::Vec4	colorGreen	(0.0f, 1.0f, 0.0f, 1.0f);
+	const tcu::Vec4	colorYellow	(1.0f, 1.0f, 0.0f, 1.0f);
+	tcu::Surface	reference	(m_targetSize.x(), m_targetSize.y());
+
+	drawQuadrantReferenceResult(reference.getAccess(), colorYellow, colorGreen, colorGreen, colorYellow);
+
+	// Bilinear compare with a per-channel threshold of 1; differences are logged
+	const bool imagesMatch = bilinearCompare(m_context.getTestContext().getLog(), "Compare", "Result comparison", reference.getAccess(), result, tcu::RGBA(1, 1, 1, 1), tcu::COMPARE_LOG_RESULT);
+
+	if (imagesMatch)
+		return tcu::TestStatus::pass("Pass");
+
+	return tcu::TestStatus::fail("Image verification failed");
+}
+
+// Host-visible storage buffer (four vec4 values) that compute shaders write their
+// results to. Owns the buffer, its memory, a raw buffer view over the whole buffer,
+// and a pre-built shader-write -> host-read barrier for reading results back.
+class ComputeInstanceResultBuffer
+{
+public:
+	enum
+	{
+		DATA_SIZE = sizeof(tcu::Vec4[4])	// one vec4 per invocation quadrant
+	};
+
+							ComputeInstanceResultBuffer	(const vk::DeviceInterface&		vki,
+														 vk::VkDevice					device,
+														 vk::Allocator&					allocator);
+
+	// Invalidates mapped memory and copies the buffer contents into *results.
+	void					readResultContentsTo		(tcu::Vec4 (*results)[4]) const;
+
+	inline vk::VkBuffer		getBuffer					(void) const { return *m_buffer;			}
+	inline vk::VkBufferView	getBufferView				(void) const { return *m_bufferView;		}
+	// Barrier to submit after the dispatch and before host readback
+	inline const void*		getResultReadBarrier		(void) const { return &m_bufferBarrier;		}
+
+private:
+	static vk::Move<vk::VkBuffer>		createResultBuffer			(const vk::DeviceInterface&		vki,
+																	 vk::VkDevice					device,
+																	 vk::Allocator&					allocator,
+																	 de::MovePtr<vk::Allocation>*	outAllocation);
+
+	static vk::Move<vk::VkBufferView>	createResultBufferView		(const vk::DeviceInterface&		vki,
+																	 vk::VkDevice					device,
+																	 vk::VkBuffer					buffer);
+
+	static vk::VkBufferMemoryBarrier	createResultBufferBarrier	(vk::VkBuffer buffer);
+
+	const vk::DeviceInterface&			m_vki;
+	const vk::VkDevice					m_device;
+
+	// \note declaration order matters: m_bufferMem must outlive m_buffer's backing allocation
+	de::MovePtr<vk::Allocation>			m_bufferMem;
+	const vk::Unique<vk::VkBuffer>		m_buffer;
+	const vk::Unique<vk::VkBufferView>	m_bufferView;
+	const vk::VkBufferMemoryBarrier		m_bufferBarrier;
+};
+
+// Creates the result buffer, binds host-visible memory to it (via createResultBuffer,
+// which also clears the contents to -1), and builds the view and readback barrier.
+ComputeInstanceResultBuffer::ComputeInstanceResultBuffer (const vk::DeviceInterface&	vki,
+														  vk::VkDevice					device,
+														  vk::Allocator&				allocator)
+	: m_vki				(vki)
+	, m_device			(device)
+	, m_bufferMem		(DE_NULL)
+	, m_buffer			(createResultBuffer(m_vki, m_device, allocator, &m_bufferMem))
+	, m_bufferView		(createResultBufferView(m_vki, m_device, *m_buffer))
+	, m_bufferBarrier	(createResultBufferBarrier(*m_buffer))
+{
+}
+
+// Makes device writes visible to the host and copies the buffer contents to *results.
+// Caller is responsible for ensuring the GPU work has completed (fence wait).
+void ComputeInstanceResultBuffer::readResultContentsTo (tcu::Vec4 (*results)[4]) const
+{
+	invalidateMappedMemoryRange(m_vki, m_device, m_bufferMem->getMemory(), m_bufferMem->getOffset(), sizeof(*results));
+	deMemcpy(*results, m_bufferMem->getHostPtr(), sizeof(*results));
+}
+
+// Creates a DATA_SIZE storage buffer backed by host-visible memory, fills it with
+// the sentinel value -1.0f (so an unwritten buffer is detectable), flushes the
+// writes, and returns the allocation through *outAllocation.
+vk::Move<vk::VkBuffer> ComputeInstanceResultBuffer::createResultBuffer (const vk::DeviceInterface&		vki,
+																		vk::VkDevice					device,
+																		vk::Allocator&					allocator,
+																		de::MovePtr<vk::Allocation>*	outAllocation)
+{
+	const vk::VkBufferCreateInfo createInfo =
+	{
+		vk::VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO,
+		DE_NULL,
+		(vk::VkDeviceSize)DATA_SIZE,				// size
+		vk::VK_BUFFER_USAGE_STORAGE_BUFFER_BIT,		// usage
+		0u,											// flags
+		vk::VK_SHARING_MODE_EXCLUSIVE,				// sharingMode
+		0u,											// queueFamilyCount
+		DE_NULL,									// pQueueFamilyIndices
+	};
+	vk::Move<vk::VkBuffer>		buffer		(vk::createBuffer(vki, device, &createInfo));
+	de::MovePtr<vk::Allocation>	allocation	(allocateAndBindObjectMemory(vki, device, allocator, *buffer, vk::MemoryRequirement::HostVisible));
+	const float					clearValue	= -1.0f;
+	void*						mapPtr		= allocation->getHostPtr();
+
+	// Initialize every float in the buffer to the sentinel value
+	for (size_t offset = 0; offset < DATA_SIZE; offset += sizeof(float))
+		deMemcpy(((deUint8*)mapPtr) + offset, &clearValue, sizeof(float));
+
+	// Make the host writes visible to the device
+	flushMappedMemoryRange(vki, device, allocation->getMemory(), allocation->getOffset(), (vk::VkDeviceSize)DATA_SIZE);
+
+	*outAllocation = allocation;
+	return buffer;
+}
+
+// Creates a raw (formatless) buffer view spanning the whole result buffer.
+vk::Move<vk::VkBufferView> ComputeInstanceResultBuffer::createResultBufferView (const vk::DeviceInterface&	vki,
+																				vk::VkDevice				device,
+																				vk::VkBuffer				buffer)
+{
+	const vk::VkBufferViewCreateInfo createInfo =
+	{
+		vk::VK_STRUCTURE_TYPE_BUFFER_VIEW_CREATE_INFO,
+		DE_NULL,
+		buffer,							// buffer
+		vk::VK_BUFFER_VIEW_TYPE_RAW,	// viewType
+		vk::VK_FORMAT_UNDEFINED,		// format
+		(vk::VkDeviceSize)0u,			// offset
+		(vk::VkDeviceSize)DATA_SIZE		// range
+	};
+	return vk::createBufferView(vki, device, &createInfo);
+}
+
+// Builds a barrier that makes compute-shader writes to the result buffer visible
+// to host reads (submitted after the dispatch, before readback).
+vk::VkBufferMemoryBarrier ComputeInstanceResultBuffer::createResultBufferBarrier (vk::VkBuffer buffer)
+{
+	const vk::VkBufferMemoryBarrier bufferBarrier =
+	{
+		vk::VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER,
+		DE_NULL,
+		vk::VK_MEMORY_OUTPUT_SHADER_WRITE_BIT,	// outputMask
+		vk::VK_MEMORY_INPUT_HOST_READ_BIT,		// inputMask
+		vk::VK_QUEUE_FAMILY_IGNORED,			// srcQueueFamilyIndex
+		vk::VK_QUEUE_FAMILY_IGNORED,			// destQueueFamilyIndex
+		buffer,									// buffer
+		(vk::VkDeviceSize)0u,					// offset
+		DATA_SIZE,								// size
+	};
+	return bufferBarrier;
+}
+
+// Owns a compute pipeline built from the "compute" program in the binary collection,
+// together with its pipeline layout (numDescriptorSets set layouts, no push constants).
+class ComputePipeline
+{
+public:
+											ComputePipeline			(const vk::DeviceInterface&			vki,
+																	 vk::VkDevice						device,
+																	 const vk::BinaryCollection&		programCollection,
+																	 deUint32							numDescriptorSets,
+																	 const vk::VkDescriptorSetLayout*	descriptorSetLayouts);
+
+	inline vk::VkPipeline					getPipeline				(void) const { return *m_pipeline;			};
+	inline vk::VkPipelineLayout				getPipelineLayout		(void) const { return *m_pipelineLayout;	};
+
+private:
+	static vk::Move<vk::VkPipelineLayout>	createPipelineLayout	(const vk::DeviceInterface&			vki,
+																	 vk::VkDevice						device,
+																	 deUint32							numDescriptorSets,
+																	 const vk::VkDescriptorSetLayout*	descriptorSetLayouts);
+
+	static vk::Move<vk::VkPipeline>			createPipeline			(const vk::DeviceInterface&			vki,
+																	 vk::VkDevice						device,
+																	 const vk::BinaryCollection&		programCollection,
+																	 vk::VkPipelineLayout				layout);
+
+	// \note layout must be declared before the pipeline that references it
+	const vk::Unique<vk::VkPipelineLayout>	m_pipelineLayout;
+	const vk::Unique<vk::VkPipeline>		m_pipeline;
+};
+
+// Builds the pipeline layout first, then the compute pipeline that uses it.
+ComputePipeline::ComputePipeline (const vk::DeviceInterface&		vki,
+								  vk::VkDevice						device,
+								  const vk::BinaryCollection&		programCollection,
+								  deUint32							numDescriptorSets,
+								  const vk::VkDescriptorSetLayout*	descriptorSetLayouts)
+	: m_pipelineLayout	(createPipelineLayout(vki, device, numDescriptorSets, descriptorSetLayouts))
+	, m_pipeline		(createPipeline(vki, device, programCollection, *m_pipelineLayout))
+{
+}
+
+// Creates a pipeline layout referencing the supplied descriptor set layouts and no
+// push constant ranges.
+vk::Move<vk::VkPipelineLayout> ComputePipeline::createPipelineLayout (const vk::DeviceInterface&		vki,
+																	  vk::VkDevice						device,
+																	  deUint32							numDescriptorSets,
+																	  const vk::VkDescriptorSetLayout*	descriptorSetLayouts)
+{
+	const vk::VkPipelineLayoutCreateInfo createInfo =
+	{
+		vk::VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO,
+		DE_NULL,
+		numDescriptorSets,		// descriptorSetCount
+		descriptorSetLayouts,	// pSetLayouts
+		0u,						// pushConstantRangeCount
+		DE_NULL,				// pPushConstantRanges
+	};
+	return vk::createPipelineLayout(vki, device, &createInfo);
+}
+
+// Creates the compute pipeline: loads the "compute" SPIR-V binary into a shader
+// module, wraps it in a shader object (entry point "main"; this API revision still
+// uses separate VkShader objects), and builds the pipeline without a pipeline cache
+// or base pipeline.
+vk::Move<vk::VkPipeline> ComputePipeline::createPipeline (const vk::DeviceInterface&	vki,
+														  vk::VkDevice					device,
+														  const vk::BinaryCollection&	programCollection,
+														  vk::VkPipelineLayout			layout)
+{
+	const vk::Unique<vk::VkShaderModule>	computeModule		(vk::createShaderModule(vki, device, programCollection.get("compute"), (vk::VkShaderModuleCreateFlags)0u));
+	const vk::VkShaderCreateInfo			shaderCreateInfo	=
+	{
+		vk::VK_STRUCTURE_TYPE_SHADER_CREATE_INFO,
+		DE_NULL,
+		*computeModule,		// module
+		"main",				// pName
+		0u					// flags
+	};
+	const vk::Unique<vk::VkShader>			computeShader		(vk::createShader(vki, device, &shaderCreateInfo));
+	const vk::VkPipelineShaderStageCreateInfo cs =
+	{
+		vk::VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO,
+		DE_NULL,
+		vk::VK_SHADER_STAGE_COMPUTE,	// stage
+		*computeShader,					// shader
+		DE_NULL,						// pSpecializationInfo
+	};
+	const vk::VkComputePipelineCreateInfo createInfo =
+	{
+		vk::VK_STRUCTURE_TYPE_COMPUTE_PIPELINE_CREATE_INFO,
+		DE_NULL,
+		cs,						// cs
+		0u,						// flags
+		layout,					// layout
+		(vk::VkPipeline)0,		// basePipelineHandle
+		0u,						// basePipelineIndex
+	};
+	return createComputePipeline(vki, device, (vk::VkPipelineCache)0u, &createInfo);
+}
+
+// Captures everything needed for a single compute dispatch (pipeline, descriptor
+// sets, dynamic offsets, pre/post barriers, work group counts) and submits it
+// synchronously via submitAndWait().
+// \note Stores only pointers/references to caller-owned data; all arguments must
+//       outlive the ComputeCommand object.
+class ComputeCommand
+{
+public:
+											ComputeCommand	(const vk::DeviceInterface&		vki,
+															 vk::VkDevice					device,
+															 vk::VkPipeline					pipeline,
+															 vk::VkPipelineLayout			pipelineLayout,
+															 const tcu::UVec3&				numWorkGroups,
+															 int							numDescriptorSets,
+															 const vk::VkDescriptorSet*		descriptorSets,
+															 int							numDynamicOffsets,
+															 const deUint32*				dynamicOffsets,
+															 int							numPreBarriers,
+															 const void* const*				preBarriers,
+															 int							numPostBarriers,
+															 const void* const*				postBarriers);
+
+	// Records, submits and waits for completion (blocking).
+	void									submitAndWait	(deUint32 queueFamilyIndex, vk::VkQueue queue) const;
+
+private:
+	const vk::DeviceInterface&				m_vki;
+	const vk::VkDevice						m_device;
+	const vk::VkPipeline					m_pipeline;
+	const vk::VkPipelineLayout				m_pipelineLayout;
+	const tcu::UVec3&						m_numWorkGroups;
+	const int								m_numDescriptorSets;
+	const vk::VkDescriptorSet* const		m_descriptorSets;
+	const int								m_numDynamicOffsets;
+	const deUint32* const					m_dynamicOffsets;
+	const int								m_numPreBarriers;
+	const void* const* const				m_preBarriers;
+	const int								m_numPostBarriers;
+	const void* const* const				m_postBarriers;
+};
+
+// Stores all dispatch parameters; no Vulkan work is done until submitAndWait().
+ComputeCommand::ComputeCommand (const vk::DeviceInterface&	vki,
+								vk::VkDevice				device,
+								vk::VkPipeline				pipeline,
+								vk::VkPipelineLayout		pipelineLayout,
+								const tcu::UVec3&			numWorkGroups,
+								int							numDescriptorSets,
+								const vk::VkDescriptorSet*	descriptorSets,
+								int							numDynamicOffsets,
+								const deUint32*				dynamicOffsets,
+								int							numPreBarriers,
+								const void* const*			preBarriers,
+								int							numPostBarriers,
+								const void* const*			postBarriers)
+	: m_vki					(vki)
+	, m_device				(device)
+	, m_pipeline			(pipeline)
+	, m_pipelineLayout		(pipelineLayout)
+	, m_numWorkGroups		(numWorkGroups)
+	, m_numDescriptorSets	(numDescriptorSets)
+	, m_descriptorSets		(descriptorSets)
+	, m_numDynamicOffsets	(numDynamicOffsets)
+	, m_dynamicOffsets		(dynamicOffsets)
+	, m_numPreBarriers		(numPreBarriers)
+	, m_preBarriers			(preBarriers)
+	, m_numPostBarriers		(numPostBarriers)
+	, m_postBarriers		(postBarriers)
+{
+}
+
+// Records a one-shot primary command buffer (bind pipeline + descriptor sets,
+// pre-dispatch barriers, dispatch, post-dispatch barriers), submits it to the
+// given queue and blocks until the completion fence signals.
+void ComputeCommand::submitAndWait (deUint32 queueFamilyIndex, vk::VkQueue queue) const
+{
+	const vk::VkCmdPoolCreateInfo cmdPoolCreateInfo =
+	{
+		vk::VK_STRUCTURE_TYPE_CMD_POOL_CREATE_INFO,
+		DE_NULL,
+		queueFamilyIndex,						// queueFamilyIndex
+		vk::VK_CMD_POOL_CREATE_TRANSIENT_BIT,	// flags
+	};
+	const vk::Unique<vk::VkCmdPool>	cmdPool	(vk::createCommandPool(m_vki, m_device, &cmdPoolCreateInfo));
+
+	const vk::VkFenceCreateInfo fenceCreateInfo =
+	{
+		vk::VK_STRUCTURE_TYPE_FENCE_CREATE_INFO,
+		DE_NULL,
+		0u,			// flags
+	};
+
+	const vk::VkCmdBufferCreateInfo cmdBufCreateInfo =
+	{
+		vk::VK_STRUCTURE_TYPE_CMD_BUFFER_CREATE_INFO,
+		DE_NULL,
+		*cmdPool,							// cmdPool
+		vk::VK_CMD_BUFFER_LEVEL_PRIMARY,	// level
+		0u,									// flags
+	};
+	const vk::VkCmdBufferBeginInfo cmdBufBeginInfo =
+	{
+		vk::VK_STRUCTURE_TYPE_CMD_BUFFER_BEGIN_INFO,
+		DE_NULL,
+		vk::VK_CMD_BUFFER_OPTIMIZE_SMALL_BATCH_BIT | vk::VK_CMD_BUFFER_OPTIMIZE_ONE_TIME_SUBMIT_BIT,	// flags
+		(vk::VkRenderPass)0u,								// renderPass
+		(vk::VkFramebuffer)0u,								// framebuffer
+	};
+
+	const vk::Unique<vk::VkFence>		cmdCompleteFence	(vk::createFence(m_vki, m_device, &fenceCreateInfo));
+	const vk::Unique<vk::VkCmdBuffer>	cmd					(vk::createCommandBuffer(m_vki, m_device, &cmdBufCreateInfo));
+	const deUint64						infiniteTimeout		= ~(deUint64)0u;
+
+	VK_CHECK(m_vki.beginCommandBuffer(*cmd, &cmdBufBeginInfo));
+
+	m_vki.cmdBindPipeline(*cmd, vk::VK_PIPELINE_BIND_POINT_COMPUTE, m_pipeline);
+	m_vki.cmdBindDescriptorSets(*cmd, vk::VK_PIPELINE_BIND_POINT_COMPUTE, m_pipelineLayout, 0, m_numDescriptorSets, m_descriptorSets, m_numDynamicOffsets, m_dynamicOffsets);
+
+	// e.g. host-write -> shader-read barriers for the input buffers
+	if (m_numPreBarriers)
+		m_vki.cmdPipelineBarrier(*cmd, 0u, vk::VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, vk::VK_FALSE, m_numPreBarriers, m_preBarriers);
+
+	m_vki.cmdDispatch(*cmd, m_numWorkGroups.x(), m_numWorkGroups.y(), m_numWorkGroups.z());
+	// e.g. shader-write -> host-read barriers for the result buffer
+	m_vki.cmdPipelineBarrier(*cmd, vk::VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, vk::VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, vk::VK_FALSE, m_numPostBarriers, m_postBarriers);
+	VK_CHECK(m_vki.endCommandBuffer(*cmd));
+
+	// run
+	VK_CHECK(m_vki.queueSubmit(queue, 1, &cmd.get(), *cmdCompleteFence));
+	VK_CHECK(m_vki.waitForFences(m_device, 1, &cmdCompleteFence.get(), 0u, infiniteTimeout)); // \note: timeout is failure
+}
+
+// Test instance that accesses one or two source buffer descriptors from a compute
+// shader and writes the sampled values into a result buffer, which is then compared
+// against reference values on the host.
+class BufferComputeInstance : public vkt::TestInstance
+{
+public:
+									BufferComputeInstance		(Context&				context,
+																 vk::VkDescriptorType	descriptorType,
+																 ShaderInputInterface	shaderInterface,
+																 bool					viewOffset,
+																 bool					dynamicOffset,
+																 bool					dynamicOffsetNonZero);
+
+private:
+	vk::Move<vk::VkBuffer>				createColorDataBuffer		(deUint32 offset, deUint32 bufferSize, const tcu::Vec4& value1, const tcu::Vec4& value2, de::MovePtr<vk::Allocation>* outAllocation);
+	vk::Move<vk::VkBufferView>			createBufferView			(vk::VkBuffer buffer, deUint32 offset) const;
+	vk::Move<vk::VkDescriptorSetLayout>	createDescriptorSetLayout	(void) const;
+	vk::Move<vk::VkDescriptorPool>		createDescriptorPool		(void) const;
+	vk::Move<vk::VkDescriptorSet>		createDescriptorSet			(vk::VkDescriptorPool pool, vk::VkDescriptorSetLayout layout, vk::VkBufferView viewA, vk::VkBufferView viewB, vk::VkBufferView viewRes) const;
+
+	tcu::TestStatus						iterate						(void);
+	void								logTestPlan					(void) const;
+	tcu::TestStatus						testResourceAccess			(void);
+
+	// Offsets (in bytes) applied either at view creation or at bind time
+	enum
+	{
+		STATIC_OFFSET_VALUE_A	= 256,
+		DYNAMIC_OFFSET_VALUE_A	= 512,
+		STATIC_OFFSET_VALUE_B	= 1024,
+		DYNAMIC_OFFSET_VALUE_B	= 768,
+	};
+
+	const vk::VkDescriptorType			m_descriptorType;
+	const ShaderInputInterface			m_shaderInterface;
+	const bool							m_setViewOffset;
+	const bool							m_setDynamicOffset;
+	const bool							m_dynamicOffsetNonZero;
+
+	const vk::DeviceInterface&			m_vki;
+	const vk::VkDevice					m_device;
+	const vk::VkQueue					m_queue;
+	const deUint32						m_queueFamilyIndex;
+	vk::Allocator&						m_allocator;
+
+	const ComputeInstanceResultBuffer	m_result;
+};
+
+// Captures context handles and creates the result buffer. A non-zero dynamic
+// offset only makes sense if dynamic offsets are supplied at all.
+BufferComputeInstance::BufferComputeInstance (Context&				context,
+											  vk::VkDescriptorType	descriptorType,
+											  ShaderInputInterface	shaderInterface,
+											  bool					viewOffset,
+											  bool					dynamicOffset,
+											  bool					dynamicOffsetNonZero)
+	: vkt::TestInstance		(context)
+	, m_descriptorType		(descriptorType)
+	, m_shaderInterface		(shaderInterface)
+	, m_setViewOffset		(viewOffset)
+	, m_setDynamicOffset	(dynamicOffset)
+	, m_dynamicOffsetNonZero(dynamicOffsetNonZero)
+	, m_vki					(context.getDeviceInterface())
+	, m_device				(context.getDevice())
+	, m_queue				(context.getUniversalQueue())
+	, m_queueFamilyIndex	(context.getUniversalQueueFamilyIndex())
+	, m_allocator			(context.getDefaultAllocator())
+	, m_result				(m_vki, m_device, m_allocator)
+{
+	if (m_dynamicOffsetNonZero)
+		DE_ASSERT(m_setDynamicOffset);
+}
+
+// Creates a host-visible source buffer of bufferSize bytes containing two vec4
+// color values at the given byte offset. All bytes outside the two values are
+// filled with the pattern 0x5A so out-of-bounds reads are detectable. Returns the
+// backing allocation via *outAllocation.
+vk::Move<vk::VkBuffer> BufferComputeInstance::createColorDataBuffer (deUint32 offset, deUint32 bufferSize, const tcu::Vec4& value1, const tcu::Vec4& value2, de::MovePtr<vk::Allocation>* outAllocation)
+{
+	DE_ASSERT(offset + sizeof(tcu::Vec4[2]) <= bufferSize);
+
+	const bool						isUniformBuffer	= isUniformDescriptorType(m_descriptorType);
+	const vk::VkBufferUsageFlags	usageFlags		= (isUniformBuffer) ? (vk::VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT) : (vk::VK_BUFFER_USAGE_STORAGE_BUFFER_BIT);
+	const vk::VkBufferCreateInfo	createInfo		=
+	{
+		vk::VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO,
+		DE_NULL,
+		(vk::VkDeviceSize)bufferSize,	// size
+		usageFlags,						// usage
+		0u,								// flags
+		vk::VK_SHARING_MODE_EXCLUSIVE,	// sharingMode
+		0u,								// queueFamilyCount
+		DE_NULL,						// pQueueFamilyIndices
+	};
+	vk::Move<vk::VkBuffer>			buffer			(vk::createBuffer(m_vki, m_device, &createInfo));
+	de::MovePtr<vk::Allocation>		allocation		(allocateAndBindObjectMemory(m_vki, m_device, m_allocator, *buffer, vk::MemoryRequirement::HostVisible));
+	void*							mapPtr			= allocation->getHostPtr();
+
+	// Guard bytes before the data, the two color values, then guard bytes after
+	if (offset)
+		deMemset(mapPtr, 0x5A, (size_t)offset);
+	deMemcpy((deUint8*)mapPtr + offset, value1.getPtr(), sizeof(tcu::Vec4));
+	deMemcpy((deUint8*)mapPtr + offset + sizeof(tcu::Vec4), value2.getPtr(), sizeof(tcu::Vec4));
+	deMemset((deUint8*)mapPtr + offset + 2 * sizeof(tcu::Vec4), 0x5A, (size_t)bufferSize - (size_t)offset - 2 * sizeof(tcu::Vec4));
+
+	// Make host writes visible to the device
+	flushMappedMemoryRange(m_vki, m_device, allocation->getMemory(), allocation->getOffset(), bufferSize);
+
+	*outAllocation = allocation;
+	return buffer;
+}
+
+// Creates a raw (formatless) view covering the two vec4 color values at the given
+// byte offset within the source buffer.
+vk::Move<vk::VkBufferView> BufferComputeInstance::createBufferView (vk::VkBuffer buffer, deUint32 offset) const
+{
+	const vk::VkBufferViewCreateInfo createInfo =
+	{
+		vk::VK_STRUCTURE_TYPE_BUFFER_VIEW_CREATE_INFO,
+		DE_NULL,
+		buffer,									// buffer
+		vk::VK_BUFFER_VIEW_TYPE_RAW,			// viewType
+		vk::VK_FORMAT_UNDEFINED,				// format
+		(vk::VkDeviceSize)offset,				// offset
+		(vk::VkDeviceSize)sizeof(tcu::Vec4[2])	// range
+	};
+	return vk::createBufferView(m_vki, m_device, &createInfo);
+}
+
+// Creates the descriptor set layout for the compute access test.
+// Binding 0 is always the result (storage) buffer written by the shader; the
+// following binding(s) hold the source descriptor(s) under test, arranged
+// according to the shader interface variant (one binding, two bindings, or one
+// array binding of size 2).
+vk::Move<vk::VkDescriptorSetLayout> BufferComputeInstance::createDescriptorSetLayout (void) const
+{
+	vk::DescriptorSetLayoutBuilder builder;
+
+	builder.addSingleBinding(vk::VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, vk::VK_SHADER_STAGE_COMPUTE_BIT);
+
+	switch (m_shaderInterface)
+	{
+		case SHADER_INPUT_SINGLE_DESCRIPTOR:
+			builder.addSingleBinding(m_descriptorType, vk::VK_SHADER_STAGE_COMPUTE_BIT);
+			break;
+
+		case SHADER_INPUT_MULTIPLE_DESCRIPTORS:
+			builder.addSingleBinding(m_descriptorType, vk::VK_SHADER_STAGE_COMPUTE_BIT);
+			builder.addSingleBinding(m_descriptorType, vk::VK_SHADER_STAGE_COMPUTE_BIT);
+			break;
+
+		case SHADER_INPUT_DESCRIPTOR_ARRAY:
+			builder.addArrayBinding(m_descriptorType, 2u, vk::VK_SHADER_STAGE_COMPUTE_BIT);
+			break;
+
+		default:
+			DE_FATAL("Impossible");
+	}
+
+	return builder.build(m_vki, m_device);
+}
+
+// Creates a one-shot pool sized for a single set: one storage-buffer descriptor
+// for the result plus one or two source descriptors of the tested type.
+vk::Move<vk::VkDescriptorPool> BufferComputeInstance::createDescriptorPool (void) const
+{
+	vk::DescriptorPoolBuilder poolBuilder;
+
+	poolBuilder.addType(vk::VK_DESCRIPTOR_TYPE_STORAGE_BUFFER);
+	poolBuilder.addType(m_descriptorType, getInterfaceNumResources(m_shaderInterface));
+
+	return poolBuilder.build(m_vki, m_device, vk::VK_DESCRIPTOR_POOL_USAGE_ONE_SHOT, 1);
+}
+
+// Allocates a one-shot descriptor set and writes the result view at binding 0 and
+// the source buffer view(s) at binding 1 (and 2, or as an array of 2), mirroring
+// the layout created by createDescriptorSetLayout. viewB is only consumed by the
+// two-resource variants.
+vk::Move<vk::VkDescriptorSet> BufferComputeInstance::createDescriptorSet (vk::VkDescriptorPool pool, vk::VkDescriptorSetLayout layout, vk::VkBufferView viewA, vk::VkBufferView viewB, vk::VkBufferView viewRes) const
+{
+	const vk::VkDescriptorInfo		resultInfo		= createDescriptorInfo(viewRes);
+	const vk::VkDescriptorInfo		bufferInfos[2]	=
+	{
+		createDescriptorInfo(viewA),
+		createDescriptorInfo(viewB),
+	};
+
+	vk::Move<vk::VkDescriptorSet>	descriptorSet	= allocDescriptorSet(m_vki, m_device, pool, vk::VK_DESCRIPTOR_SET_USAGE_ONE_SHOT, layout);
+	vk::DescriptorSetUpdateBuilder	builder;
+
+	// result
+	builder.writeSingle(*descriptorSet, vk::DescriptorSetUpdateBuilder::Location::binding(0u), vk::VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, &resultInfo);
+
+	// buffers
+	switch (m_shaderInterface)
+	{
+		case SHADER_INPUT_SINGLE_DESCRIPTOR:
+			builder.writeSingle(*descriptorSet, vk::DescriptorSetUpdateBuilder::Location::binding(1u), m_descriptorType, &bufferInfos[0]);
+			break;
+
+		case SHADER_INPUT_MULTIPLE_DESCRIPTORS:
+			builder.writeSingle(*descriptorSet, vk::DescriptorSetUpdateBuilder::Location::binding(1u), m_descriptorType, &bufferInfos[0]);
+			builder.writeSingle(*descriptorSet, vk::DescriptorSetUpdateBuilder::Location::binding(2u), m_descriptorType, &bufferInfos[1]);
+			break;
+
+		case SHADER_INPUT_DESCRIPTOR_ARRAY:
+			builder.writeArray(*descriptorSet, vk::DescriptorSetUpdateBuilder::Location::binding(1u), m_descriptorType, 2u, bufferInfos);
+			break;
+
+		default:
+			DE_FATAL("Impossible");
+	}
+
+	// Flush all queued writes to the device before handing the set out
+	builder.update(m_vki, m_device);
+	return descriptorSet;
+}
+
+// Single-iteration test: log the plan, then run the access test and report its status.
+tcu::TestStatus BufferComputeInstance::iterate (void)
+{
+	logTestPlan();
+
+	const tcu::TestStatus status = testResourceAccess();
+	return status;
+}
+
+// Writes a human-readable description of the test configuration (descriptor layout,
+// view/dynamic offsets) to the test log.
+void BufferComputeInstance::logTestPlan (void) const
+{
+	std::ostringstream msg;
+
+	// \note the DE_NULL fallback is unreachable for valid ShaderInputInterface values;
+	//       streaming it would be undefined, so the enum must be one of the three cases.
+	msg << "Accessing resource in a compute program.\n"
+		<< "Single descriptor set. Descriptor set contains "
+			<< ((m_shaderInterface == SHADER_INPUT_SINGLE_DESCRIPTOR) ? "single" :
+			    (m_shaderInterface == SHADER_INPUT_MULTIPLE_DESCRIPTORS) ? "two" :
+			    (m_shaderInterface == SHADER_INPUT_DESCRIPTOR_ARRAY) ? "an array (size 2) of" :
+			    (const char*)DE_NULL)
+		<< " source descriptor(s) of type " << vk::getDescriptorTypeName(m_descriptorType)
+		<< " and one destination VK_DESCRIPTOR_TYPE_STORAGE_BUFFER to store results to.\n"
+		<< "Source descriptor buffer view(s) have " << ((m_setViewOffset) ? ("non-") : ("")) << "zero offset.\n";
+
+	if (isDynamicDescriptorType(m_descriptorType))
+	{
+		if (m_setDynamicOffset)
+		{
+			msg << "Source buffer(s) are given a dynamic offset at bind time.\n"
+				<< "The supplied dynamic offset is " << ((m_dynamicOffsetNonZero) ? ("non-") : ("")) << "zero.\n";
+		}
+		else
+		{
+			msg << "Dynamic offset is not supplied at bind time. Expecting bind to offset 0.\n";
+		}
+	}
+
+	msg << "Destination buffer is pre-initialized to -1.\n";
+
+	m_context.getTestContext().getLog()
+		<< tcu::TestLog::Message
+		<< msg.str()
+		<< tcu::TestLog::EndMessage;
+}
+
+// Runs the actual compute access test: builds the source buffer(s), views,
+// descriptor set and pipeline, dispatches a 4x1x1 workgroup grid, reads the four
+// result values back and compares them against the expected quadrant colors.
+tcu::TestStatus BufferComputeInstance::testResourceAccess (void)
+{
+	enum
+	{
+		ADDRESSABLE_SIZE = 256, // allocate a lot more than required
+	};
+
+	const bool						isDynamicCase		= isDynamicDescriptorType(m_descriptorType);
+	const bool						isUniformBuffer		= isUniformDescriptorType(m_descriptorType);
+	// Offsets supplied at bind time (zero unless the non-zero variant is tested)
+	const deUint32					bindTimeOffsets[]	=
+	{
+		(m_dynamicOffsetNonZero) ? ((deUint32)DYNAMIC_OFFSET_VALUE_A) : (0u),
+		(m_dynamicOffsetNonZero) ? ((deUint32)DYNAMIC_OFFSET_VALUE_B) : (0u),
+	};
+
+	const tcu::Vec4					colorA1				= tcu::Vec4(0.0f, 1.0f, 0.0f, 1.0f);
+	const tcu::Vec4					colorA2				= tcu::Vec4(1.0f, 1.0f, 0.0f, 1.0f);
+	const tcu::Vec4					colorB1				= tcu::Vec4(1.0f, 0.0f, 0.0f, 1.0f);
+	const tcu::Vec4					colorB2				= tcu::Vec4(0.0f, 0.0f, 1.0f, 1.0f);
+
+	// In the dynamic case the data is placed at the bind-time offset (the view has
+	// offset 0 or the static offset); otherwise the data sits at the view offset.
+	const deUint32					dataOffsetA			= (isDynamicCase) ? (bindTimeOffsets[0]) : (m_setViewOffset) ? ((deUint32)STATIC_OFFSET_VALUE_A) : (0u);
+	const deUint32					dataOffsetB			= (isDynamicCase) ? (bindTimeOffsets[1]) : (m_setViewOffset) ? ((deUint32)STATIC_OFFSET_VALUE_B) : (0u);
+	const deUint32					viewOffsetA			= (m_setViewOffset) ? ((deUint32)STATIC_OFFSET_VALUE_A) : (0u);
+	const deUint32					viewOffsetB			= (m_setViewOffset) ? ((deUint32)STATIC_OFFSET_VALUE_B) : (0u);
+	const deUint32					bufferSizeA			= dataOffsetA + ADDRESSABLE_SIZE;
+	const deUint32					bufferSizeB			= dataOffsetB + ADDRESSABLE_SIZE;
+
+	de::MovePtr<vk::Allocation>		bufferMemA;
+	const vk::Unique<vk::VkBuffer>	bufferA				(createColorDataBuffer(dataOffsetA, bufferSizeA, colorA1, colorA2, &bufferMemA));
+	const vk::Unique<vk::VkBufferView> bufferViewA		(createBufferView(*bufferA, viewOffsetA));
+
+	// Buffer B only exists in the two-resource variants
+	de::MovePtr<vk::Allocation>		bufferMemB;
+	const vk::Unique<vk::VkBuffer>	bufferB				((getInterfaceNumResources(m_shaderInterface) == 1u)
+															? (vk::Move<vk::VkBuffer>())
+															: (createColorDataBuffer(dataOffsetB, bufferSizeB, colorB1, colorB2, &bufferMemB)));
+	const vk::Unique<vk::VkBufferView> bufferViewB		((getInterfaceNumResources(m_shaderInterface) == 1u)
+															? (vk::Move<vk::VkBufferView>())
+															: (createBufferView(*bufferB, viewOffsetB)));
+
+	const vk::Unique<vk::VkDescriptorSetLayout>	descriptorSetLayout	(createDescriptorSetLayout());
+	const vk::Unique<vk::VkDescriptorPool>		descriptorPool		(createDescriptorPool());
+	const vk::Unique<vk::VkDescriptorSet>		descriptorSet		(createDescriptorSet(*descriptorPool, *descriptorSetLayout, *bufferViewA, *bufferViewB, m_result.getBufferView()));
+	const ComputePipeline						pipeline			(m_vki, m_device, m_context.getBinaryCollection(), 1, &descriptorSetLayout.get());
+
+	// Host-write -> shader-read barriers for the source buffer(s)
+	const vk::VkMemoryInputFlags	inputBit			= (isUniformBuffer) ? (vk::VK_MEMORY_INPUT_UNIFORM_READ_BIT) : (vk::VK_MEMORY_INPUT_SHADER_READ_BIT);
+	const vk::VkBufferMemoryBarrier	bufferBarrierA		=
+	{
+		vk::VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER,
+		DE_NULL,
+		vk::VK_MEMORY_OUTPUT_HOST_WRITE_BIT,	// outputMask
+		inputBit,								// inputMask
+		vk::VK_QUEUE_FAMILY_IGNORED,			// srcQueueFamilyIndex
+		vk::VK_QUEUE_FAMILY_IGNORED,			// destQueueFamilyIndex
+		*bufferA,								// buffer
+		(vk::VkDeviceSize)0u,					// offset
+		(vk::VkDeviceSize)bufferSizeA,			// size
+	};
+	const vk::VkBufferMemoryBarrier	bufferBarrierB		=
+	{
+		vk::VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER,
+		DE_NULL,
+		vk::VK_MEMORY_OUTPUT_HOST_WRITE_BIT,	// outputMask
+		inputBit,								// inputMask
+		vk::VK_QUEUE_FAMILY_IGNORED,			// srcQueueFamilyIndex
+		vk::VK_QUEUE_FAMILY_IGNORED,			// destQueueFamilyIndex
+		*bufferB,								// buffer
+		(vk::VkDeviceSize)0u,					// offset
+		(vk::VkDeviceSize)bufferSizeB,			// size
+	};
+
+	const deUint32					numSrcBuffers		= getInterfaceNumResources(m_shaderInterface);
+
+	const vk::VkDescriptorSet		descriptorSets[]	= { *descriptorSet };
+	const int						numDescriptorSets	= DE_LENGTH_OF_ARRAY(descriptorSets);
+	const deUint32* const			dynamicOffsets		= (m_setDynamicOffset) ? (bindTimeOffsets) : (DE_NULL);
+	const deUint32					numDynamicOffsets	= (m_setDynamicOffset) ? (numSrcBuffers) : (0);
+	// barrier B is only submitted when a second source buffer exists (numPreBarriers)
+	const void* const				preBarriers[]		= { &bufferBarrierA, &bufferBarrierB };
+	const int						numPreBarriers		= numSrcBuffers;
+	const void* const				postBarriers[]		= { m_result.getResultReadBarrier() };
+	const int						numPostBarriers		= DE_LENGTH_OF_ARRAY(postBarriers);
+
+	const ComputeCommand			compute				(m_vki,
+														 m_device,
+														 pipeline.getPipeline(),
+														 pipeline.getPipelineLayout(),
+														 tcu::UVec3(4, 1, 1),
+														 numDescriptorSets,	descriptorSets,
+														 numDynamicOffsets,	dynamicOffsets,
+														 numPreBarriers,	preBarriers,
+														 numPostBarriers,	postBarriers);
+
+	// Expected values per quadrant; the single-descriptor variant reads only buffer A
+	const tcu::Vec4					refQuadrantValue14	= (m_shaderInterface == SHADER_INPUT_SINGLE_DESCRIPTOR) ? (colorA2) :
+														  (m_shaderInterface == SHADER_INPUT_MULTIPLE_DESCRIPTORS) ? (colorB2) :
+														  (m_shaderInterface == SHADER_INPUT_DESCRIPTOR_ARRAY) ? (colorB2) :
+														  (tcu::Vec4(-2.0f));
+	const tcu::Vec4					refQuadrantValue23	= (m_shaderInterface == SHADER_INPUT_SINGLE_DESCRIPTOR) ? (colorA1) :
+														  (m_shaderInterface == SHADER_INPUT_MULTIPLE_DESCRIPTORS) ? (colorA1) :
+														  (m_shaderInterface == SHADER_INPUT_DESCRIPTOR_ARRAY) ? (colorA1) :
+														  (tcu::Vec4(-2.0f));
+	const tcu::Vec4					references[4]		=
+	{
+		refQuadrantValue14,
+		refQuadrantValue23,
+		refQuadrantValue23,
+		refQuadrantValue14,
+	};
+	tcu::Vec4						results[4];
+
+	compute.submitAndWait(m_queueFamilyIndex, m_queue);
+	m_result.readResultContentsTo(&results);
+
+	// verify
+	if (results[0] == references[0] &&
+		results[1] == references[1] &&
+		results[2] == references[2] &&
+		results[3] == references[3])
+	{
+		return tcu::TestStatus::pass("Pass");
+	}
+	else if (results[0] == tcu::Vec4(-1.0f) &&
+			 results[1] == tcu::Vec4(-1.0f) &&
+			 results[2] == tcu::Vec4(-1.0f) &&
+			 results[3] == tcu::Vec4(-1.0f))
+	{
+		// Result buffer still holds its -1 sentinel fill: the shader never wrote it
+		m_context.getTestContext().getLog()
+			<< tcu::TestLog::Message
+			<< "Result buffer was not written to."
+			<< tcu::TestLog::EndMessage;
+		return tcu::TestStatus::fail("Result buffer was not written to");
+	}
+	else
+	{
+		m_context.getTestContext().getLog()
+			<< tcu::TestLog::Message
+			<< "Error expected ["
+				<< references[0] << ", "
+				<< references[1] << ", "
+				<< references[2] << ", "
+				<< references[3] << "], got ["
+				<< results[0] << ", "
+				<< results[1] << ", "
+				<< results[2] << ", "
+				<< results[3] << "]"
+			<< tcu::TestLog::EndMessage;
+		return tcu::TestStatus::fail("Invalid result values");
+	}
+}
+
+// Base class for test cases that render a quadrant pattern while accessing
+// descriptors from a configurable set of shader stages. Subclasses supply the
+// resource declarations and access code per stage; this class assembles the
+// shader sources around them.
+// \note class name contains a typo ("Rendeder"); kept as-is since it is referenced elsewhere.
+class QuadrantRendederCase : public vkt::TestCase
+{
+public:
+							QuadrantRendederCase		(tcu::TestContext&		testCtx,
+														 const char*			name,
+														 const char*			description,
+														 glu::GLSLVersion		glslVersion,
+														 vk::VkShaderStageFlags	exitingStages,
+														 vk::VkShaderStageFlags	activeStages);
+private:
+	// Per-stage GLSL fragments supplied by the concrete test case
+	virtual std::string		genExtensionDeclarations	(vk::VkShaderStage stage) const = 0;
+	virtual std::string		genResourceDeclarations		(vk::VkShaderStage stage, int numUsedBindings) const = 0;
+	virtual std::string		genResourceAccessSource		(vk::VkShaderStage stage) const = 0;
+	virtual std::string		genNoAccessSource			(void) const = 0;
+
+	std::string				genVertexSource				(void) const;
+	std::string				genTessCtrlSource			(void) const;
+	std::string				genTessEvalSource			(void) const;
+	std::string				genGeometrySource			(void) const;
+	std::string				genFragmentSource			(void) const;
+	std::string				genComputeSource			(void) const;
+
+	void					initPrograms				(vk::SourceCollections& programCollection) const;
+
+protected:
+	const glu::GLSLVersion			m_glslVersion;
+	// Stages present in the pipeline vs. stages that actually access the descriptors
+	const vk::VkShaderStageFlags	m_exitingStages;
+	const vk::VkShaderStageFlags	m_activeStages;
+};
+
+// Constructor. Active stages must be a subset of the stages present in the
+// pipeline (asserted below).
+QuadrantRendederCase::QuadrantRendederCase (tcu::TestContext& testCtx,
+ const char* name,
+ const char* description,
+ glu::GLSLVersion glslVersion,
+ vk::VkShaderStageFlags exitingStages,
+ vk::VkShaderStageFlags activeStages)
+ : vkt::TestCase (testCtx, name, description)
+ , m_glslVersion (glslVersion)
+ , m_exitingStages (exitingStages)
+ , m_activeStages (activeStages)
+{
+ // every active stage must also be present in the pipeline
+ DE_ASSERT((m_exitingStages & m_activeStages) == m_activeStages);
+}
+
+// Generates the vertex shader. Output varyings are prefixed with the name of
+// the next existing stage (tsc/geo/frag). If the vertex stage is active it
+// performs the resource access, otherwise it only forwards position and
+// quadrant id.
+// NOTE(review): if none of tessellation/geometry/fragment stages exist,
+// nextStageName is DE_NULL and streaming it below would be undefined
+// behavior — presumably callers always include the fragment stage; confirm.
+std::string QuadrantRendederCase::genVertexSource (void) const
+{
+ const char* const nextStageName = ((m_exitingStages & vk::VK_SHADER_STAGE_TESS_CONTROL_BIT) != 0u) ? ("tsc")
+ : ((m_exitingStages & vk::VK_SHADER_STAGE_GEOMETRY_BIT) != 0u) ? ("geo")
+ : ((m_exitingStages & vk::VK_SHADER_STAGE_FRAGMENT_BIT) != 0u) ? ("frag")
+ : (DE_NULL);
+ const char* const versionDecl = glu::getGLSLVersionDeclaration(m_glslVersion);
+ std::ostringstream buf;
+
+ if ((m_activeStages & vk::VK_SHADER_STAGE_VERTEX_BIT) != 0u)
+ {
+ // active vertex shader
+ buf << versionDecl << "\n"
+ << genExtensionDeclarations(vk::VK_SHADER_STAGE_VERTEX)
+ << genResourceDeclarations(vk::VK_SHADER_STAGE_VERTEX, 0)
+ << "layout(location = 0) out highp vec4 " << nextStageName << "_color;\n"
+ << "layout(location = 1) flat out highp int " << nextStageName << "_quadrant_id;\n"
+ << "void main (void)\n"
+ << "{\n"
+ << " highp vec4 result_position;\n"
+ << " highp int quadrant_id;\n"
+ << s_quadrantGenVertexPosSource
+ << " gl_Position = result_position;\n"
+ << " " << nextStageName << "_quadrant_id = quadrant_id;\n"
+ << "\n"
+ << " highp vec4 result_color;\n"
+ << genResourceAccessSource(vk::VK_SHADER_STAGE_VERTEX)
+ << " " << nextStageName << "_color = result_color;\n"
+ << "}\n";
+ }
+ else
+ {
+ // do nothing (passthrough): position + quadrant id only, no color output
+ buf << versionDecl << "\n"
+ << genExtensionDeclarations(vk::VK_SHADER_STAGE_VERTEX)
+ << "layout(location = 1) flat out highp int " << nextStageName << "_quadrant_id;\n"
+ << "void main (void)\n"
+ << "{\n"
+ << " highp vec4 result_position;\n"
+ << " highp int quadrant_id;\n"
+ << s_quadrantGenVertexPosSource
+ << " gl_Position = result_position;\n"
+ << " " << nextStageName << "_quadrant_id = quadrant_id;\n"
+ << "}\n";
+ }
+
+ return buf.str();
+}
+
+// Generates the tessellation control shader. Either the tc stage itself is
+// active (accesses resources, writes tes_color), or the te stage is active
+// and the tc shader just passes through position and quadrant id.
+std::string QuadrantRendederCase::genTessCtrlSource (void) const
+{
+ const char* const versionDecl = glu::getGLSLVersionDeclaration(m_glslVersion);
+ // ES GLSL <= 310 needs the tessellation extension explicitly enabled
+ const bool extRequired = glu::glslVersionIsES(m_glslVersion) && m_glslVersion <= glu::GLSL_VERSION_310_ES;
+ const char* const tessExtDecl = extRequired ? "#extension GL_EXT_tessellation_shader : require\n" : "";
+ std::ostringstream buf;
+
+ if ((m_activeStages & vk::VK_SHADER_STAGE_TESS_CONTROL_BIT) != 0u)
+ {
+ // contributing not implemented
+ DE_ASSERT(m_activeStages == vk::VK_SHADER_STAGE_TESS_CONTROL_BIT);
+
+ // active tc shader
+ buf << versionDecl << "\n"
+ << tessExtDecl
+ << genExtensionDeclarations(vk::VK_SHADER_STAGE_TESS_CONTROL)
+ << "layout(vertices=3) out;\n"
+ << genResourceDeclarations(vk::VK_SHADER_STAGE_TESS_CONTROL, 0)
+ << "layout(location = 1) flat in highp int tsc_quadrant_id[];\n"
+ << "layout(location = 0) out highp vec4 tes_color[];\n"
+ << "void main (void)\n"
+ << "{\n"
+ << " highp vec4 result_color;\n"
+ << " highp int quadrant_id = tsc_quadrant_id[gl_InvocationID];\n"
+ << genResourceAccessSource(vk::VK_SHADER_STAGE_TESS_CONTROL)
+ << "\n"
+ << " tes_color[gl_InvocationID] = result_color;\n"
+ << "\n"
+ << " // no dynamic input block indexing\n"
+ << " highp vec4 position;\n"
+ << " if (gl_InvocationID == 0)\n"
+ << " position = gl_in[0].gl_Position;\n"
+ << " else if (gl_InvocationID == 1)\n"
+ << " position = gl_in[1].gl_Position;\n"
+ << " else\n"
+ << " position = gl_in[2].gl_Position;\n"
+ << " gl_out[gl_InvocationID].gl_Position = position;\n"
+ << " gl_TessLevelInner[0] = 2.8;\n"
+ << " gl_TessLevelInner[1] = 2.8;\n"
+ << " gl_TessLevelOuter[0] = 2.8;\n"
+ << " gl_TessLevelOuter[1] = 2.8;\n"
+ << " gl_TessLevelOuter[2] = 2.8;\n"
+ << " gl_TessLevelOuter[3] = 2.8;\n"
+ << "}\n";
+ }
+ else if ((m_activeStages & vk::VK_SHADER_STAGE_TESS_EVALUATION_BIT) != 0u)
+ {
+ // active te shader, tc passthru
+ buf << versionDecl << "\n"
+ << tessExtDecl
+ << "layout(vertices=3) out;\n"
+ << "layout(location = 1) flat in highp int tsc_quadrant_id[];\n"
+ << "layout(location = 1) flat out highp int tes_quadrant_id[];\n"
+ << "void main (void)\n"
+ << "{\n"
+ << " tes_quadrant_id[gl_InvocationID] = tsc_quadrant_id[0];\n"
+ << "\n"
+ << " // no dynamic input block indexing\n"
+ << " highp vec4 position;\n"
+ << " if (gl_InvocationID == 0)\n"
+ << " position = gl_in[0].gl_Position;\n"
+ << " else if (gl_InvocationID == 1)\n"
+ << " position = gl_in[1].gl_Position;\n"
+ << " else\n"
+ << " position = gl_in[2].gl_Position;\n"
+ << " gl_out[gl_InvocationID].gl_Position = position;\n"
+ << " gl_TessLevelInner[0] = 2.8;\n"
+ << " gl_TessLevelInner[1] = 2.8;\n"
+ << " gl_TessLevelOuter[0] = 2.8;\n"
+ << " gl_TessLevelOuter[1] = 2.8;\n"
+ << " gl_TessLevelOuter[2] = 2.8;\n"
+ << " gl_TessLevelOuter[3] = 2.8;\n"
+ << "}\n";
+ }
+ else
+ {
+ // passthrough not implemented
+ DE_FATAL("not implemented");
+ }
+
+ return buf.str();
+}
+
+// Generates the tessellation evaluation shader. Either te is active (accesses
+// resources) or tc is active and te just interpolates position and forwards
+// the color produced in tc.
+std::string QuadrantRendederCase::genTessEvalSource (void) const
+{
+ const char* const versionDecl = glu::getGLSLVersionDeclaration(m_glslVersion);
+ // ES GLSL <= 310 needs the tessellation extension explicitly enabled
+ const bool extRequired = glu::glslVersionIsES(m_glslVersion) && m_glslVersion <= glu::GLSL_VERSION_310_ES;
+ const char* const tessExtDecl = extRequired ? "#extension GL_EXT_tessellation_shader : require\n" : "";
+ std::ostringstream buf;
+
+ if ((m_activeStages & vk::VK_SHADER_STAGE_TESS_EVALUATION_BIT) != 0u)
+ {
+ // contributing not implemented
+ DE_ASSERT(m_activeStages == vk::VK_SHADER_STAGE_TESS_EVALUATION_BIT);
+
+ // active te shader
+ buf << versionDecl << "\n"
+ << tessExtDecl
+ << genExtensionDeclarations(vk::VK_SHADER_STAGE_TESS_EVALUATION)
+ << "layout(triangles) in;\n"
+ << genResourceDeclarations(vk::VK_SHADER_STAGE_TESS_EVALUATION, 0)
+ << "layout(location = 1) flat in highp int tes_quadrant_id[];\n"
+ << "layout(location = 0) out highp vec4 frag_color;\n"
+ << "void main (void)\n"
+ << "{\n"
+ << " highp vec4 result_color;\n"
+ << " highp int quadrant_id = tes_quadrant_id[0];\n"
+ << genResourceAccessSource(vk::VK_SHADER_STAGE_TESS_EVALUATION)
+ << "\n"
+ << " frag_color = result_color;\n"
+ << " gl_Position = gl_TessCoord.x * gl_in[0].gl_Position + gl_TessCoord.y * gl_in[1].gl_Position + gl_TessCoord.z * gl_in[2].gl_Position;\n"
+ << "}\n";
+ }
+ else if ((m_activeStages & vk::VK_SHADER_STAGE_TESS_CONTROL_BIT) != 0u)
+ {
+ // contributing not implemented
+ DE_ASSERT(m_activeStages == vk::VK_SHADER_STAGE_TESS_CONTROL_BIT);
+
+ // active tc shader, te is passthru
+ buf << versionDecl << "\n"
+ << tessExtDecl
+ << "layout(triangles) in;\n"
+ << "layout(location = 0) in highp vec4 tes_color[];\n"
+ << "layout(location = 0) out highp vec4 frag_color;\n"
+ << "void main (void)\n"
+ << "{\n"
+ << " frag_color = tes_color[0];\n"
+ << " gl_Position = gl_TessCoord.x * gl_in[0].gl_Position + gl_TessCoord.y * gl_in[1].gl_Position + gl_TessCoord.z * gl_in[2].gl_Position;\n"
+ << "}\n";
+ }
+ else
+ {
+ // passthrough not implemented
+ DE_FATAL("not implemented");
+ }
+
+ return buf.str();
+}
+
+// Generates the geometry shader. Only the "active geometry stage" case is
+// implemented: the input triangle is expanded into a 4-vertex strip and the
+// resource access is performed once per emitted vertex.
+std::string QuadrantRendederCase::genGeometrySource (void) const
+{
+ const char* const versionDecl = glu::getGLSLVersionDeclaration(m_glslVersion);
+ // ES GLSL <= 310 needs the geometry extension explicitly enabled
+ const bool extRequired = glu::glslVersionIsES(m_glslVersion) && m_glslVersion <= glu::GLSL_VERSION_310_ES;
+ const char* const geomExtDecl = extRequired ? "#extension GL_EXT_geometry_shader : require\n" : "";
+ std::ostringstream buf;
+
+ if ((m_activeStages & vk::VK_SHADER_STAGE_GEOMETRY_BIT) != 0u)
+ {
+ // contributing not implemented
+ DE_ASSERT(m_activeStages == vk::VK_SHADER_STAGE_GEOMETRY_BIT);
+
+ // active geometry shader
+ buf << versionDecl << "\n"
+ << geomExtDecl
+ << genExtensionDeclarations(vk::VK_SHADER_STAGE_GEOMETRY)
+ << "layout(triangles) in;\n"
+ << "layout(triangle_strip, max_vertices=4) out;\n"
+ << genResourceDeclarations(vk::VK_SHADER_STAGE_GEOMETRY, 0)
+ << "layout(location = 1) flat in highp int geo_quadrant_id[];\n"
+ << "layout(location = 0) out highp vec4 frag_color;\n"
+ << "void main (void)\n"
+ << "{\n"
+ << " highp int quadrant_id;\n"
+ << " highp vec4 result_color;\n"
+ << "\n"
+ << " quadrant_id = geo_quadrant_id[0];\n"
+ << genResourceAccessSource(vk::VK_SHADER_STAGE_GEOMETRY)
+ << " frag_color = result_color;\n"
+ << " gl_Position = gl_in[0].gl_Position;\n"
+ << " EmitVertex();\n"
+ << "\n"
+ << " quadrant_id = geo_quadrant_id[1];\n"
+ << genResourceAccessSource(vk::VK_SHADER_STAGE_GEOMETRY)
+ << " frag_color = result_color;\n"
+ << " gl_Position = gl_in[1].gl_Position;\n"
+ << " EmitVertex();\n"
+ << "\n"
+ << " quadrant_id = geo_quadrant_id[2];\n"
+ << genResourceAccessSource(vk::VK_SHADER_STAGE_GEOMETRY)
+ << " frag_color = result_color;\n"
+ << " gl_Position = gl_in[0].gl_Position * 0.5 + gl_in[2].gl_Position * 0.5;\n"
+ << " EmitVertex();\n"
+ << "\n"
+ << " quadrant_id = geo_quadrant_id[0];\n"
+ << genResourceAccessSource(vk::VK_SHADER_STAGE_GEOMETRY)
+ << " frag_color = result_color;\n"
+ << " gl_Position = gl_in[2].gl_Position;\n"
+ << " EmitVertex();\n"
+ << "}\n";
+ }
+ else
+ {
+ // passthrough not implemented
+ DE_FATAL("not implemented");
+ }
+
+ return buf.str();
+}
+
+// Generates the fragment shader. Three variants:
+//  * fragment stage active: performs the resource access (possibly mixing in
+//    color from an earlier active stage),
+//  * no stage active: uses genNoAccessSource() reference colors,
+//  * otherwise: pure passthrough of the interpolated color.
+std::string QuadrantRendederCase::genFragmentSource (void) const
+{
+ const char* const versionDecl = glu::getGLSLVersionDeclaration(m_glslVersion);
+ std::ostringstream buf;
+
+ if ((m_activeStages & vk::VK_SHADER_STAGE_FRAGMENT_BIT) != 0u)
+ {
+ // Fix: extension declarations were requested for the GEOMETRY stage
+ // in the fragment-shader path; request them for FRAGMENT instead.
+ buf << versionDecl << "\n"
+ << genExtensionDeclarations(vk::VK_SHADER_STAGE_FRAGMENT)
+ << genResourceDeclarations(vk::VK_SHADER_STAGE_FRAGMENT, 0);
+
+ if (m_activeStages != vk::VK_SHADER_STAGE_FRAGMENT_BIT)
+ {
+ // there are other stages, this is just a contributor
+ buf << "layout(location = 0) in mediump vec4 frag_color;\n";
+ }
+
+ buf << "layout(location = 1) flat in highp int frag_quadrant_id;\n"
+ << "layout(location = 0) out mediump vec4 o_color;\n"
+ << "void main (void)\n"
+ << "{\n"
+ << " highp int quadrant_id = frag_quadrant_id;\n"
+ << " highp vec4 result_color;\n"
+ << genResourceAccessSource(vk::VK_SHADER_STAGE_FRAGMENT);
+
+ if (m_activeStages != vk::VK_SHADER_STAGE_FRAGMENT_BIT)
+ {
+ // just contributor: half of the quadrants use own access result,
+ // the other half use the color from the earlier active stage
+ buf << " if (frag_quadrant_id < 2)\n"
+ << " o_color = result_color;\n"
+ << " else\n"
+ << " o_color = frag_color;\n";
+ }
+ else
+ buf << " o_color = result_color;\n";
+
+ buf << "}\n";
+ }
+ else if (m_activeStages == 0u)
+ {
+ // special case, no active stages
+ buf << versionDecl << "\n"
+ << "layout(location = 1) flat in highp int frag_quadrant_id;\n"
+ << "layout(location = 0) out mediump vec4 o_color;\n"
+ << "void main (void)\n"
+ << "{\n"
+ << " highp int quadrant_id = frag_quadrant_id;\n"
+ << " highp vec4 result_color;\n"
+ << genNoAccessSource()
+ << " o_color = result_color;\n"
+ << "}\n";
+ }
+ else
+ {
+ // passthrough
+ buf << versionDecl << "\n"
+ << "layout(location = 0) in mediump vec4 frag_color;\n"
+ "layout(location = 0) out mediump vec4 o_color;\n"
+ "void main (void)\n"
+ "{\n"
+ " o_color = frag_color;\n"
+ "}\n";
+ }
+
+ return buf.str();
+}
+
+// Generates the compute shader: one invocation per quadrant (gl_WorkGroupID.x
+// selects the quadrant), result colors written to an SSBO at binding 0.
+// The resource declarations therefore start at binding 1.
+std::string QuadrantRendederCase::genComputeSource (void) const
+{
+ const char* const versionDecl = glu::getGLSLVersionDeclaration(m_glslVersion);
+ std::ostringstream buf;
+
+ buf << versionDecl << "\n"
+ << genExtensionDeclarations(vk::VK_SHADER_STAGE_COMPUTE)
+ << "layout(local_size_x = 1, local_size_y = 1, local_size_z = 1) in;\n"
+ << genResourceDeclarations(vk::VK_SHADER_STAGE_COMPUTE, 1)
+ << "layout(set = 0, binding = 0, std140) writeonly buffer OutBuf\n"
+ << "{\n"
+ << " highp vec4 read_colors[4];\n"
+ << "} b_out;\n"
+ << "void main(void)\n"
+ << "{\n"
+ << " highp int quadrant_id = int(gl_WorkGroupID.x);\n"
+ << " highp vec4 result_color;\n"
+ << genResourceAccessSource(vk::VK_SHADER_STAGE_COMPUTE)
+ << " b_out.read_colors[gl_WorkGroupID.x] = result_color;\n"
+ << "}\n";
+
+ return buf.str();
+}
+
+// Registers shader sources for every stage present in the pipeline
+// (m_exitingStages), using a fixed name per stage.
+void QuadrantRendederCase::initPrograms (vk::SourceCollections& programCollection) const
+{
+ if ((m_exitingStages & vk::VK_SHADER_STAGE_VERTEX_BIT) != 0u)
+ programCollection.glslSources.add("vertex") << glu::VertexSource(genVertexSource());
+
+ if ((m_exitingStages & vk::VK_SHADER_STAGE_TESS_CONTROL_BIT) != 0u)
+ programCollection.glslSources.add("tess_ctrl") << glu::TessellationControlSource(genTessCtrlSource());
+
+ if ((m_exitingStages & vk::VK_SHADER_STAGE_TESS_EVALUATION_BIT) != 0u)
+ programCollection.glslSources.add("tess_eval") << glu::TessellationEvaluationSource(genTessEvalSource());
+
+ if ((m_exitingStages & vk::VK_SHADER_STAGE_GEOMETRY_BIT) != 0u)
+ programCollection.glslSources.add("geometry") << glu::GeometrySource(genGeometrySource());
+
+ if ((m_exitingStages & vk::VK_SHADER_STAGE_FRAGMENT_BIT) != 0u)
+ programCollection.glslSources.add("fragment") << glu::FragmentSource(genFragmentSource());
+
+ if ((m_exitingStages & vk::VK_SHADER_STAGE_COMPUTE_BIT) != 0u)
+ programCollection.glslSources.add("compute") << glu::ComputeSource(genComputeSource());
+}
+
+// Test case for buffer-backed descriptors (uniform/storage, with or without
+// dynamic offsets). Flags extend the shared resource-flag enum.
+class BufferDescriptorCase : public QuadrantRendederCase
+{
+public:
+ enum
+ {
+ FLAG_VIEW_OFFSET = (1u << 1u), // view starts at a non-zero offset
+ FLAG_DYNAMIC_OFFSET_ZERO = (1u << 2u), // dynamic offset supplied, value 0
+ FLAG_DYNAMIC_OFFSET_NONZERO = (1u << 3u), // dynamic offset supplied, value != 0
+ };
+ // enum continues where resource flags ends
+ DE_STATIC_ASSERT((deUint32)FLAG_VIEW_OFFSET == (deUint32)RESOURCE_FLAG_LAST);
+
+ BufferDescriptorCase (tcu::TestContext& testCtx,
+ const char* name,
+ const char* description,
+ bool isPrimaryCmdBuf,
+ vk::VkDescriptorType descriptorType,
+ vk::VkShaderStageFlags exitingStages,
+ vk::VkShaderStageFlags activeStages,
+ ShaderInputInterface shaderInterface,
+ deUint32 flags);
+
+private:
+ // QuadrantRendederCase hooks: GLSL snippets for buffer access.
+ std::string genExtensionDeclarations (vk::VkShaderStage stage) const;
+ std::string genResourceDeclarations (vk::VkShaderStage stage, int numUsedBindings) const;
+ std::string genResourceAccessSource (vk::VkShaderStage stage) const;
+ std::string genNoAccessSource (void) const;
+
+ vkt::TestInstance* createInstance (vkt::Context& context) const;
+
+ const bool m_viewOffset; // decoded from FLAG_VIEW_OFFSET
+ const bool m_dynamicOffsetSet; // any dynamic offset flag set
+ const bool m_dynamicOffsetNonZero; // decoded from FLAG_DYNAMIC_OFFSET_NONZERO
+ const bool m_isPrimaryCmdBuf; // record into primary (vs. secondary) cmd buffer
+ const vk::VkDescriptorType m_descriptorType;
+ const ShaderInputInterface m_shaderInterface;
+};
+
+// Constructor: decodes the flag bits into individual booleans and forwards
+// the rest to QuadrantRendederCase (always GLSL ES 3.10 sources).
+BufferDescriptorCase::BufferDescriptorCase (tcu::TestContext& testCtx,
+ const char* name,
+ const char* description,
+ bool isPrimaryCmdBuf,
+ vk::VkDescriptorType descriptorType,
+ vk::VkShaderStageFlags exitingStages,
+ vk::VkShaderStageFlags activeStages,
+ ShaderInputInterface shaderInterface,
+ deUint32 flags)
+ : QuadrantRendederCase (testCtx, name, description, glu::GLSL_VERSION_310_ES, exitingStages, activeStages)
+ , m_viewOffset ((flags & FLAG_VIEW_OFFSET) != 0u)
+ , m_dynamicOffsetSet ((flags & (FLAG_DYNAMIC_OFFSET_ZERO | FLAG_DYNAMIC_OFFSET_NONZERO)) != 0u)
+ , m_dynamicOffsetNonZero ((flags & FLAG_DYNAMIC_OFFSET_NONZERO) != 0u)
+ , m_isPrimaryCmdBuf (isPrimaryCmdBuf)
+ , m_descriptorType (descriptorType)
+ , m_shaderInterface (shaderInterface)
+{
+}
+
+// Buffer descriptors need no GLSL extensions in any stage.
+std::string BufferDescriptorCase::genExtensionDeclarations (vk::VkShaderStage stage) const
+{
+ DE_UNREF(stage);
+ return std::string();
+}
+
+// Emits the GLSL interface block declaration(s) for the tested buffer(s),
+// starting at the first free binding. Layout depends on the shader input
+// interface (single block, two blocks, or a block array of two).
+std::string BufferDescriptorCase::genResourceDeclarations (vk::VkShaderStage stage, int numUsedBindings) const
+{
+ DE_UNREF(stage);
+
+ // uniform descriptors use "uniform" blocks, storage descriptors "buffer"
+ const bool isUniform = isUniformDescriptorType(m_descriptorType);
+ const char* const storageType = (isUniform) ? ("uniform") : ("buffer");
+ std::ostringstream buf;
+
+ switch (m_shaderInterface)
+ {
+ case SHADER_INPUT_SINGLE_DESCRIPTOR:
+ buf << "layout(set = 0, binding = " << (numUsedBindings) << ", std140) " << storageType << " BufferName\n"
+ << "{\n"
+ << " highp vec4 colorA;\n"
+ << " highp vec4 colorB;\n"
+ << "} b_instance;\n";
+ break;
+
+ case SHADER_INPUT_MULTIPLE_DESCRIPTORS:
+ buf << "layout(set = 0, binding = " << (numUsedBindings) << ", std140) " << storageType << " BufferNameA\n"
+ << "{\n"
+ << " highp vec4 colorA;\n"
+ << " highp vec4 colorB;\n"
+ << "} b_instanceA;\n"
+ << "layout(set = 0, binding = " << (numUsedBindings+1) << ", std140) " << storageType << " BufferNameB\n"
+ << "{\n"
+ << " highp vec4 colorA;\n"
+ << " highp vec4 colorB;\n"
+ << "} b_instanceB;\n";
+ break;
+
+ case SHADER_INPUT_DESCRIPTOR_ARRAY:
+ buf << "layout(set = 0, binding = " << (numUsedBindings) << ", std140) " << storageType << " BufferName\n"
+ << "{\n"
+ << " highp vec4 colorA;\n"
+ << " highp vec4 colorB;\n"
+ << "} b_instances[2];\n";
+ break;
+
+ default:
+ DE_FATAL("Impossible");
+ }
+
+ return buf.str();
+}
+
+// Emits the GLSL statements that read the tested buffer(s) and assign
+// result_color based on quadrant_id; the chosen member/instance varies with
+// the shader input interface so each descriptor is actually exercised.
+std::string BufferDescriptorCase::genResourceAccessSource (vk::VkShaderStage stage) const
+{
+ DE_UNREF(stage);
+
+ std::ostringstream buf;
+
+ switch (m_shaderInterface)
+ {
+ case SHADER_INPUT_SINGLE_DESCRIPTOR:
+ buf << " if (quadrant_id == 1 || quadrant_id == 2)\n"
+ << " result_color = b_instance.colorA;\n"
+ << " else\n"
+ << " result_color = b_instance.colorB;\n";
+ break;
+
+ case SHADER_INPUT_MULTIPLE_DESCRIPTORS:
+ buf << " if (quadrant_id == 1 || quadrant_id == 2)\n"
+ << " result_color = b_instanceA.colorA;\n"
+ << " else\n"
+ << " result_color = b_instanceB.colorB;\n";
+ break;
+
+ case SHADER_INPUT_DESCRIPTOR_ARRAY:
+ buf << " if (quadrant_id == 1 || quadrant_id == 2)\n"
+ << " result_color = b_instances[0].colorA;\n"
+ << " else\n"
+ << " result_color = b_instances[1].colorB;\n";
+ break;
+
+ default:
+ DE_FATAL("Impossible");
+ }
+
+ return buf.str();
+}
+
+// GLSL fallback used when no stage accesses the descriptors: two quadrants
+// get green, the others yellow.
+std::string BufferDescriptorCase::genNoAccessSource (void) const
+{
+ std::ostringstream src;
+ src << " if (quadrant_id == 1 || quadrant_id == 2)\n"
+ << " result_color = vec4(0.0, 1.0, 0.0, 1.0);\n"
+ << " else\n"
+ << " result_color = vec4(1.0, 1.0, 0.0, 1.0);\n";
+ return src.str();
+}
+
+// Creates the runtime instance: a compute instance when the pipeline is
+// compute-only, otherwise a render instance.
+vkt::TestInstance* BufferDescriptorCase::createInstance (vkt::Context& context) const
+{
+ const bool computeOnly = (m_exitingStages == vk::VK_SHADER_STAGE_COMPUTE_BIT);
+
+ if (!computeOnly)
+ return new BufferRenderInstance(context, m_isPrimaryCmdBuf, m_descriptorType, m_activeStages, m_shaderInterface, m_viewOffset, m_dynamicOffsetSet, m_dynamicOffsetNonZero);
+
+ // secondary command buffers are only valid within a render pass
+ DE_ASSERT(m_isPrimaryCmdBuf);
+ return new BufferComputeInstance(context, m_descriptorType, m_shaderInterface, m_viewOffset, m_dynamicOffsetSet, m_dynamicOffsetNonZero);
+}
+
+// Owns one or two test images (with memory and views) for image-descriptor
+// tests: creates, populates, and uploads them in the constructor.
+class ImageInstanceImages
+{
+public:
+ ImageInstanceImages (const vk::DeviceInterface& vki,
+ vk::VkDevice device,
+ deUint32 queueFamilyIndex,
+ vk::VkQueue queue,
+ vk::Allocator& allocator,
+ vk::VkDescriptorType descriptorType,
+ vk::VkImageViewType viewType,
+ int numImages,
+ deUint32 baseMipLevel,
+ deUint32 baseArraySlice);
+
+private:
+ // Creates a device image matching the source pyramid and binds memory to it.
+ static vk::Move<vk::VkImage> createImage (const vk::DeviceInterface& vki,
+ vk::VkDevice device,
+ vk::Allocator& allocator,
+ vk::VkDescriptorType descriptorType,
+ vk::VkImageViewType viewType,
+ const tcu::TextureLevelPyramid& sourceImage,
+ de::MovePtr<vk::Allocation>* outAllocation);
+
+ // Creates a view covering the levels/slices from the given base onwards.
+ static vk::Move<vk::VkImageView> createImageView (const vk::DeviceInterface& vki,
+ vk::VkDevice device,
+ vk::VkImageViewType viewType,
+ const tcu::TextureLevelPyramid& sourceImage,
+ vk::VkImage image,
+ deUint32 baseMipLevel,
+ deUint32 baseArraySlice);
+
+ // Fills the CPU-side pyramid with a recognizable gradient pattern.
+ void populateSourceImage (tcu::TextureLevelPyramid* dst,
+ bool isFirst) const;
+
+ // Copies the pyramid data to the device image via a staging buffer.
+ void uploadImage (const vk::DeviceInterface& vki,
+ vk::VkDevice device,
+ deUint32 queueFamilyIndex,
+ vk::VkQueue queue,
+ vk::Allocator& allocator,
+ vk::VkImage image,
+ const tcu::TextureLevelPyramid& data);
+
+protected:
+ enum
+ {
+ IMAGE_SIZE = 64, // base level width (and height for 2D/3D/cube)
+ NUM_MIP_LEVELS = 2,
+ ARRAY_SIZE = 2, // layer count for array view types
+ };
+
+ const vk::VkImageViewType m_viewType;
+ const deUint32 m_baseMipLevel;
+ const deUint32 m_baseArraySlice;
+
+ const tcu::TextureFormat m_imageFormat;
+ tcu::TextureLevelPyramid m_sourceImageA; // CPU reference data, image A
+ tcu::TextureLevelPyramid m_sourceImageB; // CPU reference data, image B
+
+ de::MovePtr<vk::Allocation> m_imageMemoryA;
+ de::MovePtr<vk::Allocation> m_imageMemoryB;
+ vk::Move<vk::VkImage> m_imageA;
+ vk::Move<vk::VkImage> m_imageB;
+ vk::Move<vk::VkImageView> m_imageViewA;
+ vk::Move<vk::VkImageView> m_imageViewB;
+};
+
+// Constructor: always builds image A; builds image B too when numImages == 2.
+// Image B gets a different pattern so reads from the wrong image are detected.
+ImageInstanceImages::ImageInstanceImages (const vk::DeviceInterface& vki,
+ vk::VkDevice device,
+ deUint32 queueFamilyIndex,
+ vk::VkQueue queue,
+ vk::Allocator& allocator,
+ vk::VkDescriptorType descriptorType,
+ vk::VkImageViewType viewType,
+ int numImages,
+ deUint32 baseMipLevel,
+ deUint32 baseArraySlice)
+ : m_viewType (viewType)
+ , m_baseMipLevel (baseMipLevel)
+ , m_baseArraySlice (baseArraySlice)
+ , m_imageFormat (tcu::TextureFormat::RGBA, tcu::TextureFormat::UNORM_INT8)
+ , m_sourceImageA (m_imageFormat, NUM_MIP_LEVELS)
+ , m_sourceImageB (m_imageFormat, NUM_MIP_LEVELS)
+ , m_imageMemoryA (DE_NULL)
+ , m_imageMemoryB (DE_NULL)
+ , m_imageA (vk::Move<vk::VkImage>())
+ , m_imageB (vk::Move<vk::VkImage>())
+ , m_imageViewA (vk::Move<vk::VkImageView>())
+ , m_imageViewB (vk::Move<vk::VkImageView>())
+{
+ DE_ASSERT(numImages == 1 || numImages == 2);
+
+ populateSourceImage(&m_sourceImageA, true);
+ m_imageA = createImage(vki, device, allocator, descriptorType, viewType, m_sourceImageA, &m_imageMemoryA);
+ m_imageViewA = createImageView(vki, device, viewType, m_sourceImageA, *m_imageA, m_baseMipLevel, m_baseArraySlice);
+ uploadImage(vki, device, queueFamilyIndex, queue, allocator, *m_imageA, m_sourceImageA);
+
+ if (numImages == 2)
+ {
+ populateSourceImage(&m_sourceImageB, false);
+ m_imageB = createImage(vki, device, allocator, descriptorType, viewType, m_sourceImageB, &m_imageMemoryB);
+ m_imageViewB = createImageView(vki, device, viewType, m_sourceImageB, *m_imageB, m_baseMipLevel, m_baseArraySlice);
+ uploadImage(vki, device, queueFamilyIndex, queue, allocator, *m_imageB, m_sourceImageB);
+ }
+}
+
+// Creates a VkImage whose dimensions/array size are derived from the source
+// pyramid and the view type (the pyramid packs array layers into the unused
+// height/depth axes), then allocates and binds device memory for it.
+// \note struct initializers below are positional; field order must match the
+//       (pre-1.0) Vulkan headers this code targets.
+vk::Move<vk::VkImage> ImageInstanceImages::createImage (const vk::DeviceInterface& vki,
+ vk::VkDevice device,
+ vk::Allocator& allocator,
+ vk::VkDescriptorType descriptorType,
+ vk::VkImageViewType viewType,
+ const tcu::TextureLevelPyramid& sourceImage,
+ de::MovePtr<vk::Allocation>* outAllocation)
+{
+ const tcu::ConstPixelBufferAccess baseLevel = sourceImage.getLevel(0);
+ const bool isCube = (viewType == vk::VK_IMAGE_VIEW_TYPE_CUBE || viewType == vk::VK_IMAGE_VIEW_TYPE_CUBE_ARRAY);
+ const bool isStorage = (descriptorType == vk::VK_DESCRIPTOR_TYPE_STORAGE_IMAGE);
+ const deUint32 readUsage = (isStorage) ? (vk::VK_IMAGE_USAGE_STORAGE_BIT) : (vk::VK_IMAGE_USAGE_SAMPLED_BIT);
+ // array layers live in height (1D types) or depth (2D/cube types)
+ const deUint32 arraySize = (viewType == vk::VK_IMAGE_VIEW_TYPE_1D || viewType == vk::VK_IMAGE_VIEW_TYPE_1D_ARRAY) ? (baseLevel.getHeight())
+ : (viewType == vk::VK_IMAGE_VIEW_TYPE_2D || viewType == vk::VK_IMAGE_VIEW_TYPE_2D_ARRAY) ? (baseLevel.getDepth())
+ : (viewType == vk::VK_IMAGE_VIEW_TYPE_3D) ? (1)
+ : (viewType == vk::VK_IMAGE_VIEW_TYPE_CUBE || viewType == vk::VK_IMAGE_VIEW_TYPE_CUBE_ARRAY) ? (baseLevel.getDepth()) // cube: numFaces * numLayers
+ : (0);
+ const vk::VkExtent3D extent =
+ {
+ // x
+ (deInt32)baseLevel.getWidth(),
+
+ // y
+ (viewType == vk::VK_IMAGE_VIEW_TYPE_1D || viewType == vk::VK_IMAGE_VIEW_TYPE_1D_ARRAY) ? (1) : (deInt32)baseLevel.getHeight(),
+
+ // z
+ (viewType == vk::VK_IMAGE_VIEW_TYPE_3D) ? ((deInt32)baseLevel.getDepth()) : (1),
+ };
+ const vk::VkImageCreateInfo createInfo =
+ {
+ vk::VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO,
+ DE_NULL,
+ viewTypeToImageType(viewType), // imageType
+ mapToVkTextureFormat(baseLevel.getFormat()), // format
+ extent, // extent
+ (deUint32)sourceImage.getNumLevels(), // mipLevels
+ arraySize, // arraySize
+ 1, // samples
+ vk::VK_IMAGE_TILING_OPTIMAL, // tiling
+ readUsage | vk::VK_IMAGE_USAGE_TRANSFER_DESTINATION_BIT, // usage
+ isCube ? ((deUint32)vk::VK_IMAGE_CREATE_CUBE_COMPATIBLE_BIT) : (0u), // flags
+ vk::VK_SHARING_MODE_EXCLUSIVE, // sharingMode
+ 0u, // queueFamilyCount
+ DE_NULL, // pQueueFamilyIndices
+ };
+ vk::Move<vk::VkImage> image (vk::createImage(vki, device, &createInfo));
+
+ // bind backing memory before returning; allocation ownership goes to caller
+ *outAllocation = allocateAndBindObjectMemory(vki, device, allocator, *image, vk::MemoryRequirement::Any);
+ return image;
+}
+
+// Creates an image view starting at the given base mip level and array slice.
+// For cube types the base slice is in whole cubes, hence the *6 conversion.
+// The view covers all remaining levels/slices past the base.
+vk::Move<vk::VkImageView> ImageInstanceImages::createImageView (const vk::DeviceInterface& vki,
+ vk::VkDevice device,
+ vk::VkImageViewType viewType,
+ const tcu::TextureLevelPyramid& sourceImage,
+ vk::VkImage image,
+ deUint32 baseMipLevel,
+ deUint32 baseArraySlice)
+{
+ const tcu::ConstPixelBufferAccess baseLevel = sourceImage.getLevel(0);
+ const deUint32 viewTypeBaseSlice = (viewType == vk::VK_IMAGE_VIEW_TYPE_CUBE || viewType == vk::VK_IMAGE_VIEW_TYPE_CUBE_ARRAY) ? (6 * baseArraySlice) : (baseArraySlice);
+ const deUint32 viewArraySize = (viewType == vk::VK_IMAGE_VIEW_TYPE_1D) ? (1)
+ : (viewType == vk::VK_IMAGE_VIEW_TYPE_1D_ARRAY) ? (baseLevel.getHeight() - viewTypeBaseSlice)
+ : (viewType == vk::VK_IMAGE_VIEW_TYPE_2D) ? (1)
+ : (viewType == vk::VK_IMAGE_VIEW_TYPE_2D_ARRAY) ? (baseLevel.getDepth() - viewTypeBaseSlice)
+ : (viewType == vk::VK_IMAGE_VIEW_TYPE_3D) ? (1)
+ : (viewType == vk::VK_IMAGE_VIEW_TYPE_CUBE) ? (6)
+ : (viewType == vk::VK_IMAGE_VIEW_TYPE_CUBE_ARRAY) ? (baseLevel.getDepth() - viewTypeBaseSlice) // cube: numFaces * numLayers
+ : (0);
+
+ DE_ASSERT(viewArraySize > 0);
+
+ const vk::VkImageSubresourceRange resourceRange =
+ {
+ vk::VK_IMAGE_ASPECT_COLOR, // aspect
+ baseMipLevel, // baseMipLevel
+ sourceImage.getNumLevels() - baseMipLevel, // mipLevels
+ viewTypeBaseSlice, // baseArraySlice
+ viewArraySize, // arraySize
+ };
+ const vk::VkImageViewCreateInfo createInfo =
+ {
+ vk::VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO,
+ DE_NULL,
+ image, // image
+ viewType, // viewType
+ mapToVkTextureFormat(baseLevel.getFormat()), // format
+ {
+ vk::VK_CHANNEL_SWIZZLE_R,
+ vk::VK_CHANNEL_SWIZZLE_G,
+ vk::VK_CHANNEL_SWIZZLE_B,
+ vk::VK_CHANNEL_SWIZZLE_A
+ }, // channels
+ resourceRange, // subresourceRange
+ };
+ return vk::createImageView(vki, device, &createInfo);
+}
+
+// Fills the CPU-side pyramid with a deterministic test pattern. Each channel
+// encodes a different failure mode: red = position gradient (large offset
+// errors), green = M pattern (small offset errors), blue = mip level and
+// image identity (wrong lod / wrong image).
+void ImageInstanceImages::populateSourceImage (tcu::TextureLevelPyramid* dst, bool isFirst) const
+{
+ const int numLevels = dst->getNumLevels();
+
+ for (int level = 0; level < numLevels; ++level)
+ {
+ const int width = IMAGE_SIZE >> level;
+ // array layers are packed into height (1D) or depth (2D/cube); those
+ // axes do not shrink with the mip level
+ const int height = (m_viewType == vk::VK_IMAGE_VIEW_TYPE_1D || m_viewType == vk::VK_IMAGE_VIEW_TYPE_1D_ARRAY) ? (ARRAY_SIZE)
+ : (IMAGE_SIZE >> level);
+ const int depth = (m_viewType == vk::VK_IMAGE_VIEW_TYPE_1D || m_viewType == vk::VK_IMAGE_VIEW_TYPE_1D_ARRAY) ? (1)
+ : (m_viewType == vk::VK_IMAGE_VIEW_TYPE_2D || m_viewType == vk::VK_IMAGE_VIEW_TYPE_2D_ARRAY) ? (ARRAY_SIZE)
+ : (m_viewType == vk::VK_IMAGE_VIEW_TYPE_CUBE || m_viewType == vk::VK_IMAGE_VIEW_TYPE_CUBE_ARRAY) ? (6 * ARRAY_SIZE)
+ : (m_viewType == vk::VK_IMAGE_VIEW_TYPE_3D) ? (IMAGE_SIZE >> level)
+ : (1);
+
+ dst->allocLevel(level, width, height, depth);
+
+ {
+ const tcu::PixelBufferAccess levelAccess = dst->getLevel(level);
+
+ for (int z = 0; z < depth; ++z)
+ for (int y = 0; y < height; ++y)
+ for (int x = 0; x < width; ++x)
+ {
+ const int gradPos = x + y + z;
+ const int gradMax = width + height + depth - 3;
+
+ const int red = 255 * gradPos / gradMax; //!< gradient from 0 -> max (detects large offset errors)
+ const int green = ((gradPos % 2 == 0) ? (127) : (0)) + ((gradPos % 4 < 3) ? (128) : (0)); //!< 3-level M pattern (detects small offset errors)
+ const int blue = (128 * level / numLevels) + (isFirst ? 127 : 0); //!< level and image index (detects incorrect lod / image)
+
+ DE_ASSERT(de::inRange(red, 0, 255));
+ DE_ASSERT(de::inRange(green, 0, 255));
+ DE_ASSERT(de::inRange(blue, 0, 255));
+
+ levelAccess.setPixel(tcu::IVec4(red, green, blue, 255), x, y, z);
+ }
+ }
+ }
+}
+
+void ImageInstanceImages::uploadImage (const vk::DeviceInterface& vki,
+ vk::VkDevice device,
+ deUint32 queueFamilyIndex,
+ vk::VkQueue queue,
+ vk::Allocator& allocator,
+ vk::VkImage image,
+ const tcu::TextureLevelPyramid& data)
+{
+ const deUint32 arraySize = (m_viewType == vk::VK_IMAGE_VIEW_TYPE_3D) ? (1) :
+ (m_viewType == vk::VK_IMAGE_VIEW_TYPE_CUBE || m_viewType == vk::VK_IMAGE_VIEW_TYPE_CUBE_ARRAY) ? (6 * (deUint32)ARRAY_SIZE) :
+ ((deUint32)ARRAY_SIZE);
+ const deUint32 dataBufferSize = getTextureLevelPyramidDataSize(data);
+ const vk::VkBufferCreateInfo bufferCreateInfo =
+ {
+ vk::VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO,
+ DE_NULL,
+ dataBufferSize, // size
+ vk::VK_BUFFER_USAGE_TRANSFER_SOURCE_BIT, // usage
+ 0u, // flags
+ vk::VK_SHARING_MODE_EXCLUSIVE, // sharingMode
+ 0u, // queueFamilyCount
+ DE_NULL, // pQueueFamilyIndices
+ };
+ const vk::Unique<vk::VkBuffer> dataBuffer (vk::createBuffer(vki, device, &bufferCreateInfo));
+ const de::MovePtr<vk::Allocation> dataBufferMemory = allocateAndBindObjectMemory(vki, device, allocator, *dataBuffer, vk::MemoryRequirement::HostVisible);
+ const vk::VkFenceCreateInfo fenceCreateInfo =
+ {
+ vk::VK_STRUCTURE_TYPE_FENCE_CREATE_INFO,
+ DE_NULL,
+ 0u, // flags
+ };
+ const vk::VkBufferMemoryBarrier preMemoryBarrier =
+ {
+ vk::VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER,
+ DE_NULL,
+ vk::VK_MEMORY_OUTPUT_HOST_WRITE_BIT, // outputMask
+ vk::VK_MEMORY_INPUT_TRANSFER_BIT, // inputMask
+ vk::VK_QUEUE_FAMILY_IGNORED, // srcQueueFamilyIndex
+ vk::VK_QUEUE_FAMILY_IGNORED, // destQueueFamilyIndex
+ *dataBuffer, // buffer
+ 0u, // offset
+ dataBufferSize, // size
+ };
+ const vk::VkImageSubresourceRange fullSubrange =
+ {
+ vk::VK_IMAGE_ASPECT_COLOR, // aspect
+ 0u, // baseMipLevel
+ (deUint32)data.getNumLevels(), // mipLevels
+ 0u, // baseArraySlice
+ arraySize, // arraySize
+ };
+ const vk::VkImageMemoryBarrier preImageBarrier =
+ {
+ vk::VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,
+ DE_NULL,
+ 0u, // outputMask
+ 0u, // inputMask
+ vk::VK_IMAGE_LAYOUT_UNDEFINED, // oldLayout
+ vk::VK_IMAGE_LAYOUT_TRANSFER_DESTINATION_OPTIMAL, // newLayout
+ vk::VK_QUEUE_FAMILY_IGNORED, // srcQueueFamilyIndex
+ vk::VK_QUEUE_FAMILY_IGNORED, // destQueueFamilyIndex
+ image, // image
+ fullSubrange // subresourceRange
+ };
+ const vk::VkImageMemoryBarrier postImageBarrier =
+ {
+ vk::VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,
+ DE_NULL,
+ vk::VK_MEMORY_OUTPUT_TRANSFER_BIT, // outputMask
+ vk::VK_MEMORY_INPUT_SHADER_READ_BIT, // inputMask
+ vk::VK_IMAGE_LAYOUT_TRANSFER_DESTINATION_OPTIMAL, // oldLayout
+ vk::VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL, // newLayout
+ vk::VK_QUEUE_FAMILY_IGNORED, // srcQueueFamilyIndex
+ vk::VK_QUEUE_FAMILY_IGNORED, // destQueueFamilyIndex
+ image, // image
+ fullSubrange // subresourceRange
+ };
+ const vk::VkCmdPoolCreateInfo cmdPoolCreateInfo =
+ {
+ vk::VK_STRUCTURE_TYPE_CMD_POOL_CREATE_INFO,
+ DE_NULL,
+ queueFamilyIndex, // queueFamilyIndex
+ vk::VK_CMD_POOL_CREATE_TRANSIENT_BIT, // flags
+ };
+ const vk::Unique<vk::VkCmdPool> cmdPool (vk::createCommandPool(vki, device, &cmdPoolCreateInfo));
+ const vk::VkCmdBufferCreateInfo cmdBufCreateInfo =
+ {
+ vk::VK_STRUCTURE_TYPE_CMD_BUFFER_CREATE_INFO,
+ DE_NULL,
+ *cmdPool, // cmdPool
+ vk::VK_CMD_BUFFER_LEVEL_PRIMARY, // level
+ 0u, // flags
+ };
+ const vk::VkCmdBufferBeginInfo cmdBufBeginInfo =
+ {
+ vk::VK_STRUCTURE_TYPE_CMD_BUFFER_BEGIN_INFO,
+ DE_NULL,
+ vk::VK_CMD_BUFFER_OPTIMIZE_SMALL_BATCH_BIT | vk::VK_CMD_BUFFER_OPTIMIZE_ONE_TIME_SUBMIT_BIT, // flags
+ (vk::VkRenderPass)0u, // renderPass
+ (vk::VkFramebuffer)0u, // framebuffer
+ };
+
+ const vk::Unique<vk::VkCmdBuffer> cmd (vk::createCommandBuffer(vki, device, &cmdBufCreateInfo));
+ const void* const preBarriers[2] = { &preMemoryBarrier, &preImageBarrier };
+ const void* const postBarriers[1] = { &postImageBarrier };
+ const vk::Unique<vk::VkFence> cmdCompleteFence (vk::createFence(vki, device, &fenceCreateInfo));
+ const deUint64 infiniteTimeout = ~(deUint64)0u;
+ std::vector<vk::VkBufferImageCopy> copySlices;
+
+ // copy data to buffer
+ writeTextureLevelPyramidData(dataBufferMemory->getHostPtr(), dataBufferSize, data, m_viewType , ©Slices);
+ flushMappedMemoryRange(vki, device, dataBufferMemory->getMemory(), dataBufferMemory->getOffset(), dataBufferSize);
+
+ // record command buffer
+ VK_CHECK(vki.beginCommandBuffer(*cmd, &cmdBufBeginInfo));
+ vki.cmdPipelineBarrier(*cmd, 0u, vk::VK_PIPELINE_STAGE_TRANSFER_BIT, vk::VK_FALSE, DE_LENGTH_OF_ARRAY(preBarriers), preBarriers);
+ vki.cmdCopyBufferToImage(*cmd, *dataBuffer, image, vk::VK_IMAGE_LAYOUT_TRANSFER_DESTINATION_OPTIMAL, (deUint32)copySlices.size(), &copySlices[0]);
+ vki.cmdPipelineBarrier(*cmd, vk::VK_PIPELINE_STAGE_TRANSFER_BIT, vk::VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, vk::VK_FALSE, DE_LENGTH_OF_ARRAY(postBarriers), postBarriers);
+ VK_CHECK(vki.endCommandBuffer(*cmd));
+
+ // submit and wait for command buffer to complete before killing it
+ VK_CHECK(vki.queueSubmit(queue, 1, &cmd.get(), *cmdCompleteFence));
+ VK_CHECK(vki.waitForFences(device, 1, &cmdCompleteFence.get(), 0u, infiniteTimeout)); // \note: timeout is failure
+}
+
+// Source image(s) and views for the image "fetch" (texelFetch-style) cases.
+// Owns the device-side images via the ImageInstanceImages base and provides
+// host-side reference fetch positions/values so results can be verified.
+class ImageFetchInstanceImages : private ImageInstanceImages
+{
+public:
+	ImageFetchInstanceImages	(const vk::DeviceInterface&	vki,
+								 vk::VkDevice				device,
+								 deUint32					queueFamilyIndex,
+								 vk::VkQueue				queue,
+								 vk::Allocator&				allocator,
+								 vk::VkDescriptorType		descriptorType,
+								 ShaderInputInterface		shaderInterface,
+								 vk::VkImageViewType		viewType,
+								 deUint32					baseMipLevel,
+								 deUint32					baseArraySlice);
+
+	// Texel position used by sample index fetchPosNdx; always within the valid range of the view.
+	static tcu::IVec3			getFetchPos					(vk::VkImageViewType viewType, deUint32 baseMipLevel, deUint32 baseArraySlice, int fetchPosNdx);
+	// Host-side reference value for sample index fetchPosNdx.
+	tcu::Vec4					fetchImageValue				(int fetchPosNdx) const;
+
+	inline vk::VkImageView		getImageViewA				(void) const { return *m_imageViewA; }
+	inline vk::VkImageView		getImageViewB				(void) const { return *m_imageViewB; }
+
+private:
+	enum
+	{
+		// some arbitrary sample points for all four quadrants
+		SAMPLE_POINT_0_X = 6,
+		SAMPLE_POINT_0_Y = 13,
+		SAMPLE_POINT_0_Z = 49,
+
+		SAMPLE_POINT_1_X = 51,
+		SAMPLE_POINT_1_Y = 40,
+		SAMPLE_POINT_1_Z = 44,
+
+		SAMPLE_POINT_2_X = 42,
+		SAMPLE_POINT_2_Y = 26,
+		SAMPLE_POINT_2_Z = 19,
+
+		SAMPLE_POINT_3_X = 25,
+		SAMPLE_POINT_3_Y = 25,
+		SAMPLE_POINT_3_Z = 18,
+	};
+
+	// Which descriptor layout variant is under test; selects source image A vs B in fetchImageValue().
+	const ShaderInputInterface	m_shaderInterface;
+};
+
+// Creates as many source images as the shader interface requires resources
+// (see getInterfaceNumResources); the base class performs creation and upload.
+ImageFetchInstanceImages::ImageFetchInstanceImages (const vk::DeviceInterface&	vki,
+													vk::VkDevice				device,
+													deUint32					queueFamilyIndex,
+													vk::VkQueue					queue,
+													vk::Allocator&				allocator,
+													vk::VkDescriptorType		descriptorType,
+													ShaderInputInterface		shaderInterface,
+													vk::VkImageViewType			viewType,
+													deUint32					baseMipLevel,
+													deUint32					baseArraySlice)
+	: ImageInstanceImages	(vki,
+							 device,
+							 queueFamilyIndex,
+							 queue,
+							 allocator,
+							 descriptorType,
+							 viewType,
+							 getInterfaceNumResources(shaderInterface),	// numImages
+							 baseMipLevel,
+							 baseArraySlice)
+	, m_shaderInterface		(shaderInterface)
+{
+}
+
+// Returns true for the arrayed image view types (1D array, 2D array, cube array).
+bool isImageViewTypeArray (vk::VkImageViewType type)
+{
+	return type == vk::VK_IMAGE_VIEW_TYPE_1D_ARRAY || type == vk::VK_IMAGE_VIEW_TYPE_2D_ARRAY || type == vk::VK_IMAGE_VIEW_TYPE_CUBE_ARRAY;
+}
+
+// Maps sample index fetchPosNdx (0..3) to a texel coordinate that is valid for
+// the given view type. The arbitrary sample points are wrapped (modulo) into
+// the image extent at the selected base mip level, and into the remaining
+// array range after the base array slice.
+tcu::IVec3 ImageFetchInstanceImages::getFetchPos (vk::VkImageViewType viewType, deUint32 baseMipLevel, deUint32 baseArraySlice, int fetchPosNdx)
+{
+	const tcu::IVec3	fetchPositions[4]	=
+	{
+		tcu::IVec3(SAMPLE_POINT_0_X, SAMPLE_POINT_0_Y, SAMPLE_POINT_0_Z),
+		tcu::IVec3(SAMPLE_POINT_1_X, SAMPLE_POINT_1_Y, SAMPLE_POINT_1_Z),
+		tcu::IVec3(SAMPLE_POINT_2_X, SAMPLE_POINT_2_Y, SAMPLE_POINT_2_Z),
+		tcu::IVec3(SAMPLE_POINT_3_X, SAMPLE_POINT_3_Y, SAMPLE_POINT_3_Z),
+	};
+	const tcu::IVec3	coord				= de::getSizedArrayElement<4>(fetchPositions, fetchPosNdx);
+	const deUint32		imageSize			= (deUint32)IMAGE_SIZE >> baseMipLevel;
+	const deUint32		arraySize			= isImageViewTypeArray(viewType) ? ARRAY_SIZE - baseArraySlice : 1;
+
+	switch (viewType)
+	{
+		case vk::VK_IMAGE_VIEW_TYPE_1D:
+		case vk::VK_IMAGE_VIEW_TYPE_1D_ARRAY:	return tcu::IVec3(coord.x() % imageSize, coord.y() % arraySize, 0);
+		case vk::VK_IMAGE_VIEW_TYPE_2D:
+		case vk::VK_IMAGE_VIEW_TYPE_2D_ARRAY:	return tcu::IVec3(coord.x() % imageSize, coord.y() % imageSize, coord.z() % arraySize);
+		case vk::VK_IMAGE_VIEW_TYPE_CUBE:
+		case vk::VK_IMAGE_VIEW_TYPE_CUBE_ARRAY:	return tcu::IVec3(coord.x() % imageSize, coord.y() % imageSize, coord.z() % (arraySize * 6)); // z addresses faces, 6 per array slice
+		case vk::VK_IMAGE_VIEW_TYPE_3D:			return tcu::IVec3(coord.x() % imageSize, coord.y() % imageSize, coord.z() % imageSize);
+		default:
+			DE_FATAL("Impossible");
+			return tcu::IVec3();
+	}
+}
+
+// Computes the expected result for sample fetchPosNdx from the host-side
+// source data. Source images are consumed in ABAB order; with a single
+// descriptor both "A" and "B" resolve to image A.
+tcu::Vec4 ImageFetchInstanceImages::fetchImageValue (int fetchPosNdx) const
+{
+	DE_ASSERT(de::inBounds(fetchPosNdx, 0, 4));
+
+	const tcu::TextureLevelPyramid&	fetchSrcA	= m_sourceImageA;
+	const tcu::TextureLevelPyramid&	fetchSrcB	= (m_shaderInterface == SHADER_INPUT_SINGLE_DESCRIPTOR) ? (m_sourceImageA) : (m_sourceImageB);
+	const tcu::TextureLevelPyramid&	fetchSrc	= ((fetchPosNdx % 2) == 0) ? (fetchSrcA) : (fetchSrcB); // sampling order is ABAB
+	const tcu::IVec3				fetchPos	= getFetchPos(m_viewType, m_baseMipLevel, m_baseArraySlice, fetchPosNdx);
+
+	return fetchSrc.getLevel(m_baseMipLevel).getPixel(fetchPos.x(), fetchPos.y(), fetchPos.z());
+}
+
+// Graphics-pipeline variant: renders a 2x2 quadrant grid whose colors are
+// fetched from the descriptor(s) under test, then compares the rendered
+// image against host-computed reference fetches.
+class ImageFetchRenderInstance : public SingleCmdRenderInstance
+{
+public:
+	ImageFetchRenderInstance		(vkt::Context&			context,
+									 bool					isPrimaryCmdBuf,
+									 vk::VkDescriptorType	descriptorType,
+									 vk::VkShaderStageFlags	stageFlags,
+									 ShaderInputInterface	shaderInterface,
+									 vk::VkImageViewType	viewType,
+									 deUint32				baseMipLevel,
+									 deUint32				baseArraySlice);
+
+private:
+	// Static helpers so the const members below can be built in the initializer list.
+	static vk::Move<vk::VkDescriptorSetLayout>	createDescriptorSetLayout	(const vk::DeviceInterface&	vki,
+																			 vk::VkDevice				device,
+																			 vk::VkDescriptorType		descriptorType,
+																			 ShaderInputInterface		shaderInterface,
+																			 vk::VkShaderStageFlags		stageFlags);
+
+	static vk::Move<vk::VkPipelineLayout>		createPipelineLayout		(const vk::DeviceInterface&	vki,
+																			 vk::VkDevice				device,
+																			 vk::VkDescriptorSetLayout	descriptorSetLayout);
+
+	static vk::Move<vk::VkDescriptorPool>		createDescriptorPool		(const vk::DeviceInterface&	vki,
+																			 vk::VkDevice				device,
+																			 vk::VkDescriptorType		descriptorType,
+																			 ShaderInputInterface		shaderInterface);
+
+	static vk::Move<vk::VkDescriptorSet>		createDescriptorSet			(const vk::DeviceInterface&	vki,
+																			 vk::VkDevice				device,
+																			 vk::VkDescriptorType		descriptorType,
+																			 ShaderInputInterface		shaderInterface,
+																			 vk::VkDescriptorSetLayout	layout,
+																			 vk::VkDescriptorPool		pool,
+																			 vk::VkImageView			viewA,
+																			 vk::VkImageView			viewB);
+
+	// SingleCmdRenderInstance hooks.
+	void										logTestPlan					(void) const;
+	vk::VkPipelineLayout						getPipelineLayout			(void) const;
+	void										writeDrawCmdBuffer			(vk::VkCmdBuffer cmd) const;
+	tcu::TestStatus								verifyResultImage			(const tcu::ConstPixelBufferAccess& result) const;
+
+	enum
+	{
+		RENDER_SIZE = 128,	// render target is RENDER_SIZE x RENDER_SIZE
+	};
+
+	const vk::VkDescriptorType					m_descriptorType;
+	const vk::VkShaderStageFlags				m_stageFlags;
+	const ShaderInputInterface					m_shaderInterface;
+	const vk::VkImageViewType					m_viewType;
+	const deUint32								m_baseMipLevel;
+	const deUint32								m_baseArraySlice;
+
+	// \note declaration order is also initialization order; later members depend on earlier ones.
+	const vk::Unique<vk::VkDescriptorSetLayout>	m_descriptorSetLayout;
+	const vk::Unique<vk::VkPipelineLayout>		m_pipelineLayout;
+	const ImageFetchInstanceImages				m_images;
+	const vk::Unique<vk::VkDescriptorPool>		m_descriptorPool;
+	const vk::Unique<vk::VkDescriptorSet>		m_descriptorSet;
+};
+
+// Builds all GPU resources up front: set layout -> pipeline layout -> images
+// -> pool -> descriptor set (the set references the layout, pool and views,
+// so it must be created last).
+ImageFetchRenderInstance::ImageFetchRenderInstance	(vkt::Context&			context,
+													 bool					isPrimaryCmdBuf,
+													 vk::VkDescriptorType	descriptorType,
+													 vk::VkShaderStageFlags	stageFlags,
+													 ShaderInputInterface	shaderInterface,
+													 vk::VkImageViewType	viewType,
+													 deUint32				baseMipLevel,
+													 deUint32				baseArraySlice)
+	: SingleCmdRenderInstance	(context, isPrimaryCmdBuf, tcu::UVec2(RENDER_SIZE, RENDER_SIZE))
+	, m_descriptorType			(descriptorType)
+	, m_stageFlags				(stageFlags)
+	, m_shaderInterface			(shaderInterface)
+	, m_viewType				(viewType)
+	, m_baseMipLevel			(baseMipLevel)
+	, m_baseArraySlice			(baseArraySlice)
+	, m_descriptorSetLayout		(createDescriptorSetLayout(m_vki, m_device, m_descriptorType, m_shaderInterface, m_stageFlags))
+	, m_pipelineLayout			(createPipelineLayout(m_vki, m_device, *m_descriptorSetLayout))
+	, m_images					(m_vki, m_device, m_queueFamilyIndex, m_queue, m_allocator, m_descriptorType, m_shaderInterface, m_viewType, m_baseMipLevel, m_baseArraySlice)
+	, m_descriptorPool			(createDescriptorPool(m_vki, m_device, m_descriptorType, m_shaderInterface))
+	, m_descriptorSet			(createDescriptorSet(m_vki, m_device, m_descriptorType, m_shaderInterface, *m_descriptorSetLayout, *m_descriptorPool, m_images.getImageViewA(), m_images.getImageViewB()))
+{
+}
+
+// Builds the set layout matching the tested interface: one binding, two
+// separate bindings, or a single arrayed binding of size 2 — all visible
+// to the given shader stages.
+vk::Move<vk::VkDescriptorSetLayout> ImageFetchRenderInstance::createDescriptorSetLayout (const vk::DeviceInterface&	vki,
+																						 vk::VkDevice				device,
+																						 vk::VkDescriptorType		descriptorType,
+																						 ShaderInputInterface		shaderInterface,
+																						 vk::VkShaderStageFlags		stageFlags)
+{
+	vk::DescriptorSetLayoutBuilder builder;
+
+	switch (shaderInterface)
+	{
+		case SHADER_INPUT_SINGLE_DESCRIPTOR:
+			builder.addSingleBinding(descriptorType, stageFlags);
+			break;
+
+		case SHADER_INPUT_MULTIPLE_DESCRIPTORS:
+			builder.addSingleBinding(descriptorType, stageFlags);
+			builder.addSingleBinding(descriptorType, stageFlags);
+			break;
+
+		case SHADER_INPUT_DESCRIPTOR_ARRAY:
+			builder.addArrayBinding(descriptorType, 2u, stageFlags);
+			break;
+
+		default:
+			DE_FATAL("Impossible");
+	}
+
+	return builder.build(vki, device);
+}
+
+// Pipeline layout containing just the single descriptor set layout; no push constants.
+vk::Move<vk::VkPipelineLayout> ImageFetchRenderInstance::createPipelineLayout (const vk::DeviceInterface&	vki,
+																			   vk::VkDevice					device,
+																			   vk::VkDescriptorSetLayout	descriptorSetLayout)
+{
+	const vk::VkPipelineLayoutCreateInfo createInfo =
+	{
+		vk::VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO,
+		DE_NULL,
+		1,						// descriptorSetCount
+		&descriptorSetLayout,	// pSetLayouts
+		0u,						// pushConstantRangeCount
+		DE_NULL,				// pPushConstantRanges
+	};
+	return vk::createPipelineLayout(vki, device, &createInfo);
+}
+
+// One-shot pool sized for exactly one set containing getInterfaceNumResources() descriptors.
+vk::Move<vk::VkDescriptorPool> ImageFetchRenderInstance::createDescriptorPool (const vk::DeviceInterface&	vki,
+																			   vk::VkDevice					device,
+																			   vk::VkDescriptorType			descriptorType,
+																			   ShaderInputInterface			shaderInterface)
+{
+	return vk::DescriptorPoolBuilder()
+		.addType(descriptorType, getInterfaceNumResources(shaderInterface))
+		.build(vki, device, vk::VK_DESCRIPTOR_POOL_USAGE_ONE_SHOT, 1);
+}
+
+// Allocates the descriptor set and writes the image view(s), in
+// SHADER_READ_ONLY layout, into the binding arrangement chosen by the
+// tested shader interface.
+vk::Move<vk::VkDescriptorSet> ImageFetchRenderInstance::createDescriptorSet (const vk::DeviceInterface&	vki,
+																			 vk::VkDevice				device,
+																			 vk::VkDescriptorType		descriptorType,
+																			 ShaderInputInterface		shaderInterface,
+																			 vk::VkDescriptorSetLayout	layout,
+																			 vk::VkDescriptorPool		pool,
+																			 vk::VkImageView			viewA,
+																			 vk::VkImageView			viewB)
+{
+	const vk::VkDescriptorInfo		imageInfos[2]	=
+	{
+		createDescriptorInfo(viewA, vk::VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL),
+		createDescriptorInfo(viewB, vk::VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL),
+	};
+
+	vk::Move<vk::VkDescriptorSet>	descriptorSet	= allocDescriptorSet(vki, device, pool, vk::VK_DESCRIPTOR_SET_USAGE_ONE_SHOT, layout);
+	vk::DescriptorSetUpdateBuilder	builder;
+
+	switch (shaderInterface)
+	{
+		case SHADER_INPUT_SINGLE_DESCRIPTOR:
+			builder.writeSingle(*descriptorSet, vk::DescriptorSetUpdateBuilder::Location::binding(0u), descriptorType, &imageInfos[0]);
+			break;
+
+		case SHADER_INPUT_MULTIPLE_DESCRIPTORS:
+			builder.writeSingle(*descriptorSet, vk::DescriptorSetUpdateBuilder::Location::binding(0u), descriptorType, &imageInfos[0]);
+			builder.writeSingle(*descriptorSet, vk::DescriptorSetUpdateBuilder::Location::binding(1u), descriptorType, &imageInfos[1]);
+			break;
+
+		case SHADER_INPUT_DESCRIPTOR_ARRAY:
+			builder.writeArray(*descriptorSet, vk::DescriptorSetUpdateBuilder::Location::binding(0u), descriptorType, 2u, imageInfos);
+			break;
+
+		default:
+			DE_FATAL("Impossible");
+	}
+
+	builder.update(vki, device);
+	return descriptorSet;
+}
+
+// Writes a human-readable description of the test configuration (descriptor
+// layout, view type, sample positions, active shader stages) to the test log.
+void ImageFetchRenderInstance::logTestPlan (void) const
+{
+	std::ostringstream msg;
+
+	msg << "Rendering 2x2 grid.\n"
+		<< "Single descriptor set. Descriptor set contains "
+			<< ((m_shaderInterface == SHADER_INPUT_SINGLE_DESCRIPTOR) ? "single" :
+			    (m_shaderInterface == SHADER_INPUT_MULTIPLE_DESCRIPTORS) ? "two" :
+			    (m_shaderInterface == SHADER_INPUT_DESCRIPTOR_ARRAY) ? "an array (size 2) of" :
+			    (const char*)DE_NULL)
+		<< " descriptor(s) of type " << vk::getDescriptorTypeName(m_descriptorType) << "\n"
+		<< "Image view type is " << vk::getImageViewTypeName(m_viewType) << "\n";
+
+	if (m_baseMipLevel)
+		msg << "Image view base mip level = " << m_baseMipLevel << "\n";
+	if (m_baseArraySlice)
+		msg << "Image view base array slice = " << m_baseArraySlice << "\n";
+
+	if (m_stageFlags == 0u)
+	{
+		msg << "Descriptors are not accessed in any shader stage.\n";
+	}
+	else
+	{
+		msg << "Color in each cell is fetched using the descriptor(s):\n";
+
+		for (int resultNdx = 0; resultNdx < 4; ++resultNdx)
+		{
+			msg << "Test sample " << resultNdx << ": fetching at position " << m_images.getFetchPos(m_viewType, m_baseMipLevel, m_baseArraySlice, resultNdx);
+
+			if (m_shaderInterface != SHADER_INPUT_SINGLE_DESCRIPTOR)
+			{
+				const int srcResourceNdx = (resultNdx % 2); // ABAB source
+				msg << " from descriptor " << srcResourceNdx;
+			}
+
+			msg << "\n";
+		}
+
+		msg << "Descriptors are accessed in {"
+			<< (((m_stageFlags & vk::VK_SHADER_STAGE_VERTEX_BIT) != 0)			? (" vertex")			: (""))
+			<< (((m_stageFlags & vk::VK_SHADER_STAGE_TESS_CONTROL_BIT) != 0)	? (" tess_control")		: (""))
+			<< (((m_stageFlags & vk::VK_SHADER_STAGE_TESS_EVALUATION_BIT) != 0)	? (" tess_evaluation")	: (""))
+			<< (((m_stageFlags & vk::VK_SHADER_STAGE_GEOMETRY_BIT) != 0)		? (" geometry")			: (""))
+			<< (((m_stageFlags & vk::VK_SHADER_STAGE_FRAGMENT_BIT) != 0)		? (" fragment")			: (""))
+			<< " } stages.";
+	}
+
+	m_context.getTestContext().getLog()
+		<< tcu::TestLog::Message
+		<< msg.str()
+		<< tcu::TestLog::EndMessage;
+}
+
+// SingleCmdRenderInstance hook: pipeline layout used when binding descriptors.
+vk::VkPipelineLayout ImageFetchRenderInstance::getPipelineLayout (void) const
+{
+	return *m_pipelineLayout;
+}
+
+// Records the draw: binds the single descriptor set and issues a 24-vertex draw.
+void ImageFetchRenderInstance::writeDrawCmdBuffer (vk::VkCmdBuffer cmd) const
+{
+	m_vki.cmdBindDescriptorSets(cmd, vk::VK_PIPELINE_BIND_POINT_GRAPHICS, getPipelineLayout(), 0, 1, &m_descriptorSet.get(), 0, DE_NULL);
+	m_vki.cmdDraw(cmd, 0, 6 * 4, 0, 1); // render four quads (two separate triangles each)
+}
+
+// Builds the expected quadrant image from host-side reference fetches — or a
+// fixed yellow/green pattern when no shader stage accesses the descriptors —
+// and compares it against the rendered result with a 1-ULP8 bilinear-tolerant compare.
+tcu::TestStatus ImageFetchRenderInstance::verifyResultImage (const tcu::ConstPixelBufferAccess& result) const
+{
+	const tcu::Vec4		green		(0.0f, 1.0f, 0.0f, 1.0f);
+	const tcu::Vec4		yellow		(1.0f, 1.0f, 0.0f, 1.0f);
+	const bool			doFetch		= (m_stageFlags != 0u); // no active stages? Then don't fetch
+	const tcu::Vec4		sample0		= (!doFetch) ? (yellow)	: (m_images.fetchImageValue(0));
+	const tcu::Vec4		sample1		= (!doFetch) ? (green)	: (m_images.fetchImageValue(1));
+	const tcu::Vec4		sample2		= (!doFetch) ? (green)	: (m_images.fetchImageValue(2));
+	const tcu::Vec4		sample3		= (!doFetch) ? (yellow)	: (m_images.fetchImageValue(3));
+	tcu::Surface		reference	(m_targetSize.x(), m_targetSize.y());
+
+	drawQuadrantReferenceResult(reference.getAccess(), sample0, sample1, sample2, sample3);
+
+	if (!bilinearCompare(m_context.getTestContext().getLog(), "Compare", "Result comparison", reference.getAccess(), result, tcu::RGBA(1, 1, 1, 1), tcu::COMPARE_LOG_RESULT))
+		return tcu::TestStatus::fail("Image verification failed");
+	else
+		return tcu::TestStatus::pass("Pass");
+}
+
+// Compute-pipeline variant: fetches 4 texel values through the descriptor(s)
+// under test in a compute shader, writes them to a storage buffer, then
+// verifies them on the host.
+class ImageFetchComputeInstance : public vkt::TestInstance
+{
+public:
+											ImageFetchComputeInstance	(vkt::Context&			context,
+																		 vk::VkDescriptorType	descriptorType,
+																		 ShaderInputInterface	shaderInterface,
+																		 vk::VkImageViewType	viewType,
+																		 deUint32				baseMipLevel,
+																		 deUint32				baseArraySlice);
+
+private:
+	vk::Move<vk::VkDescriptorSetLayout>		createDescriptorSetLayout	(void) const;
+	vk::Move<vk::VkDescriptorPool>			createDescriptorPool		(void) const;
+	vk::Move<vk::VkDescriptorSet>			createDescriptorSet			(vk::VkDescriptorPool pool, vk::VkDescriptorSetLayout layout) const;
+
+	tcu::TestStatus							iterate						(void);
+	void									logTestPlan					(void) const;
+	tcu::TestStatus							testResourceAccess			(void);
+
+	const vk::VkDescriptorType				m_descriptorType;
+	const ShaderInputInterface				m_shaderInterface;
+	const vk::VkImageViewType				m_viewType;
+	const deUint32							m_baseMipLevel;
+	const deUint32							m_baseArraySlice;
+
+	// Cached device/queue handles from the context.
+	const vk::DeviceInterface&				m_vki;
+	const vk::VkDevice						m_device;
+	const vk::VkQueue						m_queue;
+	const deUint32							m_queueFamilyIndex;
+	vk::Allocator&							m_allocator;
+
+	const ComputeInstanceResultBuffer		m_result;	// storage buffer the shader writes results to
+	const ImageFetchInstanceImages			m_images;
+};
+
+// Caches device handles from the context and creates the result buffer and
+// source image(s) up front; pipeline and descriptors are built per-iteration.
+ImageFetchComputeInstance::ImageFetchComputeInstance (Context&				context,
+													  vk::VkDescriptorType	descriptorType,
+													  ShaderInputInterface	shaderInterface,
+													  vk::VkImageViewType	viewType,
+													  deUint32				baseMipLevel,
+													  deUint32				baseArraySlice)
+	: vkt::TestInstance		(context)
+	, m_descriptorType		(descriptorType)
+	, m_shaderInterface		(shaderInterface)
+	, m_viewType			(viewType)
+	, m_baseMipLevel		(baseMipLevel)
+	, m_baseArraySlice		(baseArraySlice)
+	, m_vki					(context.getDeviceInterface())
+	, m_device				(context.getDevice())
+	, m_queue				(context.getUniversalQueue())
+	, m_queueFamilyIndex	(context.getUniversalQueueFamilyIndex())
+	, m_allocator			(context.getDefaultAllocator())
+	, m_result				(m_vki, m_device, m_allocator)
+	, m_images				(m_vki, m_device, m_queueFamilyIndex, m_queue, m_allocator, m_descriptorType, m_shaderInterface, m_viewType, m_baseMipLevel, m_baseArraySlice)
+{
+}
+
+// Set layout for the compute case: binding 0 is always the result storage
+// buffer; the image binding(s) follow, arranged per the tested interface.
+vk::Move<vk::VkDescriptorSetLayout> ImageFetchComputeInstance::createDescriptorSetLayout (void) const
+{
+	vk::DescriptorSetLayoutBuilder builder;
+
+	builder.addSingleBinding(vk::VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, vk::VK_SHADER_STAGE_COMPUTE_BIT);
+
+	switch (m_shaderInterface)
+	{
+		case SHADER_INPUT_SINGLE_DESCRIPTOR:
+			builder.addSingleBinding(m_descriptorType, vk::VK_SHADER_STAGE_COMPUTE_BIT);
+			break;
+
+		case SHADER_INPUT_MULTIPLE_DESCRIPTORS:
+			builder.addSingleBinding(m_descriptorType, vk::VK_SHADER_STAGE_COMPUTE_BIT);
+			builder.addSingleBinding(m_descriptorType, vk::VK_SHADER_STAGE_COMPUTE_BIT);
+			break;
+
+		case SHADER_INPUT_DESCRIPTOR_ARRAY:
+			builder.addArrayBinding(m_descriptorType, 2u, vk::VK_SHADER_STAGE_COMPUTE_BIT);
+			break;
+
+		default:
+			DE_FATAL("Impossible");
+	};
+
+	return builder.build(m_vki, m_device);
+}
+
+// One-shot pool for a single set: one storage buffer plus the image descriptor(s).
+vk::Move<vk::VkDescriptorPool> ImageFetchComputeInstance::createDescriptorPool (void) const
+{
+	return vk::DescriptorPoolBuilder()
+		.addType(vk::VK_DESCRIPTOR_TYPE_STORAGE_BUFFER)
+		.addType(m_descriptorType, getInterfaceNumResources(m_shaderInterface))
+		.build(m_vki, m_device, vk::VK_DESCRIPTOR_POOL_USAGE_ONE_SHOT, 1);
+}
+
+// Allocates the set and writes the result buffer (binding 0) plus the image
+// view(s) in SHADER_READ_ONLY layout, per the tested shader interface.
+vk::Move<vk::VkDescriptorSet> ImageFetchComputeInstance::createDescriptorSet (vk::VkDescriptorPool pool, vk::VkDescriptorSetLayout layout) const
+{
+	const vk::VkDescriptorInfo		resultInfo		= createDescriptorInfo(m_result.getBufferView());
+	const vk::VkDescriptorInfo		imageInfos[2]	=
+	{
+		createDescriptorInfo(m_images.getImageViewA(), vk::VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL),
+		createDescriptorInfo(m_images.getImageViewB(), vk::VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL),
+	};
+
+	vk::Move<vk::VkDescriptorSet>	descriptorSet	= allocDescriptorSet(m_vki, m_device, pool, vk::VK_DESCRIPTOR_SET_USAGE_ONE_SHOT, layout);
+	vk::DescriptorSetUpdateBuilder	builder;
+
+	// result
+	builder.writeSingle(*descriptorSet, vk::DescriptorSetUpdateBuilder::Location::binding(0u), vk::VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, &resultInfo);
+
+	// images
+	switch (m_shaderInterface)
+	{
+		case SHADER_INPUT_SINGLE_DESCRIPTOR:
+			builder.writeSingle(*descriptorSet, vk::DescriptorSetUpdateBuilder::Location::binding(1u), m_descriptorType, &imageInfos[0]);
+			break;
+
+		case SHADER_INPUT_MULTIPLE_DESCRIPTORS:
+			builder.writeSingle(*descriptorSet, vk::DescriptorSetUpdateBuilder::Location::binding(1u), m_descriptorType, &imageInfos[0]);
+			builder.writeSingle(*descriptorSet, vk::DescriptorSetUpdateBuilder::Location::binding(2u), m_descriptorType, &imageInfos[1]);
+			break;
+
+		case SHADER_INPUT_DESCRIPTOR_ARRAY:
+			builder.writeArray(*descriptorSet, vk::DescriptorSetUpdateBuilder::Location::binding(1u), m_descriptorType, 2u, imageInfos);
+			break;
+
+		default:
+			DE_FATAL("Impossible");
+	}
+
+	builder.update(m_vki, m_device);
+	return descriptorSet;
+}
+
+// Single-iteration test: log the plan, then run the access test and return its verdict.
+tcu::TestStatus ImageFetchComputeInstance::iterate (void)
+{
+	logTestPlan();
+	return testResourceAccess();
+}
+
+// Writes a human-readable description of the compute test configuration to the test log.
+void ImageFetchComputeInstance::logTestPlan (void) const
+{
+	std::ostringstream msg;
+
+	msg << "Fetching 4 values from image in compute shader.\n"
+		<< "Single descriptor set. Descriptor set contains "
+			<< ((m_shaderInterface == SHADER_INPUT_SINGLE_DESCRIPTOR) ? "single" :
+			    (m_shaderInterface == SHADER_INPUT_MULTIPLE_DESCRIPTORS) ? "two" :
+			    (m_shaderInterface == SHADER_INPUT_DESCRIPTOR_ARRAY) ? "an array (size 2) of" :
+			    (const char*)DE_NULL)
+		<< " descriptor(s) of type " << vk::getDescriptorTypeName(m_descriptorType) << "\n"
+		<< "Image view type is " << vk::getImageViewTypeName(m_viewType) << "\n";
+
+	if (m_baseMipLevel)
+		msg << "Image view base mip level = " << m_baseMipLevel << "\n";
+	if (m_baseArraySlice)
+		msg << "Image view base array slice = " << m_baseArraySlice << "\n";
+
+	for (int resultNdx = 0; resultNdx < 4; ++resultNdx)
+	{
+		msg << "Test sample " << resultNdx << ": fetch at position " << m_images.getFetchPos(m_viewType, m_baseMipLevel, m_baseArraySlice, resultNdx);
+
+		if (m_shaderInterface != SHADER_INPUT_SINGLE_DESCRIPTOR)
+		{
+			const int srcResourceNdx = (resultNdx % 2); // ABAB source
+			msg << " from descriptor " << srcResourceNdx;
+		}
+
+		msg << "\n";
+	}
+
+	m_context.getTestContext().getLog()
+		<< tcu::TestLog::Message
+		<< msg.str()
+		<< tcu::TestLog::EndMessage;
+}
+
+// Builds descriptors and the compute pipeline, dispatches a 4x1x1 grid (one
+// invocation per sample), then compares each written value against the
+// host-side reference fetch. Distinguishes "wrong values" from "shader never
+// wrote the result buffer".
+tcu::TestStatus ImageFetchComputeInstance::testResourceAccess (void)
+{
+	const vk::Unique<vk::VkDescriptorSetLayout>	descriptorSetLayout	(createDescriptorSetLayout());
+	const vk::Unique<vk::VkDescriptorPool>		descriptorPool		(createDescriptorPool());
+	const vk::Unique<vk::VkDescriptorSet>		descriptorSet		(createDescriptorSet(*descriptorPool, *descriptorSetLayout));
+	const ComputePipeline						pipeline			(m_vki, m_device, m_context.getBinaryCollection(), 1, &descriptorSetLayout.get());
+
+	const vk::VkDescriptorSet					descriptorSets[]	= { *descriptorSet };
+	const int									numDescriptorSets	= DE_LENGTH_OF_ARRAY(descriptorSets);
+	const deUint32* const						dynamicOffsets		= DE_NULL;
+	const int									numDynamicOffsets	= 0;
+	const void* const*							preBarriers			= DE_NULL;
+	const int									numPreBarriers		= 0;
+	const void* const							postBarriers[]		= { m_result.getResultReadBarrier() }; // make result writes visible to host read
+	const int									numPostBarriers		= DE_LENGTH_OF_ARRAY(postBarriers);
+
+	const ComputeCommand						compute				(m_vki,
+																	 m_device,
+																	 pipeline.getPipeline(),
+																	 pipeline.getPipelineLayout(),
+																	 tcu::UVec3(4, 1, 1),
+																	 numDescriptorSets,	descriptorSets,
+																	 numDynamicOffsets,	dynamicOffsets,
+																	 numPreBarriers,	preBarriers,
+																	 numPostBarriers,	postBarriers);
+
+	tcu::Vec4									results[4];
+	bool										anyResultSet		= false;
+	bool										allResultsOk		= true;
+
+	compute.submitAndWait(m_queueFamilyIndex, m_queue);
+	m_result.readResultContentsTo(&results);
+
+	// verify
+	for (int resultNdx = 0; resultNdx < 4; ++resultNdx)
+	{
+		const tcu::Vec4	result				= results[resultNdx];
+		const tcu::Vec4	reference			= m_images.fetchImageValue(resultNdx);
+		const tcu::Vec4	conversionThreshold	= tcu::Vec4(1.0f / 255.0f); // allow one 8-bit unorm step of error
+
+		// \note -1 is presumably the initial content of the result buffer (see ComputeInstanceResultBuffer);
+		//       any other value means the shader wrote to this slot
+		if (result != tcu::Vec4(-1.0f))
+			anyResultSet = true;
+
+		if (tcu::boolAny(tcu::greaterThan(tcu::abs(result - reference), conversionThreshold)))
+		{
+			allResultsOk = false;
+
+			m_context.getTestContext().getLog()
+				<< tcu::TestLog::Message
+				<< "Test sample " << resultNdx << ": Expected " << reference << ", got " << result
+				<< tcu::TestLog::EndMessage;
+		}
+	}
+
+	// read back and verify
+	if (allResultsOk)
+		return tcu::TestStatus::pass("Pass");
+	else if (anyResultSet)
+		return tcu::TestStatus::fail("Invalid result values");
+	else
+	{
+		m_context.getTestContext().getLog()
+			<< tcu::TestLog::Message
+			<< "Result buffer was not written to."
+			<< tcu::TestLog::EndMessage;
+		return tcu::TestStatus::fail("Result buffer was not written to");
+	}
+}
+
+// Source images, Vulkan samplers and host-side reference samplers for the
+// sampler / combined-image-sampler cases. Provides reference sample positions
+// and values for result verification.
+class ImageSampleInstanceImages : private ImageInstanceImages
+{
+public:
+										ImageSampleInstanceImages	(const vk::DeviceInterface&	vki,
+																	 vk::VkDevice				device,
+																	 deUint32					queueFamilyIndex,
+																	 vk::VkQueue				queue,
+																	 vk::Allocator&				allocator,
+																	 vk::VkDescriptorType		descriptorType,
+																	 ShaderInputInterface		shaderInterface,
+																	 vk::VkImageViewType		viewType,
+																	 deUint32					baseMipLevel,
+																	 deUint32					baseArraySlice,
+																	 bool						immutable);
+
+	// Normalized sample coordinate for sample index samplePosNdx.
+	static tcu::Vec4					getSamplePos				(vk::VkImageViewType viewType, deUint32 baseMipLevel, deUint32 baseArraySlice, int samplePosNdx);
+	// Host-side reference sampling result for sample index samplePosNdx.
+	tcu::Vec4							fetchSampleValue			(int samplePosNdx) const;
+
+	inline vk::VkImageView				getImageViewA				(void) const { return *m_imageViewA;	}
+	inline vk::VkImageView				getImageViewB				(void) const { return *m_imageViewB;	}
+	inline vk::VkSampler				getSamplerA					(void) const { return *m_samplerA;		}
+	inline vk::VkSampler				getSamplerB					(void) const { return *m_samplerB;		}
+	inline bool							isImmutable					(void) const { return m_isImmutable;	}
+
+private:
+	static int							getNumImages				(vk::VkDescriptorType descriptorType, ShaderInputInterface shaderInterface);
+	static tcu::Sampler					createRefSampler			(bool isFirst);
+	static vk::Move<vk::VkSampler>		createSampler				(const vk::DeviceInterface& vki, vk::VkDevice device, const tcu::Sampler& sampler, const tcu::TextureFormat& format);
+
+	// Reference texture views over the host-side source data, restricted to the tested base mip/slice.
+	static tcu::Texture1DArrayView		getRef1DView				(const tcu::TextureLevelPyramid& source, deUint32 baseMipLevel, deUint32 baseArraySlice, std::vector<tcu::ConstPixelBufferAccess>* levelStorage);
+	static tcu::Texture2DArrayView		getRef2DView				(const tcu::TextureLevelPyramid& source, deUint32 baseMipLevel, deUint32 baseArraySlice, std::vector<tcu::ConstPixelBufferAccess>* levelStorage);
+	static tcu::Texture3DView			getRef3DView				(const tcu::TextureLevelPyramid& source, deUint32 baseMipLevel, deUint32 baseArraySlice, std::vector<tcu::ConstPixelBufferAccess>* levelStorage);
+	static tcu::TextureCubeArrayView	getRefCubeView				(const tcu::TextureLevelPyramid& source, deUint32 baseMipLevel, deUint32 baseArraySlice, std::vector<tcu::ConstPixelBufferAccess>* levelStorage);
+
+	const vk::VkDescriptorType			m_descriptorType;
+	const ShaderInputInterface			m_shaderInterface;
+	const bool							m_isImmutable;	// use immutable samplers in the set layout
+
+	const tcu::Sampler					m_refSamplerA;
+	const tcu::Sampler					m_refSamplerB;
+	const vk::Unique<vk::VkSampler>		m_samplerA;
+	const vk::Unique<vk::VkSampler>		m_samplerB;
+};
+
+// Creates the image(s) (count depends on descriptor type and interface, see
+// getNumImages) plus the samplers. Sampler B is only created when the
+// interface needs two resources; otherwise it is left as a null handle.
+ImageSampleInstanceImages::ImageSampleInstanceImages (const vk::DeviceInterface&	vki,
+													  vk::VkDevice					device,
+													  deUint32						queueFamilyIndex,
+													  vk::VkQueue					queue,
+													  vk::Allocator&				allocator,
+													  vk::VkDescriptorType			descriptorType,
+													  ShaderInputInterface			shaderInterface,
+													  vk::VkImageViewType			viewType,
+													  deUint32						baseMipLevel,
+													  deUint32						baseArraySlice,
+													  bool							immutable)
+	: ImageInstanceImages	(vki,
+							 device,
+							 queueFamilyIndex,
+							 queue,
+							 allocator,
+							 descriptorType,
+							 viewType,
+							 getNumImages(descriptorType, shaderInterface),
+							 baseMipLevel,
+							 baseArraySlice)
+	, m_descriptorType		(descriptorType)
+	, m_shaderInterface		(shaderInterface)
+	, m_isImmutable			(immutable)
+	, m_refSamplerA			(createRefSampler(true))
+	, m_refSamplerB			(createRefSampler(false))
+	, m_samplerA			(createSampler(vki, device, m_refSamplerA, m_imageFormat))
+	, m_samplerB			((getInterfaceNumResources(m_shaderInterface) == 1u)
+								? vk::Move<vk::VkSampler>()
+								: createSampler(vki, device, m_refSamplerB, m_imageFormat))
+{
+}
+
+// Returns the sample coordinate for sample index samplePosNdx (0..3), packed
+// as (s, t, r/layer, q/layer) depending on view type. 1D/2D/3D coordinates are
+// normalized to [0,1]-ish texture space (some points deliberately fall outside
+// to exercise wrap modes); cube coordinates are mapped to [-1,1]^3 with the
+// array layer in w.
+tcu::Vec4 ImageSampleInstanceImages::getSamplePos (vk::VkImageViewType viewType, deUint32 baseMipLevel, deUint32 baseArraySlice, int samplePosNdx)
+{
+	DE_ASSERT(de::inBounds(samplePosNdx, 0, 4));
+
+	const deUint32	imageSize	= (deUint32)IMAGE_SIZE >> baseMipLevel;
+	const deUint32	arraySize	= isImageViewTypeArray(viewType) ? ARRAY_SIZE - baseArraySlice : 1;
+
+	// choose arbitrary values that are not ambiguous with NEAREST filtering
+
+	switch (viewType)
+	{
+		case vk::VK_IMAGE_VIEW_TYPE_1D:
+		case vk::VK_IMAGE_VIEW_TYPE_1D_ARRAY:
+		case vk::VK_IMAGE_VIEW_TYPE_2D:
+		case vk::VK_IMAGE_VIEW_TYPE_2D_ARRAY:
+		case vk::VK_IMAGE_VIEW_TYPE_3D:
+		{
+			// texel-space coordinates; offsets of .25/.5/.75 keep them off texel boundaries
+			const tcu::Vec3	coords[4]	=
+			{
+				tcu::Vec3(0.75f,
+						  0.5f,
+						  (float)(12u % imageSize) + 0.25f),
+
+				tcu::Vec3((float)(23u % imageSize) + 0.25f,
+						  (float)(73u % imageSize) + 0.5f,
+						  (float)(16u % imageSize) + 0.5f + (float)imageSize),
+
+				tcu::Vec3(-(float)(43u % imageSize) + 0.25f,
+						  (float)(84u % imageSize) + 0.5f + (float)imageSize,
+						  (float)(117u % imageSize) + 0.75f),
+
+				tcu::Vec3((float)imageSize + 0.5f,
+						  (float)(75u % imageSize) + 0.25f,
+						  (float)(83u % imageSize) + 0.25f + (float)imageSize),
+			};
+			const deUint32	slices[4]	=
+			{
+				0u % arraySize,
+				4u % arraySize,
+				9u % arraySize,
+				2u % arraySize,
+			};
+
+			if (viewType == vk::VK_IMAGE_VIEW_TYPE_1D || viewType == vk::VK_IMAGE_VIEW_TYPE_1D_ARRAY)
+				return tcu::Vec4(coords[samplePosNdx].x() / (float)imageSize,
+								 (float)slices[samplePosNdx],
+								 0.0f,
+								 0.0f);
+			else if (viewType == vk::VK_IMAGE_VIEW_TYPE_2D || viewType == vk::VK_IMAGE_VIEW_TYPE_2D_ARRAY)
+				return tcu::Vec4(coords[samplePosNdx].x() / (float)imageSize,
+								 coords[samplePosNdx].y() / (float)imageSize,
+								 (float)slices[samplePosNdx],
+								 0.0f);
+			else if (viewType == vk::VK_IMAGE_VIEW_TYPE_3D)
+				return tcu::Vec4(coords[samplePosNdx].x() / (float)imageSize,
+								 coords[samplePosNdx].y() / (float)imageSize,
+								 coords[samplePosNdx].z() / (float)imageSize,
+								 0.0f);
+			else
+			{
+				DE_FATAL("Impossible");
+				return tcu::Vec4();
+			}
+		}
+
+		case vk::VK_IMAGE_VIEW_TYPE_CUBE:
+		case vk::VK_IMAGE_VIEW_TYPE_CUBE_ARRAY:
+		{
+			// \note these values are in [0, texSize]*3 space for convenience
+			const tcu::Vec3	coords[4]	=
+			{
+				tcu::Vec3(0.75f,
+						  0.5f,
+						  (float)imageSize),
+
+				tcu::Vec3((float)(13u % imageSize) + 0.25f,
+						  0.0f,
+						  (float)(16u % imageSize) + 0.5f),
+
+				tcu::Vec3(0.0f,
+						  (float)(84u % imageSize) + 0.5f,
+						  (float)(10u % imageSize) + 0.75f),
+
+				tcu::Vec3((float)imageSize,
+						  (float)(75u % imageSize) + 0.25f,
+						  (float)(83u % imageSize) + 0.75f),
+			};
+			const deUint32	slices[4]	=
+			{
+				1u % arraySize,
+				2u % arraySize,
+				9u % arraySize,
+				5u % arraySize,
+			};
+
+			DE_ASSERT(de::inRange(coords[samplePosNdx].x(), 0.0f, (float)imageSize));
+			DE_ASSERT(de::inRange(coords[samplePosNdx].y(), 0.0f, (float)imageSize));
+			DE_ASSERT(de::inRange(coords[samplePosNdx].z(), 0.0f, (float)imageSize));
+
+			// map to [-1, 1]*3 space
+			return tcu::Vec4(coords[samplePosNdx].x() / (float)imageSize * 2.0f - 1.0f,
+							 coords[samplePosNdx].y() / (float)imageSize * 2.0f - 1.0f,
+							 coords[samplePosNdx].z() / (float)imageSize * 2.0f - 1.0f,
+							 (float)slices[samplePosNdx]);
+		}
+
+		default:
+			DE_FATAL("Impossible");
+			return tcu::Vec4();
+	}
+}
+
+// Host-side reference sampling for sample index samplePosNdx at lod 0. Both
+// textures and samplers are consumed in ABAB order; in the pure-sampler case
+// only image A exists, so all samples read it. With a single descriptor the
+// "B" resources resolve to the "A" ones.
+tcu::Vec4 ImageSampleInstanceImages::fetchSampleValue (int samplePosNdx) const
+{
+	DE_ASSERT(de::inBounds(samplePosNdx, 0, 4));
+
+	// texture order is ABAB
+	const bool							isSamplerCase	= (m_descriptorType == vk::VK_DESCRIPTOR_TYPE_SAMPLER);
+	const tcu::TextureLevelPyramid&		sampleSrcA		= m_sourceImageA;
+	const tcu::TextureLevelPyramid&		sampleSrcB		= (m_shaderInterface == SHADER_INPUT_SINGLE_DESCRIPTOR) ? (m_sourceImageA) : (m_sourceImageB);
+	const tcu::TextureLevelPyramid&		sampleSrc		= (isSamplerCase) ? (sampleSrcA) : ((samplePosNdx % 2) == 0) ? (sampleSrcA) : (sampleSrcB);
+
+	// sampler order is ABAB
+	const tcu::Sampler&					samplerA		= m_refSamplerA;
+	const tcu::Sampler&					samplerB		= (m_shaderInterface == SHADER_INPUT_SINGLE_DESCRIPTOR) ? (m_refSamplerA) : (m_refSamplerB);
+	const tcu::Sampler&					sampler			= ((samplePosNdx % 2) == 0) ? (samplerA) : (samplerB);
+
+	const tcu::Vec4						samplePos		= getSamplePos(m_viewType, m_baseMipLevel, m_baseArraySlice, samplePosNdx);
+	const float							lod				= 0.0f;
+	std::vector<tcu::ConstPixelBufferAccess> levelStorage;
+
+	switch (m_viewType)
+	{
+		case vk::VK_IMAGE_VIEW_TYPE_1D:
+		case vk::VK_IMAGE_VIEW_TYPE_1D_ARRAY:	return getRef1DView(sampleSrc, m_baseMipLevel, m_baseArraySlice, &levelStorage).sample(sampler, samplePos.x(), samplePos.y(), lod);
+		case vk::VK_IMAGE_VIEW_TYPE_2D:
+		case vk::VK_IMAGE_VIEW_TYPE_2D_ARRAY:	return getRef2DView(sampleSrc, m_baseMipLevel, m_baseArraySlice, &levelStorage).sample(sampler, samplePos.x(), samplePos.y(), samplePos.z(), lod);
+		case vk::VK_IMAGE_VIEW_TYPE_3D:			return getRef3DView(sampleSrc, m_baseMipLevel, m_baseArraySlice, &levelStorage).sample(sampler, samplePos.x(), samplePos.y(), samplePos.z(), lod);
+		case vk::VK_IMAGE_VIEW_TYPE_CUBE:
+		case vk::VK_IMAGE_VIEW_TYPE_CUBE_ARRAY:	return getRefCubeView(sampleSrc, m_baseMipLevel, m_baseArraySlice, &levelStorage).sample(sampler, samplePos.x(), samplePos.y(), samplePos.z(), samplePos.w(), lod);
+
+		default:
+		{
+			DE_FATAL("Impossible");
+			return tcu::Vec4();
+		}
+	}
+}
+
+// Returns how many source images the test needs for the given descriptor type:
+// separate-sampler tests share one image among all samplers, while combined
+// image-sampler tests need one image per descriptor resource.
+int ImageSampleInstanceImages::getNumImages (vk::VkDescriptorType descriptorType, ShaderInputInterface shaderInterface)
+{
+	// If we are testing separate samplers, just one image is enough
+	if (descriptorType == vk::VK_DESCRIPTOR_TYPE_SAMPLER)
+		return 1;
+	else if (descriptorType == vk::VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER)
+	{
+		// combined: numImages == numSamplers
+		return getInterfaceNumResources(shaderInterface);
+	}
+	else
+	{
+		DE_FATAL("Impossible");
+		return 0;
+	}
+}
+
+// Creates the CPU reference sampler. The two test samplers are deliberately
+// different so that sampling with the wrong one produces a detectable result:
+// sampler A (isFirst) is linear/repeat, sampler B is nearest/clamp.
+tcu::Sampler ImageSampleInstanceImages::createRefSampler (bool isFirst)
+{
+	if (isFirst)
+	{
+		// linear, wrapping
+		return tcu::Sampler(tcu::Sampler::REPEAT_GL, tcu::Sampler::REPEAT_GL, tcu::Sampler::REPEAT_GL, tcu::Sampler::LINEAR, tcu::Sampler::LINEAR);
+	}
+	else
+	{
+		// nearest, clamping
+		return tcu::Sampler(tcu::Sampler::CLAMP_TO_EDGE, tcu::Sampler::CLAMP_TO_EDGE, tcu::Sampler::CLAMP_TO_EDGE, tcu::Sampler::NEAREST, tcu::Sampler::NEAREST);
+	}
+}
+
+// Creates a VkSampler matching the given tcu reference sampler. Filter, mipmap
+// mode and address modes are translated with the map* helpers; compare mode is
+// enabled only if the reference sampler uses one, and the border color is
+// selected by whether the texture format is an integer format.
+vk::Move<vk::VkSampler> ImageSampleInstanceImages::createSampler (const vk::DeviceInterface& vki, vk::VkDevice device, const tcu::Sampler& sampler, const tcu::TextureFormat& format)
+{
+	const bool compareEnabled = (sampler.compare != tcu::Sampler::COMPAREMODE_NONE);
+	const vk::VkCompareOp compareOp = (compareEnabled) ? (mapToVkCompareOp(sampler.compare)) : (vk::VK_COMPARE_OP_ALWAYS);
+	const tcu::TextureChannelClass channelClass = tcu::getTextureChannelClass(format.type);
+	const bool isIntTexture = channelClass == tcu::TEXTURECHANNELCLASS_SIGNED_INTEGER || channelClass == tcu::TEXTURECHANNELCLASS_UNSIGNED_INTEGER;
+	const vk::VkBorderColor borderColor = (isIntTexture) ? (vk::VK_BORDER_COLOR_INT_OPAQUE_WHITE) : (vk::VK_BORDER_COLOR_FLOAT_OPAQUE_WHITE);
+	const vk::VkSamplerCreateInfo createInfo =
+	{
+		vk::VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO,
+		DE_NULL,
+		mapMagFilterToVkTexFilter(sampler.magFilter),		// magFilter
+		mapMinFilterToVkTexFilter(sampler.minFilter),		// minFilter
+		mapMinFilterToVkTexMipmapMode(sampler.minFilter),	// mipMode
+		mapToVkTexAddress(sampler.wrapS),					// addressU
+		mapToVkTexAddress(sampler.wrapT),					// addressV
+		mapToVkTexAddress(sampler.wrapR),					// addressW
+		0.0f,												// mipLodBias
+		1,													// maxAnisotropy
+		(compareEnabled) ? (vk::VkBool32)(vk::VK_TRUE) : (vk::VkBool32)(vk::VK_FALSE),	// compareEnable
+		compareOp,											// compareOp
+		0.0f,												// minLod
+		0.0f,												// maxLod
+		borderColor,										// borderColor
+	};
+	return vk::createSampler(vki, device, &createInfo);
+}
+
+// Builds a 1D-array view of the source pyramid restricted to mip levels
+// >= baseMipLevel and array slices >= baseArraySlice, emulating a VkImageView
+// with non-zero base level/slice. For 1D arrays the layer index is the access
+// height dimension. levelStorage must be empty on entry and must outlive the
+// returned view (the view references the stored accesses).
+tcu::Texture1DArrayView ImageSampleInstanceImages::getRef1DView (const tcu::TextureLevelPyramid& source, deUint32 baseMipLevel, deUint32 baseArraySlice, std::vector<tcu::ConstPixelBufferAccess>* levelStorage)
+{
+	DE_ASSERT(levelStorage->empty());
+
+	const deUint32 numSlices = (deUint32)source.getLevel(0).getHeight();
+	const deUint32 numLevels = (deUint32)source.getNumLevels();
+
+	// cut pyramid from baseMipLevel
+	for (deUint32 level = baseMipLevel; level < numLevels; ++level)
+	{
+		// cut levels from baseArraySlice
+		const tcu::ConstPixelBufferAccess wholeLevel = source.getLevel(level);
+		const tcu::ConstPixelBufferAccess cutLevel = tcu::getSubregion(wholeLevel, 0, baseArraySlice, wholeLevel.getWidth(), numSlices - baseArraySlice);
+		levelStorage->push_back(cutLevel);
+	}
+
+	return tcu::Texture1DArrayView((int)levelStorage->size(), &levelStorage->front());
+}
+
+// Builds a 2D-array view of the source pyramid restricted to mip levels
+// >= baseMipLevel and array slices >= baseArraySlice. For 2D arrays the layer
+// index is the access depth dimension. levelStorage must be empty on entry and
+// must outlive the returned view.
+tcu::Texture2DArrayView ImageSampleInstanceImages::getRef2DView (const tcu::TextureLevelPyramid& source, deUint32 baseMipLevel, deUint32 baseArraySlice, std::vector<tcu::ConstPixelBufferAccess>* levelStorage)
+{
+	DE_ASSERT(levelStorage->empty());
+
+	const deUint32 numSlices = (deUint32)source.getLevel(0).getDepth();
+	const deUint32 numLevels = (deUint32)source.getNumLevels();
+
+	// cut pyramid from baseMipLevel
+	for (deUint32 level = baseMipLevel; level < numLevels; ++level)
+	{
+		// cut levels from baseArraySlice
+		const tcu::ConstPixelBufferAccess wholeLevel = source.getLevel(level);
+		const tcu::ConstPixelBufferAccess cutLevel = tcu::getSubregion(wholeLevel, 0, 0, baseArraySlice, wholeLevel.getWidth(), wholeLevel.getHeight(), numSlices - baseArraySlice);
+		levelStorage->push_back(cutLevel);
+	}
+
+	return tcu::Texture2DArrayView((int)levelStorage->size(), &levelStorage->front());
+}
+
+// Builds a 3D view of the source pyramid restricted to mip levels
+// >= baseMipLevel. 3D images have no array slices, so baseArraySlice must be 0
+// (asserted, otherwise unused). levelStorage must be empty on entry and must
+// outlive the returned view.
+tcu::Texture3DView ImageSampleInstanceImages::getRef3DView (const tcu::TextureLevelPyramid& source, deUint32 baseMipLevel, deUint32 baseArraySlice, std::vector<tcu::ConstPixelBufferAccess>* levelStorage)
+{
+	DE_ASSERT(levelStorage->empty());
+	DE_ASSERT(baseArraySlice == 0);
+	DE_UNREF(baseArraySlice);
+
+	const deUint32 numLevels = (deUint32)source.getNumLevels();
+
+	// cut pyramid from baseMipLevel
+	for (deUint32 level = baseMipLevel; level < numLevels; ++level)
+		levelStorage->push_back(source.getLevel(level));
+
+	return tcu::Texture3DView((int)levelStorage->size(), &levelStorage->front());
+}
+
+// Builds a cube-array view of the source pyramid restricted to mip levels
+// >= baseMipLevel and cube slices >= baseArraySlice. Cube layers are stored in
+// the depth dimension, 6 faces per cube, so slice offsets/counts are scaled by
+// 6. levelStorage must be empty on entry and must outlive the returned view.
+tcu::TextureCubeArrayView ImageSampleInstanceImages::getRefCubeView (const tcu::TextureLevelPyramid& source, deUint32 baseMipLevel, deUint32 baseArraySlice, std::vector<tcu::ConstPixelBufferAccess>* levelStorage)
+{
+	DE_ASSERT(levelStorage->empty());
+
+	const deUint32 numSlices = (deUint32)source.getLevel(0).getDepth() / 6;
+	const deUint32 numLevels = (deUint32)source.getNumLevels();
+
+	// cut pyramid from baseMipLevel
+	for (deUint32 level = baseMipLevel; level < numLevels; ++level)
+	{
+		// cut levels from baseArraySlice
+		const tcu::ConstPixelBufferAccess wholeLevel = source.getLevel(level);
+		const tcu::ConstPixelBufferAccess cutLevel = tcu::getSubregion(wholeLevel, 0, 0, baseArraySlice * 6, wholeLevel.getWidth(), wholeLevel.getHeight(), (numSlices - baseArraySlice) * 6);
+		levelStorage->push_back(cutLevel);
+	}
+
+	return tcu::TextureCubeArrayView((int)levelStorage->size(), &levelStorage->front());
+}
+
+// Render (graphics pipeline) variant of the image/sampler descriptor tests.
+// Draws a 2x2 quadrant grid where each cell's color is fetched through the
+// sampler or combined-image-sampler descriptor(s) under test, then compares
+// the rendered image against a CPU-computed reference.
+class ImageSampleRenderInstance : public SingleCmdRenderInstance
+{
+public:
+													ImageSampleRenderInstance		(vkt::Context&			context,
+																					 bool					isPrimaryCmdBuf,
+																					 vk::VkDescriptorType	descriptorType,
+																					 vk::VkShaderStageFlags	stageFlags,
+																					 ShaderInputInterface	shaderInterface,
+																					 vk::VkImageViewType	viewType,
+																					 deUint32				baseMipLevel,
+																					 deUint32				baseArraySlice,
+																					 bool					isImmutable);
+
+private:
+	// Creation helpers are static so they can be used in the constructor's initializer list.
+	static vk::Move<vk::VkDescriptorSetLayout>		createDescriptorSetLayout		(const vk::DeviceInterface&			vki,
+																					 vk::VkDevice						device,
+																					 vk::VkDescriptorType				descriptorType,
+																					 ShaderInputInterface				shaderInterface,
+																					 vk::VkShaderStageFlags				stageFlags,
+																					 const ImageSampleInstanceImages&	images);
+
+	static vk::Move<vk::VkPipelineLayout>			createPipelineLayout			(const vk::DeviceInterface&	vki,
+																					 vk::VkDevice				device,
+																					 vk::VkDescriptorSetLayout	descriptorSetLayout);
+
+	static vk::Move<vk::VkDescriptorPool>			createDescriptorPool			(const vk::DeviceInterface&	vki,
+																					 vk::VkDevice				device,
+																					 vk::VkDescriptorType		descriptorType,
+																					 ShaderInputInterface		shaderInterface,
+																					 bool						isImmutable);
+
+	static vk::Move<vk::VkDescriptorSet>			createDescriptorSet				(const vk::DeviceInterface&			vki,
+																					 vk::VkDevice						device,
+																					 vk::VkDescriptorType				descriptorType,
+																					 ShaderInputInterface				shaderInterface,
+																					 vk::VkDescriptorSetLayout			layout,
+																					 vk::VkDescriptorPool				pool,
+																					 bool								isImmutable,
+																					 const ImageSampleInstanceImages&	images);
+
+	static void										writeSamplerDescriptorSet		(const vk::DeviceInterface&			vki,
+																					 vk::VkDevice						device,
+																					 ShaderInputInterface				shaderInterface,
+																					 bool								isImmutable,
+																					 const ImageSampleInstanceImages&	images,
+																					 vk::VkDescriptorSet				descriptorSet);
+
+	static void										writeImageSamplerDescriptorSet	(const vk::DeviceInterface&			vki,
+																					 vk::VkDevice						device,
+																					 ShaderInputInterface				shaderInterface,
+																					 bool								isImmutable,
+																					 const ImageSampleInstanceImages&	images,
+																					 vk::VkDescriptorSet				descriptorSet);
+
+	// SingleCmdRenderInstance interface (presumably virtual overrides — base class not visible in this chunk)
+	void											logTestPlan						(void) const;
+	vk::VkPipelineLayout							getPipelineLayout				(void) const;
+	void											writeDrawCmdBuffer				(vk::VkCmdBuffer cmd) const;
+	tcu::TestStatus									verifyResultImage				(const tcu::ConstPixelBufferAccess& result) const;
+
+	enum
+	{
+		RENDER_SIZE = 128,	// render target width and height in pixels
+	};
+
+	const vk::VkDescriptorType						m_descriptorType;
+	const vk::VkShaderStageFlags					m_stageFlags;
+	const ShaderInputInterface						m_shaderInterface;
+	const vk::VkImageViewType						m_viewType;
+	const deUint32									m_baseMipLevel;
+	const deUint32									m_baseArraySlice;
+
+	// declaration order matters: images must exist before the descriptor set layout/pool/set built from them
+	const ImageSampleInstanceImages					m_images;
+	const vk::Unique<vk::VkDescriptorSetLayout>		m_descriptorSetLayout;
+	const vk::Unique<vk::VkPipelineLayout>			m_pipelineLayout;
+	const vk::Unique<vk::VkDescriptorPool>			m_descriptorPool;
+	const vk::Unique<vk::VkDescriptorSet>			m_descriptorSet;
+};
+
+// Constructor: builds all test resources up front via the static helpers.
+// Member initialization order follows the declaration order above, so each
+// helper may rely on the previously constructed members (e.g. the layout
+// needs m_images for immutable samplers).
+ImageSampleRenderInstance::ImageSampleRenderInstance (vkt::Context&			context,
+													  bool					isPrimaryCmdBuf,
+													  vk::VkDescriptorType	descriptorType,
+													  vk::VkShaderStageFlags	stageFlags,
+													  ShaderInputInterface	shaderInterface,
+													  vk::VkImageViewType	viewType,
+													  deUint32				baseMipLevel,
+													  deUint32				baseArraySlice,
+													  bool					isImmutable)
+	: SingleCmdRenderInstance	(context, isPrimaryCmdBuf, tcu::UVec2(RENDER_SIZE, RENDER_SIZE))
+	, m_descriptorType			(descriptorType)
+	, m_stageFlags				(stageFlags)
+	, m_shaderInterface			(shaderInterface)
+	, m_viewType				(viewType)
+	, m_baseMipLevel			(baseMipLevel)
+	, m_baseArraySlice			(baseArraySlice)
+	, m_images					(m_vki, m_device, m_queueFamilyIndex, m_queue, m_allocator, m_descriptorType, m_shaderInterface, m_viewType, m_baseMipLevel, m_baseArraySlice, isImmutable)
+	, m_descriptorSetLayout		(createDescriptorSetLayout(m_vki, m_device, m_descriptorType, m_shaderInterface, m_stageFlags, m_images))
+	, m_pipelineLayout			(createPipelineLayout(m_vki, m_device, *m_descriptorSetLayout))
+	, m_descriptorPool			(createDescriptorPool(m_vki, m_device, m_descriptorType, m_shaderInterface, isImmutable))
+	, m_descriptorSet			(createDescriptorSet(m_vki, m_device, m_descriptorType, m_shaderInterface, *m_descriptorSetLayout, *m_descriptorPool, isImmutable, m_images))
+{
+}
+
+// Builds the descriptor set layout: for separate-sampler tests a sampled image
+// goes to binding 0, followed by the sampler binding(s); for combined tests the
+// combined-image-sampler binding(s) start at binding 0. When images.isImmutable()
+// the samplers are baked into the layout as immutable samplers.
+vk::Move<vk::VkDescriptorSetLayout> ImageSampleRenderInstance::createDescriptorSetLayout (const vk::DeviceInterface&		vki,
+																						  vk::VkDevice						device,
+																						  vk::VkDescriptorType				descriptorType,
+																						  ShaderInputInterface				shaderInterface,
+																						  vk::VkShaderStageFlags			stageFlags,
+																						  const ImageSampleInstanceImages&	images)
+{
+	const vk::VkSampler samplers[2] =
+	{
+		images.getSamplerA(),
+		images.getSamplerB(),
+	};
+
+	vk::DescriptorSetLayoutBuilder builder;
+
+	// with samplers, separate texture at binding 0
+	if (descriptorType == vk::VK_DESCRIPTOR_TYPE_SAMPLER)
+		builder.addSingleBinding(vk::VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, stageFlags);
+
+	// (combined)samplers follow
+	switch (shaderInterface)
+	{
+		case SHADER_INPUT_SINGLE_DESCRIPTOR:
+			builder.addSingleSamplerBinding(descriptorType, stageFlags, (images.isImmutable()) ? (&samplers[0]) : (DE_NULL));
+			break;
+
+		case SHADER_INPUT_MULTIPLE_DESCRIPTORS:
+			builder.addSingleSamplerBinding(descriptorType, stageFlags, (images.isImmutable()) ? (&samplers[0]) : (DE_NULL));
+			builder.addSingleSamplerBinding(descriptorType, stageFlags, (images.isImmutable()) ? (&samplers[1]) : (DE_NULL));
+			break;
+
+		case SHADER_INPUT_DESCRIPTOR_ARRAY:
+			builder.addArraySamplerBinding(descriptorType, 2u, stageFlags, (images.isImmutable()) ? (samplers) : (DE_NULL));
+			break;
+
+		default:
+			DE_FATAL("Impossible");
+	}
+
+	return builder.build(vki, device);
+}
+
+// Creates a pipeline layout containing exactly the one descriptor set layout
+// under test and no push-constant ranges.
+vk::Move<vk::VkPipelineLayout> ImageSampleRenderInstance::createPipelineLayout (const vk::DeviceInterface&	vki,
+																				vk::VkDevice				device,
+																				vk::VkDescriptorSetLayout	descriptorSetLayout)
+{
+	const vk::VkPipelineLayoutCreateInfo createInfo =
+	{
+		vk::VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO,
+		DE_NULL,
+		1,						// descriptorSetCount
+		&descriptorSetLayout,	// pSetLayouts
+		0u,						// pushConstantRangeCount
+		DE_NULL,				// pPushConstantRanges
+	};
+	return vk::createPipelineLayout(vki, device, &createInfo);
+}
+
+// Creates a one-shot descriptor pool sized for a single set. With immutable
+// samplers no sampler descriptors are allocated from the pool since they live
+// in the layout instead.
+vk::Move<vk::VkDescriptorPool> ImageSampleRenderInstance::createDescriptorPool (const vk::DeviceInterface&	vki,
+																				vk::VkDevice				device,
+																				vk::VkDescriptorType		descriptorType,
+																				ShaderInputInterface		shaderInterface,
+																				bool						isImmutable)
+{
+	vk::DescriptorPoolBuilder builder;
+
+	if (descriptorType == vk::VK_DESCRIPTOR_TYPE_SAMPLER)
+	{
+		// separate samplers need image to sample
+		builder.addType(vk::VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE);
+
+		// samplers needed only if they are specified in the descriptor set
+		if (!isImmutable)
+			builder.addType(vk::VK_DESCRIPTOR_TYPE_SAMPLER, getInterfaceNumResources(shaderInterface));
+	}
+	else if (descriptorType == vk::VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER)
+	{
+		// combined image samplers
+		builder.addType(vk::VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER, getInterfaceNumResources(shaderInterface));
+	}
+	else
+		DE_FATAL("Impossible");
+
+	return builder.build(vki, device, vk::VK_DESCRIPTOR_POOL_USAGE_ONE_SHOT, 1);
+}
+
+// Allocates the descriptor set from the pool and populates it with the
+// write helper matching the descriptor type under test.
+vk::Move<vk::VkDescriptorSet> ImageSampleRenderInstance::createDescriptorSet (const vk::DeviceInterface&		vki,
+																			  vk::VkDevice						device,
+																			  vk::VkDescriptorType				descriptorType,
+																			  ShaderInputInterface				shaderInterface,
+																			  vk::VkDescriptorSetLayout			layout,
+																			  vk::VkDescriptorPool				pool,
+																			  bool								isImmutable,
+																			  const ImageSampleInstanceImages&	images)
+{
+	vk::Move<vk::VkDescriptorSet> descriptorSet = allocDescriptorSet(vki, device, pool, vk::VK_DESCRIPTOR_SET_USAGE_ONE_SHOT, layout);
+
+	if (descriptorType == vk::VK_DESCRIPTOR_TYPE_SAMPLER)
+		writeSamplerDescriptorSet(vki, device, shaderInterface, isImmutable, images, *descriptorSet);
+	else if (descriptorType == vk::VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER)
+		writeImageSamplerDescriptorSet(vki, device, shaderInterface, isImmutable, images, *descriptorSet);
+	else
+		DE_FATAL("Impossible");
+
+	return descriptorSet;
+}
+
+// Populates the descriptor set for the separate-sampler case: the shared
+// texture goes to binding 0; sampler descriptors are written to the following
+// binding(s) only when samplers are not immutable (immutable samplers are part
+// of the layout and must not be written).
+void ImageSampleRenderInstance::writeSamplerDescriptorSet (const vk::DeviceInterface&		vki,
+														   vk::VkDevice						device,
+														   ShaderInputInterface				shaderInterface,
+														   bool								isImmutable,
+														   const ImageSampleInstanceImages&	images,
+														   vk::VkDescriptorSet				descriptorSet)
+{
+	const vk::VkDescriptorInfo		imageInfo			= createDescriptorInfo(images.getImageViewA(), vk::VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL);
+	const vk::VkDescriptorInfo		samplersInfos[2]	=
+	{
+		createDescriptorInfo(images.getSamplerA()),
+		createDescriptorInfo(images.getSamplerB()),
+	};
+
+	vk::DescriptorSetUpdateBuilder	builder;
+
+	// stand alone texture
+	builder.writeSingle(descriptorSet, vk::DescriptorSetUpdateBuilder::Location::binding(0u), vk::VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, &imageInfo);
+
+	// samplers
+	if (!isImmutable)
+	{
+		switch (shaderInterface)
+		{
+			case SHADER_INPUT_SINGLE_DESCRIPTOR:
+				builder.writeSingle(descriptorSet, vk::DescriptorSetUpdateBuilder::Location::binding(1u), vk::VK_DESCRIPTOR_TYPE_SAMPLER, &samplersInfos[0]);
+				break;
+
+			case SHADER_INPUT_MULTIPLE_DESCRIPTORS:
+				builder.writeSingle(descriptorSet, vk::DescriptorSetUpdateBuilder::Location::binding(1u), vk::VK_DESCRIPTOR_TYPE_SAMPLER, &samplersInfos[0]);
+				builder.writeSingle(descriptorSet, vk::DescriptorSetUpdateBuilder::Location::binding(2u), vk::VK_DESCRIPTOR_TYPE_SAMPLER, &samplersInfos[1]);
+				break;
+
+			case SHADER_INPUT_DESCRIPTOR_ARRAY:
+				builder.writeArray(descriptorSet, vk::DescriptorSetUpdateBuilder::Location::binding(1u), vk::VK_DESCRIPTOR_TYPE_SAMPLER, 2u, samplersInfos);
+				break;
+
+			default:
+				DE_FATAL("Impossible");
+		}
+	}
+
+	builder.update(vki, device);
+}
+
+// Populates the descriptor set for the combined-image-sampler case. With
+// immutable samplers the sampler handle in the descriptor info is left null
+// (0) since the layout already provides it; otherwise samplers A/B are paired
+// with image views A/B. Bindings start at 0 (no separate texture binding).
+void ImageSampleRenderInstance::writeImageSamplerDescriptorSet (const vk::DeviceInterface&			vki,
+																vk::VkDevice						device,
+																ShaderInputInterface				shaderInterface,
+																bool								isImmutable,
+																const ImageSampleInstanceImages&	images,
+																vk::VkDescriptorSet					descriptorSet)
+{
+	const vk::VkSampler			samplers[2]			=
+	{
+		(isImmutable) ? (0) : (images.getSamplerA()),
+		(isImmutable) ? (0) : (images.getSamplerB()),
+	};
+	const vk::VkDescriptorInfo	imageSamplers[2]	=
+	{
+		createDescriptorInfo(samplers[0], images.getImageViewA(), vk::VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL),
+		createDescriptorInfo(samplers[1], images.getImageViewB(), vk::VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL),
+	};
+
+	vk::DescriptorSetUpdateBuilder builder;
+
+	// combined image samplers
+	switch (shaderInterface)
+	{
+		case SHADER_INPUT_SINGLE_DESCRIPTOR:
+			builder.writeSingle(descriptorSet, vk::DescriptorSetUpdateBuilder::Location::binding(0u), vk::VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER, &imageSamplers[0]);
+			break;
+
+		case SHADER_INPUT_MULTIPLE_DESCRIPTORS:
+			builder.writeSingle(descriptorSet, vk::DescriptorSetUpdateBuilder::Location::binding(0u), vk::VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER, &imageSamplers[0]);
+			builder.writeSingle(descriptorSet, vk::DescriptorSetUpdateBuilder::Location::binding(1u), vk::VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER, &imageSamplers[1]);
+			break;
+
+		case SHADER_INPUT_DESCRIPTOR_ARRAY:
+			builder.writeArray(descriptorSet, vk::DescriptorSetUpdateBuilder::Location::binding(0u), vk::VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER, 2u, imageSamplers);
+			break;
+
+		default:
+			DE_FATAL("Impossible");
+	}
+
+	builder.update(vki, device);
+}
+
+// Writes a human-readable description of the test configuration (descriptor
+// layout, view type, base level/slice, sampler modes, sample positions and
+// active shader stages) to the test log. Has no effect on the test result.
+void ImageSampleRenderInstance::logTestPlan (void) const
+{
+	std::ostringstream msg;
+
+	msg << "Rendering 2x2 grid.\n";
+
+	if (m_descriptorType == vk::VK_DESCRIPTOR_TYPE_SAMPLER)
+	{
+		msg << "Single descriptor set. Descriptor set contains "
+			<< ((m_shaderInterface == SHADER_INPUT_SINGLE_DESCRIPTOR) ? "single" :
+			    (m_shaderInterface == SHADER_INPUT_MULTIPLE_DESCRIPTORS) ? "two" :
+			    (m_shaderInterface == SHADER_INPUT_DESCRIPTOR_ARRAY) ? "an array (size 2) of" :
+			    (const char*)DE_NULL)
+			<< " VK_DESCRIPTOR_TYPE_SAMPLER descriptor(s) and a single texture.\n";
+	}
+	else if (m_descriptorType == vk::VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER)
+	{
+		msg << "Single descriptor set. Descriptor set contains "
+			<< ((m_shaderInterface == SHADER_INPUT_SINGLE_DESCRIPTOR) ? "single" :
+			    (m_shaderInterface == SHADER_INPUT_MULTIPLE_DESCRIPTORS) ? "two" :
+			    (m_shaderInterface == SHADER_INPUT_DESCRIPTOR_ARRAY) ? "an array (size 2) of" :
+			    (const char*)DE_NULL)
+			<< " VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER descriptor(s).\n";
+	}
+	else
+		DE_FATAL("Impossible");
+
+	msg << "Image view type is " << vk::getImageViewTypeName(m_viewType) << "\n";
+
+	// only log non-default (non-zero) view parameters
+	if (m_baseMipLevel)
+		msg << "Image view base mip level = " << m_baseMipLevel << "\n";
+	if (m_baseArraySlice)
+		msg << "Image view base array slice = " << m_baseArraySlice << "\n";
+
+	if (m_shaderInterface == SHADER_INPUT_SINGLE_DESCRIPTOR)
+		msg << "Sampler mode is LINEAR, with WRAP\n";
+	else
+		msg << "Sampler 0 mode is LINEAR, with WRAP\nSampler 1 mode is NEAREST with CLAMP\n";
+
+	if (m_stageFlags == 0u)
+	{
+		msg << "Descriptors are not accessed in any shader stage.\n";
+	}
+	else
+	{
+		msg << "Color in each cell is fetched using the descriptor(s):\n";
+
+		for (int resultNdx = 0; resultNdx < 4; ++resultNdx)
+		{
+			msg << "Test sample " << resultNdx << ": sample at position " << m_images.getSamplePos(m_viewType, m_baseMipLevel, m_baseArraySlice, resultNdx);
+
+			if (m_shaderInterface != SHADER_INPUT_SINGLE_DESCRIPTOR)
+			{
+				const int srcResourceNdx = (resultNdx % 2); // ABAB source
+
+				if (m_descriptorType == vk::VK_DESCRIPTOR_TYPE_SAMPLER)
+					msg << " using sampler " << srcResourceNdx;
+				else if (m_descriptorType == vk::VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER)
+					msg << " from combined image sampler " << srcResourceNdx;
+				else
+					DE_FATAL("Impossible");
+			}
+			msg << "\n";
+		}
+
+		msg << "Descriptors are accessed in {"
+			<< (((m_stageFlags & vk::VK_SHADER_STAGE_VERTEX_BIT) != 0)			? (" vertex")			: (""))
+			<< (((m_stageFlags & vk::VK_SHADER_STAGE_TESS_CONTROL_BIT) != 0)	? (" tess_control")		: (""))
+			<< (((m_stageFlags & vk::VK_SHADER_STAGE_TESS_EVALUATION_BIT) != 0)	? (" tess_evaluation")	: (""))
+			<< (((m_stageFlags & vk::VK_SHADER_STAGE_GEOMETRY_BIT) != 0)		? (" geometry")			: (""))
+			<< (((m_stageFlags & vk::VK_SHADER_STAGE_FRAGMENT_BIT) != 0)		? (" fragment")			: (""))
+			<< " } stages.";
+	}
+
+	m_context.getTestContext().getLog()
+		<< tcu::TestLog::Message
+		<< msg.str()
+		<< tcu::TestLog::EndMessage;
+}
+
+// Returns the pipeline layout used for drawing (base class accessor).
+vk::VkPipelineLayout ImageSampleRenderInstance::getPipelineLayout (void) const
+{
+	return *m_pipelineLayout;
+}
+
+// Records the draw commands: binds the descriptor set under test and issues a
+// single non-indexed draw of 24 vertices (4 quads x 2 triangles x 3 vertices).
+void ImageSampleRenderInstance::writeDrawCmdBuffer (vk::VkCmdBuffer cmd) const
+{
+	m_vki.cmdBindDescriptorSets(cmd, vk::VK_PIPELINE_BIND_POINT_GRAPHICS, getPipelineLayout(), 0u, 1u, &m_descriptorSet.get(), 0u, DE_NULL);
+	m_vki.cmdDraw(cmd, 0u, 6u * 4u, 0u, 1u); // render four quads (two separate triangles)
+}
+
+// Compares the rendered image against a reference drawn on the CPU. When no
+// shader stage accesses the descriptors the shaders output a fixed
+// yellow/green/green/yellow quadrant pattern; otherwise the per-quadrant
+// colors come from the reference sampling in fetchSampleValue().
+tcu::TestStatus ImageSampleRenderInstance::verifyResultImage (const tcu::ConstPixelBufferAccess& result) const
+{
+	const tcu::Vec4		green		(0.0f, 1.0f, 0.0f, 1.0f);
+	const tcu::Vec4		yellow		(1.0f, 1.0f, 0.0f, 1.0f);
+	const bool			doFetch		= (m_stageFlags != 0u); // no active stages? Then don't fetch
+	const tcu::Vec4		sample0		= (!doFetch) ? (yellow)	: (m_images.fetchSampleValue(0));
+	const tcu::Vec4		sample1		= (!doFetch) ? (green)	: (m_images.fetchSampleValue(1));
+	const tcu::Vec4		sample2		= (!doFetch) ? (green)	: (m_images.fetchSampleValue(2));
+	const tcu::Vec4		sample3		= (!doFetch) ? (yellow)	: (m_images.fetchSampleValue(3));
+	const tcu::RGBA		threshold	= tcu::RGBA(8, 8, 8, 8); // source image is high-frequency so the threshold is quite large to tolerate sampling errors
+	tcu::Surface		reference	(m_targetSize.x(), m_targetSize.y());
+
+	drawQuadrantReferenceResult(reference.getAccess(), sample0, sample1, sample2, sample3);
+
+	// bilinear compare tolerates the one-pixel sampling differences along quadrant edges
+	if (!bilinearCompare(m_context.getTestContext().getLog(), "Compare", "Result comparison", reference.getAccess(), result, threshold, tcu::COMPARE_LOG_RESULT))
+		return tcu::TestStatus::fail("Image verification failed");
+	else
+		return tcu::TestStatus::pass("Pass");
+}
+
+// Compute pipeline variant of the image/sampler descriptor tests. A compute
+// shader samples through the descriptor(s) under test and writes the results
+// to a storage buffer, which is then compared against the CPU reference.
+class ImageSampleComputeInstance : public vkt::TestInstance
+{
+public:
+											ImageSampleComputeInstance		(vkt::Context&			context,
+																			 vk::VkDescriptorType	descriptorType,
+																			 ShaderInputInterface	shaderInterface,
+																			 vk::VkImageViewType	viewType,
+																			 deUint32				baseMipLevel,
+																			 deUint32				baseArraySlice,
+																			 bool					isImmutableSampler);
+
+private:
+	vk::Move<vk::VkDescriptorSetLayout>		createDescriptorSetLayout		(void) const;
+	vk::Move<vk::VkDescriptorPool>			createDescriptorPool			(void) const;
+	vk::Move<vk::VkDescriptorSet>			createDescriptorSet				(vk::VkDescriptorPool pool, vk::VkDescriptorSetLayout layout) const;
+	void									writeImageSamplerDescriptorSet	(vk::VkDescriptorSet descriptorSet) const;
+	void									writeSamplerDescriptorSet		(vk::VkDescriptorSet descriptorSet) const;
+
+	tcu::TestStatus							iterate							(void);
+	void									logTestPlan						(void) const;
+	tcu::TestStatus							testResourceAccess				(void);
+
+	const vk::VkDescriptorType				m_descriptorType;
+	const ShaderInputInterface				m_shaderInterface;
+	const vk::VkImageViewType				m_viewType;
+	const deUint32							m_baseMipLevel;
+	const deUint32							m_baseArraySlice;
+	const bool								m_isImmutableSampler;
+
+	// cached device handles/interfaces, fetched from the context in the constructor
+	const vk::DeviceInterface&				m_vki;
+	const vk::VkDevice						m_device;
+	const vk::VkQueue						m_queue;
+	const deUint32							m_queueFamilyIndex;
+	vk::Allocator&							m_allocator;
+
+	const ComputeInstanceResultBuffer		m_result;	// SSBO the compute shader writes sample results into
+	const ImageSampleInstanceImages			m_images;
+};
+
+// Constructor: caches device handles from the context and creates the result
+// buffer and test images. Descriptor objects are created later per-iteration
+// in testResourceAccess().
+ImageSampleComputeInstance::ImageSampleComputeInstance (Context&				context,
+														vk::VkDescriptorType	descriptorType,
+														ShaderInputInterface	shaderInterface,
+														vk::VkImageViewType		viewType,
+														deUint32				baseMipLevel,
+														deUint32				baseArraySlice,
+														bool					isImmutableSampler)
+	: vkt::TestInstance		(context)
+	, m_descriptorType		(descriptorType)
+	, m_shaderInterface		(shaderInterface)
+	, m_viewType			(viewType)
+	, m_baseMipLevel		(baseMipLevel)
+	, m_baseArraySlice		(baseArraySlice)
+	, m_isImmutableSampler	(isImmutableSampler)
+	, m_vki					(context.getDeviceInterface())
+	, m_device				(context.getDevice())
+	, m_queue				(context.getUniversalQueue())
+	, m_queueFamilyIndex	(context.getUniversalQueueFamilyIndex())
+	, m_allocator			(context.getDefaultAllocator())
+	, m_result				(m_vki, m_device, m_allocator)
+	, m_images				(m_vki, m_device, m_queueFamilyIndex, m_queue, m_allocator, m_descriptorType, m_shaderInterface, m_viewType, m_baseMipLevel, m_baseArraySlice, isImmutableSampler)
+{
+}
+
+// Builds the descriptor set layout for the compute variant: binding 0 is the
+// result storage buffer, then (for separate-sampler tests) the sampled image,
+// followed by the sampler/combined-image-sampler binding(s) matching the
+// shader interface. When m_images.isImmutable() the samplers are baked into
+// the layout as immutable samplers.
+vk::Move<vk::VkDescriptorSetLayout> ImageSampleComputeInstance::createDescriptorSetLayout (void) const
+{
+	const vk::VkSampler samplers[2] =
+	{
+		m_images.getSamplerA(),
+		m_images.getSamplerB(),
+	};
+
+	vk::DescriptorSetLayoutBuilder builder;
+
+	// result buffer
+	builder.addSingleBinding(vk::VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, vk::VK_SHADER_STAGE_COMPUTE_BIT);
+
+	// with samplers, separate texture at binding 0
+	if (m_descriptorType == vk::VK_DESCRIPTOR_TYPE_SAMPLER)
+		builder.addSingleBinding(vk::VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, vk::VK_SHADER_STAGE_COMPUTE_BIT);
+
+	// (combined)samplers follow
+	switch (m_shaderInterface)
+	{
+		case SHADER_INPUT_SINGLE_DESCRIPTOR:
+			builder.addSingleSamplerBinding(m_descriptorType, vk::VK_SHADER_STAGE_COMPUTE_BIT, (m_images.isImmutable()) ? (&samplers[0]) : (DE_NULL));
+			break;
+
+		case SHADER_INPUT_MULTIPLE_DESCRIPTORS:
+			builder.addSingleSamplerBinding(m_descriptorType, vk::VK_SHADER_STAGE_COMPUTE_BIT, (m_images.isImmutable()) ? (&samplers[0]) : (DE_NULL));
+			builder.addSingleSamplerBinding(m_descriptorType, vk::VK_SHADER_STAGE_COMPUTE_BIT, (m_images.isImmutable()) ? (&samplers[1]) : (DE_NULL));
+			break;
+
+		case SHADER_INPUT_DESCRIPTOR_ARRAY:
+			builder.addArraySamplerBinding(m_descriptorType, 2u, vk::VK_SHADER_STAGE_COMPUTE_BIT, (m_images.isImmutable()) ? (samplers) : (DE_NULL));
+			break;
+
+		default:
+			DE_FATAL("Impossible");
+	}
+
+	return builder.build(m_vki, m_device);
+}
+
+// Creates a one-shot descriptor pool for a single compute set: one storage
+// buffer (result), the sampler/combined descriptors, plus a sampled image for
+// the separate-sampler case.
+vk::Move<vk::VkDescriptorPool> ImageSampleComputeInstance::createDescriptorPool (void) const
+{
+	vk::DescriptorPoolBuilder builder;
+
+	builder.addType(vk::VK_DESCRIPTOR_TYPE_STORAGE_BUFFER);
+	builder.addType(m_descriptorType, getInterfaceNumResources(m_shaderInterface));
+
+	if (m_descriptorType == vk::VK_DESCRIPTOR_TYPE_SAMPLER)
+		builder.addType(vk::VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE);
+
+	return builder.build(m_vki, m_device, vk::VK_DESCRIPTOR_POOL_USAGE_ONE_SHOT, 1);
+}
+
+// Allocates the compute descriptor set and fills it with the write helper
+// matching the descriptor type under test.
+vk::Move<vk::VkDescriptorSet> ImageSampleComputeInstance::createDescriptorSet (vk::VkDescriptorPool pool, vk::VkDescriptorSetLayout layout) const
+{
+	vk::Move<vk::VkDescriptorSet> descriptorSet = allocDescriptorSet(m_vki, m_device, pool, vk::VK_DESCRIPTOR_SET_USAGE_ONE_SHOT, layout);
+
+	if (m_descriptorType == vk::VK_DESCRIPTOR_TYPE_SAMPLER)
+		writeSamplerDescriptorSet(*descriptorSet);
+	else if (m_descriptorType == vk::VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER)
+		writeImageSamplerDescriptorSet(*descriptorSet);
+	else
+		DE_FATAL("Impossible");
+
+	return descriptorSet;
+}
+
+// Populates the compute descriptor set for the separate-sampler case: result
+// buffer at binding 0, shared texture at binding 1, then the sampler
+// descriptor(s) starting at binding 2 — written only when the samplers are not
+// immutable (immutable samplers live in the layout).
+void ImageSampleComputeInstance::writeSamplerDescriptorSet (vk::VkDescriptorSet descriptorSet) const
+{
+	const vk::VkDescriptorInfo		resultInfo			= createDescriptorInfo(m_result.getBufferView());
+	const vk::VkDescriptorInfo		imageInfo			= createDescriptorInfo(m_images.getImageViewA(), vk::VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL);
+	const vk::VkDescriptorInfo		samplersInfos[2]	=
+	{
+		createDescriptorInfo(m_images.getSamplerA()),
+		createDescriptorInfo(m_images.getSamplerB()),
+	};
+
+	vk::DescriptorSetUpdateBuilder	builder;
+
+	// result
+	builder.writeSingle(descriptorSet, vk::DescriptorSetUpdateBuilder::Location::binding(0u), vk::VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, &resultInfo);
+
+	// stand alone texture
+	builder.writeSingle(descriptorSet, vk::DescriptorSetUpdateBuilder::Location::binding(1u), vk::VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, &imageInfo);
+
+	// samplers
+	if (!m_isImmutableSampler)
+	{
+		switch (m_shaderInterface)
+		{
+			case SHADER_INPUT_SINGLE_DESCRIPTOR:
+				builder.writeSingle(descriptorSet, vk::DescriptorSetUpdateBuilder::Location::binding(2u), vk::VK_DESCRIPTOR_TYPE_SAMPLER, &samplersInfos[0]);
+				break;
+
+			case SHADER_INPUT_MULTIPLE_DESCRIPTORS:
+				builder.writeSingle(descriptorSet, vk::DescriptorSetUpdateBuilder::Location::binding(2u), vk::VK_DESCRIPTOR_TYPE_SAMPLER, &samplersInfos[0]);
+				builder.writeSingle(descriptorSet, vk::DescriptorSetUpdateBuilder::Location::binding(3u), vk::VK_DESCRIPTOR_TYPE_SAMPLER, &samplersInfos[1]);
+				break;
+
+			case SHADER_INPUT_DESCRIPTOR_ARRAY:
+				builder.writeArray(descriptorSet, vk::DescriptorSetUpdateBuilder::Location::binding(2u), vk::VK_DESCRIPTOR_TYPE_SAMPLER, 2u, samplersInfos);
+				break;
+
+			default:
+				DE_FATAL("Impossible");
+		}
+	}
+
+	builder.update(m_vki, m_device);
+}
+
+// Populates the compute descriptor set for the combined-image-sampler case:
+// result buffer at binding 0, then combined descriptor(s) starting at binding
+// 1. With immutable samplers the handle in the descriptor info is left null
+// (0) since the layout already provides it.
+void ImageSampleComputeInstance::writeImageSamplerDescriptorSet (vk::VkDescriptorSet descriptorSet) const
+{
+	const vk::VkDescriptorInfo	resultInfo			= createDescriptorInfo(m_result.getBufferView());
+	const vk::VkSampler			samplers[2]			=
+	{
+		(m_isImmutableSampler) ? (0) : (m_images.getSamplerA()),
+		(m_isImmutableSampler) ? (0) : (m_images.getSamplerB()),
+	};
+	const vk::VkDescriptorInfo	imageSamplers[2]	=
+	{
+		createDescriptorInfo(samplers[0], m_images.getImageViewA(), vk::VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL),
+		createDescriptorInfo(samplers[1], m_images.getImageViewB(), vk::VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL),
+	};
+
+	vk::DescriptorSetUpdateBuilder builder;
+
+	// result
+	builder.writeSingle(descriptorSet, vk::DescriptorSetUpdateBuilder::Location::binding(0u), vk::VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, &resultInfo);
+
+	// combined image samplers
+	switch (m_shaderInterface)
+	{
+		case SHADER_INPUT_SINGLE_DESCRIPTOR:
+			builder.writeSingle(descriptorSet, vk::DescriptorSetUpdateBuilder::Location::binding(1u), vk::VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER, &imageSamplers[0]);
+			break;
+
+		case SHADER_INPUT_MULTIPLE_DESCRIPTORS:
+			builder.writeSingle(descriptorSet, vk::DescriptorSetUpdateBuilder::Location::binding(1u), vk::VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER, &imageSamplers[0]);
+			builder.writeSingle(descriptorSet, vk::DescriptorSetUpdateBuilder::Location::binding(2u), vk::VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER, &imageSamplers[1]);
+			break;
+
+		case SHADER_INPUT_DESCRIPTOR_ARRAY:
+			builder.writeArray(descriptorSet, vk::DescriptorSetUpdateBuilder::Location::binding(1u), vk::VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER, 2u, imageSamplers);
+			break;
+
+		default:
+			DE_FATAL("Impossible");
+	}
+
+	builder.update(m_vki, m_device);
+}
+
+// Single-iteration test: log the plan, then run the resource access check.
+tcu::TestStatus ImageSampleComputeInstance::iterate (void)
+{
+	logTestPlan();
+	return testResourceAccess();
+}
+
+// Writes a human-readable description of the compute test configuration
+// (descriptor layout, view type, base level/slice, sampler modes and sample
+// positions) to the test log. Has no effect on the test result.
+void ImageSampleComputeInstance::logTestPlan (void) const
+{
+	std::ostringstream msg;
+
+	msg << "Accessing resource in a compute program.\n";
+
+	if (m_descriptorType == vk::VK_DESCRIPTOR_TYPE_SAMPLER)
+	{
+		msg << "Single descriptor set. Descriptor set contains "
+			<< ((m_shaderInterface == SHADER_INPUT_SINGLE_DESCRIPTOR) ? "single" :
+			    (m_shaderInterface == SHADER_INPUT_MULTIPLE_DESCRIPTORS) ? "two" :
+			    (m_shaderInterface == SHADER_INPUT_DESCRIPTOR_ARRAY) ? "an array (size 2) of" :
+			    (const char*)DE_NULL)
+			<< " VK_DESCRIPTOR_TYPE_SAMPLER descriptor(s) and a single texture.\n";
+	}
+	else if (m_descriptorType == vk::VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER)
+	{
+		msg << "Single descriptor set. Descriptor set contains "
+			<< ((m_shaderInterface == SHADER_INPUT_SINGLE_DESCRIPTOR) ? "single" :
+			    (m_shaderInterface == SHADER_INPUT_MULTIPLE_DESCRIPTORS) ? "two" :
+			    (m_shaderInterface == SHADER_INPUT_DESCRIPTOR_ARRAY) ? "an array (size 2) of" :
+			    (const char*)DE_NULL)
+			<< " VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER descriptor(s).\n";
+	}
+	else
+		DE_FATAL("Impossible");
+
+	msg << "Image view type is " << vk::getImageViewTypeName(m_viewType) << "\n";
+
+	// only log non-default (non-zero) view parameters
+	if (m_baseMipLevel)
+		msg << "Image view base mip level = " << m_baseMipLevel << "\n";
+	if (m_baseArraySlice)
+		msg << "Image view base array slice = " << m_baseArraySlice << "\n";
+
+	if (m_shaderInterface == SHADER_INPUT_SINGLE_DESCRIPTOR)
+		msg << "Sampler mode is LINEAR, with WRAP\n";
+	else
+		msg << "Sampler 0 mode is LINEAR, with WRAP\nSampler 1 mode is NEAREST with CLAMP\n";
+
+	for (int resultNdx = 0; resultNdx < 4; ++resultNdx)
+	{
+		msg << "Test sample " << resultNdx << ": sample at position " << m_images.getSamplePos(m_viewType, m_baseMipLevel, m_baseArraySlice, resultNdx);
+
+		if (m_shaderInterface != SHADER_INPUT_SINGLE_DESCRIPTOR)
+		{
+			const int srcResourceNdx = (resultNdx % 2); // ABAB source
+
+			if (m_descriptorType == vk::VK_DESCRIPTOR_TYPE_SAMPLER)
+				msg << " using sampler " << srcResourceNdx;
+			else if (m_descriptorType == vk::VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER)
+				msg << " from combined image sampler " << srcResourceNdx;
+			else
+				DE_FATAL("Impossible");
+		}
+		msg << "\n";
+	}
+
+	m_context.getTestContext().getLog()
+		<< tcu::TestLog::Message
+		<< msg.str()
+		<< tcu::TestLog::EndMessage;
+}
+
+// Dispatches a single 4x1x1 compute grid that samples the bound image four
+// times and writes the results to the result buffer, then compares each
+// written value against a CPU-computed reference sample.
+tcu::TestStatus ImageSampleComputeInstance::testResourceAccess (void)
+{
+ const vk::Unique<vk::VkDescriptorSetLayout> descriptorSetLayout (createDescriptorSetLayout());
+ const vk::Unique<vk::VkDescriptorPool> descriptorPool (createDescriptorPool());
+ const vk::Unique<vk::VkDescriptorSet> descriptorSet (createDescriptorSet(*descriptorPool, *descriptorSetLayout));
+ const ComputePipeline pipeline (m_vki, m_device, m_context.getBinaryCollection(), 1, &descriptorSetLayout.get());
+
+ const vk::VkDescriptorSet descriptorSets[] = { *descriptorSet };
+ const int numDescriptorSets = DE_LENGTH_OF_ARRAY(descriptorSets);
+ const deUint32* const dynamicOffsets = DE_NULL;
+ const int numDynamicOffsets = 0;
+ const void* const* preBarriers = DE_NULL;
+ const int numPreBarriers = 0;
+ // post-dispatch barrier makes the shader writes visible for the host readback
+ const void* const postBarriers[] = { m_result.getResultReadBarrier() };
+ const int numPostBarriers = DE_LENGTH_OF_ARRAY(postBarriers);
+
+ const ComputeCommand compute (m_vki,
+ m_device,
+ pipeline.getPipeline(),
+ pipeline.getPipelineLayout(),
+ tcu::UVec3(4, 1, 1),
+ numDescriptorSets, descriptorSets,
+ numDynamicOffsets, dynamicOffsets,
+ numPreBarriers, preBarriers,
+ numPostBarriers, postBarriers);
+
+ tcu::Vec4 results[4];
+ bool anyResultSet = false;
+ bool allResultsOk = true;
+
+ compute.submitAndWait(m_queueFamilyIndex, m_queue);
+ m_result.readResultContentsTo(&results);
+
+ // verify
+ for (int resultNdx = 0; resultNdx < 4; ++resultNdx)
+ {
+ const tcu::Vec4 result = results[resultNdx];
+ const tcu::Vec4 reference = m_images.fetchSampleValue(resultNdx);
+
+ // source image is high-frequency so the threshold is quite large to tolerate sampling errors
+ const tcu::Vec4 samplingThreshold = tcu::Vec4(8.0f / 255.0f);
+
+ // result buffer is pre-filled with -1; any other value proves the shader wrote this slot
+ if (result != tcu::Vec4(-1.0f))
+ anyResultSet = true;
+
+ if (tcu::boolAny(tcu::greaterThan(tcu::abs(result - reference), samplingThreshold)))
+ {
+ allResultsOk = false;
+
+ m_context.getTestContext().getLog()
+ << tcu::TestLog::Message
+ << "Test sample " << resultNdx << ":\n"
+ << "\tSampling at " << m_images.getSamplePos(m_viewType, m_baseMipLevel, m_baseArraySlice, resultNdx) << "\n"
+ << "\tError expected " << reference << ", got " << result
+ << tcu::TestLog::EndMessage;
+ }
+ }
+
+ // read back and verify
+ if (allResultsOk)
+ return tcu::TestStatus::pass("Pass");
+ else if (anyResultSet)
+ return tcu::TestStatus::fail("Invalid result values");
+ else
+ {
+ // no slot was touched at all: distinguish "never ran / never wrote" from "wrong values"
+ m_context.getTestContext().getLog()
+ << tcu::TestLog::Message
+ << "Result buffer was not written to."
+ << tcu::TestLog::EndMessage;
+ return tcu::TestStatus::fail("Result buffer was not written to");
+ }
+}
+
+// Test case for image-backed descriptors (sampler, combined image sampler,
+// sampled image, storage image). Renders a 2x2 quadrant grid (or dispatches a
+// compute shader) where each quadrant accesses the descriptor(s) and checks
+// the results.
+class ImageDescriptorCase : public QuadrantRendederCase
+{
+public:
+ enum
+ {
+ FLAG_BASE_MIP = (1u << 1u), //!< use non-zero base mip level in the image view
+ FLAG_BASE_SLICE = (1u << 2u), //!< use non-zero base array slice in the image view
+ };
+ // enum continues where resource flags ends
+ DE_STATIC_ASSERT((deUint32)FLAG_BASE_MIP == (deUint32)RESOURCE_FLAG_LAST);
+
+ ImageDescriptorCase (tcu::TestContext& testCtx,
+ const char* name,
+ const char* description,
+ bool isPrimaryCmdBuf,
+ vk::VkDescriptorType descriptorType,
+ vk::VkShaderStageFlags exitingStages,
+ vk::VkShaderStageFlags activeStages,
+ ShaderInputInterface shaderInterface,
+ vk::VkImageViewType viewType,
+ deUint32 flags);
+
+private:
+ std::string genExtensionDeclarations (vk::VkShaderStage stage) const;
+ std::string genResourceDeclarations (vk::VkShaderStage stage, int numUsedBindings) const;
+ std::string genFetchCoordStr (int fetchPosNdx) const;
+ std::string genSampleCoordStr (int samplePosNdx) const;
+ std::string genResourceAccessSource (vk::VkShaderStage stage) const;
+ std::string genNoAccessSource (void) const;
+
+ vkt::TestInstance* createInstance (vkt::Context& context) const;
+
+private:
+ const bool m_isPrimaryCmdBuf;
+ const vk::VkDescriptorType m_descriptorType;
+ const ShaderInputInterface m_shaderInterface;
+ const vk::VkImageViewType m_viewType;
+ const deUint32 m_baseMipLevel;
+ const deUint32 m_baseArraySlice;
+ const bool m_isImmutableSampler;
+};
+
+ImageDescriptorCase::ImageDescriptorCase (tcu::TestContext& testCtx,
+ const char* name,
+ const char* description,
+ bool isPrimaryCmdBuf,
+ vk::VkDescriptorType descriptorType,
+ vk::VkShaderStageFlags exitingStages,
+ vk::VkShaderStageFlags activeStages,
+ ShaderInputInterface shaderInterface,
+ vk::VkImageViewType viewType,
+ deUint32 flags)
+ : QuadrantRendederCase (testCtx, name, description,
+ // \note 1D textures are not supported in ES
+ (viewType == vk::VK_IMAGE_VIEW_TYPE_1D || viewType == vk::VK_IMAGE_VIEW_TYPE_1D_ARRAY) ? glu::GLSL_VERSION_440 : glu::GLSL_VERSION_310_ES,
+ exitingStages, activeStages)
+ , m_isPrimaryCmdBuf (isPrimaryCmdBuf)
+ , m_descriptorType (descriptorType)
+ , m_shaderInterface (shaderInterface)
+ // flags map to a base level/slice of 1; zero means "use the whole resource"
+ , m_baseMipLevel (((flags & FLAG_BASE_MIP) != 0) ? (1u) : (0u))
+ , m_baseArraySlice (((flags & FLAG_BASE_SLICE) != 0) ? (1u) : (0u))
+ , m_isImmutableSampler ((flags & RESOURCE_FLAG_IMMUTABLE_SAMPLER) != 0)
+{
+}
+
+// Returns the extension declarations required by the generated shader.
+// Cube array views need GL_OES_texture_cube_map_array in ESSL 3.1.
+std::string ImageDescriptorCase::genExtensionDeclarations (vk::VkShaderStage stage) const
+{
+ DE_UNREF(stage);
+
+ if (m_viewType == vk::VK_IMAGE_VIEW_TYPE_CUBE_ARRAY)
+ return "#extension GL_OES_texture_cube_map_array : require\n";
+ else
+ return "";
+}
+
+// Generates the GLSL uniform declarations for the tested descriptor(s),
+// starting at binding index numUsedBindings. The declared opaque type
+// dimension is derived from the image view type.
+std::string ImageDescriptorCase::genResourceDeclarations (vk::VkShaderStage stage, int numUsedBindings) const
+{
+ DE_UNREF(stage);
+
+ // Vulkan-style resources are arrays implicitly, OpenGL-style are not
+ const std::string dimensionBase = (m_viewType == vk::VK_IMAGE_VIEW_TYPE_1D || m_viewType == vk::VK_IMAGE_VIEW_TYPE_1D_ARRAY) ? ("1D")
+ : (m_viewType == vk::VK_IMAGE_VIEW_TYPE_2D || m_viewType == vk::VK_IMAGE_VIEW_TYPE_2D_ARRAY) ? ("2D")
+ : (m_viewType == vk::VK_IMAGE_VIEW_TYPE_3D) ? ("3D")
+ : (m_viewType == vk::VK_IMAGE_VIEW_TYPE_CUBE || m_viewType == vk::VK_IMAGE_VIEW_TYPE_CUBE_ARRAY) ? ("Cube")
+ : (DE_NULL)
+ const std::string dimensionArray = (m_viewType == vk::VK_IMAGE_VIEW_TYPE_1D || m_viewType == vk::VK_IMAGE_VIEW_TYPE_1D_ARRAY) ? ("1DArray")
+ : (m_viewType == vk::VK_IMAGE_VIEW_TYPE_2D || m_viewType == vk::VK_IMAGE_VIEW_TYPE_2D_ARRAY) ? ("2DArray")
+ : (m_viewType == vk::VK_IMAGE_VIEW_TYPE_3D) ? ("3D")
+ : (m_viewType == vk::VK_IMAGE_VIEW_TYPE_CUBE || m_viewType == vk::VK_IMAGE_VIEW_TYPE_CUBE_ARRAY) ? ("CubeArray")
+ : (DE_NULL);
+ const std::string dimension = isImageViewTypeArray(m_viewType) ? dimensionArray : dimensionBase;
+
+ switch (m_shaderInterface)
+ {
+ case SHADER_INPUT_SINGLE_DESCRIPTOR:
+ {
+ switch (m_descriptorType)
+ {
+ case vk::VK_DESCRIPTOR_TYPE_SAMPLER:
+ return "layout(set = 0, binding = " + de::toString(numUsedBindings) + ") uniform highp texture" + dimensionBase + " u_separateTexture;\n"
+ "layout(set = 0, binding = " + de::toString(numUsedBindings+1) + ") uniform highp sampler u_separateSampler;\n";
+ case vk::VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
+ return "layout(set = 0, binding = " + de::toString(numUsedBindings) + ") uniform highp sampler" + dimension + " u_combinedTextureSampler;\n";
+ case vk::VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
+ return "layout(set = 0, binding = " + de::toString(numUsedBindings) + ") uniform highp texture" + dimensionBase + " u_separateTexture;\n";
+ case vk::VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
+ return "layout(set = 0, binding = " + de::toString(numUsedBindings) + ", rgba8) readonly uniform highp image" + dimension + " u_image;\n";
+ default:
+ DE_FATAL("invalid descriptor");
+ return "";
+ }
+ }
+
+ case SHADER_INPUT_MULTIPLE_DESCRIPTORS:
+ switch (m_descriptorType)
+ {
+ case vk::VK_DESCRIPTOR_TYPE_SAMPLER:
+ return "layout(set = 0, binding = " + de::toString(numUsedBindings) + ") uniform highp texture" + dimensionBase + " u_separateTexture;\n"
+ "layout(set = 0, binding = " + de::toString(numUsedBindings+1) + ") uniform highp sampler u_separateSamplerA;\n"
+ "layout(set = 0, binding = " + de::toString(numUsedBindings+2) + ") uniform highp sampler u_separateSamplerB;\n"
+ case vk::VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
+ return "layout(set = 0, binding = " + de::toString(numUsedBindings) + ") uniform highp sampler" + dimension + " u_combinedTextureSamplerA;\n"
+ "layout(set = 0, binding = " + de::toString(numUsedBindings+1) + ") uniform highp sampler" + dimension + " u_combinedTextureSamplerB;\n";
+ case vk::VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
+ return "layout(set = 0, binding = " + de::toString(numUsedBindings) + ") uniform highp texture" + dimensionBase + " u_separateTextureA;\n"
+ "layout(set = 0, binding = " + de::toString(numUsedBindings+1) + ") uniform highp texture" + dimensionBase + " u_separateTextureB;\n";
+ case vk::VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
+ return "layout(set = 0, binding = " + de::toString(numUsedBindings) + ", rgba8) readonly uniform highp image" + dimension + " u_imageA;\n"
+ "layout(set = 0, binding = " + de::toString(numUsedBindings+1) + ", rgba8) readonly uniform highp image" + dimension + " u_imageB;\n";
+ default:
+ DE_FATAL("invalid descriptor");
+ return "";
+ }
+
+ case SHADER_INPUT_DESCRIPTOR_ARRAY:
+ switch (m_descriptorType)
+ {
+ case vk::VK_DESCRIPTOR_TYPE_SAMPLER:
+ return "layout(set = 0, binding = " + de::toString(numUsedBindings) + ") uniform highp texture" + dimensionBase + " u_separateTexture;\n"
+ "layout(set = 0, binding = " + de::toString(numUsedBindings+1) + ") uniform highp sampler u_separateSamplers[2];\n";
+ case vk::VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
+ return "layout(set = 0, binding = " + de::toString(numUsedBindings) + ") uniform highp sampler" + dimension + " u_combinedTextureSampler[2];\n";
+ case vk::VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
+ return "layout(set = 0, binding = " + de::toString(numUsedBindings) + ") uniform highp texture" + dimensionBase + " u_separateTexture[2];\n";
+ case vk::VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
+ return "layout(set = 0, binding = " + de::toString(numUsedBindings) + ", rgba8) readonly uniform highp image" + dimension + " u_image[2];\n";
+ default:
+ DE_FATAL("invalid descriptor");
+ return "";
+ }
+
+ default:
+ DE_FATAL("Impossible");
+ return "";
+ }
+}
+
+// Builds a GLSL integer coordinate literal (int/ivec2/ivec3, matching the view
+// dimensionality) for the given fetch position. Used for texelFetch/imageLoad
+// style access only.
+std::string ImageDescriptorCase::genFetchCoordStr (int fetchPosNdx) const
+{
+ DE_ASSERT(m_descriptorType == vk::VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE || m_descriptorType == vk::VK_DESCRIPTOR_TYPE_STORAGE_IMAGE);
+ const tcu::IVec3 fetchPos = ImageFetchInstanceImages::getFetchPos(m_viewType, m_baseMipLevel, m_baseArraySlice, fetchPosNdx);
+
+ if (m_viewType == vk::VK_IMAGE_VIEW_TYPE_1D)
+ {
+ return de::toString(fetchPos.x());
+ }
+ else if (m_viewType == vk::VK_IMAGE_VIEW_TYPE_1D_ARRAY || m_viewType == vk::VK_IMAGE_VIEW_TYPE_2D)
+ {
+ std::ostringstream buf;
+ buf << "ivec2(" << fetchPos.x() << ", " << fetchPos.y() << ")";
+ return buf.str();
+ }
+ else
+ {
+ std::ostringstream buf;
+ buf << "ivec3(" << fetchPos.x() << ", " << fetchPos.y() << ", " << fetchPos.z() << ")";
+ return buf.str();
+ }
+}
+
+// Builds a GLSL floating-point coordinate literal (float/vec2/vec3/vec4,
+// matching the view dimensionality) for the given sample position. Used for
+// sampler-based access only. Cube arrays take a vec4 coordinate.
+std::string ImageDescriptorCase::genSampleCoordStr (int samplePosNdx) const
+{
+ DE_ASSERT(m_descriptorType == vk::VK_DESCRIPTOR_TYPE_SAMPLER || m_descriptorType == vk::VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER);
+ const tcu::Vec4 fetchPos = ImageSampleInstanceImages::getSamplePos(m_viewType, m_baseMipLevel, m_baseArraySlice, samplePosNdx);
+
+ if (m_viewType == vk::VK_IMAGE_VIEW_TYPE_1D)
+ {
+ std::ostringstream buf;
+ buf << "float(" << fetchPos.x() << ")";
+ return buf.str();
+ }
+ else if (m_viewType == vk::VK_IMAGE_VIEW_TYPE_1D_ARRAY || m_viewType == vk::VK_IMAGE_VIEW_TYPE_2D)
+ {
+ std::ostringstream buf;
+ buf << "vec2(float(" << fetchPos.x() << "), float(" << fetchPos.y() << "))";
+ return buf.str();
+ }
+ else if (m_viewType == vk::VK_IMAGE_VIEW_TYPE_CUBE_ARRAY)
+ {
+ std::ostringstream buf;
+ buf << "vec4(float(" << fetchPos.x() << "), float(" << fetchPos.y() << "), float(" << fetchPos.z() << "), float(" << fetchPos.w() << "))";
+ return buf.str();
+ }
+ else
+ {
+ std::ostringstream buf;
+ buf << "vec3(float(" << fetchPos.x() << "), float(" << fetchPos.y() << "), float(" << fetchPos.z() << "))";
+ return buf.str();
+ }
+}
+
+// Generates the GLSL snippet that reads each quadrant's result_color from the
+// bound descriptor(s). When two resources are bound, quadrants alternate ABAB
+// between them; for a single descriptor both access postfixes are empty and
+// all quadrants hit the same resource.
+// \note Fixed: fetch accesses previously emitted "textureFetch", which is not
+// a GLSL builtin; the correct fetch function is "texelFetch" (GLSL >= 1.30 /
+// ESSL 3.x). NOTE(review): for VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE the declared
+// resource is a separate texture; confirm texelFetch on a samplerless texture
+// is accepted by the shader toolchain in use.
+std::string ImageDescriptorCase::genResourceAccessSource (vk::VkShaderStage stage) const
+{
+ DE_UNREF(stage);
+
+ const char* const dimensionArray = (m_viewType == vk::VK_IMAGE_VIEW_TYPE_1D || m_viewType == vk::VK_IMAGE_VIEW_TYPE_1D_ARRAY) ? ("1DArray")
+ : (m_viewType == vk::VK_IMAGE_VIEW_TYPE_2D || m_viewType == vk::VK_IMAGE_VIEW_TYPE_2D_ARRAY) ? ("2DArray")
+ : (m_viewType == vk::VK_IMAGE_VIEW_TYPE_3D) ? ("3D")
+ : (m_viewType == vk::VK_IMAGE_VIEW_TYPE_CUBE || m_viewType == vk::VK_IMAGE_VIEW_TYPE_CUBE_ARRAY) ? ("CubeArray")
+ : (DE_NULL);
+ const char* const accessPostfixA = (m_shaderInterface == SHADER_INPUT_SINGLE_DESCRIPTOR) ? ("")
+ : (m_shaderInterface == SHADER_INPUT_MULTIPLE_DESCRIPTORS) ? ("A")
+ : (m_shaderInterface == SHADER_INPUT_DESCRIPTOR_ARRAY) ? ("[0]")
+ : (DE_NULL);
+ const char* const accessPostfixB = (m_shaderInterface == SHADER_INPUT_SINGLE_DESCRIPTOR) ? ("")
+ : (m_shaderInterface == SHADER_INPUT_MULTIPLE_DESCRIPTORS) ? ("B")
+ : (m_shaderInterface == SHADER_INPUT_DESCRIPTOR_ARRAY) ? ("[1]")
+ : (DE_NULL);
+
+ switch (m_descriptorType)
+ {
+ case vk::VK_DESCRIPTOR_TYPE_SAMPLER:
+ case vk::VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
+ {
+ const std::string coordStr[4] =
+ {
+ genSampleCoordStr(0),
+ genSampleCoordStr(1),
+ genSampleCoordStr(2),
+ genSampleCoordStr(3),
+ };
+ std::ostringstream buf;
+
+ if (m_descriptorType == vk::VK_DESCRIPTOR_TYPE_SAMPLER)
+ {
+ // separate sampler: combine the single texture with alternating samplers
+ buf << " if (quadrant_id == 0)\n"
+ << " result_color = textureLod(sampler" << dimensionArray << "(u_separateTexture, u_separateSampler" << accessPostfixA << "), " << coordStr[0] << ", 0.0);\n"
+ << " else if (quadrant_id == 1)\n"
+ << " result_color = textureLod(sampler" << dimensionArray << "(u_separateTexture, u_separateSampler" << accessPostfixB << "), " << coordStr[1] << ", 0.0);\n"
+ << " else if (quadrant_id == 2)\n"
+ << " result_color = textureLod(sampler" << dimensionArray << "(u_separateTexture, u_separateSampler" << accessPostfixA << "), " << coordStr[2] << ", 0.0);\n"
+ << " else\n"
+ << " result_color = textureLod(sampler" << dimensionArray << "(u_separateTexture, u_separateSampler" << accessPostfixB << "), " << coordStr[3] << ", 0.0);\n";
+ }
+ else
+ {
+ buf << " if (quadrant_id == 0)\n"
+ << " result_color = textureLod(u_combinedTextureSampler" << accessPostfixA << ", " << coordStr[0] << ", 0.0);\n"
+ << " else if (quadrant_id == 1)\n"
+ << " result_color = textureLod(u_combinedTextureSampler" << accessPostfixB << ", " << coordStr[1] << ", 0.0);\n"
+ << " else if (quadrant_id == 2)\n"
+ << " result_color = textureLod(u_combinedTextureSampler" << accessPostfixA << ", " << coordStr[2] << ", 0.0);\n"
+ << " else\n"
+ << " result_color = textureLod(u_combinedTextureSampler" << accessPostfixB << ", " << coordStr[3] << ", 0.0);\n";
+ }
+
+ return buf.str();
+ }
+
+ case vk::VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
+ case vk::VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
+ {
+ const std::string coordStr[4] =
+ {
+ genFetchCoordStr(0),
+ genFetchCoordStr(1),
+ genFetchCoordStr(2),
+ genFetchCoordStr(3),
+ };
+ std::ostringstream buf;
+
+ if (m_descriptorType == vk::VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE)
+ {
+ // \note GLSL's texel fetch builtin is texelFetch, not textureFetch
+ buf << " if (quadrant_id == 0)\n"
+ << " result_color = texelFetch(u_separateTexture" << accessPostfixA << ", " << coordStr[0] << ", 0);\n"
+ << " else if (quadrant_id == 1)\n"
+ << " result_color = texelFetch(u_separateTexture" << accessPostfixB << ", " << coordStr[1] << ", 0);\n"
+ << " else if (quadrant_id == 2)\n"
+ << " result_color = texelFetch(u_separateTexture" << accessPostfixA << ", " << coordStr[2] << ", 0);\n"
+ << " else\n"
+ << " result_color = texelFetch(u_separateTexture" << accessPostfixB << ", " << coordStr[3] << ", 0);\n";
+ }
+ else
+ {
+ buf << " if (quadrant_id == 0)\n"
+ << " result_color = imageLoad(u_image" << accessPostfixA << ", " << coordStr[0] << ");\n"
+ << " else if (quadrant_id == 1)\n"
+ << " result_color = imageLoad(u_image" << accessPostfixB << ", " << coordStr[1] << ");\n"
+ << " else if (quadrant_id == 2)\n"
+ << " result_color = imageLoad(u_image" << accessPostfixA << ", " << coordStr[2] << ");\n"
+ << " else\n"
+ << " result_color = imageLoad(u_image" << accessPostfixB << ", " << coordStr[3] << ");\n";
+ }
+
+ return buf.str();
+ }
+
+ default:
+ DE_FATAL("invalid descriptor");
+ return "";
+ }
+}
+
+// Generates the GLSL snippet used when no descriptor is accessed: quadrants 1
+// and 2 get green, the rest yellow, so the expected output is still known.
+std::string ImageDescriptorCase::genNoAccessSource (void) const
+{
+ return " if (quadrant_id == 1 || quadrant_id == 2)\n"
+ " result_color = vec4(0.0, 1.0, 0.0, 1.0);\n"
+ " else\n"
+ " result_color = vec4(1.0, 1.0, 0.0, 1.0);\n";
+}
+
+// Creates the matching instance: sample-based instances for (combined) sampler
+// descriptors, fetch-based instances for sampled/storage images; compute
+// variants when the only existing stage is compute (primary cmd buffer only).
+vkt::TestInstance* ImageDescriptorCase::createInstance (vkt::Context& context) const
+{
+ switch (m_descriptorType)
+ {
+ case vk::VK_DESCRIPTOR_TYPE_SAMPLER:
+ case vk::VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
+ if (m_exitingStages == vk::VK_SHADER_STAGE_COMPUTE_BIT)
+ {
+ // compute dispatches are always recorded to a primary command buffer
+ DE_ASSERT(m_isPrimaryCmdBuf);
+ return new ImageSampleComputeInstance(context, m_descriptorType, m_shaderInterface, m_viewType, m_baseMipLevel, m_baseArraySlice, m_isImmutableSampler);
+ }
+ else
+ return new ImageSampleRenderInstance(context, m_isPrimaryCmdBuf, m_descriptorType, m_activeStages, m_shaderInterface, m_viewType, m_baseMipLevel, m_baseArraySlice, m_isImmutableSampler);
+
+ case vk::VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
+ case vk::VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
+ if (m_exitingStages == vk::VK_SHADER_STAGE_COMPUTE_BIT)
+ {
+ DE_ASSERT(m_isPrimaryCmdBuf);
+ return new ImageFetchComputeInstance(context, m_descriptorType, m_shaderInterface, m_viewType, m_baseMipLevel, m_baseArraySlice);
+ }
+ else
+ return new ImageFetchRenderInstance(context, m_isPrimaryCmdBuf, m_descriptorType, m_activeStages, m_shaderInterface, m_viewType, m_baseMipLevel, m_baseArraySlice);
+
+ default:
+ DE_FATAL("Impossible");
+ return DE_NULL;
+ }
+}
+
+// Owns the source buffer(s), buffer view(s) and host-write barriers used by
+// the texel buffer tests. Keeps CPU-side copies of the buffer contents so
+// reference texel values can be computed for verification.
+class TexelBufferInstanceBuffers
+{
+public:
+ TexelBufferInstanceBuffers (const vk::DeviceInterface& vki,
+ vk::VkDevice device,
+ vk::Allocator& allocator,
+ vk::VkDescriptorType descriptorType,
+ int numTexelBuffers,
+ bool hasViewOffset);
+
+private:
+ static vk::Move<vk::VkBuffer> createBuffer (const vk::DeviceInterface& vki,
+ vk::VkDevice device,
+ vk::Allocator& allocator,
+ vk::VkDescriptorType descriptorType,
+ de::MovePtr<vk::Allocation> *outAllocation);
+
+ static vk::Move<vk::VkBufferView> createBufferView (const vk::DeviceInterface& vki,
+ vk::VkDevice device,
+ const tcu::TextureFormat& textureFormat,
+ deUint32 offset,
+ vk::VkBuffer buffer);
+
+ static vk::VkBufferMemoryBarrier createBarrier (vk::VkDescriptorType descriptorType, vk::VkBuffer buffer);
+
+ void populateSourceBuffer (const tcu::PixelBufferAccess& access);
+ void uploadData (const vk::DeviceInterface& vki, vk::VkDevice device, const vk::Allocation& memory, const de::ArrayBuffer<deUint8>& data);
+
+public:
+ static int getFetchPos (int fetchPosNdx);
+ tcu::Vec4 fetchTexelValue (int fetchPosNdx) const;
+
+ inline int getNumTexelBuffers (void) const { return m_numTexelBuffers; }
+ const tcu::TextureFormat& getTextureFormat (void) const { return m_imageFormat; }
+ inline vk::VkBufferView getBufferViewA (void) const { return *m_bufferViewA; }
+ inline vk::VkBufferView getBufferViewB (void) const { return *m_bufferViewB; }
+ inline const void* getBufferInitBarrierA (void) const { return &m_bufferBarrierA; }
+ inline const void* getBufferInitBarrierB (void) const { return &m_bufferBarrierB; }
+
+private:
+ enum
+ {
+ BUFFER_SIZE = 512,
+ VIEW_OFFSET_VALUE = 256, //!< byte offset used when hasViewOffset is set
+ VIEW_DATA_SIZE = 256, //!< size in bytes
+ VIEW_WIDTH = 64, //!< size in pixels
+ };
+ enum
+ {
+ // some arbitrary points
+ SAMPLE_POINT_0 = 6,
+ SAMPLE_POINT_1 = 51,
+ SAMPLE_POINT_2 = 42,
+ SAMPLE_POINT_3 = 25,
+ };
+
+ const deUint32 m_numTexelBuffers;
+ const tcu::TextureFormat m_imageFormat;
+ const deUint32 m_viewOffset;
+
+ // CPU-side copies of the full buffers plus views windowed at m_viewOffset
+ de::ArrayBuffer<deUint8> m_sourceBufferA;
+ de::ArrayBuffer<deUint8> m_sourceBufferB;
+ const tcu::ConstPixelBufferAccess m_sourceViewA;
+ const tcu::ConstPixelBufferAccess m_sourceViewB;
+
+ de::MovePtr<vk::Allocation> m_bufferMemoryA;
+ de::MovePtr<vk::Allocation> m_bufferMemoryB;
+ const vk::Unique<vk::VkBuffer> m_bufferA;
+ const vk::Unique<vk::VkBuffer> m_bufferB;
+ const vk::Unique<vk::VkBufferView> m_bufferViewA;
+ const vk::Unique<vk::VkBufferView> m_bufferViewB;
+ const vk::VkBufferMemoryBarrier m_bufferBarrierA;
+ const vk::VkBufferMemoryBarrier m_bufferBarrierB;
+};
+
+// Creates the buffer(s), fills them with the deterministic test pattern and
+// uploads the data. When numTexelBuffers == 1 the B-side resources are left
+// as null/empty objects.
+TexelBufferInstanceBuffers::TexelBufferInstanceBuffers (const vk::DeviceInterface& vki,
+ vk::VkDevice device,
+ vk::Allocator& allocator,
+ vk::VkDescriptorType descriptorType,
+ int numTexelBuffers,
+ bool hasViewOffset)
+ : m_numTexelBuffers (numTexelBuffers)
+ , m_imageFormat (tcu::TextureFormat::RGBA, tcu::TextureFormat::UNORM_INT8)
+ , m_viewOffset ((hasViewOffset) ? ((deUint32)VIEW_OFFSET_VALUE) : (0u))
+ , m_sourceBufferA (BUFFER_SIZE)
+ , m_sourceBufferB ((numTexelBuffers == 1)
+ ? (0u)
+ : ((size_t)BUFFER_SIZE))
+ , m_sourceViewA (m_imageFormat, tcu::IVec3(VIEW_WIDTH, 1, 1), m_sourceBufferA.getElementPtr(m_viewOffset))
+ // NOTE(review): when numTexelBuffers == 1, m_sourceBufferB is zero-sized;
+ // getElementPtr(m_viewOffset) on an empty buffer looks out-of-bounds —
+ // confirm de::ArrayBuffer tolerates this (the view is unused in that case).
+ , m_sourceViewB (m_imageFormat, tcu::IVec3(VIEW_WIDTH, 1, 1), m_sourceBufferB.getElementPtr(m_viewOffset))
+ , m_bufferMemoryA (DE_NULL)
+ , m_bufferMemoryB (DE_NULL)
+ , m_bufferA (createBuffer(vki, device, allocator, descriptorType, &m_bufferMemoryA))
+ , m_bufferB ((numTexelBuffers == 1)
+ ? vk::Move<vk::VkBuffer>()
+ : createBuffer(vki, device, allocator, descriptorType, &m_bufferMemoryB))
+ , m_bufferViewA (createBufferView(vki, device, m_imageFormat, m_viewOffset, *m_bufferA))
+ , m_bufferViewB ((numTexelBuffers == 1)
+ ? vk::Move<vk::VkBufferView>()
+ : createBufferView(vki, device, m_imageFormat, m_viewOffset, *m_bufferB))
+ , m_bufferBarrierA (createBarrier(descriptorType, *m_bufferA))
+ // NOTE(review): barrier B is built even in the single-buffer case, with a
+ // null buffer handle; it is presumably never submitted then — verify callers.
+ , m_bufferBarrierB (createBarrier(descriptorType, *m_bufferB))
+{
+ DE_ASSERT(numTexelBuffers == 1 || numTexelBuffers == 2);
+ DE_ASSERT(VIEW_WIDTH * m_imageFormat.getPixelSize() == VIEW_DATA_SIZE);
+ DE_ASSERT(BUFFER_SIZE % m_imageFormat.getPixelSize() == 0);
+
+ // specify and upload
+
+ populateSourceBuffer(tcu::PixelBufferAccess(m_imageFormat, tcu::IVec3(BUFFER_SIZE / m_imageFormat.getPixelSize(), 1, 1), m_sourceBufferA.getPtr()));
+ uploadData(vki, device, *m_bufferMemoryA, m_sourceBufferA);
+
+ if (numTexelBuffers == 2)
+ {
+ populateSourceBuffer(tcu::PixelBufferAccess(m_imageFormat, tcu::IVec3(BUFFER_SIZE / m_imageFormat.getPixelSize(), 1, 1), m_sourceBufferB.getPtr()));
+ uploadData(vki, device, *m_bufferMemoryB, m_sourceBufferB);
+ }
+}
+
+// Creates a host-visible BUFFER_SIZE buffer with uniform- or storage-texel
+// usage (chosen from the descriptor type) and binds fresh memory to it,
+// returning the allocation via outAllocation.
+vk::Move<vk::VkBuffer> TexelBufferInstanceBuffers::createBuffer (const vk::DeviceInterface& vki,
+ vk::VkDevice device,
+ vk::Allocator& allocator,
+ vk::VkDescriptorType descriptorType,
+ de::MovePtr<vk::Allocation> *outAllocation)
+{
+ const vk::VkBufferUsageFlags usage = (isUniformDescriptorType(descriptorType)) ? (vk::VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT) : (vk::VK_BUFFER_USAGE_STORAGE_TEXEL_BUFFER_BIT);
+ const vk::VkBufferCreateInfo createInfo =
+ {
+ vk::VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO,
+ DE_NULL,
+ (vk::VkDeviceSize)BUFFER_SIZE, // size
+ usage, // usage
+ 0u, // flags
+ vk::VK_SHARING_MODE_EXCLUSIVE, // sharingMode
+ 0u, // queueFamilyCount
+ DE_NULL, // pQueueFamilyIndices
+ };
+ vk::Move<vk::VkBuffer> buffer (vk::createBuffer(vki, device, &createInfo));
+ de::MovePtr<vk::Allocation> allocation (allocateAndBindObjectMemory(vki, device, allocator, *buffer, vk::MemoryRequirement::HostVisible));
+
+ *outAllocation = allocation;
+ return buffer;
+}
+
+// Creates a formatted buffer view covering VIEW_DATA_SIZE bytes starting at
+// the given byte offset within the buffer.
+vk::Move<vk::VkBufferView> TexelBufferInstanceBuffers::createBufferView (const vk::DeviceInterface& vki,
+ vk::VkDevice device,
+ const tcu::TextureFormat& textureFormat,
+ deUint32 offset,
+ vk::VkBuffer buffer)
+{
+ const vk::VkBufferViewCreateInfo createInfo =
+ {
+ vk::VK_STRUCTURE_TYPE_BUFFER_VIEW_CREATE_INFO,
+ DE_NULL,
+ buffer, // buffer
+ vk::VK_BUFFER_VIEW_TYPE_FORMATTED, // viewType
+ mapToVkTextureFormat(textureFormat), // format
+ (vk::VkDeviceSize)offset, // offset
+ (vk::VkDeviceSize)VIEW_DATA_SIZE // range
+ };
+ return vk::createBufferView(vki, device, &createInfo);
+}
+
+// Builds a host-write -> shader/uniform-read buffer memory barrier for the
+// whole buffer, used to make the uploaded data visible to the device.
+vk::VkBufferMemoryBarrier TexelBufferInstanceBuffers::createBarrier (vk::VkDescriptorType descriptorType, vk::VkBuffer buffer)
+{
+ const vk::VkMemoryInputFlags inputBit = (isUniformDescriptorType(descriptorType)) ? (vk::VK_MEMORY_INPUT_UNIFORM_READ_BIT) : (vk::VK_MEMORY_INPUT_SHADER_READ_BIT);
+ const vk::VkBufferMemoryBarrier barrier =
+ {
+ vk::VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER,
+ DE_NULL,
+ vk::VK_MEMORY_OUTPUT_HOST_WRITE_BIT, // outputMask
+ inputBit, // inputMask
+ vk::VK_QUEUE_FAMILY_IGNORED, // srcQueueFamilyIndex
+ vk::VK_QUEUE_FAMILY_IGNORED, // destQueueFamilyIndex
+ buffer , // buffer
+ 0u, // offset
+ (vk::VkDeviceSize)BUFFER_SIZE // size
+ };
+ return barrier;
+}
+
+// Fills a 1D pixel buffer with a deterministic pattern designed so that each
+// channel detects a different class of addressing error (see per-channel
+// comments below).
+void TexelBufferInstanceBuffers::populateSourceBuffer (const tcu::PixelBufferAccess& access)
+{
+ DE_ASSERT(access.getHeight() == 1);
+ DE_ASSERT(access.getDepth() == 1);
+
+ const deInt32 width = access.getWidth();
+
+ for (int x = 0; x < width; ++x)
+ {
+ const int red = 255 * x / width; //!< gradient from 0 -> max (detects large offset errors)
+ const int green = ((x % 2 == 0) ? (127) : (0)) + ((x % 4 < 3) ? (128) : (0)); //!< 3-level M pattern (detects small offset errors)
+ const int blue = 16 * (x % 16); //!< 16-long triangle wave
+
+ DE_ASSERT(de::inRange(red, 0, 255));
+ DE_ASSERT(de::inRange(green, 0, 255));
+ DE_ASSERT(de::inRange(blue, 0, 255));
+
+ access.setPixel(tcu::IVec4(red, green, blue, 255), x, 0, 0);
+ }
+}
+
+// Copies the CPU-side data into the (host-visible) buffer memory and flushes
+// the mapped range so the device sees the writes.
+void TexelBufferInstanceBuffers::uploadData (const vk::DeviceInterface& vki, vk::VkDevice device, const vk::Allocation& memory, const de::ArrayBuffer<deUint8>& data)
+{
+ deMemcpy(memory.getHostPtr(), data.getPtr(), data.size());
+ flushMappedMemoryRange(vki, device, memory.getMemory(), memory.getOffset(), data.size());
+}
+
+// Returns the texel index fetched for result slot fetchPosNdx (0..3).
+int TexelBufferInstanceBuffers::getFetchPos (int fetchPosNdx)
+{
+ static const int fetchPositions[4] =
+ {
+ SAMPLE_POINT_0,
+ SAMPLE_POINT_1,
+ SAMPLE_POINT_2,
+ SAMPLE_POINT_3,
+ };
+ return de::getSizedArrayElement<4>(fetchPositions, fetchPosNdx);
+}
+
+// Computes the CPU reference value for result slot fetchPosNdx. Slots
+// alternate ABAB between the two source buffers; with a single buffer, A is
+// used for all slots.
+tcu::Vec4 TexelBufferInstanceBuffers::fetchTexelValue (int fetchPosNdx) const
+{
+ // source order is ABAB
+ const tcu::ConstPixelBufferAccess& texelSrcA = m_sourceViewA;
+ const tcu::ConstPixelBufferAccess& texelSrcB = (m_numTexelBuffers == 1) ? (m_sourceViewA) : (m_sourceViewB);
+ const tcu::ConstPixelBufferAccess& texelSrc = ((fetchPosNdx % 2) == 0) ? (texelSrcA) : (texelSrcB);
+
+ return texelSrc.getPixel(getFetchPos(fetchPosNdx), 0, 0);
+}
+
+// Render-path instance for texel buffer descriptor tests: draws a 2x2
+// quadrant grid where each quadrant fetches from the bound texel buffer
+// view(s), and verifies the rendered image against CPU reference values.
+class TexelBufferRenderInstance : public SingleCmdRenderInstance
+{
+public:
+ TexelBufferRenderInstance (vkt::Context& context,
+ bool isPrimaryCmdBuf,
+ vk::VkDescriptorType descriptorType,
+ vk::VkShaderStageFlags stageFlags,
+ ShaderInputInterface shaderInterface,
+ bool nonzeroViewOffset);
+
+private:
+ static vk::Move<vk::VkDescriptorSetLayout> createDescriptorSetLayout (const vk::DeviceInterface& vki,
+ vk::VkDevice device,
+ vk::VkDescriptorType descriptorType,
+ ShaderInputInterface shaderInterface,
+ vk::VkShaderStageFlags stageFlags);
+
+ static vk::Move<vk::VkPipelineLayout> createPipelineLayout (const vk::DeviceInterface& vki,
+ vk::VkDevice device,
+ vk::VkDescriptorSetLayout descriptorSetLayout);
+
+ static vk::Move<vk::VkDescriptorPool> createDescriptorPool (const vk::DeviceInterface& vki,
+ vk::VkDevice device,
+ vk::VkDescriptorType descriptorType,
+ ShaderInputInterface shaderInterface);
+
+ static vk::Move<vk::VkDescriptorSet> createDescriptorSet (const vk::DeviceInterface& vki,
+ vk::VkDevice device,
+ vk::VkDescriptorType descriptorType,
+ ShaderInputInterface shaderInterface,
+ vk::VkDescriptorSetLayout layout,
+ vk::VkDescriptorPool pool,
+ vk::VkBufferView viewA,
+ vk::VkBufferView viewB);
+
+ void logTestPlan (void) const;
+ vk::VkPipelineLayout getPipelineLayout (void) const;
+ void writeDrawCmdBuffer (vk::VkCmdBuffer cmd) const;
+ tcu::TestStatus verifyResultImage (const tcu::ConstPixelBufferAccess& result) const;
+
+ enum
+ {
+ RENDER_SIZE = 128, //!< render target width/height in pixels
+ };
+
+ const vk::VkDescriptorType m_descriptorType;
+ const vk::VkShaderStageFlags m_stageFlags;
+ const ShaderInputInterface m_shaderInterface;
+ const bool m_nonzeroViewOffset;
+
+ // \note declaration order matters: layouts and buffers must be created
+ // before the descriptor set that references them
+ const vk::Unique<vk::VkDescriptorSetLayout> m_descriptorSetLayout;
+ const vk::Unique<vk::VkPipelineLayout> m_pipelineLayout;
+ const TexelBufferInstanceBuffers m_texelBuffers;
+ const vk::Unique<vk::VkDescriptorPool> m_descriptorPool;
+ const vk::Unique<vk::VkDescriptorSet> m_descriptorSet;
+};
+
+// Creates all Vulkan objects for the render-path texel buffer test; member
+// initializer order mirrors the declaration order above so each object's
+// dependencies already exist.
+TexelBufferRenderInstance::TexelBufferRenderInstance (vkt::Context& context,
+ bool isPrimaryCmdBuf,
+ vk::VkDescriptorType descriptorType,
+ vk::VkShaderStageFlags stageFlags,
+ ShaderInputInterface shaderInterface,
+ bool nonzeroViewOffset)
+ : SingleCmdRenderInstance (context, isPrimaryCmdBuf, tcu::UVec2(RENDER_SIZE, RENDER_SIZE))
+ , m_descriptorType (descriptorType)
+ , m_stageFlags (stageFlags)
+ , m_shaderInterface (shaderInterface)
+ , m_nonzeroViewOffset (nonzeroViewOffset)
+ , m_descriptorSetLayout (createDescriptorSetLayout(m_vki, m_device, m_descriptorType, m_shaderInterface, m_stageFlags))
+ , m_pipelineLayout (createPipelineLayout(m_vki, m_device, *m_descriptorSetLayout))
+ , m_texelBuffers (m_vki, m_device, m_allocator, m_descriptorType, getInterfaceNumResources(m_shaderInterface), m_nonzeroViewOffset)
+ , m_descriptorPool (createDescriptorPool(m_vki, m_device, m_descriptorType, m_shaderInterface))
+ , m_descriptorSet (createDescriptorSet(m_vki, m_device, m_descriptorType, m_shaderInterface, *m_descriptorSetLayout, *m_descriptorPool, m_texelBuffers.getBufferViewA(), m_texelBuffers.getBufferViewB()))
+{
+}
+
+// Builds the descriptor set layout matching the shader interface: one binding,
+// two separate bindings, or one array binding of size 2, all of the tested
+// descriptor type and visible to the given stages.
+vk::Move<vk::VkDescriptorSetLayout> TexelBufferRenderInstance::createDescriptorSetLayout (const vk::DeviceInterface& vki,
+ vk::VkDevice device,
+ vk::VkDescriptorType descriptorType,
+ ShaderInputInterface shaderInterface,
+ vk::VkShaderStageFlags stageFlags)
+{
+ vk::DescriptorSetLayoutBuilder builder;
+
+ switch (shaderInterface)
+ {
+ case SHADER_INPUT_SINGLE_DESCRIPTOR:
+ builder.addSingleBinding(descriptorType, stageFlags);
+ break;
+
+ case SHADER_INPUT_MULTIPLE_DESCRIPTORS:
+ builder.addSingleBinding(descriptorType, stageFlags);
+ builder.addSingleBinding(descriptorType, stageFlags);
+ break;
+
+ case SHADER_INPUT_DESCRIPTOR_ARRAY:
+ builder.addArrayBinding(descriptorType, 2u, stageFlags);
+ break;
+
+ default:
+ DE_FATAL("Impossible");
+ }
+
+ return builder.build(vki, device);
+}
+
+// Creates a pipeline layout containing just the single descriptor set layout
+// and no push constant ranges.
+vk::Move<vk::VkPipelineLayout> TexelBufferRenderInstance::createPipelineLayout (const vk::DeviceInterface& vki,
+ vk::VkDevice device,
+ vk::VkDescriptorSetLayout descriptorSetLayout)
+{
+ const vk::VkPipelineLayoutCreateInfo createInfo =
+ {
+ vk::VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO,
+ DE_NULL,
+ 1, // descriptorSetCount
+ &descriptorSetLayout, // pSetLayouts
+ 0u, // pushConstantRangeCount
+ DE_NULL, // pPushConstantRanges
+ };
+ return vk::createPipelineLayout(vki, device, &createInfo);
+}
+
+// Creates a one-shot descriptor pool sized for exactly one set with the
+// required number of descriptors (1 or 2 depending on the interface).
+vk::Move<vk::VkDescriptorPool> TexelBufferRenderInstance::createDescriptorPool (const vk::DeviceInterface& vki,
+ vk::VkDevice device,
+ vk::VkDescriptorType descriptorType,
+ ShaderInputInterface shaderInterface)
+{
+ return vk::DescriptorPoolBuilder()
+ .addType(descriptorType, getInterfaceNumResources(shaderInterface))
+ .build(vki, device, vk::VK_DESCRIPTOR_POOL_USAGE_ONE_SHOT, 1);
+}
+
+// Allocates the descriptor set and writes the buffer view(s) into it with a
+// layout that matches createDescriptorSetLayout. viewB is ignored for the
+// single-descriptor interface.
+vk::Move<vk::VkDescriptorSet> TexelBufferRenderInstance::createDescriptorSet (const vk::DeviceInterface& vki,
+ vk::VkDevice device,
+ vk::VkDescriptorType descriptorType,
+ ShaderInputInterface shaderInterface,
+ vk::VkDescriptorSetLayout layout,
+ vk::VkDescriptorPool pool,
+ vk::VkBufferView viewA,
+ vk::VkBufferView viewB)
+{
+ const vk::VkDescriptorInfo texelBufferInfos[2] =
+ {
+ createDescriptorInfo(viewA),
+ createDescriptorInfo(viewB),
+ };
+
+ vk::Move<vk::VkDescriptorSet> descriptorSet = allocDescriptorSet(vki, device, pool, vk::VK_DESCRIPTOR_SET_USAGE_ONE_SHOT, layout);
+ vk::DescriptorSetUpdateBuilder builder;
+
+ switch (shaderInterface)
+ {
+ case SHADER_INPUT_SINGLE_DESCRIPTOR:
+ builder.writeSingle(*descriptorSet, vk::DescriptorSetUpdateBuilder::Location::binding(0u), descriptorType, &texelBufferInfos[0]);
+ break;
+
+ case SHADER_INPUT_MULTIPLE_DESCRIPTORS:
+ builder.writeSingle(*descriptorSet, vk::DescriptorSetUpdateBuilder::Location::binding(0u), descriptorType, &texelBufferInfos[0]);
+ builder.writeSingle(*descriptorSet, vk::DescriptorSetUpdateBuilder::Location::binding(1u), descriptorType, &texelBufferInfos[1]);
+ break;
+
+ case SHADER_INPUT_DESCRIPTOR_ARRAY:
+ builder.writeArray(*descriptorSet, vk::DescriptorSetUpdateBuilder::Location::binding(0u), descriptorType, 2u, texelBufferInfos);
+ break;
+
+ default:
+ DE_FATAL("Impossible");
+ }
+
+ builder.update(vki, device);
+ return descriptorSet;
+}
+
+// Writes a human-readable description of the test configuration (descriptor
+// layout, view offset, buffer format, fetch positions, active stages) to the log.
+void TexelBufferRenderInstance::logTestPlan (void) const
+{
+ std::ostringstream msg;
+
+ msg << "Rendering 2x2 grid.\n"
+ << "Single descriptor set. Descriptor set contains "
+ << ((m_shaderInterface == SHADER_INPUT_SINGLE_DESCRIPTOR) ? "single" :
+ (m_shaderInterface == SHADER_INPUT_MULTIPLE_DESCRIPTORS) ? "two" :
+ (m_shaderInterface == SHADER_INPUT_DESCRIPTOR_ARRAY) ? "an array (size 2) of" :
+ (const char*)DE_NULL)
+ << " descriptor(s) of type " << vk::getDescriptorTypeName(m_descriptorType) << "\n"
+ << "Buffer view is created with a " << ((m_nonzeroViewOffset) ? ("non-zero") : ("zero")) << " offset.\n"
+ << "Buffer format is " << vk::getFormatName(mapToVkTextureFormat(m_texelBuffers.getTextureFormat())) << ".\n";
+
+ if (m_stageFlags == 0u)
+ {
+ msg << "Descriptors are not accessed in any shader stage.\n";
+ }
+ else
+ {
+ msg << "Color in each cell is fetched using the descriptor(s):\n";
+
+ for (int resultNdx = 0; resultNdx < 4; ++resultNdx)
+ {
+ msg << "Test sample " << resultNdx << ": fetch at position " << m_texelBuffers.getFetchPos(resultNdx);
+
+ if (m_shaderInterface != SHADER_INPUT_SINGLE_DESCRIPTOR)
+ {
+ const int srcResourceNdx = (resultNdx % 2); // ABAB source
+ msg << " from texelBuffer " << srcResourceNdx;
+ }
+
+ msg << "\n";
+ }
+
+ msg << "Descriptors are accessed in {"
+ << (((m_stageFlags & vk::VK_SHADER_STAGE_VERTEX_BIT) != 0) ? (" vertex") : (""))
+ << (((m_stageFlags & vk::VK_SHADER_STAGE_TESS_CONTROL_BIT) != 0) ? (" tess_control") : (""))
+ << (((m_stageFlags & vk::VK_SHADER_STAGE_TESS_EVALUATION_BIT) != 0) ? (" tess_evaluation") : (""))
+ << (((m_stageFlags & vk::VK_SHADER_STAGE_GEOMETRY_BIT) != 0) ? (" geometry") : (""))
+ << (((m_stageFlags & vk::VK_SHADER_STAGE_FRAGMENT_BIT) != 0) ? (" fragment") : (""))
+ << " } stages.";
+ }
+
+ m_context.getTestContext().getLog()
+ << tcu::TestLog::Message
+ << msg.str()
+ << tcu::TestLog::EndMessage;
+}
+
+// Returns the non-owning handle of the pipeline layout created for this instance.
+vk::VkPipelineLayout TexelBufferRenderInstance::getPipelineLayout (void) const
+{
+ return *m_pipelineLayout;
+}
+
+// Records the descriptor set bind and the draw into the given command buffer.
+void TexelBufferRenderInstance::writeDrawCmdBuffer (vk::VkCmdBuffer cmd) const
+{
+ m_vki.cmdBindDescriptorSets(cmd, vk::VK_PIPELINE_BIND_POINT_GRAPHICS, getPipelineLayout(), 0, 1, &m_descriptorSet.get(), 0, DE_NULL);
+ m_vki.cmdDraw(cmd, 0, 6 * 4, 0, 1); // render 4 quads, 6 vertices each (two triangles per quad); argument order follows the pre-1.0 cmdDraw signature
+}
+
+// Builds the reference quadrant image and compares the rendered result to it.
+// When no stage accesses the descriptors, the shaders output the fixed
+// yellow/green/green/yellow pattern instead of fetched texel values.
+tcu::TestStatus TexelBufferRenderInstance::verifyResultImage (const tcu::ConstPixelBufferAccess& result) const
+{
+ const tcu::Vec4 green (0.0f, 1.0f, 0.0f, 1.0f);
+ const tcu::Vec4 yellow (1.0f, 1.0f, 0.0f, 1.0f);
+ const bool doFetch = (m_stageFlags != 0u); // no active stages? Then don't fetch
+ const tcu::Vec4 sample0 = (!doFetch) ? (yellow) : (m_texelBuffers.fetchTexelValue(0));
+ const tcu::Vec4 sample1 = (!doFetch) ? (green) : (m_texelBuffers.fetchTexelValue(1));
+ const tcu::Vec4 sample2 = (!doFetch) ? (green) : (m_texelBuffers.fetchTexelValue(2));
+ const tcu::Vec4 sample3 = (!doFetch) ? (yellow) : (m_texelBuffers.fetchTexelValue(3));
+ tcu::Surface reference (m_targetSize.x(), m_targetSize.y());
+
+ drawQuadrantReferenceResult(reference.getAccess(), sample0, sample1, sample2, sample3);
+
+ // bilinear compare with a 1-ULP-per-channel RGBA threshold to tolerate filtering differences
+ if (!bilinearCompare(m_context.getTestContext().getLog(), "Compare", "Result comparison", reference.getAccess(), result, tcu::RGBA(1, 1, 1, 1), tcu::COMPARE_LOG_RESULT))
+ return tcu::TestStatus::fail("Image verification failed");
+ else
+ return tcu::TestStatus::pass("Pass");
+}
+
+// Compute-shader variant of the texel buffer descriptor test: fetches four
+// texel values in a compute shader and writes them to a result buffer which
+// is then read back and verified on the host.
+class TexelBufferComputeInstance : public vkt::TestInstance
+{
+public:
+ TexelBufferComputeInstance (vkt::Context& context,
+ vk::VkDescriptorType descriptorType,
+ ShaderInputInterface shaderInterface,
+ bool nonzeroViewOffset);
+
+private:
+ vk::Move<vk::VkDescriptorSetLayout> createDescriptorSetLayout (void) const;
+ vk::Move<vk::VkDescriptorPool> createDescriptorPool (void) const;
+ vk::Move<vk::VkDescriptorSet> createDescriptorSet (vk::VkDescriptorPool pool, vk::VkDescriptorSetLayout layout) const;
+
+ tcu::TestStatus iterate (void);
+ void logTestPlan (void) const;
+ tcu::TestStatus testResourceAccess (void);
+
+ // test parameters
+ const vk::VkDescriptorType m_descriptorType;
+ const ShaderInputInterface m_shaderInterface;
+ const bool m_nonzeroViewOffset;
+
+ // cached device context
+ const vk::DeviceInterface& m_vki;
+ const vk::VkDevice m_device;
+ const vk::VkQueue m_queue;
+ const deUint32 m_queueFamilyIndex;
+ vk::Allocator& m_allocator;
+
+ // result buffer (binding 0) and the source texel buffer(s)
+ const ComputeInstanceResultBuffer m_result;
+ const TexelBufferInstanceBuffers m_texelBuffers;
+};
+
+// Constructor: caches device handles from the context and creates the result
+// and texel buffers. Note m_texelBuffers allocates 1 or 2 buffers depending
+// on the shader interface.
+TexelBufferComputeInstance::TexelBufferComputeInstance (Context& context,
+ vk::VkDescriptorType descriptorType,
+ ShaderInputInterface shaderInterface,
+ bool nonzeroViewOffset)
+ : vkt::TestInstance (context)
+ , m_descriptorType (descriptorType)
+ , m_shaderInterface (shaderInterface)
+ , m_nonzeroViewOffset (nonzeroViewOffset)
+ , m_vki (context.getDeviceInterface())
+ , m_device (context.getDevice())
+ , m_queue (context.getUniversalQueue())
+ , m_queueFamilyIndex (context.getUniversalQueueFamilyIndex())
+ , m_allocator (context.getDefaultAllocator())
+ , m_result (m_vki, m_device, m_allocator)
+ , m_texelBuffers (m_vki, m_device, m_allocator, m_descriptorType, getInterfaceNumResources(m_shaderInterface), m_nonzeroViewOffset)
+{
+}
+
+// Creates the descriptor set layout for the compute variant: binding 0 is
+// always the storage buffer receiving the results, followed by the texel
+// buffer descriptor(s) laid out according to the shader interface.
+vk::Move<vk::VkDescriptorSetLayout> TexelBufferComputeInstance::createDescriptorSetLayout (void) const
+{
+ vk::DescriptorSetLayoutBuilder builder;
+
+ // binding 0: result storage buffer
+ builder.addSingleBinding(vk::VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, vk::VK_SHADER_STAGE_COMPUTE_BIT);
+
+ // binding 1..: texel buffer descriptor(s)
+ switch (m_shaderInterface)
+ {
+ case SHADER_INPUT_SINGLE_DESCRIPTOR:
+ builder.addSingleBinding(m_descriptorType, vk::VK_SHADER_STAGE_COMPUTE_BIT);
+ break;
+
+ case SHADER_INPUT_MULTIPLE_DESCRIPTORS:
+ builder.addSingleBinding(m_descriptorType, vk::VK_SHADER_STAGE_COMPUTE_BIT);
+ builder.addSingleBinding(m_descriptorType, vk::VK_SHADER_STAGE_COMPUTE_BIT);
+ break;
+
+ case SHADER_INPUT_DESCRIPTOR_ARRAY:
+ builder.addArrayBinding(m_descriptorType, 2u, vk::VK_SHADER_STAGE_COMPUTE_BIT);
+ break;
+
+ default:
+ DE_FATAL("Impossible");
+ }
+
+ return builder.build(m_vki, m_device);
+}
+
+// Creates a one-shot pool for one set: one storage buffer (result) plus the
+// required number of texel buffer descriptors.
+vk::Move<vk::VkDescriptorPool> TexelBufferComputeInstance::createDescriptorPool (void) const
+{
+ return vk::DescriptorPoolBuilder()
+ .addType(vk::VK_DESCRIPTOR_TYPE_STORAGE_BUFFER)
+ .addType(m_descriptorType, getInterfaceNumResources(m_shaderInterface))
+ .build(m_vki, m_device, vk::VK_DESCRIPTOR_POOL_USAGE_ONE_SHOT, 1);
+}
+
+// Allocates the descriptor set and writes the result buffer view (binding 0)
+// and texel buffer view(s) (bindings 1..) into it, matching the layout
+// created in createDescriptorSetLayout.
+vk::Move<vk::VkDescriptorSet> TexelBufferComputeInstance::createDescriptorSet (vk::VkDescriptorPool pool, vk::VkDescriptorSetLayout layout) const
+{
+ const vk::VkDescriptorInfo resultInfo = createDescriptorInfo(m_result.getBufferView());
+ const vk::VkDescriptorInfo texelBufferInfos[2] =
+ {
+ createDescriptorInfo(m_texelBuffers.getBufferViewA()),
+ createDescriptorInfo(m_texelBuffers.getBufferViewB()),
+ };
+
+ vk::Move<vk::VkDescriptorSet> descriptorSet = allocDescriptorSet(m_vki, m_device, pool, vk::VK_DESCRIPTOR_SET_USAGE_ONE_SHOT, layout);
+ vk::DescriptorSetUpdateBuilder builder;
+
+ // result
+ builder.writeSingle(*descriptorSet, vk::DescriptorSetUpdateBuilder::Location::binding(0u), vk::VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, &resultInfo);
+
+ // texel buffers
+ switch (m_shaderInterface)
+ {
+ case SHADER_INPUT_SINGLE_DESCRIPTOR:
+ builder.writeSingle(*descriptorSet, vk::DescriptorSetUpdateBuilder::Location::binding(1u), m_descriptorType, &texelBufferInfos[0]);
+ break;
+
+ case SHADER_INPUT_MULTIPLE_DESCRIPTORS:
+ builder.writeSingle(*descriptorSet, vk::DescriptorSetUpdateBuilder::Location::binding(1u), m_descriptorType, &texelBufferInfos[0]);
+ builder.writeSingle(*descriptorSet, vk::DescriptorSetUpdateBuilder::Location::binding(2u), m_descriptorType, &texelBufferInfos[1]);
+ break;
+
+ case SHADER_INPUT_DESCRIPTOR_ARRAY:
+ builder.writeArray(*descriptorSet, vk::DescriptorSetUpdateBuilder::Location::binding(1u), m_descriptorType, 2u, texelBufferInfos);
+ break;
+
+ default:
+ DE_FATAL("Impossible");
+ }
+
+ builder.update(m_vki, m_device);
+ return descriptorSet;
+}
+
+// Single-iteration test entry point: log the plan, then run the access test.
+tcu::TestStatus TexelBufferComputeInstance::iterate (void)
+{
+ logTestPlan();
+ return testResourceAccess();
+}
+
+// Writes a human-readable description of the compute test configuration
+// (descriptor layout, view offset, buffer format, fetch positions) to the log.
+void TexelBufferComputeInstance::logTestPlan (void) const
+{
+ std::ostringstream msg;
+
+ msg << "Fetching 4 values from image in compute shader.\n"
+ << "Single descriptor set. Descriptor set contains "
+ << ((m_shaderInterface == SHADER_INPUT_SINGLE_DESCRIPTOR) ? "single" :
+ (m_shaderInterface == SHADER_INPUT_MULTIPLE_DESCRIPTORS) ? "two" :
+ (m_shaderInterface == SHADER_INPUT_DESCRIPTOR_ARRAY) ? "an array (size 2) of" :
+ (const char*)DE_NULL)
+ << " descriptor(s) of type " << vk::getDescriptorTypeName(m_descriptorType) << "\n"
+ << "Buffer view is created with a " << ((m_nonzeroViewOffset) ? ("non-zero") : ("zero")) << " offset.\n"
+ << "Buffer format is " << vk::getFormatName(mapToVkTextureFormat(m_texelBuffers.getTextureFormat())) << ".\n";
+
+ for (int resultNdx = 0; resultNdx < 4; ++resultNdx)
+ {
+ msg << "Test sample " << resultNdx << ": fetch at position " << m_texelBuffers.getFetchPos(resultNdx);
+
+ if (m_shaderInterface != SHADER_INPUT_SINGLE_DESCRIPTOR)
+ {
+ const int srcResourceNdx = (resultNdx % 2); // ABAB source
+ msg << " from texelBuffer " << srcResourceNdx;
+ }
+
+ msg << "\n";
+ }
+
+ m_context.getTestContext().getLog()
+ << tcu::TestLog::Message
+ << msg.str()
+ << tcu::TestLog::EndMessage;
+}
+
+// Builds the pipeline and descriptor set, dispatches a 4x1x1 compute grid
+// (one invocation per sample), waits for completion, and verifies that the
+// four values written to the result buffer match host-side reference fetches.
+tcu::TestStatus TexelBufferComputeInstance::testResourceAccess (void)
+{
+ const vk::Unique<vk::VkDescriptorSetLayout> descriptorSetLayout (createDescriptorSetLayout());
+ const vk::Unique<vk::VkDescriptorPool> descriptorPool (createDescriptorPool());
+ const vk::Unique<vk::VkDescriptorSet> descriptorSet (createDescriptorSet(*descriptorPool, *descriptorSetLayout));
+ const ComputePipeline pipeline (m_vki, m_device, m_context.getBinaryCollection(), 1, &descriptorSetLayout.get());
+
+ const vk::VkDescriptorSet descriptorSets[] = { *descriptorSet };
+ const int numDescriptorSets = DE_LENGTH_OF_ARRAY(descriptorSets);
+ const deUint32* const dynamicOffsets = DE_NULL;
+ const int numDynamicOffsets = 0;
+ // pre-dispatch barriers: make host-written texel buffer contents visible;
+ // only the first getNumTexelBuffers() entries are consumed
+ const void* const preBarriers[] = { m_texelBuffers.getBufferInitBarrierA(), m_texelBuffers.getBufferInitBarrierB() };
+ const int numPreBarriers = m_texelBuffers.getNumTexelBuffers();
+ // post-dispatch barrier: make shader writes to the result buffer host-visible
+ const void* const postBarriers[] = { m_result.getResultReadBarrier() };
+ const int numPostBarriers = DE_LENGTH_OF_ARRAY(postBarriers);
+
+ const ComputeCommand compute (m_vki,
+ m_device,
+ pipeline.getPipeline(),
+ pipeline.getPipelineLayout(),
+ tcu::UVec3(4, 1, 1),
+ numDescriptorSets, descriptorSets,
+ numDynamicOffsets, dynamicOffsets,
+ numPreBarriers, preBarriers,
+ numPostBarriers, postBarriers);
+
+ tcu::Vec4 results[4];
+ bool anyResultSet = false;
+ bool allResultsOk = true;
+
+ compute.submitAndWait(m_queueFamilyIndex, m_queue);
+ m_result.readResultContentsTo(&results);
+
+ // verify
+ for (int resultNdx = 0; resultNdx < 4; ++resultNdx)
+ {
+ const tcu::Vec4 result = results[resultNdx];
+ const tcu::Vec4 reference = m_texelBuffers.fetchTexelValue(resultNdx);
+ // allow one 8-bit quantization step of error per channel
+ const tcu::Vec4 conversionThreshold = tcu::Vec4(1.0f / 255.0f);
+
+ // result buffer is initialized to -1; any other value means the shader ran
+ if (result != tcu::Vec4(-1.0f))
+ anyResultSet = true;
+
+ if (tcu::boolAny(tcu::greaterThan(tcu::abs(result - reference), conversionThreshold)))
+ {
+ allResultsOk = false;
+
+ m_context.getTestContext().getLog()
+ << tcu::TestLog::Message
+ << "Test sample " << resultNdx << ": Expected " << reference << ", got " << result
+ << tcu::TestLog::EndMessage;
+ }
+ }
+
+ // read back and verify
+ if (allResultsOk)
+ return tcu::TestStatus::pass("Pass");
+ else if (anyResultSet)
+ return tcu::TestStatus::fail("Invalid result values");
+ else
+ {
+ // distinguish "wrong values" from "shader never wrote anything"
+ m_context.getTestContext().getLog()
+ << tcu::TestLog::Message
+ << "Result buffer was not written to."
+ << tcu::TestLog::EndMessage;
+ return tcu::TestStatus::fail("Result buffer was not written to");
+ }
+}
+
+// Test case for uniform/storage texel buffer descriptors. Generates the
+// GLSL sources for the selected stages and instantiates either the render
+// or the compute instance depending on the existing stages.
+class TexelBufferDescriptorCase : public QuadrantRendederCase
+{
+public:
+ enum
+ {
+ FLAG_VIEW_OFFSET = (1u << 1u),
+ };
+ // flag bits continue where the shared resource flags end
+ DE_STATIC_ASSERT((deUint32)FLAG_VIEW_OFFSET == (deUint32)RESOURCE_FLAG_LAST);
+
+ // NOTE(review): "exitingStages" is presumably a typo for "existingStages"
+ // (cf. createShaderAccessTests below); renaming requires touching the base
+ // class member m_exitingStages as well -- confirm before fixing.
+ TexelBufferDescriptorCase (tcu::TestContext& testCtx,
+ const char* name,
+ const char* description,
+ bool isPrimaryCmdBuf,
+ vk::VkDescriptorType descriptorType,
+ vk::VkShaderStageFlags exitingStages,
+ vk::VkShaderStageFlags activeStages,
+ ShaderInputInterface shaderInterface,
+ deUint32 flags);
+
+private:
+ std::string genExtensionDeclarations (vk::VkShaderStage stage) const;
+ std::string genResourceDeclarations (vk::VkShaderStage stage, int numUsedBindings) const;
+ std::string genResourceAccessSource (vk::VkShaderStage stage) const;
+ std::string genNoAccessSource (void) const;
+
+ vkt::TestInstance* createInstance (vkt::Context& context) const;
+
+ const bool m_isPrimaryCmdBuf;
+ const vk::VkDescriptorType m_descriptorType;
+ const ShaderInputInterface m_shaderInterface;
+ const bool m_nonzeroViewOffset;
+};
+
+// Constructor: forwards stage configuration to QuadrantRendederCase and
+// decodes the FLAG_VIEW_OFFSET bit into the bool member.
+TexelBufferDescriptorCase::TexelBufferDescriptorCase (tcu::TestContext& testCtx,
+ const char* name,
+ const char* description,
+ bool isPrimaryCmdBuf,
+ vk::VkDescriptorType descriptorType,
+ vk::VkShaderStageFlags exitingStages,
+ vk::VkShaderStageFlags activeStages,
+ ShaderInputInterface shaderInterface,
+ deUint32 flags)
+ : QuadrantRendederCase (testCtx, name, description, glu::GLSL_VERSION_310_ES, exitingStages, activeStages)
+ , m_isPrimaryCmdBuf (isPrimaryCmdBuf)
+ , m_descriptorType (descriptorType)
+ , m_shaderInterface (shaderInterface)
+ , m_nonzeroViewOffset (((flags & FLAG_VIEW_OFFSET) != 0) ? (1u) : (0u))
+{
+}
+
+// All generated shaders need the texture buffer extension regardless of stage.
+std::string TexelBufferDescriptorCase::genExtensionDeclarations (vk::VkShaderStage stage) const
+{
+ DE_UNREF(stage);
+ return "#extension GL_EXT_texture_buffer : require\n";
+}
+
+// Emits the GLSL declaration(s) of the texel buffer(s), starting at binding
+// index numUsedBindings. Uniform texel buffers map to samplerBuffer; storage
+// texel buffers to readonly imageBuffer with an explicit rgba8 format.
+std::string TexelBufferDescriptorCase::genResourceDeclarations (vk::VkShaderStage stage, int numUsedBindings) const
+{
+ DE_UNREF(stage);
+
+ const bool isUniform = isUniformDescriptorType(m_descriptorType);
+ const char* const storageType = (isUniform) ? ("samplerBuffer ") : ("readonly imageBuffer ");
+ const char* const formatQualifier = (isUniform) ? ("") : (", rgba8");
+
+ switch (m_shaderInterface)
+ {
+ case SHADER_INPUT_SINGLE_DESCRIPTOR:
+ return "layout(set = 0, binding = " + de::toString(numUsedBindings) + formatQualifier + ") uniform highp " + storageType + " u_texelBuffer;\n";
+
+ case SHADER_INPUT_MULTIPLE_DESCRIPTORS:
+ return "layout(set = 0, binding = " + de::toString(numUsedBindings) + formatQualifier + ") uniform highp " + storageType + " u_texelBufferA;\n"
+ "layout(set = 0, binding = " + de::toString(numUsedBindings+1) + formatQualifier + ") uniform highp " + storageType + " u_texelBufferB;\n"
+
+ case SHADER_INPUT_DESCRIPTOR_ARRAY:
+ return "layout(set = 0, binding = " + de::toString(numUsedBindings) + formatQualifier + ") uniform highp " + storageType + " u_texelBuffer[2];\n";
+
+ default:
+ DE_FATAL("Impossible");
+ return "";
+ }
+}
+
+// Emits the GLSL access code: each of the four quadrants fetches one texel,
+// alternating between resource A and B (quadrants 0/2 use A, 1/3 use B).
+// The access postfix adapts the variable name to the declaration style.
+std::string TexelBufferDescriptorCase::genResourceAccessSource (vk::VkShaderStage stage) const
+{
+ DE_UNREF(stage);
+
+ const char* const accessPostfixA = (m_shaderInterface == SHADER_INPUT_SINGLE_DESCRIPTOR) ? ("")
+ : (m_shaderInterface == SHADER_INPUT_MULTIPLE_DESCRIPTORS) ? ("A")
+ : (m_shaderInterface == SHADER_INPUT_DESCRIPTOR_ARRAY) ? ("[0]")
+ : (DE_NULL);
+ const char* const accessPostfixB = (m_shaderInterface == SHADER_INPUT_SINGLE_DESCRIPTOR) ? ("")
+ : (m_shaderInterface == SHADER_INPUT_MULTIPLE_DESCRIPTORS) ? ("B")
+ : (m_shaderInterface == SHADER_INPUT_DESCRIPTOR_ARRAY) ? ("[1]")
+ : (DE_NULL);
+ const char* const fetchFunc = (isUniformDescriptorType(m_descriptorType)) ? ("texelFetch") : ("imageLoad");
+ std::ostringstream buf;
+
+ buf << " if (quadrant_id == 0)\n"
+ << " result_color = " << fetchFunc << "(u_texelBuffer" << accessPostfixA << ", " << TexelBufferInstanceBuffers::getFetchPos(0) << ");\n"
+ << " else if (quadrant_id == 1)\n"
+ << " result_color = " << fetchFunc << "(u_texelBuffer" << accessPostfixB << ", " << TexelBufferInstanceBuffers::getFetchPos(1) << ");\n"
+ << " else if (quadrant_id == 2)\n"
+ << " result_color = " << fetchFunc << "(u_texelBuffer" << accessPostfixA << ", " << TexelBufferInstanceBuffers::getFetchPos(2) << ");\n"
+ << " else\n"
+ << " result_color = " << fetchFunc << "(u_texelBuffer" << accessPostfixB << ", " << TexelBufferInstanceBuffers::getFetchPos(3) << ");\n";
+
+ return buf.str();
+}
+
+// Emits the no-access fallback: the fixed yellow/green/green/yellow quadrant
+// pattern expected by verifyResultImage when no stage touches the descriptors.
+std::string TexelBufferDescriptorCase::genNoAccessSource (void) const
+{
+ return " if (quadrant_id == 1 || quadrant_id == 2)\n"
+ " result_color = vec4(0.0, 1.0, 0.0, 1.0);\n"
+ " else\n"
+ " result_color = vec4(1.0, 1.0, 0.0, 1.0);\n";
+}
+
+// Instantiates the compute variant for compute-only stage configurations,
+// otherwise the graphics render variant.
+vkt::TestInstance* TexelBufferDescriptorCase::createInstance (vkt::Context& context) const
+{
+ if (m_exitingStages == vk::VK_SHADER_STAGE_COMPUTE_BIT)
+ {
+ DE_ASSERT(m_isPrimaryCmdBuf); // secondaries are only valid within renderpass
+ return new TexelBufferComputeInstance(context, m_descriptorType, m_shaderInterface, m_nonzeroViewOffset);
+ }
+ else
+ return new TexelBufferRenderInstance(context, m_isPrimaryCmdBuf, m_descriptorType, m_activeStages, m_shaderInterface, m_nonzeroViewOffset);
+}
+
+// Populates 'group' with one ImageDescriptorCase per image view type /
+// base-mip / base-slice combination for the given descriptor configuration.
+void createShaderAccessImageTests (tcu::TestCaseGroup* group,
+ bool isPrimaryCmdBuf,
+ vk::VkDescriptorType descriptorType,
+ vk::VkShaderStageFlags exitingStages,
+ vk::VkShaderStageFlags activeStages,
+ ShaderInputInterface dimension,
+ deUint32 resourceFlags)
+{
+ static const struct
+ {
+ vk::VkImageViewType viewType;
+ const char* name;
+ const char* description;
+ deUint32 flags;
+ } s_imageTypes[] =
+ {
+ { vk::VK_IMAGE_VIEW_TYPE_1D, "1d", "1D image view", 0u },
+ { vk::VK_IMAGE_VIEW_TYPE_1D, "1d_base_mip", "1D image subview with base mip level", ImageDescriptorCase::FLAG_BASE_MIP },
+ { vk::VK_IMAGE_VIEW_TYPE_1D, "1d_base_slice", "1D image subview with base array slice", ImageDescriptorCase::FLAG_BASE_SLICE },
+
+ { vk::VK_IMAGE_VIEW_TYPE_1D_ARRAY, "1d_array", "1D array image view", 0u },
+ { vk::VK_IMAGE_VIEW_TYPE_1D_ARRAY, "1d_array_base_mip", "1D array image subview with base mip level", ImageDescriptorCase::FLAG_BASE_MIP },
+ { vk::VK_IMAGE_VIEW_TYPE_1D_ARRAY, "1d_array_base_slice", "1D array image subview with base array slice", ImageDescriptorCase::FLAG_BASE_SLICE },
+
+ { vk::VK_IMAGE_VIEW_TYPE_2D, "2d", "2D image view", 0u },
+ { vk::VK_IMAGE_VIEW_TYPE_2D, "2d_base_mip", "2D image subview with base mip level", ImageDescriptorCase::FLAG_BASE_MIP },
+ { vk::VK_IMAGE_VIEW_TYPE_2D, "2d_base_slice", "2D image subview with base array slice", ImageDescriptorCase::FLAG_BASE_SLICE },
+
+ { vk::VK_IMAGE_VIEW_TYPE_2D_ARRAY, "2d_array", "2D array image view", 0u },
+ { vk::VK_IMAGE_VIEW_TYPE_2D_ARRAY, "2d_array_base_mip", "2D array image subview with base mip level", ImageDescriptorCase::FLAG_BASE_MIP },
+ { vk::VK_IMAGE_VIEW_TYPE_2D_ARRAY, "2d_array_base_slice", "2D array image subview with base array slice", ImageDescriptorCase::FLAG_BASE_SLICE },
+
+ { vk::VK_IMAGE_VIEW_TYPE_3D, "3d", "3D image view", 0u },
+ { vk::VK_IMAGE_VIEW_TYPE_3D, "3d_base_mip", "3D image subview with base mip level", ImageDescriptorCase::FLAG_BASE_MIP },
+ // no 3d array textures
+
+ { vk::VK_IMAGE_VIEW_TYPE_CUBE, "cube", "Cube image view", 0u },
+ { vk::VK_IMAGE_VIEW_TYPE_CUBE, "cube_base_mip", "Cube image subview with base mip level", ImageDescriptorCase::FLAG_BASE_MIP },
+ { vk::VK_IMAGE_VIEW_TYPE_CUBE, "cube_base_slice", "Cube image subview with base array slice", ImageDescriptorCase::FLAG_BASE_SLICE },
+
+ { vk::VK_IMAGE_VIEW_TYPE_CUBE_ARRAY, "cube_array", "Cube image view", 0u },
+ { vk::VK_IMAGE_VIEW_TYPE_CUBE_ARRAY, "cube_array_base_mip", "Cube image subview with base mip level", ImageDescriptorCase::FLAG_BASE_MIP },
+ { vk::VK_IMAGE_VIEW_TYPE_CUBE_ARRAY, "cube_array_base_slice", "Cube image subview with base array slice", ImageDescriptorCase::FLAG_BASE_SLICE },
+ };
+
+ for (int ndx = 0; ndx < DE_LENGTH_OF_ARRAY(s_imageTypes); ++ndx)
+ {
+ // never overlap
+ DE_ASSERT((s_imageTypes[ndx].flags & resourceFlags) == 0u);
+
+ group->addChild(new ImageDescriptorCase(group->getTestContext(),
+ s_imageTypes[ndx].name,
+ s_imageTypes[ndx].description,
+ isPrimaryCmdBuf,
+ descriptorType,
+ exitingStages,
+ activeStages,
+ dimension,
+ s_imageTypes[ndx].viewType,
+ s_imageTypes[ndx].flags | resourceFlags));
+ }
+}
+
+// Populates 'group' with TexelBufferDescriptorCase children, one per view
+// offset variant. Texel buffers accept no extra resource flags.
+void createShaderAccessTexelBufferTests (tcu::TestCaseGroup* group,
+ bool isPrimaryCmdBuf,
+ vk::VkDescriptorType descriptorType,
+ vk::VkShaderStageFlags exitingStages,
+ vk::VkShaderStageFlags activeStages,
+ ShaderInputInterface dimension,
+ deUint32 resourceFlags)
+{
+ DE_ASSERT(resourceFlags == 0);
+ DE_UNREF(resourceFlags);
+
+ static const struct
+ {
+ const char* name;
+ const char* description;
+ deUint32 flags;
+ } s_texelBufferTypes[] =
+ {
+ { "offset_zero", "View offset is zero", 0u },
+ { "offset_nonzero", "View offset is non-zero", TexelBufferDescriptorCase::FLAG_VIEW_OFFSET },
+ };
+
+ for (int ndx = 0; ndx < DE_LENGTH_OF_ARRAY(s_texelBufferTypes); ++ndx)
+ {
+ group->addChild(new TexelBufferDescriptorCase(group->getTestContext(),
+ s_texelBufferTypes[ndx].name,
+ s_texelBufferTypes[ndx].description,
+ isPrimaryCmdBuf,
+ descriptorType,
+ exitingStages,
+ activeStages,
+ dimension,
+ s_texelBufferTypes[ndx].flags));
+ }
+}
+
+// Populates 'group' with BufferDescriptorCase children. Offset variants are
+// partitioned into static and dynamic sets; only the partition matching the
+// descriptor type (dynamic vs. non-dynamic) is instantiated.
+void createShaderAccessBufferTests (tcu::TestCaseGroup* group,
+ bool isPrimaryCmdBuf,
+ vk::VkDescriptorType descriptorType,
+ vk::VkShaderStageFlags exitingStages,
+ vk::VkShaderStageFlags activeStages,
+ ShaderInputInterface dimension,
+ deUint32 resourceFlags)
+{
+ DE_ASSERT(resourceFlags == 0u);
+ DE_UNREF(resourceFlags);
+
+ static const struct
+ {
+ const char* name;
+ const char* description;
+ bool isForDynamicCases;
+ deUint32 flags;
+ } s_bufferTypes[] =
+ {
+ { "offset_view_zero", "View offset is zero", false, 0u },
+ { "offset_view_nonzero", "View offset is non-zero", false, BufferDescriptorCase::FLAG_VIEW_OFFSET },
+
+ { "offset_view_zero_dynamic_zero", "View offset is zero, dynamic offset is zero", true, BufferDescriptorCase::FLAG_DYNAMIC_OFFSET_ZERO },
+ { "offset_view_zero_dynamic_nonzero", "View offset is zero, dynamic offset is non-zero", true, BufferDescriptorCase::FLAG_DYNAMIC_OFFSET_NONZERO },
+ { "offset_view_zero_dynamic_not_set", "View offset is zero, dynamic offset is not supplied", true, 0u },
+ // \note no case for offset_view_nonzero_dynamic_zero since it doesn't produce any additional coverage
+ { "offset_view_nonzero_dynamic_nonzero", "View offset is non-zero, dynamic offset is non-zero", true, BufferDescriptorCase::FLAG_VIEW_OFFSET | BufferDescriptorCase::FLAG_DYNAMIC_OFFSET_NONZERO },
+ { "offset_view_nonzero_dynamic_not_set", "View offset is non-zero, dynamic offset is not supplied", true, BufferDescriptorCase::FLAG_VIEW_OFFSET },
+ };
+
+ const bool isDynamicCase = isDynamicDescriptorType(descriptorType);
+
+ for (int ndx = 0; ndx < DE_LENGTH_OF_ARRAY(s_bufferTypes); ++ndx)
+ {
+ if (isDynamicCase == s_bufferTypes[ndx].isForDynamicCases)
+ group->addChild(new BufferDescriptorCase(group->getTestContext(),
+ s_bufferTypes[ndx].name,
+ s_bufferTypes[ndx].description,
+ isPrimaryCmdBuf,
+ descriptorType,
+ exitingStages,
+ activeStages,
+ dimension,
+ s_bufferTypes[ndx].flags));
+ }
+}
+
+} // anonymous
+
+// Builds the full "shader_access" test hierarchy:
+//   bind-type (primary/secondary cmd buf) -> descriptor type -> shader stage
+//   -> interface dimension -> resource-specific leaf cases.
+// The leaf population is dispatched to the create*Tests helper matching the
+// descriptor type.
+tcu::TestCaseGroup* createShaderAccessTests (tcu::TestContext& testCtx)
+{
+ static const struct
+ {
+ const bool isPrimary;
+ const char* name;
+ const char* description;
+ } s_bindTypes[] =
+ {
+ { true, "primary_cmd_buf", "Bind in primary command buffer" },
+ { false, "seconday_cmd_buf", "Bind in secondary command buffer" }, // NOTE(review): "seconday" looks like a typo for "secondary"; renaming changes test-case paths, so confirm against existing case lists before fixing
+ };
+ static const struct
+ {
+ const vk::VkDescriptorType descriptorType;
+ const char* name;
+ const char* description;
+ deUint32 flags;
+ } s_descriptorTypes[] =
+ {
+ { vk::VK_DESCRIPTOR_TYPE_SAMPLER, "sampler_mutable", "VK_DESCRIPTOR_TYPE_SAMPLER with mutable sampler", 0u },
+ { vk::VK_DESCRIPTOR_TYPE_SAMPLER, "sampler_immutable", "VK_DESCRIPTOR_TYPE_SAMPLER with immutable sampler", RESOURCE_FLAG_IMMUTABLE_SAMPLER },
+ { vk::VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER, "combined_image_sampler_mutable", "VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER with mutable sampler", 0u },
+ { vk::VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER, "combined_image_sampler_immutable", "VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER with immutable sampler", RESOURCE_FLAG_IMMUTABLE_SAMPLER },
+ { vk::VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, "sampled_image", "VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE", 0u },
+ { vk::VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, "storage_image", "VK_DESCRIPTOR_TYPE_STORAGE_IMAGE", 0u },
+ { vk::VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER, "uniform_texel_buffer", "VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER", 0u },
+ { vk::VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER, "storage_texel_buffer", "VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER", 0u },
+ { vk::VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, "uniform_buffer", "VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER", 0u },
+ { vk::VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, "storage_buffer", "VK_DESCRIPTOR_TYPE_STORAGE_BUFFER", 0u },
+ { vk::VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC, "uniform_buffer_dynamic", "VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC", 0u },
+ { vk::VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC, "storage_buffer_dynamic", "VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC", 0u },
+ };
+ static const struct
+ {
+ const char* name;
+ const char* description;
+ vk::VkShaderStageFlags existingStages; //!< stages that exists
+ vk::VkShaderStageFlags activeStages; //!< stages that access resource
+ bool supportsSecondaryCmdBufs;
+ } s_shaderStages[] =
+ {
+ {
+ "no_access",
+ "No accessing stages",
+ vk::VK_SHADER_STAGE_VERTEX_BIT | vk::VK_SHADER_STAGE_FRAGMENT_BIT,
+ 0u,
+ true,
+ },
+ {
+ "vertex",
+ "Vertex stage",
+ vk::VK_SHADER_STAGE_VERTEX_BIT | vk::VK_SHADER_STAGE_FRAGMENT_BIT,
+ vk::VK_SHADER_STAGE_VERTEX_BIT,
+ true,
+ },
+ {
+ "tess_ctrl",
+ "Tessellation control stage",
+ vk::VK_SHADER_STAGE_VERTEX_BIT | vk::VK_SHADER_STAGE_TESS_CONTROL_BIT | vk::VK_SHADER_STAGE_TESS_EVALUATION_BIT | vk::VK_SHADER_STAGE_FRAGMENT_BIT,
+ vk::VK_SHADER_STAGE_TESS_CONTROL_BIT,
+ true,
+ },
+ {
+ "tess_eval",
+ "Tessellation evaluation stage",
+ vk::VK_SHADER_STAGE_VERTEX_BIT | vk::VK_SHADER_STAGE_TESS_CONTROL_BIT | vk::VK_SHADER_STAGE_TESS_EVALUATION_BIT | vk::VK_SHADER_STAGE_FRAGMENT_BIT,
+ vk::VK_SHADER_STAGE_TESS_EVALUATION_BIT,
+ true,
+ },
+ {
+ "geometry",
+ "Geometry stage",
+ vk::VK_SHADER_STAGE_VERTEX_BIT | vk::VK_SHADER_STAGE_GEOMETRY_BIT | vk::VK_SHADER_STAGE_FRAGMENT_BIT,
+ vk::VK_SHADER_STAGE_GEOMETRY_BIT,
+ true,
+ },
+ {
+ "fragment",
+ "Fragment stage",
+ vk::VK_SHADER_STAGE_VERTEX_BIT | vk::VK_SHADER_STAGE_FRAGMENT_BIT,
+ vk::VK_SHADER_STAGE_FRAGMENT_BIT,
+ true,
+ },
+ {
+ "compute",
+ "Compute stage",
+ vk::VK_SHADER_STAGE_COMPUTE_BIT,
+ vk::VK_SHADER_STAGE_COMPUTE_BIT,
+ false, // secondary command buffers are not valid for compute-only configs
+ },
+ {
+ "vertex_fragment",
+ "Vertex and fragment stages",
+ vk::VK_SHADER_STAGE_VERTEX_BIT | vk::VK_SHADER_STAGE_FRAGMENT_BIT,
+ vk::VK_SHADER_STAGE_VERTEX_BIT | vk::VK_SHADER_STAGE_FRAGMENT_BIT,
+ true,
+ },
+ };
+ static const struct
+ {
+ ShaderInputInterface dimension;
+ const char* name;
+ const char* description;
+ } s_variableDimensions[] =
+ {
+ { SHADER_INPUT_SINGLE_DESCRIPTOR, "single_descriptor", "Single descriptor" },
+ { SHADER_INPUT_MULTIPLE_DESCRIPTORS, "multiple_descriptors", "Multiple descriptors" },
+ { SHADER_INPUT_DESCRIPTOR_ARRAY, "descriptor_array", "Descriptor array" },
+ };
+
+ de::MovePtr<tcu::TestCaseGroup> group(new tcu::TestCaseGroup(testCtx, "shader_access", "Access resource via descriptor in a single descriptor set"));
+
+ // .primary_cmd_buf...
+ for (int bindTypeNdx = 0; bindTypeNdx < DE_LENGTH_OF_ARRAY(s_bindTypes); ++bindTypeNdx)
+ {
+ de::MovePtr<tcu::TestCaseGroup> bindGroup(new tcu::TestCaseGroup(testCtx, s_bindTypes[bindTypeNdx].name, s_bindTypes[bindTypeNdx].description));
+
+ // .sampler, .combined_image_sampler, other resource types ...
+ for (int descriptorNdx = 0; descriptorNdx < DE_LENGTH_OF_ARRAY(s_descriptorTypes); ++descriptorNdx)
+ {
+ de::MovePtr<tcu::TestCaseGroup> typeGroup(new tcu::TestCaseGroup(testCtx, s_descriptorTypes[descriptorNdx].name, s_descriptorTypes[descriptorNdx].description));
+
+ for (int stageNdx = 0; stageNdx < DE_LENGTH_OF_ARRAY(s_shaderStages); ++stageNdx)
+ {
+ // skip stage configs that cannot run in secondary command buffers
+ if (s_bindTypes[bindTypeNdx].isPrimary || s_shaderStages[stageNdx].supportsSecondaryCmdBufs)
+ {
+ de::MovePtr<tcu::TestCaseGroup> stageGroup(new tcu::TestCaseGroup(testCtx, s_shaderStages[stageNdx].name, s_shaderStages[stageNdx].description));
+
+ for (int dimensionNdx = 0; dimensionNdx < DE_LENGTH_OF_ARRAY(s_variableDimensions); ++dimensionNdx)
+ {
+ de::MovePtr<tcu::TestCaseGroup> dimensionGroup(new tcu::TestCaseGroup(testCtx, s_variableDimensions[dimensionNdx].name, s_variableDimensions[dimensionNdx].description));
+ // leaf-case factory selected by descriptor type below
+ void (*createTestsFunc)(tcu::TestCaseGroup* group,
+ bool isPrimaryCmdBuf,
+ vk::VkDescriptorType descriptorType,
+ vk::VkShaderStageFlags existingStages,
+ vk::VkShaderStageFlags activeStages,
+ ShaderInputInterface dimension,
+ deUint32 resourceFlags);
+
+ switch (s_descriptorTypes[descriptorNdx].descriptorType)
+ {
+ case vk::VK_DESCRIPTOR_TYPE_SAMPLER:
+ case vk::VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
+ case vk::VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
+ case vk::VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
+ createTestsFunc = createShaderAccessImageTests;
+ break;
+
+ case vk::VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
+ case vk::VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
+ createTestsFunc = createShaderAccessTexelBufferTests;
+ break;
+
+ case vk::VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
+ case vk::VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
+ case vk::VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
+ case vk::VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
+ createTestsFunc = createShaderAccessBufferTests;
+ break;
+
+ default:
+ createTestsFunc = DE_NULL;
+ DE_FATAL("Impossible");
+ }
+
+ if (createTestsFunc)
+ {
+ createTestsFunc(dimensionGroup.get(),
+ s_bindTypes[bindTypeNdx].isPrimary,
+ s_descriptorTypes[descriptorNdx].descriptorType,
+ s_shaderStages[stageNdx].existingStages,
+ s_shaderStages[stageNdx].activeStages,
+ s_variableDimensions[dimensionNdx].dimension,
+ s_descriptorTypes[descriptorNdx].flags);
+ }
+ else
+ DE_FATAL("Impossible");
+
+ stageGroup->addChild(dimensionGroup.release());
+ }
+
+ typeGroup->addChild(stageGroup.release());
+ }
+ }
+
+ bindGroup->addChild(typeGroup.release());
+ }
+
+ group->addChild(bindGroup.release());
+ }
+
+ return group.release();
+}
+
+} // BindingModel
+} // vkt
--- /dev/null
+#ifndef _VKTBINDINGSHADERACCESSTESTS_HPP
+#define _VKTBINDINGSHADERACCESSTESTS_HPP
+/*-------------------------------------------------------------------------
+ * Vulkan Conformance Tests
+ * ------------------------
+ *
+ * Copyright (c) 2015 Google Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and/or associated documentation files (the
+ * "Materials"), to deal in the Materials without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Materials, and to
+ * permit persons to whom the Materials are furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice(s) and this permission notice shall be
+ * included in all copies or substantial portions of the Materials.
+ *
+ * The Materials are Confidential Information as defined by the
+ * Khronos Membership Agreement until designated non-confidential by
+ * Khronos, at which point this condition clause shall be removed.
+ *
+ * THE MATERIALS ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * MATERIALS OR THE USE OR OTHER DEALINGS IN THE MATERIALS.
+ *
+ *//*!
+ * \file
+ * \brief Binding shader access tests
+ *//*--------------------------------------------------------------------*/
+
+#include "tcuDefs.hpp"
+#include "tcuTestCase.hpp"
+
+namespace vkt
+{
+namespace BindingModel
+{
+
+//! Creates the descriptor-binding shader access test case group.
+//! Caller takes ownership of the returned group.
+tcu::TestCaseGroup* createShaderAccessTests (tcu::TestContext& testCtx);
+
+} // BindingModel
+} // vkt
+
+#endif // _VKTBINDINGSHADERACCESSTESTS_HPP
--- /dev/null
+
+# Build rules for the Vulkan pipeline test group (deqp-vk-pipeline).
+
+# Parent directory on the include path so sibling vktcts headers resolve.
+# (Directory-scoped include_directories follows the existing dEQP convention.)
+include_directories(
+	..
+	)
+
+# Sources are listed explicitly (no file(GLOB)) so additions show up in diffs
+# and incremental builds stay correct.
+set(DEQP_VK_PIPELINE_SRCS
+	vktPipelineClearUtil.cpp
+	vktPipelineClearUtil.hpp
+	vktPipelineDepthTests.cpp
+	vktPipelineDepthTests.hpp
+	vktPipelineImageUtil.cpp
+	vktPipelineImageUtil.hpp
+	vktPipelineReferenceRenderer.cpp
+	vktPipelineReferenceRenderer.hpp
+	vktPipelineTests.cpp
+	vktPipelineTests.hpp
+	vktPipelineVertexUtil.cpp
+	vktPipelineVertexUtil.hpp
+	)
+
+# Libraries the pipeline tests link against: test-framework utilities,
+# Vulkan utilities and the software reference renderer.
+set(DEQP_VK_PIPELINE_LIBS
+	tcutil
+	vkutil
+	referencerenderer
+	)
+
+add_library(deqp-vk-pipeline STATIC ${DEQP_VK_PIPELINE_SRCS})
+target_link_libraries(deqp-vk-pipeline ${DEQP_VK_PIPELINE_LIBS})
+
--- /dev/null
+/*------------------------------------------------------------------------
+ * Vulkan Conformance Tests
+ * ------------------------
+ *
+ * Copyright (c) 2015 The Khronos Group Inc.
+ * Copyright (c) 2015 Imagination Technologies Ltd.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and/or associated documentation files (the
+ * "Materials"), to deal in the Materials without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Materials, and to
+ * permit persons to whom the Materials are furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice(s) and this permission notice shall be included
+ * in all copies or substantial portions of the Materials.
+ *
+ * The Materials are Confidential Information as defined by the
+ * Khronos Membership Agreement until designated non-confidential by Khronos,
+ * at which point this condition clause shall be removed.
+ *
+ * THE MATERIALS ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * MATERIALS OR THE USE OR OTHER DEALINGS IN THE MATERIALS.
+ *
+ *//*!
+ * \file
+ * \brief Utilities for clear values.
+ *//*--------------------------------------------------------------------*/
+
+#include "vktPipelineClearUtil.hpp"
+#include "vkImageUtil.hpp"
+#include "tcuTextureUtil.hpp"
+
+namespace vkt
+{
+namespace pipeline
+{
+
+using namespace vk;
+
+//! Returns the default clear color mapped into the value range of the given
+//! float-sampled texture format by inverting the format's lookup bias/scale
+//! transform (so a read-back through the format yields the unorm default).
+tcu::Vec4 defaultClearColorFloat (const tcu::TextureFormat& format)
+{
+	const tcu::TextureFormatInfo formatInfo = tcu::getTextureFormatInfo(format);
+	return (defaultClearColorUnorm() - formatInfo.lookupBias) / formatInfo.lookupScale;
+}
+
+//! Default clear color for signed-integer formats: the unorm default color is
+//! remapped through the format's bias/scale and each component is rounded to
+//! the nearest integer.
+tcu::IVec4 defaultClearColorInt (const tcu::TextureFormat& format)
+{
+	const tcu::TextureFormatInfo formatInfo = tcu::getTextureFormatInfo(format);
+	const tcu::Vec4 color = (defaultClearColorUnorm() - formatInfo.lookupBias) / formatInfo.lookupScale;
+
+	// Round each channel independently to the nearest integer value.
+	const tcu::IVec4 result ((deInt32)deFloatRound(color.x()), (deInt32)deFloatRound(color.y()),
+							 (deInt32)deFloatRound(color.z()), (deInt32)deFloatRound(color.w()));
+
+	return result;
+}
+
+//! Default clear color for unsigned-integer formats; same bias/scale remap as
+//! the signed variant, but components are rounded and cast to unsigned.
+tcu::UVec4 defaultClearColorUint (const tcu::TextureFormat& format)
+{
+	const tcu::TextureFormatInfo formatInfo = tcu::getTextureFormatInfo(format);
+	const tcu::Vec4 color = (defaultClearColorUnorm() - formatInfo.lookupBias) / formatInfo.lookupScale;
+
+	// Round each channel independently to the nearest unsigned integer value.
+	const tcu::UVec4 result ((deUint32)deFloatRound(color.x()), (deUint32)deFloatRound(color.y()),
+							 (deUint32)deFloatRound(color.z()), (deUint32)deFloatRound(color.w()));
+
+	return result;
+}
+
+//! The canonical default clear color in normalized [0, 1] space
+//! (RGBA 0.39, 0.58, 0.93, 1.0); all other default clear colors derive
+//! from this value.
+tcu::Vec4 defaultClearColorUnorm (void)
+{
+	return tcu::Vec4(0.39f, 0.58f, 0.93f, 1.0f);
+}
+
+//! Default depth clear value (1.0 = far plane).
+float defaultClearDepth (void)
+{
+	return 1.0f;
+}
+
+//! Default stencil clear value (0).
+deUint32 defaultClearStencil (void)
+{
+	return 0;
+}
+
+//! Convenience wrapper combining defaultClearDepth() and
+//! defaultClearStencil() into a VkClearDepthStencilValue.
+VkClearDepthStencilValue defaultClearDepthStencilValue (void)
+{
+	VkClearDepthStencilValue clearDepthStencilValue;
+	clearDepthStencilValue.depth = defaultClearDepth();
+	clearDepthStencilValue.stencil = defaultClearStencil();
+
+	return clearDepthStencilValue;
+}
+
+//! Builds the default VkClearValue for 'clearFormat', filling the union
+//! member that matches the format's aspect and numeric type:
+//!  - depth/stencil formats  -> .ds with the default depth/stencil values,
+//!  - unsigned-int formats   -> .color.u32,
+//!  - signed-int formats     -> .color.s32,
+//!  - everything else        -> .color.f32.
+VkClearValue defaultClearValue (VkFormat clearFormat)
+{
+	VkClearValue clearValue;
+
+	if (isDepthStencilFormat(clearFormat))
+	{
+		const VkClearDepthStencilValue dsValue = defaultClearDepthStencilValue();
+		clearValue.ds.stencil = dsValue.stencil;
+		clearValue.ds.depth = dsValue.depth;
+	}
+	else
+	{
+		// Map to a tcu::TextureFormat so the format-specific default color
+		// helpers can apply the lookup bias/scale remap.
+		const tcu::TextureFormat tcuClearFormat = mapVkFormat(clearFormat);
+		if (isUintFormat(clearFormat))
+		{
+			const tcu::UVec4 defaultColor = defaultClearColorUint(tcuClearFormat);
+			clearValue.color.u32[0] = defaultColor.x();
+			clearValue.color.u32[1] = defaultColor.y();
+			clearValue.color.u32[2] = defaultColor.z();
+			clearValue.color.u32[3] = defaultColor.w();
+		}
+		else if (isIntFormat(clearFormat))
+		{
+			const tcu::IVec4 defaultColor = defaultClearColorInt(tcuClearFormat);
+			clearValue.color.s32[0] = defaultColor.x();
+			clearValue.color.s32[1] = defaultColor.y();
+			clearValue.color.s32[2] = defaultColor.z();
+			clearValue.color.s32[3] = defaultColor.w();
+		}
+		else
+		{
+			const tcu::Vec4 defaultColor = defaultClearColorFloat(tcuClearFormat);
+			clearValue.color.f32[0] = defaultColor.x();
+			clearValue.color.f32[1] = defaultColor.y();
+			clearValue.color.f32[2] = defaultColor.z();
+			clearValue.color.f32[3] = defaultColor.w();
+		}
+	}
+
+	return clearValue;
+}
+
+} // pipeline
+} // vkt
--- /dev/null
+#ifndef _VKTPIPELINECLEARUTIL_HPP
+#define _VKTPIPELINECLEARUTIL_HPP
+/*------------------------------------------------------------------------
+ * Vulkan Conformance Tests
+ * ------------------------
+ *
+ * Copyright (c) 2015 The Khronos Group Inc.
+ * Copyright (c) 2015 Imagination Technologies Ltd.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and/or associated documentation files (the
+ * "Materials"), to deal in the Materials without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Materials, and to
+ * permit persons to whom the Materials are furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice(s) and this permission notice shall be included
+ * in all copies or substantial portions of the Materials.
+ *
+ * The Materials are Confidential Information as defined by the
+ * Khronos Membership Agreement until designated non-confidential by Khronos,
+ * at which point this condition clause shall be removed.
+ *
+ * THE MATERIALS ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * MATERIALS OR THE USE OR OTHER DEALINGS IN THE MATERIALS.
+ *
+ *//*!
+ * \file
+ * \brief Utilities for clear values.
+ *//*--------------------------------------------------------------------*/
+
+#include "tcuTexture.hpp"
+#include "tcuVectorUtil.hpp"
+#include "vkDefs.hpp"
+
+namespace vkt
+{
+namespace pipeline
+{
+
+// Default clear colors remapped into the value range of a specific texture
+// format (all derive from defaultClearColorUnorm()).
+tcu::Vec4 defaultClearColorFloat (const tcu::TextureFormat& format);
+tcu::IVec4 defaultClearColorInt (const tcu::TextureFormat& format);
+tcu::UVec4 defaultClearColorUint (const tcu::TextureFormat& format);
+// Canonical defaults: unorm RGBA color, depth (far plane) and stencil (0).
+tcu::Vec4 defaultClearColorUnorm (void);
+float defaultClearDepth (void);
+deUint32 defaultClearStencil (void);
+
+// Vulkan-typed convenience wrappers over the defaults above.
+vk::VkClearDepthStencilValue defaultClearDepthStencilValue (void);
+vk::VkClearValue defaultClearValue (vk::VkFormat format);
+
+} // pipeline
+} // vkt
+
+#endif // _VKTPIPELINECLEARUTIL_HPP
--- /dev/null
+/*------------------------------------------------------------------------
+ * Vulkan Conformance Tests
+ * ------------------------
+ *
+ * Copyright (c) 2015 The Khronos Group Inc.
+ * Copyright (c) 2015 Imagination Technologies Ltd.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and/or associated documentation files (the
+ * "Materials"), to deal in the Materials without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Materials, and to
+ * permit persons to whom the Materials are furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice(s) and this permission notice shall be included
+ * in all copies or substantial portions of the Materials.
+ *
+ * The Materials are Confidential Information as defined by the
+ * Khronos Membership Agreement until designated non-confidential by Khronos,
+ * at which point this condition clause shall be removed.
+ *
+ * THE MATERIALS ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * MATERIALS OR THE USE OR OTHER DEALINGS IN THE MATERIALS.
+ *
+ *//*!
+ * \file
+ * \brief Depth Tests
+ *//*--------------------------------------------------------------------*/
+
+#include "vktPipelineDepthTests.hpp"
+#include "vktPipelineClearUtil.hpp"
+#include "vktPipelineImageUtil.hpp"
+#include "vktPipelineVertexUtil.hpp"
+#include "vktPipelineReferenceRenderer.hpp"
+#include "vktTestCase.hpp"
+#include "vktTestCaseUtil.hpp"
+#include "vkImageUtil.hpp"
+#include "vkMemUtil.hpp"
+#include "vkPrograms.hpp"
+#include "vkQueryUtil.hpp"
+#include "vkRef.hpp"
+#include "vkRefUtil.hpp"
+#include "tcuImageCompare.hpp"
+#include "deUniquePtr.hpp"
+#include "deStringUtil.hpp"
+#include "deMemory.h"
+
+#include <sstream>
+#include <vector>
+
+namespace vkt
+{
+namespace pipeline
+{
+
+using namespace vk;
+
+namespace
+{
+
+//! Returns true if 'format' supports depth/stencil attachment usage with
+//! optimal tiling on the given physical device.
+bool isSupportedDepthStencilFormat (const InstanceInterface& instanceInterface, VkPhysicalDevice device, VkFormat format)
+{
+	VkFormatProperties formatProps;
+
+	// NOTE(review): in this API revision the query returns a VkResult,
+	// hence the VK_CHECK wrapper.
+	VK_CHECK(instanceInterface.getPhysicalDeviceFormatProperties(device, format, &formatProps));
+
+	return (formatProps.optimalTilingFeatures & VK_FORMAT_FEATURE_DEPTH_STENCIL_ATTACHMENT_BIT) != 0u;
+}
+
+//! Test body: passes iff 'format' (which must be a depth/stencil format)
+//! can be used as a depth/stencil attachment on the current device.
+tcu::TestStatus testSupportsDepthStencilFormat (Context& context, VkFormat format)
+{
+	DE_ASSERT(vk::isDepthStencilFormat(format));
+
+	if (isSupportedDepthStencilFormat(context.getInstanceInterface(), context.getPhysicalDevice(), format))
+		return tcu::TestStatus::pass("Format can be used in depth/stencil attachment");
+	else
+		return tcu::TestStatus::fail("Unsupported depth/stencil attachment format");
+}
+
+//! Test body: passes iff at least one of 'formats' (all must be
+//! depth/stencil formats) supports depth/stencil attachment usage.
+//! The pass message lists every supported format, comma-separated.
+tcu::TestStatus testSupportsAtLeastOneDepthStencilFormat (Context& context, const std::vector<VkFormat> formats)
+{
+	std::ostringstream supportedFormatsMsg;
+	bool pass = false;
+
+	DE_ASSERT(!formats.empty());
+
+	for (size_t formatNdx = 0; formatNdx < formats.size(); formatNdx++)
+	{
+		const VkFormat format = formats[formatNdx];
+
+		DE_ASSERT(vk::isDepthStencilFormat(format));
+
+		if (isSupportedDepthStencilFormat(context.getInstanceInterface(), context.getPhysicalDevice(), format))
+		{
+			// Emit the separator only when the list already has an entry.
+			// The previous index-based check (formatNdx < size()-1) produced
+			// stray separators whenever an unsupported format preceded or
+			// followed a supported one.
+			if (pass)
+				supportedFormatsMsg << ", ";
+
+			pass = true;
+			supportedFormatsMsg << vk::getFormatName(format);
+		}
+	}
+
+	if (pass)
+		return tcu::TestStatus::pass(std::string("Supported depth/stencil formats: ") + supportedFormatsMsg.str());
+	else
+		return tcu::TestStatus::fail("All depth/stencil formats are unsupported");
+}
+
+//! Parameterized depth test case: configured with a depth attachment format
+//! and one depth compare operator per rendered quad.
+class DepthTest : public vkt::TestCase
+{
+public:
+	enum
+	{
+		// Number of quads rendered; each quad gets its own VkCompareOp.
+		QUAD_COUNT = 4
+	};
+
+	// Per-quad depth values (defined below the class).
+	static const float quadDepths[QUAD_COUNT];
+
+	DepthTest (tcu::TestContext& testContext,
+			   const std::string& name,
+			   const std::string& description,
+			   const VkFormat depthFormat,
+			   const VkCompareOp depthCompareOps[QUAD_COUNT]);
+	virtual ~DepthTest (void);
+	// Registers the GLSL vertex/fragment shader sources.
+	virtual void initPrograms (SourceCollections& programCollection) const;
+	virtual TestInstance* createInstance (Context& context) const;
+
+private:
+	const VkFormat m_depthFormat;
+	// Compare op applied to each quad, copied from the constructor argument.
+	VkCompareOp m_depthCompareOps[QUAD_COUNT];
+};
+
+//! Per-execution instance of DepthTest: owns all Vulkan objects needed to
+//! render the quads and verify the resulting image.
+class DepthTestInstance : public vkt::TestInstance
+{
+public:
+	DepthTestInstance (Context& context, const VkFormat depthFormat, const VkCompareOp depthCompareOps[DepthTest::QUAD_COUNT]);
+	virtual ~DepthTestInstance (void);
+	virtual tcu::TestStatus iterate (void);
+
+private:
+	// Compares the rendered image against the reference renderer output.
+	tcu::TestStatus verifyImage (void);
+
+private:
+	VkCompareOp m_depthCompareOps[DepthTest::QUAD_COUNT];
+	const tcu::IVec2 m_renderSize;
+	const VkFormat m_colorFormat;
+	const VkFormat m_depthFormat;
+
+	// Render targets: color + depth images, their memory and views.
+	Move<VkImage> m_colorImage;
+	de::MovePtr<Allocation> m_colorImageAlloc;
+	Move<VkImage> m_depthImage;
+	de::MovePtr<Allocation> m_depthImageAlloc;
+	Move<VkAttachmentView> m_colorAttachmentView;
+	Move<VkAttachmentView> m_depthAttachmentView;
+	Move<VkRenderPass> m_renderPass;
+	Move<VkFramebuffer> m_framebuffer;
+
+	// Shader modules and shader objects (separate in this API revision).
+	Move<VkShaderModule> m_vertexShaderModule;
+	Move<VkShaderModule> m_fragmentShaderModule;
+	Move<VkShader> m_vertexShader;
+	Move<VkShader> m_fragmentShader;
+
+	// Quad geometry.
+	Move<VkBuffer> m_vertexBuffer;
+	std::vector<Vertex4RGBA> m_vertices;
+	de::MovePtr<Allocation> m_vertexBufferAlloc;
+
+	// One pipeline per quad, differing only in depthCompareOp.
+	Move<VkPipelineLayout> m_pipelineLayout;
+	Move<VkPipeline> m_graphicsPipelines[DepthTest::QUAD_COUNT];
+
+	Move<VkCmdPool> m_cmdPool;
+	Move<VkCmdBuffer> m_cmdBuffer;
+
+	// Dynamic state objects (separate objects in this API revision).
+	Move<VkDynamicViewportState> m_viewportState;
+	Move<VkDynamicRasterState> m_rasterState;
+	Move<VkDynamicColorBlendState> m_colorBlendState;
+	Move<VkDynamicDepthStencilState> m_depthStencilState;
+
+	Move<VkFence> m_fence;
+};
+
+// Depth value assigned to each of the four quads.
+const float DepthTest::quadDepths[QUAD_COUNT] =
+{
+	0.1f,
+	0.0f,
+	0.3f,
+	0.2f
+};
+
+//! Stores the depth format and copies the per-quad compare operators.
+DepthTest::DepthTest (tcu::TestContext& testContext,
+					  const std::string& name,
+					  const std::string& description,
+					  const VkFormat depthFormat,
+					  const VkCompareOp depthCompareOps[QUAD_COUNT])
+	: vkt::TestCase (testContext, name, description)
+	, m_depthFormat (depthFormat)
+{
+	// Array parameter decays to a pointer, so copy the ops explicitly.
+	deMemcpy(m_depthCompareOps, depthCompareOps, sizeof(VkCompareOp) * QUAD_COUNT);
+}
+
+// No resources to release; defined out-of-line to match the declaration.
+DepthTest::~DepthTest (void)
+{
+}
+
+//! Creates the per-execution instance; ownership passes to the framework.
+TestInstance* DepthTest::createInstance (Context& context) const
+{
+	return new DepthTestInstance(context, m_depthFormat, m_depthCompareOps);
+}
+
+//! Registers a pass-through vertex shader and a fragment shader that writes
+//! the interpolated vertex color. Depth behavior comes entirely from the
+//! pipeline state, so the shaders stay minimal.
+void DepthTest::initPrograms (SourceCollections& programCollection) const
+{
+	programCollection.glslSources.add("color_vert") << glu::VertexSource(
+		"#version 310 es\n"
+		"layout(location = 0) in vec4 position;\n"
+		"layout(location = 1) in vec4 color;\n"
+		"layout(location = 0) out highp vec4 vtxColor;\n"
+		"void main (void)\n"
+		"{\n"
+		"	gl_Position = position;\n"
+		"	vtxColor = color;\n"
+		"}\n");
+
+	programCollection.glslSources.add("color_frag") << glu::FragmentSource(
+		"#version 310 es\n"
+		"layout(location = 0) in highp vec4 vtxColor;\n"
+		"layout(location = 0) out highp vec4 fragColor;\n"
+		"void main (void)\n"
+		"{\n"
+		"	fragColor = vtxColor;\n"
+		"}\n");
+}
+
+DepthTestInstance::DepthTestInstance (Context& context,
+ const VkFormat depthFormat,
+ const VkCompareOp depthCompareOps[DepthTest::QUAD_COUNT])
+ : vkt::TestInstance (context)
+ , m_renderSize (32, 32)
+ , m_colorFormat (VK_FORMAT_R8G8B8A8_UNORM)
+ , m_depthFormat (depthFormat)
+{
+ const DeviceInterface& vk = context.getDeviceInterface();
+ const VkDevice vkDevice = context.getDevice();
+ const deUint32 queueFamilyIndex = context.getUniversalQueueFamilyIndex();
+ SimpleAllocator memAlloc (vk, vkDevice, getPhysicalDeviceMemoryProperties(context.getInstanceInterface(), context.getPhysicalDevice()));
+
+ // Copy depth operators
+ deMemcpy(m_depthCompareOps, depthCompareOps, sizeof(VkCompareOp) * DepthTest::QUAD_COUNT);
+
+ // Create color image
+ {
+ const VkImageCreateInfo colorImageParams =
+ {
+ VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO, // VkStructureType sType;
+ DE_NULL, // const void* pNext;
+ VK_IMAGE_TYPE_2D, // VkImageType imageType;
+ m_colorFormat, // VkFormat format;
+ { m_renderSize.x(), m_renderSize.y(), 1u }, // VkExtent3D extent;
+ 1u, // deUint32 mipLevels;
+ 1u, // deUint32 arraySize;
+ 1u, // deUint32 samples;
+ VK_IMAGE_TILING_OPTIMAL, // VkImageTiling tiling;
+ VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | VK_IMAGE_USAGE_TRANSFER_SOURCE_BIT, // VkImageUsageFlags usage;
+ 0u, // VkImageCreateFlags flags;
+ VK_SHARING_MODE_EXCLUSIVE, // VkSharingMode sharingMode;
+ 1u, // deUint32 queueFamilyCount;
+ &queueFamilyIndex // const deUint32* pQueueFamilyIndices;
+ };
+
+ m_colorImage = createImage(vk, vkDevice, &colorImageParams);
+
+ // Allocate and bind color image memory
+ m_colorImageAlloc = memAlloc.allocate(getImageMemoryRequirements(vk, vkDevice, *m_colorImage), MemoryRequirement::Any);
+ VK_CHECK(vk.bindImageMemory(vkDevice, *m_colorImage, m_colorImageAlloc->getMemory(), m_colorImageAlloc->getOffset()));
+ }
+
+ // Create depth image
+ {
+ // Check format support
+ if (!isSupportedDepthStencilFormat(context.getInstanceInterface(), context.getPhysicalDevice(), m_depthFormat))
+ throw tcu::NotSupportedError(std::string("Unsupported depth/stencil format: ") + getFormatName(m_depthFormat));
+
+ const VkImageCreateInfo depthImageParams =
+ {
+ VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO, // VkStructureType sType;
+ DE_NULL, // const void* pNext;
+ VK_IMAGE_TYPE_2D, // VkImageType imageType;
+ m_depthFormat, // VkFormat format;
+ { m_renderSize.x(), m_renderSize.y(), 1u }, // VkExtent3D extent;
+ 1u, // deUint32 mipLevels;
+ 1u, // deUint32 arraySize;
+ 1u, // deUint32 samples;
+ VK_IMAGE_TILING_OPTIMAL, // VkImageTiling tiling;
+ VK_IMAGE_USAGE_DEPTH_STENCIL_BIT, // VkImageUsageFlags usage;
+ 0u, // VkImageCreateFlags flags;
+ VK_SHARING_MODE_EXCLUSIVE, // VkSharingMode sharingMode;
+ 1u, // deUint32 queueFamilyCount;
+ &queueFamilyIndex // const deUint32* pQueueFamilyIndices;
+ };
+
+ m_depthImage = createImage(vk, vkDevice, &depthImageParams);
+
+ // Allocate and bind depth image memory
+ m_depthImageAlloc = memAlloc.allocate(getImageMemoryRequirements(vk, vkDevice, *m_depthImage), MemoryRequirement::Any);
+ VK_CHECK(vk.bindImageMemory(vkDevice, *m_depthImage, m_depthImageAlloc->getMemory(), m_depthImageAlloc->getOffset()));
+ }
+
+ // Create color attachment view
+ {
+ const VkAttachmentViewCreateInfo colorAttachmentViewParams =
+ {
+ VK_STRUCTURE_TYPE_ATTACHMENT_VIEW_CREATE_INFO, // VkStructureType sType;
+ DE_NULL, // constvoid* pNext;
+ *m_colorImage, // VkImage image;
+ m_colorFormat, // VkFormat format;
+ 0u, // deUint32 mipLevel;
+ 0u, // deUint32 baseArraySlice;
+ 1u, // deUint32 arraySize;
+ 0u // VkAttachmentViewCreateFlags flags;
+ };
+
+ m_colorAttachmentView = createAttachmentView(vk, vkDevice, &colorAttachmentViewParams);
+ }
+
+ // Create depth attachment view
+ {
+ const VkAttachmentViewCreateInfo depthAttachmentViewParams =
+ {
+ VK_STRUCTURE_TYPE_ATTACHMENT_VIEW_CREATE_INFO, // VkStructureType sType;
+ DE_NULL, // constvoid* pNext;
+ *m_depthImage, // VkImage image;
+ m_depthFormat, // VkFormat format;
+ 0u, // deUint32 mipLevel;
+ 0u, // deUint32 baseArraySlice;
+ 1u, // deUint32 arraySize;
+ 0u // VkAttachmentViewCreateFlags flags;
+ };
+
+ m_depthAttachmentView = createAttachmentView(vk, vkDevice, &depthAttachmentViewParams);
+ }
+
+ // Create render pass
+ {
+ const VkAttachmentDescription colorAttachmentDescription =
+ {
+ VK_STRUCTURE_TYPE_ATTACHMENT_DESCRIPTION, // VkStructureType sType;
+ DE_NULL, // const void* pNext;
+ m_colorFormat, // VkFormat format;
+ 1u, // deUint32 samples;
+ VK_ATTACHMENT_LOAD_OP_CLEAR, // VkAttachmentLoadOp loadOp;
+ VK_ATTACHMENT_STORE_OP_STORE, // VkAttachmentStoreOp storeOp;
+ VK_ATTACHMENT_LOAD_OP_DONT_CARE, // VkAttachmentLoadOp stencilLoadOp;
+ VK_ATTACHMENT_STORE_OP_DONT_CARE, // VkAttachmentStoreOp stencilStoreOp;
+ VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL, // VkImageLayout initialLayout;
+ VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL // VkImageLayout finalLayout;
+ };
+
+ const VkAttachmentDescription depthAttachmentDescription =
+ {
+ VK_STRUCTURE_TYPE_ATTACHMENT_DESCRIPTION, // VkStructureType sType;
+ DE_NULL, // const void* pNext;
+ m_depthFormat, // VkFormat format;
+ 1u, // deUint32 samples;
+ VK_ATTACHMENT_LOAD_OP_CLEAR, // VkAttachmentLoadOp loadOp;
+ VK_ATTACHMENT_STORE_OP_DONT_CARE, // VkAttachmentStoreOp storeOp;
+ VK_ATTACHMENT_LOAD_OP_DONT_CARE, // VkAttachmentLoadOp stencilLoadOp;
+ VK_ATTACHMENT_STORE_OP_DONT_CARE, // VkAttachmentStoreOp stencilStoreOp;
+ VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL, // VkImageLayout initialLayout;
+ VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL // VkImageLayout finalLayout;
+ };
+
+ const VkAttachmentDescription attachments[2] =
+ {
+ colorAttachmentDescription,
+ depthAttachmentDescription
+ };
+
+ const VkAttachmentReference colorAttachmentReference =
+ {
+ 0u, // deUint32 attachment;
+ VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL // VkImageLayout layout;
+ };
+
+ const VkAttachmentReference depthAttachmentReference =
+ {
+ 1u, // deUint32 attachment;
+ VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL // VkImageLayout layout;
+ };
+
+ const VkSubpassDescription subpassDescription =
+ {
+ VK_STRUCTURE_TYPE_SUBPASS_DESCRIPTION, // VkStructureType sType;
+ DE_NULL, // constvoid* pNext;
+ VK_PIPELINE_BIND_POINT_GRAPHICS, // VkPipelineBindPoint pipelineBindPoint;
+ 0u, // VkSubpassDescriptionFlags flags;
+ 0u, // deUint32 inputCount;
+ DE_NULL, // constVkAttachmentReference* inputAttachments;
+ 1u, // deUint32 colorCount;
+ &colorAttachmentReference, // constVkAttachmentReference* colorAttachments;
+ DE_NULL, // constVkAttachmentReference* resolveAttachments;
+ depthAttachmentReference, // VkAttachmentReference depthStencilAttachment;
+ 0u, // deUint32 preserveCount;
+ DE_NULL // constVkAttachmentReference* preserveAttachments;
+ };
+
+ const VkRenderPassCreateInfo renderPassParams =
+ {
+ VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO, // VkStructureType sType;
+ DE_NULL, // const void* pNext;
+ 2u, // deUint32 attachmentCount;
+ attachments, // const VkAttachmentDescription* pAttachments;
+ 1u, // deUint32 subpassCount;
+ &subpassDescription, // const VkSubpassDescription* pSubpasses;
+ 0u, // deUint32 dependencyCount;
+ DE_NULL // const VkSubpassDependency* pDependencies;
+ };
+
+ m_renderPass = createRenderPass(vk, vkDevice, &renderPassParams);
+ }
+
+ // Create framebuffer
+ {
+ const VkAttachmentBindInfo attachmentBindInfos[2] =
+ {
+ { *m_colorAttachmentView, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL },
+ { *m_depthAttachmentView, VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL }
+ };
+
+ const VkFramebufferCreateInfo framebufferParams =
+ {
+ VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO, // VkStructureType sType;
+ DE_NULL, // const void* pNext;
+ *m_renderPass, // VkRenderPass renderPass;
+ 2u, // deUint32 attachmentCount;
+ attachmentBindInfos, // const VkAttachmentBindInfo* pAttachments;
+ (deUint32)m_renderSize.x(), // deUint32 width;
+ (deUint32)m_renderSize.y(), // deUint32 height;
+ 1u // deUint32 layers;
+ };
+
+ m_framebuffer = createFramebuffer(vk, vkDevice, &framebufferParams);
+ }
+
+ // Create pipeline layout
+ {
+ const VkPipelineLayoutCreateInfo pipelineLayoutParams =
+ {
+ VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO, // VkStructureType sType;
+ DE_NULL, // const void* pNext;
+ 0u, // deUint32 descriptorSetCount;
+ DE_NULL, // const VkDescriptorSetLayout* pSetLayouts;
+ 0u, // deUint32 pushConstantRangeCount;
+ DE_NULL // const VkPushConstantRange* pPushConstantRanges;
+ };
+
+ m_pipelineLayout = createPipelineLayout(vk, vkDevice, &pipelineLayoutParams);
+ }
+
+ // Create shaders
+ {
+ m_vertexShaderModule = createShaderModule(vk, vkDevice, m_context.getBinaryCollection().get("color_vert"), 0);
+ m_fragmentShaderModule = createShaderModule(vk, vkDevice, m_context.getBinaryCollection().get("color_frag"), 0);
+
+ const VkShaderCreateInfo vertexShaderParams =
+ {
+ VK_STRUCTURE_TYPE_SHADER_CREATE_INFO, // VkStructureType sType;
+ DE_NULL, // const void* pNext;
+ *m_vertexShaderModule, // VkShaderModule module;
+ "main", // const char* pName;
+ 0u // VkShaderCreateFlags flags;
+ };
+
+ const VkShaderCreateInfo fragmentShaderParams =
+ {
+ VK_STRUCTURE_TYPE_SHADER_CREATE_INFO, // VkStructureType sType;
+ DE_NULL, // const void* pNext;
+ *m_fragmentShaderModule, // VkShaderModule module;
+ "main", // const char* pName;
+ 0u // VkShaderCreateFlags flags;
+ };
+
+ m_vertexShader = createShader(vk, vkDevice, &vertexShaderParams);
+ m_fragmentShader = createShader(vk, vkDevice, &fragmentShaderParams);
+ }
+
+ // Create pipeline
+ {
+ const VkPipelineShaderStageCreateInfo shaderStageParams[2] =
+ {
+ {
+ VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO, // VkStructureType sType;
+ DE_NULL, // const void* pNext;
+ VK_SHADER_STAGE_VERTEX, // VkShaderStage stage;
+ *m_vertexShader, // VkShader shader;
+ DE_NULL // const VkSpecializationInfo* pSpecializationInfo;
+ },
+ {
+ VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO, // VkStructureType sType;
+ DE_NULL, // const void* pNext;
+ VK_SHADER_STAGE_FRAGMENT, // VkShaderStage stage;
+ *m_fragmentShader, // VkShader shader;
+ DE_NULL // const VkSpecializationInfo* pSpecializationInfo;
+ }
+ };
+
+ const VkVertexInputBindingDescription vertexInputBindingDescription =
+ {
+ 0u, // deUint32 binding;
+ sizeof(Vertex4RGBA), // deUint32 strideInBytes;
+ VK_VERTEX_INPUT_STEP_RATE_VERTEX // VkVertexInputStepRate stepRate;
+ };
+
+ const VkVertexInputAttributeDescription vertexInputAttributeDescriptions[2] =
+ {
+ {
+ 0u, // deUint32 location;
+ 0u, // deUint32 binding;
+ VK_FORMAT_R32G32B32A32_SFLOAT, // VkFormat format;
+ 0u // deUint32 offsetInBytes;
+ },
+ {
+ 1u, // deUint32 location;
+ 0u, // deUint32 binding;
+ VK_FORMAT_R32G32B32A32_SFLOAT, // VkFormat format;
+ DE_OFFSET_OF(Vertex4RGBA, color), // deUint32 offsetInBytes;
+ }
+ };
+
+ const VkPipelineVertexInputStateCreateInfo vertexInputStateParams =
+ {
+ VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO, // VkStructureType sType;
+ DE_NULL, // const void* pNext;
+ 1u, // deUint32 bindingCount;
+ &vertexInputBindingDescription, // const VkVertexInputBindingDescription* pVertexBindingDescriptions;
+ 2u, // deUint32 attributeCount;
+ vertexInputAttributeDescriptions // const VkVertexInputAttributeDescription* pVertexAttributeDescriptions;
+ };
+
+ const VkPipelineInputAssemblyStateCreateInfo inputAssemblyStateParams =
+ {
+ VK_STRUCTURE_TYPE_PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO, // VkStructureType sType;
+ DE_NULL, // const void* pNext;
+ VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST, // VkPrimitiveTopology topology;
+ false // VkBool32 primitiveRestartEnable;
+ };
+
+ const VkPipelineViewportStateCreateInfo viewportStateParams =
+ {
+ VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_STATE_CREATE_INFO, // VkStructureType sType;
+ DE_NULL, // const void* pNext;
+ 1u // deUint32 viewportCount;
+ };
+
+ const VkPipelineRasterStateCreateInfo rasterStateParams =
+ {
+ VK_STRUCTURE_TYPE_PIPELINE_RASTER_STATE_CREATE_INFO, // VkStructureType sType;
+ DE_NULL, // const void* pNext;
+ false, // VkBool32 depthClipEnable;
+ false, // VkBool32 rasterizerDiscardEnable;
+ VK_FILL_MODE_SOLID, // VkFillMode fillMode;
+ VK_CULL_MODE_NONE, // VkCullMode cullMode;
+ VK_FRONT_FACE_CCW // VkFrontFace frontFace;
+ };
+
+ const VkPipelineColorBlendAttachmentState colorBlendAttachmentState =
+ {
+ false, // VkBool32 blendEnable;
+ VK_BLEND_ONE, // VkBlend srcBlendColor;
+ VK_BLEND_ZERO, // VkBlend destBlendColor;
+ VK_BLEND_OP_ADD, // VkBlendOp blendOpColor;
+ VK_BLEND_ONE, // VkBlend srcBlendAlpha;
+ VK_BLEND_ZERO, // VkBlend destBlendAlpha;
+ VK_BLEND_OP_ADD, // VkBlendOp blendOpAlpha;
+ VK_CHANNEL_R_BIT | VK_CHANNEL_G_BIT | VK_CHANNEL_B_BIT | VK_CHANNEL_A_BIT // VkChannelFlags channelWriteMask;
+ };
+
+ const VkPipelineColorBlendStateCreateInfo colorBlendStateParams =
+ {
+ VK_STRUCTURE_TYPE_PIPELINE_COLOR_BLEND_STATE_CREATE_INFO, // VkStructureType sType;
+ DE_NULL, // const void* pNext;
+ false, // VkBool32 alphaToCoverageEnable;
+ false, // VkBool32 logicOpEnable;
+ VK_LOGIC_OP_COPY, // VkLogicOp logicOp;
+ 1u, // deUint32 attachmentCount;
+ &colorBlendAttachmentState // const VkPipelineColorBlendAttachmentState* pAttachments;
+ };
+
+ const VkPipelineMultisampleStateCreateInfo multisampleStateParams =
+ {
+ VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO, // VkStructureType sType;
+ DE_NULL, // const void* pNext;
+ 1u, // deUint32 rasterSamples;
+ false, // VkBool32 sampleShadingEnable;
+ 0.0f, // float minSampleShading;
+ ~((VkSampleMask)0) // VkSampleMask sampleMask;
+ };
+
+ VkPipelineDepthStencilStateCreateInfo depthStencilStateParams =
+ {
+ VK_STRUCTURE_TYPE_PIPELINE_DEPTH_STENCIL_STATE_CREATE_INFO, // VkStructureType sType;
+ DE_NULL, // const void* pNext;
+ true, // VkBool32 depthTestEnable;
+ true, // VkBool32 depthWriteEnable;
+ VK_COMPARE_OP_LESS, // VkCompareOp depthCompareOp;
+ false, // VkBool32 depthBoundsEnable;
+ false, // VkBool32 stencilTestEnable;
+ // VkStencilOpState front;
+ {
+ VK_STENCIL_OP_KEEP, // VkStencilOp stencilFailOp;
+ VK_STENCIL_OP_KEEP, // VkStencilOp stencilPassOp;
+ VK_STENCIL_OP_KEEP, // VkStencilOp stencilDepthFailOp;
+ VK_COMPARE_OP_NEVER // VkCompareOp stencilCompareOp;
+ },
+ // VkStencilOpState back;
+ {
+ VK_STENCIL_OP_KEEP, // VkStencilOp stencilFailOp;
+ VK_STENCIL_OP_KEEP, // VkStencilOp stencilPassOp;
+ VK_STENCIL_OP_KEEP, // VkStencilOp stencilDepthFailOp;
+ VK_COMPARE_OP_NEVER // VkCompareOp stencilCompareOp;
+ }
+ };
+
+ const VkGraphicsPipelineCreateInfo graphicsPipelineParams =
+ {
+ VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO, // VkStructureType sType;
+ DE_NULL, // const void* pNext;
+ 2u, // deUint32 stageCount;
+ shaderStageParams, // const VkPipelineShaderStageCreateInfo* pStages;
+ &vertexInputStateParams, // const VkPipelineVertexInputStateCreateInfo* pVertexInputState;
+ &inputAssemblyStateParams, // const VkPipelineInputAssemblyStateCreateInfo* pInputAssemblyState;
+ DE_NULL, // const VkPipelineTessellationStateCreateInfo* pTessellationState;
+ &viewportStateParams, // const VkPipelineViewportStateCreateInfo* pViewportState;
+ &rasterStateParams, // const VkPipelineRasterStateCreateInfo* pRasterState;
+ &multisampleStateParams, // const VkPipelineMultisampleStateCreateInfo* pMultisampleState;
+ &depthStencilStateParams, // const VkPipelineDepthStencilStateCreateInfo* pDepthStencilState;
+ &colorBlendStateParams, // const VkPipelineColorBlendStateCreateInfo* pColorBlendState;
+ 0u, // VkPipelineCreateFlags flags;
+ *m_pipelineLayout, // VkPipelineLayout layout;
+ *m_renderPass, // VkRenderPass renderPass;
+ 0u, // deUint32 subpass;
+ 0u, // VkPipeline basePipelineHandle;
+ 0u // deInt32 basePipelineIndex;
+ };
+
+ for (int quadNdx = 0; quadNdx < DepthTest::QUAD_COUNT; quadNdx++)
+ {
+ depthStencilStateParams.depthCompareOp = depthCompareOps[quadNdx];
+ m_graphicsPipelines[quadNdx] = createGraphicsPipeline(vk, vkDevice, DE_NULL, &graphicsPipelineParams);
+ }
+ }
+
+ // Create dynamic states
+ {
+ const VkViewport viewport =
+ {
+ 0.0f, // float originX;
+ 0.0f, // float originY;
+ (float)m_renderSize.x(), // float width;
+ (float)m_renderSize.y(), // float height;
+ 0.0f, // float minDepth;
+ 1.0f // float maxDepth;
+ };
+
+ const VkRect2D scissor =
+ {
+ { 0, 0 }, // VkOffset2D offset;
+ { m_renderSize.x(), m_renderSize.y() } // VkExtent2D extent;
+ };
+
+ const VkDynamicViewportStateCreateInfo viewportStateParams =
+ {
+ VK_STRUCTURE_TYPE_DYNAMIC_VIEWPORT_STATE_CREATE_INFO, // VkStructureType sType;
+ DE_NULL, // const void* pNext;
+ 1, // deUint32 viewportAndScissorCount;
+ &viewport, // const VkViewport* pViewports;
+ &scissor // const VkRect2D* pScissors;
+ };
+
+ const VkDynamicRasterStateCreateInfo rasterStateParams =
+ {
+ VK_STRUCTURE_TYPE_DYNAMIC_RASTER_STATE_CREATE_INFO, // VkStructureType sType;
+ DE_NULL, // const void* pNext;
+ 0.0f, // float depthBias;
+ 0.0f, // float depthBiasClamp;
+ 0.0f, // float slopeScaledDepthBias;
+ 1.0f, // float lineWidth;
+ };
+
+ const VkDynamicColorBlendStateCreateInfo colorBlendStateParams =
+ {
+ VK_STRUCTURE_TYPE_DYNAMIC_COLOR_BLEND_STATE_CREATE_INFO, // VkStructureType sType;
+ DE_NULL, // const void* pNext;
+ { 0.0f, 0.0f, 0.0f, 0.0f } // float blendConst[4];
+ };
+
+ const VkDynamicDepthStencilStateCreateInfo depthStencilStateParams =
+ {
+ VK_STRUCTURE_TYPE_DYNAMIC_DEPTH_STENCIL_STATE_CREATE_INFO, // VkStructureType sType;
+ DE_NULL, // const void* pNext;
+ 0.0f, // float minDepthBounds;
+ 1.0f, // float maxDepthBounds;
+ 0x0, // deUint32 stencilReadMask;
+ 0x0, // deUint32 stencilWriteMask;
+ 0x0, // deUint32 stencilFrontRef;
+ 0x0 // deUint32 stencilBackRef;
+ };
+
+ m_viewportState = createDynamicViewportState(vk, vkDevice, &viewportStateParams);
+ m_rasterState = createDynamicRasterState(vk, vkDevice, &rasterStateParams);
+ m_colorBlendState = createDynamicColorBlendState(vk, vkDevice, &colorBlendStateParams);
+ m_depthStencilState = createDynamicDepthStencilState(vk, vkDevice, &depthStencilStateParams);
+ }
+
+ // Create vertex buffer
+ {
+ const VkBufferCreateInfo vertexBufferParams =
+ {
+ VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO, // VkStructureType sType;
+ DE_NULL, // const void* pNext;
+ 1024u, // VkDeviceSize size;
+ VK_BUFFER_USAGE_VERTEX_BUFFER_BIT, // VkBufferUsageFlags usage;
+ 0u, // VkBufferCreateFlags flags;
+ VK_SHARING_MODE_EXCLUSIVE, // VkSharingMode sharingMode;
+ 1u, // deUint32 queueFamilyCount;
+ &queueFamilyIndex // const deUint32* pQueueFamilyIndices;
+ };
+
+ m_vertices = createOverlappingQuads();
+ m_vertexBuffer = createBuffer(vk, vkDevice, &vertexBufferParams);
+ m_vertexBufferAlloc = memAlloc.allocate(getBufferMemoryRequirements(vk, vkDevice, *m_vertexBuffer), MemoryRequirement::HostVisible);
+
+ VK_CHECK(vk.bindBufferMemory(vkDevice, *m_vertexBuffer, m_vertexBufferAlloc->getMemory(), m_vertexBufferAlloc->getOffset()));
+
+ // Adjust depths
+ for (int quadNdx = 0; quadNdx < DepthTest::QUAD_COUNT; quadNdx++)
+ for (int vertexNdx = 0; vertexNdx < 6; vertexNdx++)
+ m_vertices[quadNdx * 6 + vertexNdx].position.z() = DepthTest::quadDepths[quadNdx];
+
+ // Load vertices into vertex buffer
+ deMemcpy(m_vertexBufferAlloc->getHostPtr(), m_vertices.data(), m_vertices.size() * sizeof(Vertex4RGBA));
+ flushMappedMemoryRange(vk, vkDevice, m_vertexBufferAlloc->getMemory(), m_vertexBufferAlloc->getOffset(), vertexBufferParams.size);
+ }
+
+ // Create command pool
+ {
+ const VkCmdPoolCreateInfo cmdPoolParams =
+ {
+ VK_STRUCTURE_TYPE_CMD_POOL_CREATE_INFO, // VkStructureType sType;
+ DE_NULL, // const void* pNext;
+ queueFamilyIndex, // deUint32 queueFamilyIndex;
+ VK_CMD_POOL_CREATE_TRANSIENT_BIT // VkCmdPoolCreateFlags flags;
+ };
+
+ m_cmdPool = createCommandPool(vk, vkDevice, &cmdPoolParams);
+ }
+
+ // Create command buffer
+ {
+ const VkCmdBufferCreateInfo cmdBufferParams =
+ {
+ VK_STRUCTURE_TYPE_CMD_BUFFER_CREATE_INFO, // VkStructureType sType;
+ DE_NULL, // const void* pNext;
+ *m_cmdPool, // VkCmdPool cmdPool;
+ VK_CMD_BUFFER_LEVEL_PRIMARY, // VkCmdBufferLevel level;
+ 0u // VkCmdBufferCreateFlags flags;
+ };
+
+ const VkCmdBufferBeginInfo cmdBufferBeginInfo =
+ {
+ VK_STRUCTURE_TYPE_CMD_BUFFER_BEGIN_INFO, // VkStructureType sType;
+ DE_NULL, // const void* pNext;
+ 0u, // VkCmdBufferOptimizeFlags flags;
+ DE_NULL, // VkRenderPass renderPass;
+ DE_NULL // VkFramebuffer framebuffer;
+ };
+
+ const VkClearValue attachmentClearValues[2] =
+ {
+ defaultClearValue(m_colorFormat),
+ defaultClearValue(m_depthFormat),
+ };
+
+ const VkRenderPassBeginInfo renderPassBeginInfo =
+ {
+ VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO, // VkStructureType sType;
+ DE_NULL, // const void* pNext;
+ *m_renderPass, // VkRenderPass renderPass;
+ *m_framebuffer, // VkFramebuffer framebuffer;
+ { { 0, 0 }, { m_renderSize.x(), m_renderSize.y()} }, // VkRect2D renderArea;
+ 2, // deUint32 attachmentCount;
+ attachmentClearValues // const VkClearValue* pAttachmentClearValues;
+ };
+
+ m_cmdBuffer = createCommandBuffer(vk, vkDevice, &cmdBufferParams);
+
+ VK_CHECK(vk.beginCommandBuffer(*m_cmdBuffer, &cmdBufferBeginInfo));
+ vk.cmdBeginRenderPass(*m_cmdBuffer, &renderPassBeginInfo, VK_RENDER_PASS_CONTENTS_INLINE);
+
+ vk.cmdBindDynamicViewportState(*m_cmdBuffer, *m_viewportState);
+ vk.cmdBindDynamicRasterState(*m_cmdBuffer, *m_rasterState);
+ vk.cmdBindDynamicColorBlendState(*m_cmdBuffer, *m_colorBlendState);
+ vk.cmdBindDynamicDepthStencilState(*m_cmdBuffer, *m_depthStencilState);
+
+ const VkDeviceSize quadOffset = (m_vertices.size() / DepthTest::QUAD_COUNT) * sizeof(Vertex4RGBA);
+
+ for (int quadNdx = 0; quadNdx < DepthTest::QUAD_COUNT; quadNdx++)
+ {
+ VkDeviceSize vertexBufferOffset = quadOffset * quadNdx;
+
+ vk.cmdBindPipeline(*m_cmdBuffer, VK_PIPELINE_BIND_POINT_GRAPHICS, *m_graphicsPipelines[quadNdx]);
+ vk.cmdBindVertexBuffers(*m_cmdBuffer, 0, 1, &m_vertexBuffer.get(), &vertexBufferOffset);
+ vk.cmdDraw(*m_cmdBuffer, 0, (deUint32)(m_vertices.size() / DepthTest::QUAD_COUNT), 0, 1);
+ }
+
+ vk.cmdEndRenderPass(*m_cmdBuffer);
+ VK_CHECK(vk.endCommandBuffer(*m_cmdBuffer));
+ }
+
+ // Create fence
+ {
+ const VkFenceCreateInfo fenceParams =
+ {
+ VK_STRUCTURE_TYPE_FENCE_CREATE_INFO, // VkStructureType sType;
+ DE_NULL, // const void* pNext;
+ 0u // VkFenceCreateFlags flags;
+ };
+
+ m_fence = createFence(vk, vkDevice, &fenceParams);
+ }
+}
+
+// Destructor is intentionally empty: all Vulkan objects created in the
+// constructor are held in Move<> / de::MovePtr<> RAII wrappers (see the
+// m_fence.get()/*m_cmdBuffer usages) and are destroyed automatically.
+DepthTestInstance::~DepthTestInstance (void)
+{
+}
+
+// Submits the command buffer recorded in the constructor to the universal
+// queue, blocks until the GPU work has finished, and then compares the
+// rendered image against a CPU reference (verifyImage).
+tcu::TestStatus DepthTestInstance::iterate (void)
+{
+	const DeviceInterface&		vk			= m_context.getDeviceInterface();
+	const VkDevice				vkDevice	= m_context.getDevice();
+	const VkQueue				queue		= m_context.getUniversalQueue();
+
+	// Ensure the fence is unsignaled before submitting the work that signals it.
+	VK_CHECK(vk.resetFences(vkDevice, 1, &m_fence.get()));
+	VK_CHECK(vk.queueSubmit(queue, 1, &m_cmdBuffer.get(), *m_fence));
+	VK_CHECK(vk.waitForFences(vkDevice, 1, &m_fence.get(), true, ~(0ull) /* infinity*/));
+
+	return verifyImage();
+}
+
+// Renders the expected image with the CPU reference rasterizer (each quad
+// drawn with its own depth compare op, mirroring the per-quad pipelines
+// created in the constructor) and compares it against the color attachment
+// read back from the device.
+tcu::TestStatus DepthTestInstance::verifyImage (void)
+{
+	const tcu::TextureFormat	tcuColorFormat	= mapVkFormat(m_colorFormat);
+	const tcu::TextureFormat	tcuDepthFormat	= mapVkFormat(m_depthFormat);
+	const ColorVertexShader		vertexShader;
+	const ColorFragmentShader	fragmentShader	(tcuColorFormat, tcuDepthFormat);
+	const rr::Program			program			(&vertexShader, &fragmentShader);
+	ReferenceRenderer			refRenderer		(m_renderSize.x(), m_renderSize.y(), 1, tcuColorFormat, tcuDepthFormat, &program);
+	bool						compareOk		= false;
+
+	// Render reference image
+	{
+		for (int quadNdx = 0; quadNdx < DepthTest::QUAD_COUNT; quadNdx++)
+		{
+			// Set depth state
+			rr::RenderState renderState(refRenderer.getViewportState());
+			renderState.fragOps.depthTestEnabled = true;
+			renderState.fragOps.depthFunc = mapVkCompareOp(m_depthCompareOps[quadNdx]);
+
+			// Each quad occupies 6 consecutive vertices (two triangles).
+			refRenderer.draw(renderState,
+							 rr::PRIMITIVETYPE_TRIANGLES,
+							 std::vector<Vertex4RGBA>(m_vertices.begin() + quadNdx * 6,
+													  m_vertices.begin() + (quadNdx + 1) * 6));
+		}
+	}
+
+	// Compare result with reference image
+	{
+		const DeviceInterface&			vk					= m_context.getDeviceInterface();
+		const VkDevice					vkDevice			= m_context.getDevice();
+		const VkQueue					queue				= m_context.getUniversalQueue();
+		const deUint32					queueFamilyIndex	= m_context.getUniversalQueueFamilyIndex();
+		SimpleAllocator					allocator			(vk, vkDevice, getPhysicalDeviceMemoryProperties(m_context.getInstanceInterface(), m_context.getPhysicalDevice()));
+		de::MovePtr<tcu::TextureLevel>	result				= readColorAttachment(vk, vkDevice, queue, queueFamilyIndex, allocator, *m_colorImage, m_colorFormat, m_renderSize);
+
+		// Allow a small per-channel threshold (2) and a 1-pixel position
+		// deviation to tolerate rasterization differences between the
+		// reference renderer and the implementation.
+		compareOk = tcu::intThresholdPositionDeviationCompare(m_context.getTestContext().getLog(),
+															  "IntImageCompare",
+															  "Image comparison",
+															  refRenderer.getAccess(),
+															  result->getAccess(),
+															  tcu::UVec4(2, 2, 2, 2),
+															  tcu::IVec3(1, 1, 0),
+															  true,
+															  tcu::COMPARE_LOG_RESULT);
+	}
+
+	if (compareOk)
+		return tcu::TestStatus::pass("Result image matches reference");
+	else
+		return tcu::TestStatus::fail("Image mismatch");
+}
+
+// Converts a VkFormat enum name into a lower-case test case name,
+// e.g. VK_FORMAT_D16_UNORM -> "d16_unorm".
+std::string getFormatCaseName (const VkFormat format)
+{
+	const std::string	fullName	= getFormatName(format);
+
+	DE_ASSERT(de::beginsWith(fullName, "VK_FORMAT_"));
+
+	// 10 == strlen("VK_FORMAT_"): strip the enum prefix.
+	return de::toLower(fullName.substr(10));
+}
+
+// Builds a test case name from the four per-quad compare ops by joining
+// their lower-cased enum suffixes with underscores,
+// e.g. { NEVER, LESS, ... } -> "never_less_...".
+std::string getCompareOpsName (const VkCompareOp quadDepthOps[DepthTest::QUAD_COUNT])
+{
+	std::ostringstream name;
+
+	for (int quadNdx = 0; quadNdx < DepthTest::QUAD_COUNT; quadNdx++)
+	{
+		const std::string	fullOpName	= getCompareOpName(quadDepthOps[quadNdx]);
+
+		DE_ASSERT(de::beginsWith(fullOpName, "VK_COMPARE_OP_"));
+
+		// 14 == strlen("VK_COMPARE_OP_"): strip the enum prefix.
+		name << de::toLower(fullOpName.substr(14));
+
+		// Separator between quads, but not after the last one.
+		if (quadNdx < DepthTest::QUAD_COUNT - 1)
+			name << "_";
+	}
+
+	return name.str();
+}
+
+// Builds a human-readable test case description listing each quad's
+// compare op together with the depth it is drawn at.
+std::string getCompareOpsDescription (const VkCompareOp quadDepthOps[DepthTest::QUAD_COUNT])
+{
+	std::ostringstream desc;
+	desc << "Draws " << DepthTest::QUAD_COUNT << " quads with depth compare ops: ";
+
+	for (int quadNdx = 0; quadNdx < DepthTest::QUAD_COUNT; quadNdx++)
+	{
+		desc << getCompareOpName(quadDepthOps[quadNdx]) << " at depth " << DepthTest::quadDepths[quadNdx];
+
+		// Separator between entries, but not after the last one.
+		if (quadNdx < DepthTest::QUAD_COUNT - 1)
+			desc << ", ";
+	}
+	return desc.str();
+}
+
+
+} // anonymous
+
+// Builds the root "depth" test group:
+//   * format_features  - checks which depth/stencil formats are supported
+//   * format/<fmt>/compare_ops/<ops> - renders QUAD_COUNT overlapping quads
+//     with the given depth compare op combination for each depth format.
+tcu::TestCaseGroup* createDepthTests (tcu::TestContext& testCtx)
+{
+	// Depth/stencil formats exercised by the compare-op tests below.
+	const VkFormat depthFormats[] =
+	{
+		VK_FORMAT_D16_UNORM,
+		VK_FORMAT_D24_UNORM,
+		VK_FORMAT_D32_SFLOAT,
+		VK_FORMAT_D16_UNORM_S8_UINT,
+		VK_FORMAT_D24_UNORM_S8_UINT,
+		VK_FORMAT_D32_SFLOAT_S8_UINT
+	};
+
+	// Each entry configures the depth compare operators of QUAD_COUNT quads.
+	// All entries cover pair-wise combinations of compare operators.
+	const VkCompareOp depthOps[][DepthTest::QUAD_COUNT] =
+	{
+		{ VK_COMPARE_OP_NOT_EQUAL,		VK_COMPARE_OP_NOT_EQUAL,		VK_COMPARE_OP_NOT_EQUAL,		VK_COMPARE_OP_NOT_EQUAL },
+		{ VK_COMPARE_OP_NOT_EQUAL,		VK_COMPARE_OP_EQUAL,			VK_COMPARE_OP_EQUAL,			VK_COMPARE_OP_GREATER },
+		{ VK_COMPARE_OP_NOT_EQUAL,		VK_COMPARE_OP_GREATER,			VK_COMPARE_OP_GREATER,			VK_COMPARE_OP_LESS_EQUAL },
+		{ VK_COMPARE_OP_NOT_EQUAL,		VK_COMPARE_OP_GREATER_EQUAL,	VK_COMPARE_OP_GREATER_EQUAL,	VK_COMPARE_OP_GREATER_EQUAL },
+		{ VK_COMPARE_OP_NOT_EQUAL,		VK_COMPARE_OP_LESS_EQUAL,		VK_COMPARE_OP_LESS_EQUAL,		VK_COMPARE_OP_ALWAYS },
+		{ VK_COMPARE_OP_NOT_EQUAL,		VK_COMPARE_OP_LESS,				VK_COMPARE_OP_LESS,				VK_COMPARE_OP_LESS },
+		{ VK_COMPARE_OP_NOT_EQUAL,		VK_COMPARE_OP_NEVER,			VK_COMPARE_OP_NEVER,			VK_COMPARE_OP_NEVER },
+		{ VK_COMPARE_OP_EQUAL,			VK_COMPARE_OP_NOT_EQUAL,		VK_COMPARE_OP_EQUAL,			VK_COMPARE_OP_EQUAL },
+		{ VK_COMPARE_OP_EQUAL,			VK_COMPARE_OP_EQUAL,			VK_COMPARE_OP_NOT_EQUAL,		VK_COMPARE_OP_LESS },
+		{ VK_COMPARE_OP_EQUAL,			VK_COMPARE_OP_GREATER,			VK_COMPARE_OP_GREATER_EQUAL,	VK_COMPARE_OP_NOT_EQUAL },
+		{ VK_COMPARE_OP_EQUAL,			VK_COMPARE_OP_GREATER_EQUAL,	VK_COMPARE_OP_GREATER,			VK_COMPARE_OP_GREATER },
+		{ VK_COMPARE_OP_EQUAL,			VK_COMPARE_OP_LESS_EQUAL,		VK_COMPARE_OP_LESS,				VK_COMPARE_OP_LESS_EQUAL },
+		{ VK_COMPARE_OP_NOT_EQUAL,		VK_COMPARE_OP_ALWAYS,			VK_COMPARE_OP_ALWAYS,			VK_COMPARE_OP_EQUAL },
+		{ VK_COMPARE_OP_EQUAL,			VK_COMPARE_OP_LESS,				VK_COMPARE_OP_NEVER,			VK_COMPARE_OP_ALWAYS },
+		{ VK_COMPARE_OP_EQUAL,			VK_COMPARE_OP_NEVER,			VK_COMPARE_OP_LESS_EQUAL,		VK_COMPARE_OP_GREATER_EQUAL },
+		{ VK_COMPARE_OP_GREATER,		VK_COMPARE_OP_NOT_EQUAL,		VK_COMPARE_OP_GREATER,			VK_COMPARE_OP_LESS },
+		{ VK_COMPARE_OP_GREATER,		VK_COMPARE_OP_EQUAL,			VK_COMPARE_OP_GREATER_EQUAL,	VK_COMPARE_OP_ALWAYS },
+		{ VK_COMPARE_OP_GREATER,		VK_COMPARE_OP_GREATER,			VK_COMPARE_OP_NOT_EQUAL,		VK_COMPARE_OP_GREATER },
+		{ VK_COMPARE_OP_GREATER,		VK_COMPARE_OP_GREATER_EQUAL,	VK_COMPARE_OP_LESS_EQUAL,		VK_COMPARE_OP_NOT_EQUAL },
+		{ VK_COMPARE_OP_GREATER,		VK_COMPARE_OP_LESS_EQUAL,		VK_COMPARE_OP_NEVER,			VK_COMPARE_OP_GREATER_EQUAL },
+		{ VK_COMPARE_OP_GREATER,		VK_COMPARE_OP_LESS,				VK_COMPARE_OP_EQUAL,			VK_COMPARE_OP_NEVER },
+		{ VK_COMPARE_OP_GREATER_EQUAL,	VK_COMPARE_OP_NOT_EQUAL,		VK_COMPARE_OP_GREATER_EQUAL,	VK_COMPARE_OP_GREATER },
+		{ VK_COMPARE_OP_GREATER,		VK_COMPARE_OP_NEVER,			VK_COMPARE_OP_ALWAYS,			VK_COMPARE_OP_LESS_EQUAL },
+		{ VK_COMPARE_OP_GREATER_EQUAL,	VK_COMPARE_OP_EQUAL,			VK_COMPARE_OP_GREATER,			VK_COMPARE_OP_NOT_EQUAL },
+		{ VK_COMPARE_OP_GREATER_EQUAL,	VK_COMPARE_OP_GREATER,			VK_COMPARE_OP_EQUAL,			VK_COMPARE_OP_GREATER_EQUAL },
+		{ VK_COMPARE_OP_GREATER_EQUAL,	VK_COMPARE_OP_GREATER_EQUAL,	VK_COMPARE_OP_NOT_EQUAL,		VK_COMPARE_OP_LESS_EQUAL },
+		{ VK_COMPARE_OP_GREATER_EQUAL,	VK_COMPARE_OP_LESS_EQUAL,		VK_COMPARE_OP_ALWAYS,			VK_COMPARE_OP_LESS },
+		{ VK_COMPARE_OP_GREATER_EQUAL,	VK_COMPARE_OP_LESS,				VK_COMPARE_OP_LESS_EQUAL,		VK_COMPARE_OP_EQUAL },
+		{ VK_COMPARE_OP_GREATER_EQUAL,	VK_COMPARE_OP_ALWAYS,			VK_COMPARE_OP_LESS,				VK_COMPARE_OP_NEVER },
+		{ VK_COMPARE_OP_LESS_EQUAL,		VK_COMPARE_OP_NOT_EQUAL,		VK_COMPARE_OP_LESS_EQUAL,		VK_COMPARE_OP_LESS_EQUAL },
+		{ VK_COMPARE_OP_LESS_EQUAL,		VK_COMPARE_OP_EQUAL,			VK_COMPARE_OP_LESS,				VK_COMPARE_OP_EQUAL },
+		{ VK_COMPARE_OP_LESS_EQUAL,		VK_COMPARE_OP_GREATER,			VK_COMPARE_OP_NEVER,			VK_COMPARE_OP_LESS },
+		{ VK_COMPARE_OP_LESS_EQUAL,		VK_COMPARE_OP_GREATER_EQUAL,	VK_COMPARE_OP_EQUAL,			VK_COMPARE_OP_ALWAYS },
+		{ VK_COMPARE_OP_LESS_EQUAL,		VK_COMPARE_OP_LESS,				VK_COMPARE_OP_NOT_EQUAL,		VK_COMPARE_OP_GREATER_EQUAL },
+		{ VK_COMPARE_OP_LESS_EQUAL,		VK_COMPARE_OP_LESS_EQUAL,		VK_COMPARE_OP_GREATER_EQUAL,	VK_COMPARE_OP_NEVER },
+		{ VK_COMPARE_OP_LESS,			VK_COMPARE_OP_NOT_EQUAL,		VK_COMPARE_OP_LESS,				VK_COMPARE_OP_GREATER_EQUAL },
+		{ VK_COMPARE_OP_LESS,			VK_COMPARE_OP_EQUAL,			VK_COMPARE_OP_NEVER,			VK_COMPARE_OP_LESS_EQUAL },
+		{ VK_COMPARE_OP_LESS,			VK_COMPARE_OP_GREATER,			VK_COMPARE_OP_LESS_EQUAL,		VK_COMPARE_OP_NEVER },
+		{ VK_COMPARE_OP_LESS,			VK_COMPARE_OP_LESS_EQUAL,		VK_COMPARE_OP_GREATER,			VK_COMPARE_OP_EQUAL },
+		{ VK_COMPARE_OP_LESS,			VK_COMPARE_OP_LESS,				VK_COMPARE_OP_ALWAYS,			VK_COMPARE_OP_NOT_EQUAL },
+		{ VK_COMPARE_OP_LESS,			VK_COMPARE_OP_NEVER,			VK_COMPARE_OP_NOT_EQUAL,		VK_COMPARE_OP_ALWAYS },
+		{ VK_COMPARE_OP_NEVER,			VK_COMPARE_OP_NOT_EQUAL,		VK_COMPARE_OP_ALWAYS,			VK_COMPARE_OP_ALWAYS },
+		{ VK_COMPARE_OP_LESS,			VK_COMPARE_OP_ALWAYS,			VK_COMPARE_OP_GREATER_EQUAL,	VK_COMPARE_OP_LESS },
+		{ VK_COMPARE_OP_NEVER,			VK_COMPARE_OP_GREATER_EQUAL,	VK_COMPARE_OP_NEVER,			VK_COMPARE_OP_EQUAL },
+		{ VK_COMPARE_OP_NEVER,			VK_COMPARE_OP_NEVER,			VK_COMPARE_OP_LESS,				VK_COMPARE_OP_GREATER },
+		{ VK_COMPARE_OP_NEVER,			VK_COMPARE_OP_LESS_EQUAL,		VK_COMPARE_OP_EQUAL,			VK_COMPARE_OP_NOT_EQUAL },
+		{ VK_COMPARE_OP_NEVER,			VK_COMPARE_OP_LESS,				VK_COMPARE_OP_GREATER_EQUAL,	VK_COMPARE_OP_LESS_EQUAL },
+		{ VK_COMPARE_OP_NEVER,			VK_COMPARE_OP_ALWAYS,			VK_COMPARE_OP_GREATER,			VK_COMPARE_OP_GREATER_EQUAL },
+		{ VK_COMPARE_OP_ALWAYS,			VK_COMPARE_OP_EQUAL,			VK_COMPARE_OP_ALWAYS,			VK_COMPARE_OP_NEVER },
+		{ VK_COMPARE_OP_ALWAYS,			VK_COMPARE_OP_NEVER,			VK_COMPARE_OP_EQUAL,			VK_COMPARE_OP_LESS },
+		{ VK_COMPARE_OP_ALWAYS,			VK_COMPARE_OP_GREATER,			VK_COMPARE_OP_LESS,				VK_COMPARE_OP_ALWAYS },
+		{ VK_COMPARE_OP_ALWAYS,			VK_COMPARE_OP_ALWAYS,			VK_COMPARE_OP_NEVER,			VK_COMPARE_OP_GREATER },
+		{ VK_COMPARE_OP_ALWAYS,			VK_COMPARE_OP_LESS_EQUAL,		VK_COMPARE_OP_NOT_EQUAL,		VK_COMPARE_OP_EQUAL },
+		{ VK_COMPARE_OP_LESS_EQUAL,		VK_COMPARE_OP_NEVER,			VK_COMPARE_OP_GREATER,			VK_COMPARE_OP_NOT_EQUAL },
+		{ VK_COMPARE_OP_NEVER,			VK_COMPARE_OP_EQUAL,			VK_COMPARE_OP_LESS_EQUAL,		VK_COMPARE_OP_LESS },
+		{ VK_COMPARE_OP_EQUAL,			VK_COMPARE_OP_GREATER_EQUAL,	VK_COMPARE_OP_ALWAYS,			VK_COMPARE_OP_NEVER },
+		{ VK_COMPARE_OP_GREATER,		VK_COMPARE_OP_ALWAYS,			VK_COMPARE_OP_LESS,				VK_COMPARE_OP_NOT_EQUAL },
+		{ VK_COMPARE_OP_GREATER,		VK_COMPARE_OP_NEVER,			VK_COMPARE_OP_GREATER_EQUAL,	VK_COMPARE_OP_EQUAL },
+		{ VK_COMPARE_OP_EQUAL,			VK_COMPARE_OP_ALWAYS,			VK_COMPARE_OP_EQUAL,			VK_COMPARE_OP_LESS_EQUAL },
+		{ VK_COMPARE_OP_LESS_EQUAL,		VK_COMPARE_OP_GREATER,			VK_COMPARE_OP_ALWAYS,			VK_COMPARE_OP_GREATER },
+		{ VK_COMPARE_OP_NEVER,			VK_COMPARE_OP_NOT_EQUAL,		VK_COMPARE_OP_NOT_EQUAL,		VK_COMPARE_OP_NEVER },
+		{ VK_COMPARE_OP_ALWAYS,			VK_COMPARE_OP_LESS,				VK_COMPARE_OP_GREATER,			VK_COMPARE_OP_GREATER },
+		{ VK_COMPARE_OP_ALWAYS,			VK_COMPARE_OP_NOT_EQUAL,		VK_COMPARE_OP_NEVER,			VK_COMPARE_OP_NOT_EQUAL },
+		{ VK_COMPARE_OP_GREATER_EQUAL,	VK_COMPARE_OP_ALWAYS,			VK_COMPARE_OP_NOT_EQUAL,		VK_COMPARE_OP_ALWAYS },
+		{ VK_COMPARE_OP_LESS_EQUAL,		VK_COMPARE_OP_ALWAYS,			VK_COMPARE_OP_LESS_EQUAL,		VK_COMPARE_OP_GREATER },
+		{ VK_COMPARE_OP_LESS,			VK_COMPARE_OP_GREATER_EQUAL,	VK_COMPARE_OP_LESS,				VK_COMPARE_OP_GREATER },
+		{ VK_COMPARE_OP_ALWAYS,			VK_COMPARE_OP_EQUAL,			VK_COMPARE_OP_LESS_EQUAL,		VK_COMPARE_OP_GREATER_EQUAL },
+		{ VK_COMPARE_OP_ALWAYS,			VK_COMPARE_OP_GREATER_EQUAL,	VK_COMPARE_OP_GREATER_EQUAL,	VK_COMPARE_OP_LESS_EQUAL },
+		{ VK_COMPARE_OP_GREATER_EQUAL,	VK_COMPARE_OP_GREATER_EQUAL,	VK_COMPARE_OP_NEVER,			VK_COMPARE_OP_LESS },
+		{ VK_COMPARE_OP_GREATER_EQUAL,	VK_COMPARE_OP_NEVER,			VK_COMPARE_OP_GREATER,			VK_COMPARE_OP_NEVER },
+		{ VK_COMPARE_OP_LESS,			VK_COMPARE_OP_GREATER,			VK_COMPARE_OP_EQUAL,			VK_COMPARE_OP_EQUAL },
+		{ VK_COMPARE_OP_NEVER,			VK_COMPARE_OP_GREATER,			VK_COMPARE_OP_ALWAYS,			VK_COMPARE_OP_GREATER_EQUAL },
+		{ VK_COMPARE_OP_NOT_EQUAL,		VK_COMPARE_OP_NOT_EQUAL,		VK_COMPARE_OP_GREATER,			VK_COMPARE_OP_ALWAYS },
+		{ VK_COMPARE_OP_NOT_EQUAL,		VK_COMPARE_OP_LESS_EQUAL,		VK_COMPARE_OP_NOT_EQUAL,		VK_COMPARE_OP_GREATER }
+	};
+
+	de::MovePtr<tcu::TestCaseGroup> depthTests (new tcu::TestCaseGroup(testCtx, "depth", "Depth tests"));
+
+	// Tests for format features
+	{
+		de::MovePtr<tcu::TestCaseGroup> formatFeaturesTests (new tcu::TestCaseGroup(testCtx, "format_features", "Checks depth format features"));
+
+		// Formats that must be supported in all implementations
+		addFunctionCase(formatFeaturesTests.get(),
+						"support_d16_unorm",
+						"Tests if VK_FORMAT_D16_UNORM is supported as depth/stencil attachment format",
+						testSupportsDepthStencilFormat,
+						VK_FORMAT_D16_UNORM);
+
+		// Sets where at least one of the formats must be supported
+		const VkFormat depthOnlyFormats[]		= { VK_FORMAT_D24_UNORM, VK_FORMAT_D32_SFLOAT };
+		const VkFormat depthStencilFormats[]	= { VK_FORMAT_D24_UNORM_S8_UINT, VK_FORMAT_D32_SFLOAT_S8_UINT };
+
+		addFunctionCase(formatFeaturesTests.get(),
+						"support_d24_unorm_or_d32_sfloat",
+						"Tests if any of VK_FORMAT_D24_UNORM or VK_FORMAT_D32_SFLOAT are supported as depth/stencil attachment format",
+						testSupportsAtLeastOneDepthStencilFormat,
+						std::vector<VkFormat>(depthOnlyFormats, depthOnlyFormats + DE_LENGTH_OF_ARRAY(depthOnlyFormats)));
+
+		addFunctionCase(formatFeaturesTests.get(),
+						"support_d24_unorm_s8_uint_or_d32_sfloat_s8_uint",
+						"Tests if any of VK_FORMAT_D24_UNORM_S8_UINT or VK_FORMAT_D32_SFLOAT_S8_UINT are supported as depth/stencil attachment format",
+						testSupportsAtLeastOneDepthStencilFormat,
+						std::vector<VkFormat>(depthStencilFormats, depthStencilFormats + DE_LENGTH_OF_ARRAY(depthStencilFormats)));
+
+		depthTests->addChild(formatFeaturesTests.release());
+	}
+
+	// Tests for format and compare operators
+	{
+		de::MovePtr<tcu::TestCaseGroup> formatTests (new tcu::TestCaseGroup(testCtx, "format", "Uses different depth formats"));
+
+		// One sub-group per depth format, each containing every compare-op combination.
+		for (size_t formatNdx = 0; formatNdx < DE_LENGTH_OF_ARRAY(depthFormats); formatNdx++)
+		{
+			de::MovePtr<tcu::TestCaseGroup>	formatTest		(new tcu::TestCaseGroup(testCtx,
+																					getFormatCaseName(depthFormats[formatNdx]).c_str(),
+																					(std::string("Uses format ") + getFormatName(depthFormats[formatNdx])).c_str()));
+			de::MovePtr<tcu::TestCaseGroup>	compareOpsTests	(new tcu::TestCaseGroup(testCtx, "compare_ops", "Combines depth compare operators"));
+
+			for (size_t opsNdx = 0; opsNdx < DE_LENGTH_OF_ARRAY(depthOps); opsNdx++)
+			{
+				compareOpsTests->addChild(new DepthTest(testCtx,
+														getCompareOpsName(depthOps[opsNdx]),
+														getCompareOpsDescription(depthOps[opsNdx]),
+														depthFormats[formatNdx],
+														depthOps[opsNdx]));
+			}
+			formatTest->addChild(compareOpsTests.release());
+			formatTests->addChild(formatTest.release());
+		}
+		depthTests->addChild(formatTests.release());
+	}
+
+	return depthTests.release();
+}
+
+} // pipeline
+} // vkt
--- /dev/null
+#ifndef _VKTPIPELINEDEPTHTESTS_HPP
+#define _VKTPIPELINEDEPTHTESTS_HPP
+/*------------------------------------------------------------------------
+ * Vulkan Conformance Tests
+ * ------------------------
+ *
+ * Copyright (c) 2015 The Khronos Group Inc.
+ * Copyright (c) 2015 Imagination Technologies Ltd.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and/or associated documentation files (the
+ * "Materials"), to deal in the Materials without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Materials, and to
+ * permit persons to whom the Materials are furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice(s) and this permission notice shall be included
+ * in all copies or substantial portions of the Materials.
+ *
+ * The Materials are Confidential Information as defined by the
+ * Khronos Membership Agreement until designated non-confidential by Khronos,
+ * at which point this condition clause shall be removed.
+ *
+ * THE MATERIALS ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * MATERIALS OR THE USE OR OTHER DEALINGS IN THE MATERIALS.
+ *
+ *//*!
+ * \file
+ * \brief Depth Tests
+ *//*--------------------------------------------------------------------*/
+
+#include "vktTestCase.hpp"
+
+namespace vkt
+{
+namespace pipeline
+{
+
+tcu::TestCaseGroup* createDepthTests (tcu::TestContext& testCtx);
+
+} // pipeline
+} // vkt
+
+#endif // _VKTPIPELINEDEPTHTESTS_HPP
--- /dev/null
+/*------------------------------------------------------------------------
+ * Vulkan Conformance Tests
+ * ------------------------
+ *
+ * Copyright (c) 2015 The Khronos Group Inc.
+ * Copyright (c) 2015 Imagination Technologies Ltd.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and/or associated documentation files (the
+ * "Materials"), to deal in the Materials without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Materials, and to
+ * permit persons to whom the Materials are furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice(s) and this permission notice shall be included
+ * in all copies or substantial portions of the Materials.
+ *
+ * The Materials are Confidential Information as defined by the
+ * Khronos Membership Agreement until designated non-confidential by Khronos,
+ * at which point this condition clause shall be removed.
+ *
+ * THE MATERIALS ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * MATERIALS OR THE USE OR OTHER DEALINGS IN THE MATERIALS.
+ *
+ *//*!
+ * \file
+ * \brief Utilities for images.
+ *//*--------------------------------------------------------------------*/
+
+#include "vktPipelineImageUtil.hpp"
+#include "vkImageUtil.hpp"
+#include "vkMemUtil.hpp"
+#include "vkQueryUtil.hpp"
+#include "vkRefUtil.hpp"
+#include "tcuTextureUtil.hpp"
+
+namespace vkt
+{
+namespace pipeline
+{
+
+using namespace vk;
+
+// Copies the contents of a color attachment image into a host-visible
+// buffer and returns the pixel data as a tcu::TextureLevel.
+// The image is expected to be in VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL
+// on entry (see imageBarrier.oldLayout below); this function transitions
+// it to VK_IMAGE_LAYOUT_TRANSFER_SOURCE_OPTIMAL for the copy.
+de::MovePtr<tcu::TextureLevel> readColorAttachment (const vk::DeviceInterface&	vk,
+													vk::VkDevice				device,
+													vk::VkQueue					queue,
+													deUint32					queueFamilyIndex,
+													vk::Allocator&				allocator,
+													vk::VkImage					image,
+													vk::VkFormat				format,
+													const tcu::IVec2&			renderSize)
+{
+	Move<VkBuffer>					buffer;
+	de::MovePtr<Allocation>			bufferAlloc;
+	Move<VkCmdPool>					cmdPool;
+	Move<VkCmdBuffer>				cmdBuffer;
+	Move<VkFence>					fence;
+	const tcu::TextureFormat		tcuFormat		= mapVkFormat(format);
+	// Tightly packed: width * height * bytes-per-pixel.
+	const VkDeviceSize				pixelDataSize	= renderSize.x() * renderSize.y() * tcuFormat.getPixelSize();
+	de::MovePtr<tcu::TextureLevel>	resultLevel		(new tcu::TextureLevel(tcuFormat, renderSize.x(), renderSize.y()));
+
+	// Create destination buffer
+	{
+		const VkBufferCreateInfo bufferParams =
+		{
+			VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO,		// VkStructureType		sType;
+			DE_NULL,									// const void*			pNext;
+			pixelDataSize,								// VkDeviceSize			size;
+			VK_BUFFER_USAGE_TRANSFER_DESTINATION_BIT,	// VkBufferUsageFlags	usage;
+			0u,											// VkBufferCreateFlags	flags;
+			VK_SHARING_MODE_EXCLUSIVE,					// VkSharingMode		sharingMode;
+			0u,											// deUint32				queueFamilyCount;
+			DE_NULL,									// const deUint32*		pQueueFamilyIndices;
+		};
+
+		buffer		= createBuffer(vk, device, &bufferParams);
+		// Host-visible so the result can be read back without another copy.
+		bufferAlloc	= allocator.allocate(getBufferMemoryRequirements(vk, device, *buffer), MemoryRequirement::HostVisible);
+		VK_CHECK(vk.bindBufferMemory(device, *buffer, bufferAlloc->getMemory(), bufferAlloc->getOffset()));
+	}
+
+	// Create command pool and buffer
+	{
+		const VkCmdPoolCreateInfo cmdPoolParams =
+		{
+			VK_STRUCTURE_TYPE_CMD_POOL_CREATE_INFO,		// VkStructureType		sType;
+			DE_NULL,									// const void*			pNext;
+			queueFamilyIndex,							// deUint32				queueFamilyIndex;
+			VK_CMD_POOL_CREATE_TRANSIENT_BIT			// VkCmdPoolCreateFlags	flags;
+		};
+
+		cmdPool = createCommandPool(vk, device, &cmdPoolParams);
+
+		const VkCmdBufferCreateInfo cmdBufferParams =
+		{
+			VK_STRUCTURE_TYPE_CMD_BUFFER_CREATE_INFO,	// VkStructureType			sType;
+			DE_NULL,									// const void*				pNext;
+			*cmdPool,									// VkCmdPool				cmdPool;
+			VK_CMD_BUFFER_LEVEL_PRIMARY,				// VkCmdBufferLevel			level;
+			0u,											// VkCmdBufferCreateFlags	flags;
+		};
+
+		cmdBuffer = createCommandBuffer(vk, device, &cmdBufferParams);
+	}
+
+	// Create fence
+	{
+		const VkFenceCreateInfo fenceParams =
+		{
+			VK_STRUCTURE_TYPE_FENCE_CREATE_INFO,	// VkStructureType		sType;
+			DE_NULL,								// const void*			pNext;
+			0u										// VkFenceCreateFlags	flags;
+		};
+
+		fence = createFence(vk, device, &fenceParams);
+	}
+
+	// Barriers for copying image to buffer
+
+	// Makes color attachment writes visible to the transfer stage and
+	// transitions the image layout for use as a transfer source.
+	const VkImageMemoryBarrier imageBarrier =
+	{
+		VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,		// VkStructureType			sType;
+		DE_NULL,									// const void*				pNext;
+		VK_MEMORY_OUTPUT_COLOR_ATTACHMENT_BIT,		// VkMemoryOutputFlags		outputMask;
+		VK_MEMORY_INPUT_TRANSFER_BIT,				// VkMemoryInputFlags		inputMask;
+		VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL,	// VkImageLayout			oldLayout;
+		VK_IMAGE_LAYOUT_TRANSFER_SOURCE_OPTIMAL,	// VkImageLayout			newLayout;
+		VK_QUEUE_FAMILY_IGNORED,					// deUint32					srcQueueFamilyIndex;
+		VK_QUEUE_FAMILY_IGNORED,					// deUint32					destQueueFamilyIndex;
+		image,										// VkImage					image;
+		{											// VkImageSubresourceRange	subresourceRange;
+			VK_IMAGE_ASPECT_COLOR,	// VkImageAspect	aspect;
+			0u,						// deUint32			baseMipLevel;
+			1u,						// deUint32			mipLevels;
+			0u,						// deUint32			baseArraySlice;
+			1u						// deUint32			arraySize;
+		}
+	};
+
+	// Makes the transfer write to the buffer visible to host reads.
+	const VkBufferMemoryBarrier bufferBarrier =
+	{
+		VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER,	// VkStructureType		sType;
+		DE_NULL,									// const void*			pNext;
+		VK_MEMORY_OUTPUT_TRANSFER_BIT,				// VkMemoryOutputFlags	outputMask;
+		VK_MEMORY_INPUT_HOST_READ_BIT,				// VkMemoryInputFlags	inputMask;
+		VK_QUEUE_FAMILY_IGNORED,					// deUint32				srcQueueFamilyIndex;
+		VK_QUEUE_FAMILY_IGNORED,					// deUint32				destQueueFamilyIndex;
+		*buffer,									// VkBuffer				buffer;
+		0u,											// VkDeviceSize			offset;
+		pixelDataSize								// VkDeviceSize			size;
+	};
+
+	const VkCmdBufferBeginInfo cmdBufferBeginInfo =
+	{
+		VK_STRUCTURE_TYPE_CMD_BUFFER_BEGIN_INFO,											// VkStructureType			sType;
+		DE_NULL,																			// const void*				pNext;
+		VK_CMD_BUFFER_OPTIMIZE_SMALL_BATCH_BIT | VK_CMD_BUFFER_OPTIMIZE_ONE_TIME_SUBMIT_BIT,	// VkCmdBufferOptimizeFlags	flags;
+		DE_NULL,																			// VkRenderPass				renderPass;
+		DE_NULL																				// VkFramebuffer			framebuffer;
+	};
+
+	const void* const	imageBarrierPtr		= &imageBarrier;
+	const void* const	bufferBarrierPtr	= &bufferBarrier;
+
+	// Copy image to buffer
+
+	const VkBufferImageCopy copyRegion =
+	{
+		0u,											// VkDeviceSize			bufferOffset;
+		(deUint32)renderSize.x(),					// deUint32				bufferRowLength;
+		(deUint32)renderSize.y(),					// deUint32				bufferImageHeight;
+		{ VK_IMAGE_ASPECT_COLOR, 0u, 0u },			// VkImageSubresource	imageSubresource;
+		{ 0, 0, 0 },								// VkOffset3D			imageOffset;
+		{ renderSize.x(), renderSize.y(), 1 }		// VkExtent3D			imageExtent;
+	};
+
+	VK_CHECK(vk.beginCommandBuffer(*cmdBuffer, &cmdBufferBeginInfo));
+	vk.cmdPipelineBarrier(*cmdBuffer, VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_FALSE, 1, &imageBarrierPtr);
+	vk.cmdCopyImageToBuffer(*cmdBuffer, image, VK_IMAGE_LAYOUT_TRANSFER_SOURCE_OPTIMAL, *buffer, 1, &copyRegion);
+	vk.cmdPipelineBarrier(*cmdBuffer, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, VK_FALSE, 1, &bufferBarrierPtr);
+	VK_CHECK(vk.endCommandBuffer(*cmdBuffer));
+
+	// Submit the copy and wait until the buffer contents are ready.
+	VK_CHECK(vk.queueSubmit(queue, 1, &cmdBuffer.get(), *fence));
+	VK_CHECK(vk.waitForFences(device, 1, &fence.get(), 0, ~(0ull) /* infinity */));
+
+	// Read buffer data
+	invalidateMappedMemoryRange(vk, device, bufferAlloc->getMemory(), bufferAlloc->getOffset(), pixelDataSize);
+	tcu::copy(*resultLevel, tcu::ConstPixelBufferAccess(resultLevel->getFormat(), resultLevel->getSize(), bufferAlloc->getHostPtr()));
+
+	return resultLevel;
+}
+
+} // pipeline
+} // vkt
--- /dev/null
+#ifndef _VKTPIPELINEIMAGEUTIL_HPP
+#define _VKTPIPELINEIMAGEUTIL_HPP
+/*------------------------------------------------------------------------
+ * Vulkan Conformance Tests
+ * ------------------------
+ *
+ * Copyright (c) 2015 The Khronos Group Inc.
+ * Copyright (c) 2015 Imagination Technologies Ltd.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and/or associated documentation files (the
+ * "Materials"), to deal in the Materials without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Materials, and to
+ * permit persons to whom the Materials are furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice(s) and this permission notice shall be included
+ * in all copies or substantial portions of the Materials.
+ *
+ * The Materials are Confidential Information as defined by the
+ * Khronos Membership Agreement until designated non-confidential by Khronos,
+ * at which point this condition clause shall be removed.
+ *
+ * THE MATERIALS ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * MATERIALS OR THE USE OR OTHER DEALINGS IN THE MATERIALS.
+ *
+ *//*!
+ * \file
+ * \brief Utilities for images.
+ *//*--------------------------------------------------------------------*/
+
+#include "tcuDefs.hpp"
+#include "vkDefs.hpp"
+#include "vkPlatform.hpp"
+#include "vkMemUtil.hpp"
+#include "vkRef.hpp"
+#include "tcuTexture.hpp"
+
+namespace vkt
+{
+namespace pipeline
+{
+
+/*--------------------------------------------------------------------*//*!
+ * Gets a tcu::TextureLevel initialized with data from a VK color
+ * attachment.
+ *
+ * The VkImage must be non-multisampled and able to be used as a source
+ * operand for transfer operations.
+ *
+ * \param vk				Device interface used for the transfer commands.
+ * \param device			Device owning the image.
+ * \param queue				Queue the copy is submitted on.
+ * \param queueFamilyIndex	Family index matching \p queue.
+ * \param allocator			Allocator for the staging buffer.
+ * \param image				Source color attachment image.
+ * \param format			Format of \p image (used to size the read-back).
+ * \param renderSize		Width/height of the region to read back.
+ * \return Newly allocated texture level holding the attachment contents.
+ *//*--------------------------------------------------------------------*/
+de::MovePtr<tcu::TextureLevel> readColorAttachment (const vk::DeviceInterface& vk,
+ vk::VkDevice device,
+ vk::VkQueue queue,
+ deUint32 queueFamilyIndex,
+ vk::Allocator& allocator,
+ vk::VkImage image,
+ vk::VkFormat format,
+ const tcu::IVec2& renderSize);
+
+} // pipeline
+} // vkt
+
+#endif // _VKTPIPELINEIMAGEUTIL_HPP
--- /dev/null
+/*------------------------------------------------------------------------
+ * Vulkan Conformance Tests
+ * ------------------------
+ *
+ * Copyright (c) 2015 The Khronos Group Inc.
+ * Copyright (c) 2015 Imagination Technologies Ltd.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and/or associated documentation files (the
+ * "Materials"), to deal in the Materials without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Materials, and to
+ * permit persons to whom the Materials are furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice(s) and this permission notice shall be included
+ * in all copies or substantial portions of the Materials.
+ *
+ * The Materials are Confidential Information as defined by the
+ * Khronos Membership Agreement until designated non-confidential by Khronos,
+ * at which point this condition clause shall be removed.
+ *
+ * THE MATERIALS ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * MATERIALS OR THE USE OR OTHER DEALINGS IN THE MATERIALS.
+ *
+ *//*!
+ * \file
+ * \brief Reference renderer.
+ *//*--------------------------------------------------------------------*/
+
+#include "vktPipelineReferenceRenderer.hpp"
+#include "vktPipelineClearUtil.hpp"
+#include "rrShadingContext.hpp"
+#include "rrVertexAttrib.hpp"
+
+namespace vkt
+{
+namespace pipeline
+{
+
+using namespace vk;
+
+// Translates a Vulkan compare op to the equivalent reference-rasterizer
+// test function.  Unknown values trigger an assert in debug builds and
+// map to rr::TESTFUNC_LAST.
+rr::TestFunc mapVkCompareOp (VkCompareOp compareFunc)
+{
+	rr::TestFunc testFunc = rr::TESTFUNC_LAST;
+
+	switch (compareFunc)
+	{
+		case VK_COMPARE_OP_NEVER:			testFunc = rr::TESTFUNC_NEVER;		break;
+		case VK_COMPARE_OP_LESS:			testFunc = rr::TESTFUNC_LESS;		break;
+		case VK_COMPARE_OP_EQUAL:			testFunc = rr::TESTFUNC_EQUAL;		break;
+		case VK_COMPARE_OP_LESS_EQUAL:		testFunc = rr::TESTFUNC_LEQUAL;		break;
+		case VK_COMPARE_OP_GREATER:			testFunc = rr::TESTFUNC_GREATER;	break;
+		case VK_COMPARE_OP_NOT_EQUAL:		testFunc = rr::TESTFUNC_NOTEQUAL;	break;
+		case VK_COMPARE_OP_GREATER_EQUAL:	testFunc = rr::TESTFUNC_GEQUAL;		break;
+		case VK_COMPARE_OP_ALWAYS:			testFunc = rr::TESTFUNC_ALWAYS;		break;
+		default:
+			DE_ASSERT(false);
+			break;
+	}
+
+	return testFunc;
+}
+
+// Translates a Vulkan primitive topology to the equivalent
+// reference-rasterizer primitive type.  Unknown values trigger an assert
+// in debug builds and map to rr::PRIMITIVETYPE_LAST.
+rr::PrimitiveType mapVkPrimitiveTopology (VkPrimitiveTopology primitiveTopology)
+{
+	rr::PrimitiveType primitiveType = rr::PRIMITIVETYPE_LAST;
+
+	switch (primitiveTopology)
+	{
+		case VK_PRIMITIVE_TOPOLOGY_POINT_LIST:			primitiveType = rr::PRIMITIVETYPE_POINTS;					break;
+		case VK_PRIMITIVE_TOPOLOGY_LINE_LIST:			primitiveType = rr::PRIMITIVETYPE_LINES;					break;
+		case VK_PRIMITIVE_TOPOLOGY_LINE_STRIP:			primitiveType = rr::PRIMITIVETYPE_LINE_STRIP;				break;
+		case VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST:		primitiveType = rr::PRIMITIVETYPE_TRIANGLES;				break;
+		case VK_PRIMITIVE_TOPOLOGY_TRIANGLE_FAN:		primitiveType = rr::PRIMITIVETYPE_TRIANGLE_FAN;				break;
+		case VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP:		primitiveType = rr::PRIMITIVETYPE_TRIANGLE_STRIP;			break;
+		case VK_PRIMITIVE_TOPOLOGY_LINE_LIST_ADJ:		primitiveType = rr::PRIMITIVETYPE_LINES_ADJACENCY;			break;
+		case VK_PRIMITIVE_TOPOLOGY_LINE_STRIP_ADJ:		primitiveType = rr::PRIMITIVETYPE_LINE_STRIP_ADJACENCY;		break;
+		case VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST_ADJ:	primitiveType = rr::PRIMITIVETYPE_TRIANGLES_ADJACENCY;		break;
+		case VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP_ADJ:	primitiveType = rr::PRIMITIVETYPE_TRIANGLE_STRIP_ADJACENCY;	break;
+		default:
+			DE_ASSERT(false);
+			break;
+	}
+
+	return primitiveType;
+}
+
+// Sets up the color buffer, the single-sample resolve buffer and, when a
+// depth/stencil format is given, a combined or separate depth/stencil
+// buffer, then clears all of them to the default clear values.
+// Integer color formats are forced to a single sample (see actualSamples
+// below) — presumably because multisample resolve is not defined for pure
+// integer formats; confirm against the reference rasterizer.
+ReferenceRenderer::ReferenceRenderer(int surfaceWidth,
+ int surfaceHeight,
+ int numSamples,
+ const tcu::TextureFormat& colorFormat,
+ const tcu::TextureFormat& depthStencilFormat,
+ const rr::Program* const program)
+	: m_surfaceWidth (surfaceWidth)
+	, m_surfaceHeight (surfaceHeight)
+	, m_numSamples (numSamples)
+	, m_colorFormat (colorFormat)
+	, m_depthStencilFormat (depthStencilFormat)
+	, m_program (program)
+{
+	const tcu::TextureChannelClass formatClass = tcu::getTextureChannelClass(colorFormat.type);
+	// CHANNELORDER_LAST is used as the "no depth/stencil attachment" marker.
+	const bool hasDepthStencil = (m_depthStencilFormat.order != tcu::TextureFormat::CHANNELORDER_LAST);
+	const bool hasDepthBufferOnly = (m_depthStencilFormat.order == tcu::TextureFormat::D);
+	const bool hasStencilBufferOnly = (m_depthStencilFormat.order == tcu::TextureFormat::S);
+	// Integer formats always use one sample regardless of the requested count.
+	const int actualSamples = (formatClass == tcu::TEXTURECHANNELCLASS_SIGNED_INTEGER || formatClass == tcu::TEXTURECHANNELCLASS_UNSIGNED_INTEGER)? 1: m_numSamples;
+
+	m_colorBuffer.setStorage(m_colorFormat, actualSamples, m_surfaceWidth, m_surfaceHeight);
+	m_resolveColorBuffer.setStorage(m_colorFormat, m_surfaceWidth, m_surfaceHeight);
+
+	// Clear color buffers with the clear value matching the format's
+	// channel class (integer vs. float clears use different entry points).
+	if (formatClass == tcu::TEXTURECHANNELCLASS_SIGNED_INTEGER)
+	{
+		tcu::clear(m_colorBuffer.getAccess(), defaultClearColorInt(m_colorFormat));
+		tcu::clear(m_resolveColorBuffer.getAccess(), defaultClearColorInt(m_colorFormat));
+	}
+	else if (formatClass == tcu::TEXTURECHANNELCLASS_UNSIGNED_INTEGER)
+	{
+		tcu::clear(m_colorBuffer.getAccess(), defaultClearColorUint(m_colorFormat));
+		tcu::clear(m_resolveColorBuffer.getAccess(), defaultClearColorUint(m_colorFormat));
+	}
+	else
+	{
+		tcu::clear(m_colorBuffer.getAccess(), defaultClearColorFloat(m_colorFormat));
+		tcu::clear(m_resolveColorBuffer.getAccess(), defaultClearColorFloat(m_colorFormat));
+	}
+
+	if (hasDepthStencil)
+	{
+		if (hasDepthBufferOnly)
+		{
+			m_depthStencilBuffer.setStorage(m_depthStencilFormat, actualSamples, surfaceWidth, surfaceHeight);
+			tcu::clearDepth(m_depthStencilBuffer.getAccess(), defaultClearDepth());
+
+			// Depth-only: two-argument RenderTarget (color + depth).
+			m_renderTarget = new rr::RenderTarget(rr::MultisamplePixelBufferAccess::fromMultisampleAccess(m_colorBuffer.getAccess()),
+ rr::MultisamplePixelBufferAccess::fromMultisampleAccess(m_depthStencilBuffer.getAccess()));
+		}
+		else if (hasStencilBufferOnly)
+		{
+			m_depthStencilBuffer.setStorage(m_depthStencilFormat, actualSamples, surfaceWidth, surfaceHeight);
+			tcu::clearStencil(m_depthStencilBuffer.getAccess(), defaultClearStencil());
+
+			// Stencil-only: pass an empty access as the depth buffer so the
+			// stencil buffer lands in the third (stencil) slot.
+			m_renderTarget = new rr::RenderTarget(rr::MultisamplePixelBufferAccess::fromMultisampleAccess(m_colorBuffer.getAccess()),
+ rr::MultisamplePixelBufferAccess(),
+ rr::MultisamplePixelBufferAccess::fromMultisampleAccess(m_depthStencilBuffer.getAccess()));
+		}
+		else
+		{
+			// Combined depth+stencil: the same buffer is bound to both the
+			// depth and stencil slots of the render target.
+			m_depthStencilBuffer.setStorage(m_depthStencilFormat, actualSamples, surfaceWidth, surfaceHeight);
+
+			tcu::clearDepth(m_depthStencilBuffer.getAccess(), defaultClearDepth());
+			tcu::clearStencil(m_depthStencilBuffer.getAccess(), defaultClearStencil());
+
+			m_renderTarget = new rr::RenderTarget(rr::MultisamplePixelBufferAccess::fromMultisampleAccess(m_colorBuffer.getAccess()),
+ rr::MultisamplePixelBufferAccess::fromMultisampleAccess(m_depthStencilBuffer.getAccess()),
+ rr::MultisamplePixelBufferAccess::fromMultisampleAccess(m_depthStencilBuffer.getAccess()));
+		}
+	}
+	else
+	{
+		// Color-only render target.
+		m_renderTarget = new rr::RenderTarget(rr::MultisamplePixelBufferAccess::fromMultisampleAccess(m_colorBuffer.getAccess()));
+	}
+}
+
+ReferenceRenderer::~ReferenceRenderer (void)
+{
+	// m_renderTarget is the only heap-owned member (allocated in the ctor).
+	delete m_renderTarget;
+}
+
+// Rasterizes the given vertex list with the reference renderer.  The
+// interleaved Vertex4RGBA input is de-interleaved into separate position
+// and color attribute arrays before issuing the draw command.
+void ReferenceRenderer::draw (const rr::RenderState& renderState,
+ const rr::PrimitiveType primitive,
+ const std::vector<Vertex4RGBA>& vertexBuffer)
+{
+	const rr::PrimitiveList primitives(primitive, (int)vertexBuffer.size(), 0);
+
+	std::vector<tcu::Vec4> positions;
+	std::vector<tcu::Vec4> colors;
+
+	for (size_t vertexNdx = 0; vertexNdx < vertexBuffer.size(); vertexNdx++)
+	{
+		const Vertex4RGBA& v = vertexBuffer[vertexNdx];
+		positions.push_back(v.position);
+		colors.push_back(v.color);
+	}
+
+	rr::VertexAttrib vertexAttribs[2];
+
+	// Position attribute
+	vertexAttribs[0].type = rr::VERTEXATTRIBTYPE_FLOAT;
+	vertexAttribs[0].size = 4;
+	vertexAttribs[0].pointer = positions.data();
+	// Color attribute (bound from Vertex4RGBA::color; previously
+	// mislabeled "UV")
+	vertexAttribs[1].type = rr::VERTEXATTRIBTYPE_FLOAT;
+	vertexAttribs[1].size = 4;
+	vertexAttribs[1].pointer = colors.data();
+
+	rr::DrawCommand drawQuadCommand(renderState, *m_renderTarget, *m_program, 2, vertexAttribs, primitives);
+
+	m_renderer.draw(drawQuadCommand);
+}
+
+// Resolves the (possibly multisampled) color buffer into the
+// single-sample resolve buffer and returns access to the resolved image.
+// The resolve is redone on every call.
+tcu::PixelBufferAccess ReferenceRenderer::getAccess (void)
+{
+	const rr::MultisampleConstPixelBufferAccess msColorAccess = rr::MultisampleConstPixelBufferAccess::fromMultisampleAccess(m_colorBuffer.getAccess());
+
+	rr::resolveMultisampleColorBuffer(m_resolveColorBuffer.getAccess(), msColorAccess);
+	return m_resolveColorBuffer.getAccess();
+}
+
+// Returns a viewport covering the whole render surface.
+const rr::ViewportState ReferenceRenderer::getViewportState (void) const
+{
+	return rr::ViewportState(rr::WindowRectangle(0, 0, m_surfaceWidth, m_surfaceHeight));
+}
+
+} // pipeline
+} // vkt
--- /dev/null
+#ifndef _VKTPIPELINEREFERENCERENDERER_HPP
+#define _VKTPIPELINEREFERENCERENDERER_HPP
+/*------------------------------------------------------------------------
+ * Vulkan Conformance Tests
+ * ------------------------
+ *
+ * Copyright (c) 2015 The Khronos Group Inc.
+ * Copyright (c) 2015 Imagination Technologies Ltd.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and/or associated documentation files (the
+ * "Materials"), to deal in the Materials without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Materials, and to
+ * permit persons to whom the Materials are furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice(s) and this permission notice shall be included
+ * in all copies or substantial portions of the Materials.
+ *
+ * The Materials are Confidential Information as defined by the
+ * Khronos Membership Agreement until designated non-confidential by Khronos,
+ * at which point this condition clause shall be removed.
+ *
+ * THE MATERIALS ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * MATERIALS OR THE USE OR OTHER DEALINGS IN THE MATERIALS.
+ *
+ *//*!
+ * \file
+ * \brief Reference renderer.
+ *//*--------------------------------------------------------------------*/
+
+#include "vkDefs.hpp"
+#include "vktPipelineVertexUtil.hpp"
+#include "tcuVector.hpp"
+#include "tcuVectorType.hpp"
+#include "tcuTexture.hpp"
+#include "tcuTextureUtil.hpp"
+#include "rrRenderState.hpp"
+#include "rrRenderer.hpp"
+#include <cstring>
+
+namespace vkt
+{
+
+namespace pipeline
+{
+
+// Pass-through vertex shader for the reference rasterizer: reads a vec4
+// position (attribute 0) and a vec4 color (attribute 1) and forwards both
+// unchanged; the position is also used directly as the clip position.
+class ColorVertexShader : public rr::VertexShader
+{
+public:
+	ColorVertexShader (void) : rr::VertexShader(2, 2)	// 2 inputs, 2 outputs
+	{
+		m_inputs[0].type = rr::GENERICVECTYPE_FLOAT;
+		m_inputs[1].type = rr::GENERICVECTYPE_FLOAT;
+
+		m_outputs[0].type = rr::GENERICVECTYPE_FLOAT;
+		m_outputs[1].type = rr::GENERICVECTYPE_FLOAT;
+	}
+
+	virtual ~ColorVertexShader (void) {}
+
+	// Copies position and color through to the outputs for each packet.
+	virtual void shadeVertices (const rr::VertexAttrib* inputs,
+ rr::VertexPacket* const* packets,
+ const int numPackets) const
+	{
+		tcu::Vec4 position;
+		tcu::Vec4 color;
+
+		for (int packetNdx = 0; packetNdx < numPackets; packetNdx++)
+		{
+			rr::VertexPacket* const packet = packets[packetNdx];
+
+			readVertexAttrib(position, inputs[0], packet->instanceNdx, packet->vertexNdx);
+			readVertexAttrib(color, inputs[1], packet->instanceNdx, packet->vertexNdx);
+
+			packet->outputs[0] = position;
+			packet->outputs[1] = color;
+			packet->position = position;
+		}
+	}
+};
+
+// Fragment shader for the reference rasterizer: writes the interpolated
+// vertex color (varying 1) to output 0, scaled/biased to the color
+// format's storage range, and — when a depth buffer is present — writes
+// the interpolated position z (varying 0) as fragment depth.
+class ColorFragmentShader : public rr::FragmentShader
+{
+private:
+	const tcu::TextureFormat m_colorFormat;
+	const tcu::TextureFormatInfo m_colorFormatInfo;		// lookup scale/bias for m_colorFormat
+	const tcu::TextureFormat m_depthStencilFormat;
+
+public:
+	ColorFragmentShader (const tcu::TextureFormat& colorFormat,
+ const tcu::TextureFormat& depthStencilFormat)
+		: rr::FragmentShader (2, 1)	// 2 varyings in, 1 color output
+		, m_colorFormat (colorFormat)
+		, m_colorFormatInfo (tcu::getTextureFormatInfo(colorFormat))
+		, m_depthStencilFormat (depthStencilFormat)
+	{
+		const tcu::TextureChannelClass channelClass = tcu::getTextureChannelClass(m_colorFormat.type);
+
+		m_inputs[0].type = rr::GENERICVECTYPE_FLOAT;
+		m_inputs[1].type = rr::GENERICVECTYPE_FLOAT;
+		// Output generic type must match the color format's channel class.
+		m_outputs[0].type = (channelClass == tcu::TEXTURECHANNELCLASS_SIGNED_INTEGER)? rr::GENERICVECTYPE_INT32 :
+ (channelClass == tcu::TEXTURECHANNELCLASS_UNSIGNED_INTEGER)? rr::GENERICVECTYPE_UINT32
+ : rr::GENERICVECTYPE_FLOAT;
+	}
+
+	virtual ~ColorFragmentShader (void) {}
+
+	virtual void shadeFragments (rr::FragmentPacket* packets,
+ const int numPackets,
+ const rr::FragmentShadingContext& context) const
+	{
+		for (int packetNdx = 0; packetNdx < numPackets; packetNdx++)
+		{
+			const rr::FragmentPacket& packet = packets[packetNdx];
+
+			if (m_depthStencilFormat.order == tcu::TextureFormat::D || m_depthStencilFormat.order == tcu::TextureFormat::DS)
+			{
+				// NOTE(review): depth is taken from the raw interpolated
+				// position z — assumes the test geometry provides z already
+				// in the depth range; confirm against the test cases.
+				for (int fragNdx = 0; fragNdx < 4; fragNdx++)
+				{
+					const tcu::Vec4 vtxPosition = rr::readVarying<float>(packet, context, 0, fragNdx);
+					rr::writeFragmentDepth(context, packetNdx, fragNdx, 0, vtxPosition.z());
+				}
+			}
+
+			for (int fragNdx = 0; fragNdx < 4; fragNdx++)
+			{
+				const tcu::Vec4 vtxColor = rr::readVarying<float>(packet, context, 1, fragNdx);
+
+				// Un-apply the format's lookup scale/bias so the stored
+				// value round-trips to vtxColor when sampled.
+				rr::writeFragmentOutput(context,
+ packetNdx,
+ fragNdx,
+ 0,
+ (vtxColor - m_colorFormatInfo.lookupBias) / m_colorFormatInfo.lookupScale);
+			}
+		}
+	}
+};
+
+// Software reference renderer used to produce expected images for the
+// pipeline tests.  Owns color, resolve and optional depth/stencil buffers
+// and a render target built over them (see the constructor definition).
+class ReferenceRenderer
+{
+public:
+	// A depthStencilFormat with order CHANNELORDER_LAST means "no
+	// depth/stencil attachment".  The program must outlive this object.
+	ReferenceRenderer (int surfaceWidth,
+ int surfaceHeight,
+ int numSamples,
+ const tcu::TextureFormat& colorFormat,
+ const tcu::TextureFormat& depthStencilFormat,
+ const rr::Program* const program);
+
+	virtual ~ReferenceRenderer (void);
+
+	// Rasterizes vertexBuffer with the given state and primitive type.
+	void draw (const rr::RenderState& renderState,
+ const rr::PrimitiveType primitive,
+ const std::vector<Vertex4RGBA>& vertexBuffer);
+	// Returns access to the resolved (single-sample) color image.
+	tcu::PixelBufferAccess getAccess (void);
+	// Full-surface viewport.
+	const rr::ViewportState getViewportState (void) const;
+
+private:
+	rr::Renderer m_renderer;
+
+	const int m_surfaceWidth;
+	const int m_surfaceHeight;
+	const int m_numSamples;
+
+	const tcu::TextureFormat m_colorFormat;
+	const tcu::TextureFormat m_depthStencilFormat;
+
+	tcu::TextureLevel m_colorBuffer;		// multisampled render buffer
+	tcu::TextureLevel m_resolveColorBuffer;	// single-sample resolve buffer
+	tcu::TextureLevel m_depthStencilBuffer;
+
+	rr::RenderTarget* m_renderTarget;		// heap-owned; freed in dtor
+	const rr::Program* m_program;			// not owned
+};
+
+// Vulkan enum -> reference rasterizer enum translation helpers.
+rr::TestFunc mapVkCompareOp (vk::VkCompareOp compareFunc);
+rr::PrimitiveType mapVkPrimitiveTopology (vk::VkPrimitiveTopology primitiveTopology);
+
+} // pipeline
+} // vkt
+
+#endif // _VKTPIPELINEREFERENCERENDERER_HPP
--- /dev/null
+/*------------------------------------------------------------------------
+ * Vulkan Conformance Tests
+ * ------------------------
+ *
+ * Copyright (c) 2015 The Khronos Group Inc.
+ * Copyright (c) 2015 Imagination Technologies Ltd.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and/or associated documentation files (the
+ * "Materials"), to deal in the Materials without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Materials, and to
+ * permit persons to whom the Materials are furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice(s) and this permission notice shall be included
+ * in all copies or substantial portions of the Materials.
+ *
+ * The Materials are Confidential Information as defined by the
+ * Khronos Membership Agreement until designated non-confidential by Khronos,
+ * at which point this condition clause shall be removed.
+ *
+ * THE MATERIALS ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * MATERIALS OR THE USE OR OTHER DEALINGS IN THE MATERIALS.
+ *
+ *//*!
+ * \file
+ * \brief Pipeline Tests
+ *//*--------------------------------------------------------------------*/
+
+#include "vktPipelineTests.hpp"
+#include "vktPipelineDepthTests.hpp"
+#include "deUniquePtr.hpp"
+
+namespace vkt
+{
+namespace pipeline
+{
+
+// Creates the root "pipeline" test group; ownership of the returned group
+// (and all children) passes to the caller.
+tcu::TestCaseGroup* createTests (tcu::TestContext& testCtx)
+{
+	de::MovePtr<tcu::TestCaseGroup> pipelineGroup (new tcu::TestCaseGroup(testCtx, "pipeline", "Pipeline Tests"));
+
+	pipelineGroup->addChild(createDepthTests(testCtx));
+
+	return pipelineGroup.release();
+}
+
+} // pipeline
+} // vkt
--- /dev/null
+#ifndef _VKTPIPELINETESTS_HPP
+#define _VKTPIPELINETESTS_HPP
+/*------------------------------------------------------------------------
+ * Vulkan Conformance Tests
+ * ------------------------
+ *
+ * Copyright (c) 2015 The Khronos Group Inc.
+ * Copyright (c) 2015 Imagination Technologies Ltd.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and/or associated documentation files (the
+ * "Materials"), to deal in the Materials without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Materials, and to
+ * permit persons to whom the Materials are furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice(s) and this permission notice shall be included
+ * in all copies or substantial portions of the Materials.
+ *
+ * The Materials are Confidential Information as defined by the
+ * Khronos Membership Agreement until designated non-confidential by Khronos,
+ * at which point this condition clause shall be removed.
+ *
+ * THE MATERIALS ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * MATERIALS OR THE USE OR OTHER DEALINGS IN THE MATERIALS.
+ *
+ *//*!
+ * \file
+ * \brief Pipeline Tests
+ *//*--------------------------------------------------------------------*/
+
+#include "tcuDefs.hpp"
+#include "tcuTestCase.hpp"
+
+namespace vkt
+{
+namespace pipeline
+{
+
+// Creates the root test group for all pipeline tests; the caller owns the
+// returned group.
+tcu::TestCaseGroup* createTests (tcu::TestContext& testCtx);
+
+} // pipeline
+} // vkt
+
+#endif // _VKTPIPELINETESTS_HPP
--- /dev/null
+/*------------------------------------------------------------------------
+ * Vulkan Conformance Tests
+ * ------------------------
+ *
+ * Copyright (c) 2015 The Khronos Group Inc.
+ * Copyright (c) 2015 Imagination Technologies Ltd.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and/or associated documentation files (the
+ * "Materials"), to deal in the Materials without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Materials, and to
+ * permit persons to whom the Materials are furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice(s) and this permission notice shall be included
+ * in all copies or substantial portions of the Materials.
+ *
+ * The Materials are Confidential Information as defined by the
+ * Khronos Membership Agreement until designated non-confidential by Khronos,
+ * at which point this condition clause shall be removed.
+ *
+ * THE MATERIALS ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * MATERIALS OR THE USE OR OTHER DEALINGS IN THE MATERIALS.
+ *
+ *//*!
+ * \file
+ * \brief Utilities for vertex buffers.
+ *//*--------------------------------------------------------------------*/
+
+#include "vktPipelineVertexUtil.hpp"
+#include "tcuVectorUtil.hpp"
+
+namespace vkt
+{
+namespace pipeline
+{
+
+// Builds 4 overlapping quads (2 triangles each, 24 vertices total), one
+// per quadrant of the [-1,1] x [-1,1] plane at Z = 0, each with its own
+// solid color.  Triangle 1 of each quad is CCW, triangle 2 is CW.
+std::vector<Vertex4RGBA> createOverlappingQuads (void)
+{
+	using tcu::Vec2;
+	using tcu::Vec4;
+
+	const float quadSize = 1.25f;
+
+	// Lower-left corner of each quad.
+	const Vec2 translations[4] =
+	{
+		Vec2(-0.25f, -0.25f),
+		Vec2(-1.0f, -0.25f),
+		Vec2(-1.0f, -1.0f),
+		Vec2(-0.25f, -1.0f)
+	};
+
+	const Vec4 quadColors[4] =
+	{
+		Vec4(1.0f, 0.0f, 0.0f, 1.0f),
+		Vec4(0.0f, 1.0f, 0.0f, 1.0f),
+		Vec4(0.0f, 0.0f, 1.0f, 1.0f),
+		Vec4(1.0f, 0.0f, 1.0f, 1.0f)
+	};
+
+	std::vector<Vertex4RGBA> vertices;
+
+	for (int quadNdx = 0; quadNdx < 4; quadNdx++)
+	{
+		const float x0 = translations[quadNdx].x();
+		const float y0 = translations[quadNdx].y();
+		const float x1 = x0 + quadSize;
+		const float y1 = y0 + quadSize;
+		const Vec4& color = quadColors[quadNdx];
+
+		const Vertex4RGBA lowerLeft = { Vec4(x0, y0, 0.0f, 1.0f), color };
+		const Vertex4RGBA upperLeft = { Vec4(x0, y1, 0.0f, 1.0f), color };
+		const Vertex4RGBA lowerRight = { Vec4(x1, y0, 0.0f, 1.0f), color };
+		const Vertex4RGBA upperRight = { Vec4(x1, y1, 0.0f, 1.0f), color };
+
+		// Triangle 1, CCW
+		vertices.push_back(lowerLeft);
+		vertices.push_back(lowerRight);
+		vertices.push_back(upperLeft);
+
+		// Triangle 2, CW
+		vertices.push_back(lowerRight);
+		vertices.push_back(upperLeft);
+		vertices.push_back(upperRight);
+	}
+
+	return vertices;
+}
+
+} // pipeline
+} // vkt
--- /dev/null
+#ifndef _VKTPIPELINEVERTEXUTIL_HPP
+#define _VKTPIPELINEVERTEXUTIL_HPP
+/*------------------------------------------------------------------------
+ * Vulkan Conformance Tests
+ * ------------------------
+ *
+ * Copyright (c) 2015 The Khronos Group Inc.
+ * Copyright (c) 2015 Imagination Technologies Ltd.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and/or associated documentation files (the
+ * "Materials"), to deal in the Materials without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Materials, and to
+ * permit persons to whom the Materials are furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice(s) and this permission notice shall be included
+ * in all copies or substantial portions of the Materials.
+ *
+ * The Materials are Confidential Information as defined by the
+ * Khronos Membership Agreement until designated non-confidential by Khronos,
+ * at which point this condition clause shall be removed.
+ *
+ * THE MATERIALS ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * MATERIALS OR THE USE OR OTHER DEALINGS IN THE MATERIALS.
+ *
+ *//*!
+ * \file
+ * \brief Utilities for vertex buffers.
+ *//*--------------------------------------------------------------------*/
+
+#include "tcuDefs.hpp"
+#include "tcuVectorUtil.hpp"
+
+#include <vector>
+
+namespace vkt
+{
+namespace pipeline
+{
+
+// Interleaved vertex: clip-space position + RGBA color.
+struct Vertex4RGBA
+{
+	tcu::Vec4 position;
+	tcu::Vec4 color;
+};
+
+/*! \brief Creates a pattern of 4 overlapping quads.
+ *
+ * The quads are aligned along the plane Z = 0, with X,Y taking values between -1 and 1.
+ * Each quad covers one of the quadrants of the scene and partially extends to the other 3 quadrants.
+ * The triangles of each quad have different winding orders (CW/CCW).
+ */
+std::vector<Vertex4RGBA> createOverlappingQuads (void);
+
+} // pipeline
+} // vkt
+
+#endif // _VKTPIPELINEVERTEXUTIL_HPP
--- /dev/null
+/*-------------------------------------------------------------------------
+ * Vulkan Conformance Tests
+ * ------------------------
+ *
+ * Copyright (c) 2015 Google Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and/or associated documentation files (the
+ * "Materials"), to deal in the Materials without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Materials, and to
+ * permit persons to whom the Materials are furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice(s) and this permission notice shall be
+ * included in all copies or substantial portions of the Materials.
+ *
+ * The Materials are Confidential Information as defined by the
+ * Khronos Membership Agreement until designated non-confidential by
+ * Khronos, at which point this condition clause shall be removed.
+ *
+ * THE MATERIALS ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * MATERIALS OR THE USE OR OTHER DEALINGS IN THE MATERIALS.
+ *
+ *//*!
+ * \file
+ * \brief Utility for pre-compiling source programs to SPIR-V
+ *//*--------------------------------------------------------------------*/
+
+#include "tcuDefs.hpp"
+#include "tcuCommandLine.hpp"
+#include "tcuPlatform.hpp"
+#include "tcuResource.hpp"
+#include "tcuTestLog.hpp"
+#include "tcuTestHierarchyIterator.hpp"
+#include "deUniquePtr.hpp"
+#include "vkPrograms.hpp"
+#include "vkBinaryRegistry.hpp"
+#include "vktTestCase.hpp"
+#include "vktTestPackage.hpp"
+#include "deUniquePtr.hpp"
+#include "deCommandLine.hpp"
+
+#include <iostream>
+
+using std::vector;
+using std::string;
+using de::UniquePtr;
+using de::MovePtr;
+
+namespace vkt
+{
+
+// Builds a test hierarchy root holding the single Vulkan test package.
+// Ownership of the children transfers to the returned root.
+tcu::TestPackageRoot* createRoot (tcu::TestContext& testCtx)
+{
+	vector<tcu::TestNode*> rootChildren;
+
+	rootChildren.push_back(new TestPackage(testCtx));
+
+	return new tcu::TestPackageRoot(testCtx, rootChildren);
+}
+
+// Operating mode of the tool: build binaries into the registry, or verify
+// freshly-compiled binaries against a previously-built registry.
+enum BuildMode
+{
+	BUILDMODE_BUILD = 0,
+	BUILDMODE_VERIFY,
+
+	BUILDMODE_LAST
+};
+
+// Per-run program build/verify counters.
+struct BuildStats
+{
+	int numSucceeded;
+	int numFailed;
+
+	BuildStats (void)
+		: numSucceeded (0)
+		, numFailed (0)
+	{
+	}
+};
+
+namespace // anonymous
+{
+
+// Compiles GLSL sources to a SPIR-V program binary; build details are
+// written to *buildInfo.  Caller owns the returned binary.
+vk::ProgramBinary* compileProgram (const glu::ProgramSources& source, glu::ShaderProgramInfo* buildInfo)
+{
+	return vk::buildProgram(source, vk::PROGRAM_FORMAT_SPIRV, buildInfo);
+}
+
+// Assembles a SPIR-V assembly source to a program binary; build details
+// are written to *buildInfo.  Caller owns the returned binary.
+vk::ProgramBinary* compileProgram (const vk::SpirVAsmSource& source, vk::SpirVProgramInfo* buildInfo)
+{
+	return vk::assembleProgram(source, buildInfo);
+}
+
+// Dumps source and compile log of every shader in a GLSL build to stdout.
+void writeVerboseLogs (const glu::ShaderProgramInfo& buildInfo)
+{
+	for (size_t shaderNdx = 0; shaderNdx < buildInfo.shaders.size(); shaderNdx++)
+	{
+		const glu::ShaderInfo& shaderInfo = buildInfo.shaders[shaderNdx];
+		const char* const shaderName = getShaderTypeName(shaderInfo.type);
+
+		tcu::print("%s source:\n---\n%s\n---\n", shaderName, shaderInfo.source.c_str());
+		tcu::print("%s compile log:\n---\n%s\n---\n", shaderName, shaderInfo.infoLog.c_str());
+	}
+}
+
+// Dumps source and assembly log of a SPIR-V asm build to stdout.
+void writeVerboseLogs (const vk::SpirVProgramInfo& buildInfo)
+{
+	tcu::print("source:\n---\n%s\n---\n", buildInfo.source->program.str().c_str());
+	tcu::print("compile log:\n---\n%s\n---\n", buildInfo.infoLog.c_str());
+}
+
+/*--------------------------------------------------------------------*//*!
+ * Builds (or verifies) one program and updates *stats.
+ *
+ * In BUILDMODE_BUILD the compiled binary is stored via *writer; in
+ * BUILDMODE_VERIFY it is compared byte-for-byte against the binary loaded
+ * via *reader.  Only the pointer matching the active mode is used; the
+ * other may be null (see buildPrograms()).  Any failure is caught,
+ * reported on stdout (with build logs when printLogs is set) and counted
+ * in stats->numFailed rather than propagated.
+ *//*--------------------------------------------------------------------*/
+template <typename InfoType, typename IteratorType>
+void buildProgram (const std::string& casePath,
+ bool printLogs,
+ IteratorType iter,
+ BuildMode mode,
+ BuildStats* stats,
+ vk::BinaryRegistryReader* reader,
+ vk::BinaryRegistryWriter* writer)
+{
+	// Declared outside the try so the catch block can dump its logs.
+	InfoType buildInfo;
+	try
+	{
+		const vk::ProgramIdentifier progId (casePath, iter.getName());
+		const UniquePtr<vk::ProgramBinary> binary (compileProgram(iter.getProgram(), &buildInfo));
+
+		if (mode == BUILDMODE_BUILD)
+			writer->storeProgram(progId, *binary);
+		else
+		{
+			DE_ASSERT(mode == BUILDMODE_VERIFY);
+
+			const UniquePtr<vk::ProgramBinary> storedBinary (reader->loadProgram(progId));
+
+			if (binary->getSize() != storedBinary->getSize())
+				throw tcu::Exception("Binary size doesn't match");
+
+			if (deMemCmp(binary->getBinary(), storedBinary->getBinary(), binary->getSize()))
+				throw tcu::Exception("Binary contents don't match");
+		}
+
+		tcu::print("  OK: %s\n", iter.getName().c_str());
+		stats->numSucceeded += 1;
+	}
+	catch (const std::exception& e)
+	{
+		tcu::print("  ERROR: %s: %s\n", iter.getName().c_str(), e.what());
+		if (printLogs)
+		{
+			writeVerboseLogs(buildInfo);
+		}
+		stats->numFailed += 1;
+	}
+}
+
+} // anonymous
+/*--------------------------------------------------------------------*//*!
+ * Walks the whole test hierarchy and builds (or verifies) the programs of
+ * every executable test case.
+ *
+ * \param testCtx	Test context (supplies the command line / case filter).
+ * \param dstPath	Registry directory: written to in build mode, read
+ *					from in verify mode.
+ * \param mode		BUILDMODE_BUILD or BUILDMODE_VERIFY.
+ * \param verbose	Dump shader sources and logs on failure.
+ * \return Per-program success/failure counts; individual failures are
+ *         reported on stdout and do not abort the walk.
+ *//*--------------------------------------------------------------------*/
+BuildStats buildPrograms (tcu::TestContext& testCtx, const std::string& dstPath, BuildMode mode, bool verbose)
+{
+	const UniquePtr<tcu::TestPackageRoot> root (createRoot(testCtx));
+	tcu::DefaultHierarchyInflater inflater (testCtx);
+	tcu::TestHierarchyIterator iterator (*root, inflater, testCtx.getCommandLine());
+	const tcu::DirArchive srcArchive (dstPath.c_str());
+	// Only the registry object matching the active mode is created; the
+	// other stays null and is never dereferenced (see buildProgram()).
+	UniquePtr<vk::BinaryRegistryWriter> writer (mode == BUILDMODE_BUILD ? new vk::BinaryRegistryWriter(dstPath) : DE_NULL);
+	UniquePtr<vk::BinaryRegistryReader> reader (mode == BUILDMODE_VERIFY ? new vk::BinaryRegistryReader(srcArchive, "") : DE_NULL);
+	BuildStats stats;
+	const bool printLogs = verbose;
+
+	while (iterator.getState() != tcu::TestHierarchyIterator::STATE_FINISHED)
+	{
+		if (iterator.getState() == tcu::TestHierarchyIterator::STATE_ENTER_NODE &&
+		    tcu::isTestNodeTypeExecutable(iterator.getNode()->getNodeType()))
+		{
+			const TestCase* const testCase = dynamic_cast<TestCase*>(iterator.getNode());
+			const string casePath = iterator.getNodePath();
+			vk::SourceCollections progs;
+
+			// Every executable node in this package must be a vkt::TestCase;
+			// guard the dynamic_cast result instead of dereferencing a
+			// potential null pointer below.
+			DE_ASSERT(testCase != DE_NULL);
+
+			tcu::print("%s\n", casePath.c_str());
+
+			testCase->initPrograms(progs);
+
+			for (vk::GlslSourceCollection::Iterator progIter = progs.glslSources.begin(); progIter != progs.glslSources.end(); ++progIter)
+			{
+				buildProgram<glu::ShaderProgramInfo, vk::GlslSourceCollection::Iterator>(casePath, printLogs, progIter, mode, &stats, reader.get(), writer.get());
+			}
+
+			for (vk::SpirVAsmCollection::Iterator progIter = progs.spirvAsmSources.begin(); progIter != progs.spirvAsmSources.end(); ++progIter)
+			{
+				buildProgram<vk::SpirVProgramInfo, vk::SpirVAsmCollection::Iterator>(casePath, printLogs, progIter, mode, &stats, reader.get(), writer.get());
+			}
+		}
+
+		iterator.next();
+	}
+
+	return stats;
+}
+
+} // vkt
+
+// Command line option tags for the standalone program builder; the option
+// names, defaults and help texts are attached in registerOptions() below.
+namespace opt
+{
+
+DE_DECLARE_COMMAND_LINE_OPT(DstPath, std::string);
+DE_DECLARE_COMMAND_LINE_OPT(Mode, vkt::BuildMode);
+DE_DECLARE_COMMAND_LINE_OPT(Verbose, bool);
+DE_DECLARE_COMMAND_LINE_OPT(Cases, std::string);
+
+} // opt
+
+// Registers the builder's command line options on the given parser.
+// "-m/--mode" accepts the named values "build" (default) and "verify".
+void registerOptions (de::cmdline::Parser& parser)
+{
+	using de::cmdline::Option;
+	using de::cmdline::NamedValue;
+
+	// Mapping from command line strings to BuildMode values.
+	static const NamedValue<vkt::BuildMode> s_modes[] =
+	{
+		{ "build", vkt::BUILDMODE_BUILD },
+		{ "verify", vkt::BUILDMODE_VERIFY }
+	};
+
+	parser << Option<opt::DstPath> ("d", "dst-path", "Destination path", "out")
+		   << Option<opt::Mode> ("m", "mode", "Build mode", s_modes, "build")
+		   << Option<opt::Verbose> ("v", "verbose", "Verbose output")
+		   << Option<opt::Cases> ("n", "deqp-case", "Case path filter (works as in test binaries)");
+}
+
+// Program entry point: parses options, constructs a minimal test context and
+// runs vkt::buildPrograms(). Exit status is 0 iff no program build failed.
+int main (int argc, const char* argv[])
+{
+	de::cmdline::CommandLine cmdLine;
+	tcu::CommandLine deqpCmdLine;
+
+	// Parse builder-specific options (-d/-m/-v/-n).
+	{
+		de::cmdline::Parser parser;
+		registerOptions(parser);
+		if (!parser.parse(argc, argv, &cmdLine, std::cerr))
+		{
+			parser.help(std::cout);
+			return -1;
+		}
+	}
+
+	// Build a synthetic dEQP command line carrying only the case filter, so
+	// the test hierarchy iterator applies the same case selection semantics
+	// as the actual test binaries.
+	{
+		vector<const char*> deqpArgv;
+
+		deqpArgv.push_back("unused");	// Placeholder for argv[0] (program name).
+
+		if (cmdLine.hasOption<opt::Cases>())
+		{
+			deqpArgv.push_back("--deqp-case");
+			deqpArgv.push_back(cmdLine.getOption<opt::Cases>().c_str());
+		}
+
+		if (!deqpCmdLine.parse((int)deqpArgv.size(), &deqpArgv[0]))
+			return -1;
+	}
+
+	try
+	{
+		tcu::DirArchive archive (".");
+		tcu::TestLog log (deqpCmdLine.getLogFileName(), deqpCmdLine.getLogFlags());
+		tcu::Platform platform;	// Base platform; no rendering context is required for building programs.
+		tcu::TestContext testCtx (platform, archive, log, deqpCmdLine, DE_NULL);
+
+		const vkt::BuildStats stats = vkt::buildPrograms(testCtx,
+														 cmdLine.getOption<opt::DstPath>(),
+														 cmdLine.getOption<opt::Mode>(),
+														 cmdLine.getOption<opt::Verbose>());
+
+		tcu::print("DONE: %d passed, %d failed\n", stats.numSucceeded, stats.numFailed);
+
+		return stats.numFailed == 0 ? 0 : -1;
+	}
+	catch (const std::exception& e)
+	{
+		// tcu::die() prints the message and terminates the process.
+		tcu::die("%s", e.what());
+	}
+}
--- /dev/null
+/*-------------------------------------------------------------------------
+ * Vulkan Conformance Tests
+ * ------------------------
+ *
+ * Copyright (c) 2015 Google Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and/or associated documentation files (the
+ * "Materials"), to deal in the Materials without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Materials, and to
+ * permit persons to whom the Materials are furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice(s) and this permission notice shall be
+ * included in all copies or substantial portions of the Materials.
+ *
+ * The Materials are Confidential Information as defined by the
+ * Khronos Membership Agreement until designated non-confidential by
+ * Khronos, at which point this condition clause shall be removed.
+ *
+ * THE MATERIALS ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * MATERIALS OR THE USE OR OTHER DEALINGS IN THE MATERIALS.
+ *
+ *//*!
+ * \file
+ * \brief Platform information tests
+ *//*--------------------------------------------------------------------*/
+
+#include "vktInfo.hpp"
+
+#include "vktTestCaseUtil.hpp"
+
+#include "vkPlatform.hpp"
+#include "vkStrUtil.hpp"
+#include "vkRef.hpp"
+#include "vkDeviceUtil.hpp"
+#include "vkQueryUtil.hpp"
+
+#include "tcuTestLog.hpp"
+#include "tcuFormatUtil.hpp"
+
+#include "deUniquePtr.hpp"
+
+namespace vkt
+{
+namespace
+{
+
+using namespace vk;
+using std::vector;
+using std::string;
+using tcu::TestLog;
+
+// Enumerates all physical devices of the default instance and logs their
+// count and handles. Passes as long as the enumeration call succeeds.
+tcu::TestStatus enumeratePhysicalDevices (Context& context)
+{
+	TestLog& log = context.getTestContext().getLog();
+	const vector<VkPhysicalDevice> devices = vk::enumeratePhysicalDevices(context.getInstanceInterface(), context.getInstance());
+
+	log << TestLog::Integer("NumDevices", "Number of devices", "", QP_KEY_TAG_NONE, deInt64(devices.size()));
+
+	for (size_t ndx = 0; ndx < devices.size(); ndx++)
+		log << TestLog::Message << ndx << ": " << devices[ndx] << TestLog::EndMessage;
+
+	return tcu::TestStatus::pass("Enumerating devices succeeded");
+}
+
+// Queries and logs the queue properties of the default physical device.
+// Passes as long as the query itself succeeds.
+tcu::TestStatus queueProperties (Context& context)
+{
+	TestLog& log = context.getTestContext().getLog();
+	const vector<VkPhysicalDeviceQueueProperties> queueProperties = getPhysicalDeviceQueueProperties(context.getInstanceInterface(), context.getPhysicalDevice());
+
+	log << TestLog::Message << "device = " << context.getPhysicalDevice() << TestLog::EndMessage;
+
+	for (size_t queueNdx = 0; queueNdx < queueProperties.size(); queueNdx++)
+		log << TestLog::Message << queueNdx << ": " << queueProperties[queueNdx] << TestLog::EndMessage;
+
+	return tcu::TestStatus::pass("Querying queue properties succeeded");
+}
+
+// Queries and logs the memory properties of the default physical device.
+// Passes as long as the query itself succeeds.
+tcu::TestStatus memoryProperties (Context& context)
+{
+	TestLog& log = context.getTestContext().getLog();
+
+	log << TestLog::Message << "device = " << context.getPhysicalDevice() << TestLog::EndMessage;
+
+	log << TestLog::Message
+		<< getPhysicalDeviceMemoryProperties(context.getInstanceInterface(), context.getPhysicalDevice())
+		<< TestLog::EndMessage;
+
+	return tcu::TestStatus::pass("Querying memory properties succeeded");
+}
+
+} // anonymous
+
+// Creates the "info" group holding the platform/device information query
+// cases defined above. The caller takes ownership of the returned group.
+tcu::TestCaseGroup* createInfoTests (tcu::TestContext& testCtx)
+{
+	de::MovePtr<tcu::TestCaseGroup> infoTests (new tcu::TestCaseGroup(testCtx, "info", "Platform Information Tests"));
+
+	addFunctionCase(infoTests.get(), "physical_devices", "Physical devices", enumeratePhysicalDevices);
+	addFunctionCase(infoTests.get(), "queue_properties", "Queue properties", queueProperties);
+	addFunctionCase(infoTests.get(), "memory_properties", "Memory properties", memoryProperties);
+
+	return infoTests.release();
+}
+
+} // vkt
--- /dev/null
+#ifndef _VKTINFO_HPP
+#define _VKTINFO_HPP
+/*-------------------------------------------------------------------------
+ * Vulkan Conformance Tests
+ * ------------------------
+ *
+ * Copyright (c) 2015 Google Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and/or associated documentation files (the
+ * "Materials"), to deal in the Materials without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Materials, and to
+ * permit persons to whom the Materials are furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice(s) and this permission notice shall be
+ * included in all copies or substantial portions of the Materials.
+ *
+ * The Materials are Confidential Information as defined by the
+ * Khronos Membership Agreement until designated non-confidential by
+ * Khronos, at which point this condition clause shall be removed.
+ *
+ * THE MATERIALS ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * MATERIALS OR THE USE OR OTHER DEALINGS IN THE MATERIALS.
+ *
+ *//*!
+ * \file
+ * \brief Platform information tests
+ *//*--------------------------------------------------------------------*/
+
+#include "tcuDefs.hpp"
+#include "tcuTestCase.hpp"
+
+namespace vkt
+{
+
+// Creates the "info" test group (platform information query cases); the
+// caller takes ownership of the returned group.
+tcu::TestCaseGroup* createInfoTests (tcu::TestContext& testCtx);
+
+} // vkt
+
+#endif // _VKTINFO_HPP
--- /dev/null
+/*-------------------------------------------------------------------------
+ * Vulkan Conformance Tests
+ * ------------------------
+ *
+ * Copyright (c) 2015 Google Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and/or associated documentation files (the
+ * "Materials"), to deal in the Materials without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Materials, and to
+ * permit persons to whom the Materials are furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice(s) and this permission notice shall be
+ * included in all copies or substantial portions of the Materials.
+ *
+ * The Materials are Confidential Information as defined by the
+ * Khronos Membership Agreement until designated non-confidential by
+ * Khronos, at which point this condition clause shall be removed.
+ *
+ * THE MATERIALS ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * MATERIALS OR THE USE OR OTHER DEALINGS IN THE MATERIALS.
+ *
+ *//*!
+ * \file
+ * \brief Vulkan test case base classes
+ *//*--------------------------------------------------------------------*/
+
+#include "vktTestCase.hpp"
+
+#include "vkRef.hpp"
+#include "vkRefUtil.hpp"
+#include "vkQueryUtil.hpp"
+#include "vkDeviceUtil.hpp"
+#include "vkMemUtil.hpp"
+#include "vkPlatform.hpp"
+
+#include "deMemory.h"
+
+namespace vkt
+{
+
+// Default device utilities
+
+using std::vector;
+using namespace vk;
+
+// Returns the index of the first queue family whose flags contain all bits
+// in requiredCaps. Throws tcu::NotSupportedError if no family qualifies.
+static deUint32 findQueueFamilyIndexWithCaps (const InstanceInterface& vkInstance, VkPhysicalDevice physicalDevice, VkQueueFlags requiredCaps)
+{
+	const vector<VkPhysicalDeviceQueueProperties> queueProps = getPhysicalDeviceQueueProperties(vkInstance, physicalDevice);
+
+	for (size_t queueNdx = 0; queueNdx < queueProps.size(); queueNdx++)
+	{
+		// A family qualifies only if it advertises every requested capability bit.
+		if ((queueProps[queueNdx].queueFlags & requiredCaps) == requiredCaps)
+			return (deUint32)queueNdx;
+	}
+
+	TCU_THROW(NotSupportedError, "No matching queue found");
+}
+
+// Bundles VkDeviceCreateInfo together with the structures it points to, so
+// that their lifetimes match: deviceInfo references queueInfo and
+// enabledFeatures by pointer, and all three live in this single object.
+struct DeviceCreateInfoHelper
+{
+	VkPhysicalDeviceFeatures enabledFeatures;	// Zero-initialized: all optional features disabled.
+	VkDeviceQueueCreateInfo queueInfo;			// Requests a single queue from one family.
+	VkDeviceCreateInfo deviceInfo;				// Top-level create info referencing the two above.
+
+	// \param queueIndex	Queue family index to request one queue from.
+	DeviceCreateInfoHelper (deUint32 queueIndex)
+	{
+		deMemset(&enabledFeatures, 0, sizeof(enabledFeatures));
+		deMemset(&queueInfo, 0, sizeof(queueInfo));
+		deMemset(&deviceInfo, 0, sizeof(deviceInfo));
+
+		// \todo [2015-07-09 pyry] What's the policy for enabling features?
+		//  * Enable all supported by default, and expose that to test cases
+		//  * More limited enabled set could be used for verifying that tests behave correctly
+
+		queueInfo.queueFamilyIndex = queueIndex;
+		queueInfo.queueCount = 1u;
+
+		deviceInfo.sType = VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO;
+		deviceInfo.pNext = DE_NULL;
+		deviceInfo.queueRecordCount = 1u;
+		deviceInfo.pRequestedQueues = &queueInfo;
+		deviceInfo.extensionCount = 0u;
+		deviceInfo.ppEnabledExtensionNames = DE_NULL;
+		deviceInfo.pEnabledFeatures = &enabledFeatures;
+		deviceInfo.flags = 0u;
+	}
+};
+
+// Owns the default Vulkan instance and device used by test cases. The device
+// is created with a single queue from a family supporting both graphics and
+// compute (the "universal" queue). Member declaration order matters: it
+// matches the required construction/destruction order (instance before
+// physical device before device).
+class DefaultDevice
+{
+public:
+	DefaultDevice (const PlatformInterface& vkPlatform, const tcu::CommandLine& cmdLine);
+	~DefaultDevice (void);
+
+	VkInstance getInstance (void) const { return *m_instance; }
+	const InstanceInterface& getInstanceInterface (void) const { return m_instanceInterface; }
+
+	VkPhysicalDevice getPhysicalDevice (void) const { return m_physicalDevice; }
+
+	VkDevice getDevice (void) const { return *m_device; }
+	const DeviceInterface& getDeviceInterface (void) const { return m_deviceInterface; }
+
+	deUint32 getUniversalQueueFamilyIndex (void) const { return m_universalQueueFamilyIndex; }
+	VkQueue getUniversalQueue (void) const;
+
+private:
+	const Unique<VkInstance> m_instance;
+	const InstanceDriver m_instanceInterface;
+
+	const VkPhysicalDevice m_physicalDevice;	// Selected via --deqp-vk-device-id; owned by the instance.
+
+	const deUint32 m_universalQueueFamilyIndex;
+	const DeviceCreateInfoHelper m_deviceCreateInfo;	// Must outlive device creation; see DeviceCreateInfoHelper.
+
+	const Unique<VkDevice> m_device;
+	const DeviceDriver m_deviceInterface;
+};
+
+// Creates the instance, selects the physical device from the command line and
+// creates a device with one graphics+compute queue.
+DefaultDevice::DefaultDevice (const PlatformInterface& vkPlatform, const tcu::CommandLine& cmdLine)
+	: m_instance (createDefaultInstance(vkPlatform))
+	, m_instanceInterface (vkPlatform, *m_instance)
+	, m_physicalDevice (chooseDevice(m_instanceInterface, *m_instance, cmdLine))
+	, m_universalQueueFamilyIndex (findQueueFamilyIndexWithCaps(m_instanceInterface, m_physicalDevice, VK_QUEUE_GRAPHICS_BIT|VK_QUEUE_COMPUTE_BIT))
+	, m_deviceCreateInfo (m_universalQueueFamilyIndex)
+	, m_device (createDevice(m_instanceInterface, m_physicalDevice, &m_deviceCreateInfo.deviceInfo))
+	, m_deviceInterface (m_instanceInterface, *m_device)
+{
+}
+
+DefaultDevice::~DefaultDevice (void)
+{
+}
+
+// Fetches queue 0 of the universal queue family. The queue handle is owned by
+// the device and must not be destroyed by the caller.
+VkQueue DefaultDevice::getUniversalQueue (void) const
+{
+	VkQueue queue = 0;
+	VK_CHECK(m_deviceInterface.getDeviceQueue(*m_device, m_universalQueueFamilyIndex, 0, &queue));
+	return queue;
+}
+
+// Allocator utilities
+
+// Creates the default memory allocator for the given device. Returns a raw
+// pointer; ownership is taken by the caller (Context wraps it in a UniquePtr).
+vk::Allocator* createAllocator (DefaultDevice* device)
+{
+	const VkPhysicalDeviceMemoryProperties memoryProperties = vk::getPhysicalDeviceMemoryProperties(device->getInstanceInterface(), device->getPhysicalDevice());
+
+	// \todo [2015-07-24 jarkko] support allocator selection/configuration from command line (or compile time)
+	return new SimpleAllocator(device->getDeviceInterface(), device->getDevice(), memoryProperties);
+}
+
+// Context
+
+// Creates the per-test Context: instantiates the default device (selected via
+// the command line) and its memory allocator.
+Context::Context (tcu::TestContext& testCtx,
+				  const vk::PlatformInterface& platformInterface,
+				  vk::ProgramCollection<vk::ProgramBinary>& progCollection)
+	: m_testCtx (testCtx)
+	, m_platformInterface (platformInterface)
+	, m_progCollection (progCollection)
+	, m_device (new DefaultDevice(m_platformInterface, testCtx.getCommandLine()))
+	, m_allocator (createAllocator(m_device.get()))
+{
+}
+
+Context::~Context (void)
+{
+}
+
+// Thin accessors forwarding to the DefaultDevice instance.
+vk::VkInstance Context::getInstance (void) const { return m_device->getInstance(); }
+const vk::InstanceInterface& Context::getInstanceInterface (void) const { return m_device->getInstanceInterface(); }
+vk::VkPhysicalDevice Context::getPhysicalDevice (void) const { return m_device->getPhysicalDevice(); }
+vk::VkDevice Context::getDevice (void) const { return m_device->getDevice(); }
+const vk::DeviceInterface& Context::getDeviceInterface (void) const { return m_device->getDeviceInterface(); }
+deUint32 Context::getUniversalQueueFamilyIndex (void) const { return m_device->getUniversalQueueFamilyIndex(); }
+vk::VkQueue Context::getUniversalQueue (void) const { return m_device->getUniversalQueue(); }
+vk::Allocator& Context::getDefaultAllocator (void) const { return *m_allocator; }
+
+// TestCase
+
+// Default implementation: the test case requires no pre-built programs.
+void TestCase::initPrograms (SourceCollections&) const
+{
+}
+
+} // vkt
--- /dev/null
+#ifndef _VKTTESTCASE_HPP
+#define _VKTTESTCASE_HPP
+/*-------------------------------------------------------------------------
+ * Vulkan Conformance Tests
+ * ------------------------
+ *
+ * Copyright (c) 2015 Google Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and/or associated documentation files (the
+ * "Materials"), to deal in the Materials without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Materials, and to
+ * permit persons to whom the Materials are furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice(s) and this permission notice shall be
+ * included in all copies or substantial portions of the Materials.
+ *
+ * The Materials are Confidential Information as defined by the
+ * Khronos Membership Agreement until designated non-confidential by
+ * Khronos, at which point this condition clause shall be removed.
+ *
+ * THE MATERIALS ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * MATERIALS OR THE USE OR OTHER DEALINGS IN THE MATERIALS.
+ *
+ *//*!
+ * \file
+ * \brief Vulkan test case base classes
+ *//*--------------------------------------------------------------------*/
+
+#include "tcuDefs.hpp"
+#include "tcuTestCase.hpp"
+#include "vkDefs.hpp"
+#include "deUniquePtr.hpp"
+
+namespace glu
+{
+struct ProgramSources;
+}
+
+namespace vk
+{
+class PlatformInterface;
+class ProgramBinary;
+template<typename Program> class ProgramCollection;
+class Allocator;
+struct SourceCollections;
+}
+
+namespace vkt
+{
+
+class DefaultDevice;
+
+// Shared per-test execution context: gives test instances access to the test
+// framework, the Vulkan platform interface, pre-built program binaries, and
+// the default instance/device. Non-copyable.
+class Context
+{
+public:
+							Context (tcu::TestContext& testCtx,
+									 const vk::PlatformInterface& platformInterface,
+									 vk::ProgramCollection<vk::ProgramBinary>& progCollection);
+							~Context (void);
+
+	tcu::TestContext& getTestContext (void) const { return m_testCtx; }
+	const vk::PlatformInterface& getPlatformInterface (void) const { return m_platformInterface; }
+	vk::ProgramCollection<vk::ProgramBinary>& getBinaryCollection (void) const { return m_progCollection; }
+
+	// Default instance & device, selected with --deqp-vk-device-id=N
+	vk::VkInstance getInstance (void) const;
+	const vk::InstanceInterface& getInstanceInterface (void) const;
+	vk::VkPhysicalDevice getPhysicalDevice (void) const;
+	vk::VkDevice getDevice (void) const;
+	const vk::DeviceInterface& getDeviceInterface (void) const;
+	deUint32 getUniversalQueueFamilyIndex (void) const;
+	vk::VkQueue getUniversalQueue (void) const;
+
+	vk::Allocator& getDefaultAllocator (void) const;
+
+protected:
+	tcu::TestContext& m_testCtx;
+	const vk::PlatformInterface& m_platformInterface;
+	vk::ProgramCollection<vk::ProgramBinary>& m_progCollection;
+
+	const de::UniquePtr<DefaultDevice> m_device;		// Owns default instance + device.
+	const de::UniquePtr<vk::Allocator> m_allocator;	// Default memory allocator for m_device.
+
+private:
+							Context (const Context&); // Not allowed
+	Context&				operator= (const Context&); // Not allowed
+};
+
+
+class TestInstance;
+
+// Vulkan test case base class. Unlike plain tcu::TestCase, execution is
+// factored into a separate TestInstance created per run via createInstance();
+// iterate() on the case itself is deprecated and must never be called.
+// initPrograms() declares the shader programs the case needs so they can be
+// compiled (or loaded from a prebuilt registry) before execution.
+class TestCase : public tcu::TestCase
+{
+public:
+							TestCase (tcu::TestContext& testCtx, const std::string& name, const std::string& description);
+							TestCase (tcu::TestContext& testCtx, tcu::TestNodeType type, const std::string& name, const std::string& description);
+	virtual					~TestCase (void) {}
+
+	virtual void			initPrograms (vk::SourceCollections& programCollection) const;
+	virtual TestInstance*	createInstance (Context& context) const = 0;
+
+	IterateResult			iterate (void) { DE_ASSERT(false); return STOP; } // Deprecated in this module
+};
+
+// Per-execution state of a TestCase; iterate() performs the actual test work.
+class TestInstance
+{
+public:
+							TestInstance (Context& context) : m_context(context) {}
+	virtual					~TestInstance (void) {}
+
+	virtual tcu::TestStatus	iterate (void) = 0;
+
+protected:
+	Context&				m_context;
+};
+
+inline TestCase::TestCase (tcu::TestContext& testCtx, const std::string& name, const std::string& description)
+	: tcu::TestCase(testCtx, name.c_str(), description.c_str())
+{
+}
+
+inline TestCase::TestCase (tcu::TestContext& testCtx, tcu::TestNodeType type, const std::string& name, const std::string& description)
+	: tcu::TestCase(testCtx, type, name.c_str(), description.c_str())
+{
+}
+
+} // vkt
+
+#endif // _VKTTESTCASE_HPP
--- /dev/null
+/*-------------------------------------------------------------------------
+ * Vulkan Conformance Tests
+ * ------------------------
+ *
+ * Copyright (c) 2015 Google Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and/or associated documentation files (the
+ * "Materials"), to deal in the Materials without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Materials, and to
+ * permit persons to whom the Materials are furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice(s) and this permission notice shall be
+ * included in all copies or substantial portions of the Materials.
+ *
+ * The Materials are Confidential Information as defined by the
+ * Khronos Membership Agreement until designated non-confidential by
+ * Khronos, at which point this condition clause shall be removed.
+ *
+ * THE MATERIALS ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * MATERIALS OR THE USE OR OTHER DEALINGS IN THE MATERIALS.
+ *
+ *//*!
+ * \file
+ * \brief TestCase utilities
+ *//*--------------------------------------------------------------------*/
+
+#include "vktTestCaseUtil.hpp"
+
+DE_EMPTY_CPP_FILE
--- /dev/null
+#ifndef _VKTTESTCASEUTIL_HPP
+#define _VKTTESTCASEUTIL_HPP
+/*-------------------------------------------------------------------------
+ * Vulkan Conformance Tests
+ * ------------------------
+ *
+ * Copyright (c) 2015 Google Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and/or associated documentation files (the
+ * "Materials"), to deal in the Materials without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Materials, and to
+ * permit persons to whom the Materials are furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice(s) and this permission notice shall be
+ * included in all copies or substantial portions of the Materials.
+ *
+ * The Materials are Confidential Information as defined by the
+ * Khronos Membership Agreement until designated non-confidential by
+ * Khronos, at which point this condition clause shall be removed.
+ *
+ * THE MATERIALS ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * MATERIALS OR THE USE OR OTHER DEALINGS IN THE MATERIALS.
+ *
+ *//*!
+ * \file
+ * \brief TestCase utilities
+ *//*--------------------------------------------------------------------*/
+
+#include "tcuDefs.hpp"
+#include "vktTestCase.hpp"
+
+namespace vkt
+{
+
+// Default "no programs" policy for InstanceFactory1: init() is a no-op.
+template<typename Arg0>
+struct NoPrograms1
+{
+	void init (vk::SourceCollections&, Arg0) const {}
+};
+
+// Generic TestCase that forwards a single stored argument to the Instance
+// constructor and delegates program generation to a Programs policy object.
+template<typename Instance, typename Arg0, typename Programs = NoPrograms1<Arg0> >
+class InstanceFactory1 : public TestCase
+{
+public:
+					InstanceFactory1 (tcu::TestContext& testCtx, tcu::TestNodeType type, const std::string& name, const std::string& desc, const Arg0& arg0)
+						: TestCase (testCtx, type, name, desc)
+						, m_progs ()
+						, m_arg0 (arg0)
+					{}
+
+					InstanceFactory1 (tcu::TestContext& testCtx, tcu::TestNodeType type, const std::string& name, const std::string& desc, const Programs& progs, const Arg0& arg0)
+						: TestCase (testCtx, type, name, desc)
+						, m_progs (progs)
+						, m_arg0 (arg0)
+					{}
+
+	void			initPrograms (vk::SourceCollections& dst) const { m_progs.init(dst, m_arg0); }
+	TestInstance*	createInstance (Context& context) const { return new Instance(context, m_arg0); }
+
+private:
+	const Programs	m_progs;
+	const Arg0		m_arg0;
+};
+
+// TestInstance adapter for a plain function taking only the Context.
+class FunctionInstance0 : public TestInstance
+{
+public:
+	typedef tcu::TestStatus	(*Function) (Context& context);
+
+					FunctionInstance0 (Context& context, Function function)
+						: TestInstance (context)
+						, m_function (function)
+					{}
+
+	tcu::TestStatus	iterate (void) { return m_function(m_context); }
+
+private:
+	const Function	m_function;
+};
+
+// TestInstance adapter for a function taking the Context plus one extra
+// argument; function pointer and argument travel together in Args.
+template<typename Arg0>
+class FunctionInstance1 : public TestInstance
+{
+public:
+	typedef tcu::TestStatus	(*Function) (Context& context, Arg0 arg0);
+
+	struct Args
+	{
+		Args (Function func_, Arg0 arg0_) : func(func_), arg0(arg0_) {}
+
+		Function	func;
+		Arg0		arg0;
+	};
+
+					FunctionInstance1 (Context& context, const Args& args)
+						: TestInstance (context)
+						, m_args (args)
+					{}
+
+	tcu::TestStatus	iterate (void) { return m_args.func(m_context, m_args.arg0); }
+
+private:
+	const Args		m_args;
+};
+
+// Programs policy wrapping a free function for FunctionInstance0 cases.
+class FunctionPrograms0
+{
+public:
+	typedef void	(*Function) (vk::SourceCollections& dst);
+
+					FunctionPrograms0 (Function func)
+						: m_func(func)
+					{}
+
+	void			init (vk::SourceCollections& dst, FunctionInstance0::Function) const { m_func(dst); }
+
+private:
+	const Function	m_func;
+};
+
+// Programs policy wrapping a free function for FunctionInstance1 cases; the
+// stored Args supply the extra argument forwarded to the init function.
+template<typename Arg0>
+class FunctionPrograms1
+{
+public:
+	typedef void	(*Function) (vk::SourceCollections& dst, Arg0 arg0);
+
+					FunctionPrograms1 (Function func)
+						: m_func(func)
+					{}
+
+	void			init (vk::SourceCollections& dst, const typename FunctionInstance1<Arg0>::Args& args) const { m_func(dst, args.arg0); }
+
+private:
+	const Function	m_func;
+};
+
+// createFunctionCase
+//
+// Factory helpers that wrap a free test function (and optionally a program
+// initializer) in an InstanceFactory1-based TestCase. Callers own the
+// returned TestCase pointer.
+
+inline TestCase* createFunctionCase (tcu::TestContext&				testCtx,
+									 tcu::TestNodeType				type,
+									 const std::string&				name,
+									 const std::string&				desc,
+									 FunctionInstance0::Function	testFunction)
+{
+	return new InstanceFactory1<FunctionInstance0, FunctionInstance0::Function>(testCtx, type, name, desc, testFunction);
+}
+
+inline TestCase* createFunctionCaseWithPrograms (tcu::TestContext&				testCtx,
+												 tcu::TestNodeType				type,
+												 const std::string&				name,
+												 const std::string&				desc,
+												 FunctionPrograms0::Function	initPrograms,
+												 FunctionInstance0::Function	testFunction)
+{
+	return new InstanceFactory1<FunctionInstance0, FunctionInstance0::Function, FunctionPrograms0>(
+		testCtx, type, name, desc, FunctionPrograms0(initPrograms), testFunction);
+}
+
+// Variant for test functions taking one extra argument (by value in Args).
+template<typename Arg0>
+TestCase* createFunctionCase (tcu::TestContext&								testCtx,
+							  tcu::TestNodeType								type,
+							  const std::string&							name,
+							  const std::string&							desc,
+							  typename FunctionInstance1<Arg0>::Function	testFunction,
+							  Arg0											arg0)
+{
+	return new InstanceFactory1<FunctionInstance1<Arg0>, typename FunctionInstance1<Arg0>::Args>(
+		testCtx, type, name, desc, typename FunctionInstance1<Arg0>::Args(testFunction, arg0));
+}
+
+template<typename Arg0>
+TestCase* createFunctionCaseWithPrograms (tcu::TestContext&								testCtx,
+										  tcu::TestNodeType								type,
+										  const std::string&							name,
+										  const std::string&							desc,
+										  typename FunctionPrograms1<Arg0>::Function	initPrograms,
+										  typename FunctionInstance1<Arg0>::Function	testFunction,
+										  Arg0											arg0)
+{
+	return new InstanceFactory1<FunctionInstance1<Arg0>, typename FunctionInstance1<Arg0>::Args, FunctionPrograms1<Arg0> >(
+		testCtx, type, name, desc, FunctionPrograms1<Arg0>(initPrograms), typename FunctionInstance1<Arg0>::Args(testFunction, arg0));
+}
+
+// addFunctionCase
+//
+// Convenience helpers: create a function-backed case (defaulting to
+// NODETYPE_SELF_VALIDATE unless a node type is given) and add it to a group,
+// which takes ownership of the new child.
+
+inline void addFunctionCase (tcu::TestCaseGroup*			group,
+							 const std::string&				name,
+							 const std::string&				desc,
+							 FunctionInstance0::Function	testFunc)
+{
+	group->addChild(createFunctionCase(group->getTestContext(), tcu::NODETYPE_SELF_VALIDATE, name, desc, testFunc));
+}
+
+inline void addFunctionCaseWithPrograms (tcu::TestCaseGroup*			group,
+										 const std::string&				name,
+										 const std::string&				desc,
+										 FunctionPrograms0::Function	initPrograms,
+										 FunctionInstance0::Function	testFunc)
+{
+	group->addChild(createFunctionCaseWithPrograms(group->getTestContext(), tcu::NODETYPE_SELF_VALIDATE, name, desc, initPrograms, testFunc));
+}
+
+template<typename Arg0>
+void addFunctionCase (tcu::TestCaseGroup*							group,
+					  const std::string&							name,
+					  const std::string&							desc,
+					  typename FunctionInstance1<Arg0>::Function	testFunc,
+					  Arg0											arg0)
+{
+	group->addChild(createFunctionCase<Arg0>(group->getTestContext(), tcu::NODETYPE_SELF_VALIDATE, name, desc, testFunc, arg0));
+}
+
+template<typename Arg0>
+void addFunctionCase (tcu::TestCaseGroup*							group,
+					  tcu::TestNodeType								type,
+					  const std::string&							name,
+					  const std::string&							desc,
+					  typename FunctionInstance1<Arg0>::Function	testFunc,
+					  Arg0											arg0)
+{
+	group->addChild(createFunctionCase<Arg0>(group->getTestContext(), type, name, desc, testFunc, arg0));
+}
+
+template<typename Arg0>
+void addFunctionCaseWithPrograms (tcu::TestCaseGroup*							group,
+								  const std::string&							name,
+								  const std::string&							desc,
+								  typename FunctionPrograms1<Arg0>::Function	initPrograms,
+								  typename FunctionInstance1<Arg0>::Function	testFunc,
+								  Arg0											arg0)
+{
+	group->addChild(createFunctionCaseWithPrograms<Arg0>(group->getTestContext(), tcu::NODETYPE_SELF_VALIDATE, name, desc, initPrograms, testFunc, arg0));
+}
+
+template<typename Arg0>
+void addFunctionCaseWithPrograms (tcu::TestCaseGroup*							group,
+								  tcu::TestNodeType								type,
+								  const std::string&							name,
+								  const std::string&							desc,
+								  typename FunctionPrograms1<Arg0>::Function	initPrograms,
+								  typename FunctionInstance1<Arg0>::Function	testFunc,
+								  Arg0											arg0)
+{
+	group->addChild(createFunctionCaseWithPrograms<Arg0>(group->getTestContext(), type, name, desc, initPrograms, testFunc, arg0));
+}
+
+} // vkt
+
+#endif // _VKTTESTCASEUTIL_HPP
--- /dev/null
+/*-------------------------------------------------------------------------
+ * Vulkan Conformance Tests
+ * ------------------------
+ *
+ * Copyright (c) 2015 Google Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and/or associated documentation files (the
+ * "Materials"), to deal in the Materials without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Materials, and to
+ * permit persons to whom the Materials are furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice(s) and this permission notice shall be
+ * included in all copies or substantial portions of the Materials.
+ *
+ * The Materials are Confidential Information as defined by the
+ * Khronos Membership Agreement until designated non-confidential by
+ * Khronos, at which point this condition clause shall be removed.
+ *
+ * THE MATERIALS ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * MATERIALS OR THE USE OR OTHER DEALINGS IN THE MATERIALS.
+ *
+ *//*!
+ * \file
+ * \brief Vulkan Test Package
+ *//*--------------------------------------------------------------------*/
+
+#include "vktTestPackage.hpp"
+
+#include "tcuPlatform.hpp"
+#include "tcuTestCase.hpp"
+#include "tcuTestLog.hpp"
+
+#include "vkPlatform.hpp"
+#include "vkPrograms.hpp"
+#include "vkBinaryRegistry.hpp"
+#include "vkGlslToSpirV.hpp"
+#include "vkSpirVAsm.hpp"
+
+#include "deUniquePtr.hpp"
+
+#include "vktInfo.hpp"
+#include "vktApiTests.hpp"
+#include "vktPipelineTests.hpp"
+#include "vktBindingModelTests.hpp"
+
+#include <vector>
+#include <sstream>
+
+namespace // compilation
+{
+
+// Compile GLSL sources into a SPIR-V program binary; the compile log is written to buildInfo.
+vk::ProgramBinary* compileProgram (const glu::ProgramSources& source, glu::ShaderProgramInfo* buildInfo)
+{
+	return vk::buildProgram(source, vk::PROGRAM_FORMAT_SPIRV, buildInfo);
+}
+
+// Assemble SPIR-V assembly text into a program binary; the assembler log is written to buildInfo.
+vk::ProgramBinary* compileProgram (const vk::SpirVAsmSource& source, vk::SpirVProgramInfo* buildInfo)
+{
+	return vk::assembleProgram(source, buildInfo);
+}
+
+// Build (or load from the prebuilt-binary cache) one program and register it in
+// progCollection under (casePath, programName).
+//
+// InfoType/IteratorType select the source language via the compileProgram()
+// overloads above (GLSL vs. SPIR-V assembly).
+//
+// Ownership: the resulting binary is handed to progCollection; the returned raw
+// pointer is a non-owning reference into the collection.
+template <typename InfoType, typename IteratorType>
+vk::ProgramBinary* buildProgram (const std::string& casePath, IteratorType iter, vkt::Context* context, vk::BinaryCollection* progCollection)
+{
+	tcu::TestLog&						log			= context->getTestContext().getLog();
+	const vk::ProgramIdentifier			progId		(casePath, iter.getName());
+	const tcu::ScopedLogSection			progSection	(log, iter.getName(), "Program: " + iter.getName());
+	de::MovePtr<vk::ProgramBinary>		binProg;
+	InfoType							buildInfo;
+
+	try
+	{
+		binProg	= de::MovePtr<vk::ProgramBinary>(compileProgram(iter.getProgram(), &buildInfo));
+		log << buildInfo;
+	}
+	catch (const tcu::NotSupportedError& err)
+	{
+		// Building from source is not available on this platform/build;
+		// fall back to a binary stored in the test archive.
+		// Try to load from cache
+		const vk::BinaryRegistryReader	registry	(context->getTestContext().getArchive(), "vulkan/prebuilt");
+
+		log << err << tcu::TestLog::Message << "Building from source not supported, loading stored binary instead" << tcu::TestLog::EndMessage;
+
+		binProg	= de::MovePtr<vk::ProgramBinary>(registry.loadProgram(progId));
+
+		log << iter.getProgram();
+	}
+	catch (const tcu::Exception&)
+	{
+		// Build failed for other reason: log the build info, then re-throw to fail the case.
+		log << buildInfo;
+		throw;
+	}
+
+	TCU_CHECK_INTERNAL(binProg);
+
+	// Keep a raw pointer before the MovePtr is consumed by the collection below.
+	vk::ProgramBinary* returnBinary	= binProg.get();
+
+	progCollection->add(progId.programName, binProg);
+
+	return returnBinary;
+}
+
+} // anonymous(compilation)
+
+namespace vkt
+{
+
+using std::vector;
+using de::UniquePtr;
+using de::MovePtr;
+using tcu::TestLog;
+
+// TestCaseExecutor
+
+// Executor for vkt test cases: compiles/loads each case's programs in init(),
+// then drives the case's TestInstance in iterate(). Owns the Vulkan library
+// and shared Context for the whole test run.
+class TestCaseExecutor : public tcu::TestCaseExecutor
+{
+public:
+											TestCaseExecutor	(tcu::TestContext& testCtx);
+											~TestCaseExecutor	(void);
+
+	virtual void							init				(tcu::TestCase* testCase, const std::string& path);
+	virtual void							deinit				(tcu::TestCase* testCase);
+
+	virtual tcu::TestNode::IterateResult	iterate				(tcu::TestCase* testCase);
+
+private:
+	vk::BinaryCollection					m_progCollection;	//!< Binaries for the currently-initialized case; cleared in init()
+	de::UniquePtr<vk::Library>				m_library;			//!< Platform Vulkan library; must outlive m_context
+	Context									m_context;			//!< Shared test context built on m_library's platform interface
+
+	TestInstance*							m_instance;			//!< Current test case instance
+};
+
+// Load the platform's Vulkan library; ownership passes to the caller.
+static MovePtr<vk::Library> createLibrary (tcu::TestContext& testCtx)
+{
+	return MovePtr<vk::Library>(testCtx.getPlatform().getVulkanPlatform().createLibrary());
+}
+
+// \note m_library is initialized before m_context (declaration order), which
+//       m_context relies on for the platform interface.
+TestCaseExecutor::TestCaseExecutor (tcu::TestContext& testCtx)
+	: m_library		(createLibrary(testCtx))
+	, m_context		(testCtx, m_library->getPlatformInterface(), m_progCollection)
+	, m_instance	(DE_NULL)
+{
+}
+
+TestCaseExecutor::~TestCaseExecutor (void)
+{
+	// Deleting DE_NULL is a no-op if deinit() already ran.
+	delete m_instance;
+}
+
+// Prepare one test case: (re)build its program binaries into m_progCollection
+// and create its TestInstance. Throws InternalError if the node is not a
+// vkt::TestCase.
+void TestCaseExecutor::init (tcu::TestCase* testCase, const std::string& casePath)
+{
+	const TestCase*			vktCase		= dynamic_cast<TestCase*>(testCase);
+	tcu::TestLog&			log			= m_context.getTestContext().getLog();
+	vk::SourceCollections	sourceProgs;
+
+	// NOTE(review): casePath IS used below (passed to buildProgram); this
+	// DE_UNREF and its \todo look stale and could be dropped.
+	DE_UNREF(casePath); // \todo [2015-03-13 pyry] Use this to identify ProgramCollection storage path
+
+	if (!vktCase)
+		TCU_THROW(InternalError, "Test node not an instance of vkt::TestCase");
+
+	// Binaries from the previous case are discarded before collecting new sources.
+	m_progCollection.clear();
+	vktCase->initPrograms(sourceProgs);
+
+	// Build all GLSL programs and log their SPIR-V disassembly (best-effort).
+	for (vk::GlslSourceCollection::Iterator progIter = sourceProgs.glslSources.begin(); progIter != sourceProgs.glslSources.end(); ++progIter)
+	{
+		vk::ProgramBinary* binProg = buildProgram<glu::ShaderProgramInfo, vk::GlslSourceCollection::Iterator>(casePath, progIter, &m_context, &m_progCollection);
+
+		try
+		{
+			std::ostringstream disasm;
+
+			vk::disassembleSpirV(binProg->getSize(), binProg->getBinary(), &disasm);
+
+			log << TestLog::KernelSource(disasm.str());
+		}
+		catch (const tcu::NotSupportedError& err)
+		{
+			// Disassembler not available in this build; log and continue.
+			log << err;
+		}
+	}
+
+	// Assemble all SPIR-V assembly programs.
+	for (vk::SpirVAsmCollection::Iterator asmIterator = sourceProgs.spirvAsmSources.begin(); asmIterator != sourceProgs.spirvAsmSources.end(); ++asmIterator)
+	{
+		buildProgram<vk::SpirVProgramInfo, vk::SpirVAsmCollection::Iterator>(casePath, asmIterator, &m_context, &m_progCollection);
+	}
+
+	DE_ASSERT(!m_instance);
+	m_instance = vktCase->createInstance(m_context);
+}
+
+// Destroy the current case's TestInstance. Safe to call once per init().
+void TestCaseExecutor::deinit (tcu::TestCase*)
+{
+	delete m_instance;
+	m_instance = DE_NULL;
+}
+
+// Run one iteration of the current TestInstance. When the instance reports a
+// complete tcu::TestStatus, the result is copied to the test context and
+// iteration stops; otherwise the framework is asked to call again.
+tcu::TestNode::IterateResult TestCaseExecutor::iterate (tcu::TestCase*)
+{
+	DE_ASSERT(m_instance);
+
+	const tcu::TestStatus	result	= m_instance->iterate();
+
+	if (result.isComplete())
+	{
+		// Vulkan tests shouldn't set result directly
+		DE_ASSERT(m_context.getTestContext().getTestResult() == QP_TEST_RESULT_LAST);
+		m_context.getTestContext().setTestResult(result.getCode(), result.getDescription().c_str());
+		return tcu::TestNode::STOP;
+	}
+	else
+		return tcu::TestNode::CONTINUE;
+}
+
+// TestPackage
+
+// Root test package "dEQP-VK" for the Vulkan conformance tests.
+TestPackage::TestPackage (tcu::TestContext& testCtx)
+	: tcu::TestPackage(testCtx, "dEQP-VK", "dEQP Vulkan Tests")
+{
+}
+
+TestPackage::~TestPackage (void)
+{
+}
+
+// Caller (test framework) takes ownership of the returned executor.
+tcu::TestCaseExecutor* TestPackage::createExecutor (void) const
+{
+	return new TestCaseExecutor(m_testCtx);
+}
+
+// Register the top-level test group hierarchy: info, api, pipeline and
+// binding-model groups.
+void TestPackage::init (void)
+{
+	addChild(createInfoTests	(m_testCtx));
+	addChild(api::createTests	(m_testCtx));
+	addChild(pipeline::createTests	(m_testCtx));
+	addChild(BindingModel::createTests	(m_testCtx));
+}
+
+} // vkt
--- /dev/null
+#ifndef _VKTTESTPACKAGE_HPP
+#define _VKTTESTPACKAGE_HPP
+/*-------------------------------------------------------------------------
+ * Vulkan Conformance Tests
+ * ------------------------
+ *
+ * Copyright (c) 2015 Google Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and/or associated documentation files (the
+ * "Materials"), to deal in the Materials without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Materials, and to
+ * permit persons to whom the Materials are furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice(s) and this permission notice shall be
+ * included in all copies or substantial portions of the Materials.
+ *
+ * The Materials are Confidential Information as defined by the
+ * Khronos Membership Agreement until designated non-confidential by
+ * Khronos, at which point this condition clause shall be removed.
+ *
+ * THE MATERIALS ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * MATERIALS OR THE USE OR OTHER DEALINGS IN THE MATERIALS.
+ *
+ *//*!
+ * \file
+ * \brief Vulkan Test Package
+ *//*--------------------------------------------------------------------*/
+
+#include "tcuDefs.hpp"
+#include "tcuTestPackage.hpp"
+#include "tcuResource.hpp"
+#include "vktTestCase.hpp"
+
+namespace vkt
+{
+
+// Root package for Vulkan conformance tests; registered under "dEQP-VK".
+// createExecutor() supplies the TestCaseExecutor that builds programs and
+// drives TestInstances (see vktTestPackage.cpp).
+class TestPackage : public tcu::TestPackage
+{
+public:
+							TestPackage			(tcu::TestContext& testCtx);
+	virtual					~TestPackage		(void);
+
+	virtual void			init				(void);
+	tcu::TestCaseExecutor*	createExecutor		(void) const;
+};
+
+} // vkt
+
+#endif // _VKTTESTPACKAGE_HPP
--- /dev/null
+/*-------------------------------------------------------------------------
+ * Vulkan Conformance Tests
+ * ------------------------
+ *
+ * Copyright (c) 2015 Google Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and/or associated documentation files (the
+ * "Materials"), to deal in the Materials without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Materials, and to
+ * permit persons to whom the Materials are furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice(s) and this permission notice shall be
+ * included in all copies or substantial portions of the Materials.
+ *
+ * The Materials are Confidential Information as defined by the
+ * Khronos Membership Agreement until designated non-confidential by
+ * Khronos, at which point this condition clause shall be removed.
+ *
+ * THE MATERIALS ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * MATERIALS OR THE USE OR OTHER DEALINGS IN THE MATERIALS.
+ *
+ *//*!
+ * \file
+ * \brief Vulkan Test Package Entry Point.
+ *//*--------------------------------------------------------------------*/
+
+#include "vktTestPackage.hpp"
+
+// Register package to test executor.
+
+// Factory hook invoked by the executor's package registry.
+static tcu::TestPackage* createTestPackage (tcu::TestContext& testCtx)
+{
+	return new vkt::TestPackage(testCtx);
+}
+
+// Static registration: makes "dEQP-VK" discoverable by name at startup.
+tcu::TestPackageDescriptor g_vktPackageDescriptor("dEQP-VK", createTestPackage);
--- /dev/null
+# Vulkan conformance test modules
+add_subdirectory(vulkan)
DE_DECLARE_COMMAND_LINE_OPT(EGLPixmapType, std::string);
DE_DECLARE_COMMAND_LINE_OPT(LogImages, bool);
DE_DECLARE_COMMAND_LINE_OPT(TestOOM, bool);
+DE_DECLARE_COMMAND_LINE_OPT(VKDeviceID, int);
static void parseIntList (const char* src, std::vector<int>* dst)
{
<< Option<EGLDisplayType> (DE_NULL, "deqp-egl-display-type", "EGL native display type")
<< Option<EGLWindowType> (DE_NULL, "deqp-egl-window-type", "EGL native window type")
<< Option<EGLPixmapType> (DE_NULL, "deqp-egl-pixmap-type", "EGL native pixmap type")
+ << Option<VKDeviceID> (DE_NULL, "deqp-vk-device-id", "Vulkan device ID (IDs start from 1)", "1")
<< Option<LogImages> (DE_NULL, "deqp-log-images", "Enable or disable logging of result images", s_enableNames, "enable")
<< Option<TestOOM> (DE_NULL, "deqp-test-oom", "Run tests that exhaust memory on purpose", s_enableNames, TEST_OOM_DEFAULT);
}
return isOk;
}
-const char* CommandLine::getLogFileName (void) const { return m_cmdLine.getOption<opt::LogFilename>().c_str(); }
-deUint32 CommandLine::getLogFlags (void) const { return m_logFlags; }
-RunMode CommandLine::getRunMode (void) const { return m_cmdLine.getOption<opt::RunMode>(); }
-const char* CommandLine::getCaseListExportFile (void) const { return m_cmdLine.getOption<opt::ExportFilenamePattern>().c_str();}
-WindowVisibility CommandLine::getVisibility (void) const { return m_cmdLine.getOption<opt::Visibility>(); }
-bool CommandLine::isWatchDogEnabled (void) const { return m_cmdLine.getOption<opt::WatchDog>(); }
-bool CommandLine::isCrashHandlingEnabled (void) const { return m_cmdLine.getOption<opt::CrashHandler>(); }
-int CommandLine::getBaseSeed (void) const { return m_cmdLine.getOption<opt::BaseSeed>(); }
-int CommandLine::getTestIterationCount (void) const { return m_cmdLine.getOption<opt::TestIterationCount>(); }
-int CommandLine::getSurfaceWidth (void) const { return m_cmdLine.getOption<opt::SurfaceWidth>(); }
-int CommandLine::getSurfaceHeight (void) const { return m_cmdLine.getOption<opt::SurfaceHeight>(); }
-SurfaceType CommandLine::getSurfaceType (void) const { return m_cmdLine.getOption<opt::SurfaceType>(); }
-ScreenRotation CommandLine::getScreenRotation (void) const { return m_cmdLine.getOption<opt::ScreenRotation>(); }
-int CommandLine::getGLConfigId (void) const { return m_cmdLine.getOption<opt::GLConfigID>(); }
-int CommandLine::getCLPlatformId (void) const { return m_cmdLine.getOption<opt::CLPlatformID>(); }
-const std::vector<int>& CommandLine::getCLDeviceIds (void) const { return m_cmdLine.getOption<opt::CLDeviceIDs>(); }
-bool CommandLine::isOutOfMemoryTestEnabled (void) const { return m_cmdLine.getOption<opt::TestOOM>(); }
+const char* CommandLine::getLogFileName (void) const { return m_cmdLine.getOption<opt::LogFilename>().c_str(); }
+deUint32 CommandLine::getLogFlags (void) const { return m_logFlags; }
+RunMode CommandLine::getRunMode (void) const { return m_cmdLine.getOption<opt::RunMode>(); }
+const char* CommandLine::getCaseListExportFile (void) const { return m_cmdLine.getOption<opt::ExportFilenamePattern>().c_str(); }
+WindowVisibility CommandLine::getVisibility (void) const { return m_cmdLine.getOption<opt::Visibility>(); }
+bool CommandLine::isWatchDogEnabled (void) const { return m_cmdLine.getOption<opt::WatchDog>(); }
+bool CommandLine::isCrashHandlingEnabled (void) const { return m_cmdLine.getOption<opt::CrashHandler>(); }
+int CommandLine::getBaseSeed (void) const { return m_cmdLine.getOption<opt::BaseSeed>(); }
+int CommandLine::getTestIterationCount (void) const { return m_cmdLine.getOption<opt::TestIterationCount>(); }
+int CommandLine::getSurfaceWidth (void) const { return m_cmdLine.getOption<opt::SurfaceWidth>(); }
+int CommandLine::getSurfaceHeight (void) const { return m_cmdLine.getOption<opt::SurfaceHeight>(); }
+SurfaceType CommandLine::getSurfaceType (void) const { return m_cmdLine.getOption<opt::SurfaceType>(); }
+ScreenRotation CommandLine::getScreenRotation (void) const { return m_cmdLine.getOption<opt::ScreenRotation>(); }
+int CommandLine::getGLConfigId (void) const { return m_cmdLine.getOption<opt::GLConfigID>(); }
+int CommandLine::getCLPlatformId (void) const { return m_cmdLine.getOption<opt::CLPlatformID>(); }
+const std::vector<int>& CommandLine::getCLDeviceIds (void) const { return m_cmdLine.getOption<opt::CLDeviceIDs>(); }
+int CommandLine::getVKDeviceId (void) const { return m_cmdLine.getOption<opt::VKDeviceID>(); }
+bool CommandLine::isOutOfMemoryTestEnabled (void) const { return m_cmdLine.getOption<opt::TestOOM>(); }
const char* CommandLine::getGLContextType (void) const
{
//! Get EGL native pixmap factory (--deqp-egl-pixmap-type)
const char* getEGLPixmapType (void) const;
+ //! Get Vulkan device ID (--deqp-vk-device-id)
+ int getVKDeviceId (void) const;
+
//! Should we run tests that exhaust memory (--deqp-test-oom)
bool isOutOfMemoryTestEnabled(void) const;
const glu::Platform& Platform::getGLPlatform (void) const
{
- throw tcu::NotSupportedError("OpenGL (ES) is not supported", DE_NULL, __FILE__, __LINE__);
+ TCU_THROW(NotSupportedError, "OpenGL (ES) is not supported");
}
const eglu::Platform& Platform::getEGLPlatform (void) const
{
- throw tcu::NotSupportedError("EGL is not supported", DE_NULL, __FILE__, __LINE__);
+ TCU_THROW(NotSupportedError, "EGL is not supported");
+}
+
+const vk::Platform& Platform::getVulkanPlatform (void) const
+{
+ TCU_THROW(NotSupportedError, "Vulkan is not supported");
}
} // tcu
class Platform;
}
+namespace vk
+{
+class Platform;
+}
+
namespace tcu
{
* \return Reference to EGL platform interface.
*//*--------------------------------------------------------------------*/
virtual const eglu::Platform& getEGLPlatform (void) const;
+
+ virtual const vk::Platform& getVulkanPlatform (void) const;
};
} // tcu
// combined formats have no single channel class, detect format manually
switch (format.type)
{
+ case tcu::TextureFormat::FLOAT_UNSIGNED_INT_8: return false;
case tcu::TextureFormat::FLOAT_UNSIGNED_INT_24_8_REV: return false;
+ case tcu::TextureFormat::UNSIGNED_INT_16_8: return true;
case tcu::TextureFormat::UNSIGNED_INT_24_8: return true;
default:
#endif
}
+// Read the low 8 bits of a packed 24-bit (3-byte) value stored in native byte order.
+inline deUint32 readUint24Low8 (const deUint8* src)
+{
+#if (DE_ENDIANNESS == DE_LITTLE_ENDIAN)
+	const deUint32 uint24ByteOffsetBits0To8	= 0; //!< least significant byte in the lowest address
+#else
+	const deUint32 uint24ByteOffsetBits0To8	= 2; //!< least significant byte in the highest address
+#endif
+
+	return src[uint24ByteOffsetBits0To8];
+}
+
+// Write the low 8 bits of a packed 24-bit (3-byte) value; the other two bytes are untouched.
+inline void writeUint24Low8 (deUint8* dst, deUint8 val)
+{
+#if (DE_ENDIANNESS == DE_LITTLE_ENDIAN)
+	const deUint32 uint24ByteOffsetBits0To8	= 0; //!< least significant byte in the lowest address
+#else
+	const deUint32 uint24ByteOffsetBits0To8	= 2; //!< least significant byte in the highest address
+#endif
+
+	dst[uint24ByteOffsetBits0To8] = val;
+}
+
+// Read the high 16 bits (bits 8..23) of a packed 24-bit value in native byte order.
+inline deUint32 readUint24High16 (const deUint8* src)
+{
+#if (DE_ENDIANNESS == DE_LITTLE_ENDIAN)
+	return	(((deUint32)src[1]) <<  0u) |
+			(((deUint32)src[2]) <<  8u);
+#else
+	return	(((deUint32)src[0]) <<  8u) |
+			(((deUint32)src[1]) <<  0u);
+#endif
+}
+
+// Write the high 16 bits (bits 8..23) of a packed 24-bit value; the low byte is untouched.
+inline void writeUint24High16 (deUint8* dst, deUint16 val)
+{
+#if (DE_ENDIANNESS == DE_LITTLE_ENDIAN)
+	dst[1] = (deUint8)((val & (deUint16)0x00FFu) >> 0u);
+	dst[2] = (deUint8)((val & (deUint16)0xFF00u) >> 8u);
+#else
+	dst[0] = (deUint8)((val & (deUint16)0xFF00u) >> 8u);
+	dst[1] = (deUint8)((val & (deUint16)0x00FFu) >> 0u);
+#endif
+}
+
inline deUint8 readUint32Low8 (const deUint8* src)
{
#if (DE_ENDIANNESS == DE_LITTLE_ENDIAN)
int getChannelSize (TextureFormat::ChannelType type)
{
// make sure this table is updated if format table is updated
- DE_STATIC_ASSERT(TextureFormat::CHANNELTYPE_LAST == 27);
+ DE_STATIC_ASSERT(TextureFormat::CHANNELTYPE_LAST == 29);
switch (type)
{
inline float channelToFloat (const deUint8* value, TextureFormat::ChannelType type)
{
// make sure this table is updated if format table is updated
- DE_STATIC_ASSERT(TextureFormat::CHANNELTYPE_LAST == 27);
+ DE_STATIC_ASSERT(TextureFormat::CHANNELTYPE_LAST == 29);
switch (type)
{
inline int channelToInt (const deUint8* value, TextureFormat::ChannelType type)
{
// make sure this table is updated if format table is updated
- DE_STATIC_ASSERT(TextureFormat::CHANNELTYPE_LAST == 27);
+ DE_STATIC_ASSERT(TextureFormat::CHANNELTYPE_LAST == 29);
switch (type)
{
void floatToChannel (deUint8* dst, float src, TextureFormat::ChannelType type)
{
// make sure this table is updated if format table is updated
- DE_STATIC_ASSERT(TextureFormat::CHANNELTYPE_LAST == 27);
+ DE_STATIC_ASSERT(TextureFormat::CHANNELTYPE_LAST == 29);
switch (type)
{
void intToChannel (deUint8* dst, int src, TextureFormat::ChannelType type)
{
// make sure this table is updated if format table is updated
- DE_STATIC_ASSERT(TextureFormat::CHANNELTYPE_LAST == 27);
+ DE_STATIC_ASSERT(TextureFormat::CHANNELTYPE_LAST == 29);
switch (type)
{
DE_ASSERT(order == RGBA);
return 4;
}
+ else if (type == UNSIGNED_INT_16_8)
+ {
+ DE_ASSERT(order == D || order == DS);
+ return 3;
+ }
else if (type == UNSIGNED_INT_24_8)
{
DE_ASSERT(order == D || order == DS);
return 4;
}
+ else if (type == FLOAT_UNSIGNED_INT_8)
+ {
+ DE_ASSERT(order == DS);
+ return 5;
+ }
else if (type == FLOAT_UNSIGNED_INT_24_8_REV)
{
DE_ASSERT(order == DS);
const deUint8* const pixelPtr = (const deUint8*)getPixelPtr(x, y, z);
- DE_ASSERT(m_format.order == TextureFormat::DS || m_format.order == TextureFormat::D);
-
switch (m_format.type)
{
- case TextureFormat::UNSIGNED_INT_24_8:
- switch (m_format.order)
- {
- case TextureFormat::D:
- case TextureFormat::DS: // \note Fall-through.
- return (float)readUint32High24(pixelPtr) / 16777215.0f;
+ case TextureFormat::UNSIGNED_INT_16_8:
+ DE_ASSERT(m_format.order == TextureFormat::DS);
+ return (float)readUint24High16(pixelPtr) / 65535.0f;
- default:
- DE_ASSERT(false);
- return 0.0f;
- }
+ case TextureFormat::UNSIGNED_INT_24_8:
+ DE_ASSERT(m_format.order == TextureFormat::DS);
+ return (float)readUint32High24(pixelPtr) / 16777215.0f;
+ case TextureFormat::FLOAT_UNSIGNED_INT_8:
case TextureFormat::FLOAT_UNSIGNED_INT_24_8_REV:
DE_ASSERT(m_format.order == TextureFormat::DS);
return *((const float*)pixelPtr);
switch (m_format.type)
{
+ case TextureFormat::UNSIGNED_INT_16_8:
+ DE_ASSERT(m_format.order == TextureFormat::DS);
+ return (int)readUint24Low8(pixelPtr);
+
case TextureFormat::UNSIGNED_INT_24_8:
- switch (m_format.order)
- {
- case TextureFormat::S:
- case TextureFormat::DS:
- return (int)readUint32Low8(pixelPtr);
+ DE_ASSERT(m_format.order == TextureFormat::DS);
+ return (int)readUint32Low8(pixelPtr);
- default:
- DE_ASSERT(false);
- return 0;
- }
+ case TextureFormat::FLOAT_UNSIGNED_INT_8:
+ DE_ASSERT(m_format.order == TextureFormat::DS);
+ return (int)pixelPtr[4];
case TextureFormat::FLOAT_UNSIGNED_INT_24_8_REV:
DE_ASSERT(m_format.order == TextureFormat::DS);
switch (m_format.type)
{
- case TextureFormat::UNSIGNED_INT_24_8:
- switch (m_format.order)
- {
- case TextureFormat::D:
- case TextureFormat::DS:
- writeUint32High24(pixelPtr, convertSatRteUint24(depth * 16777215.0f));
- break;
+ case TextureFormat::UNSIGNED_INT_16_8:
+ DE_ASSERT(m_format.order == TextureFormat::DS);
+ writeUint24High16(pixelPtr, convertSatRte<deUint16>(depth * 65535.0f));
+ break;
- default:
- DE_ASSERT(false);
- }
+ case TextureFormat::UNSIGNED_INT_24_8:
+ DE_ASSERT(m_format.order == TextureFormat::DS);
+ writeUint32High24(pixelPtr, convertSatRteUint24(depth * 16777215.0f));
break;
+ case TextureFormat::FLOAT_UNSIGNED_INT_8:
case TextureFormat::FLOAT_UNSIGNED_INT_24_8_REV:
DE_ASSERT(m_format.order == TextureFormat::DS);
*((float*)pixelPtr) = depth;
switch (m_format.type)
{
+ case TextureFormat::UNSIGNED_INT_16_8:
+ DE_ASSERT(m_format.order == TextureFormat::DS);
+ writeUint24Low8(pixelPtr, convertSat<deUint8>((deUint32)stencil));
+ break;
+
case TextureFormat::UNSIGNED_INT_24_8:
- switch (m_format.order)
- {
- case TextureFormat::S:
- case TextureFormat::DS:
- writeUint32Low8(pixelPtr, convertSat<deUint8>((deUint32)stencil));
- break;
+ DE_ASSERT(m_format.order == TextureFormat::DS);
+ writeUint32Low8(pixelPtr, convertSat<deUint8>((deUint32)stencil));
+ break;
- default:
- DE_ASSERT(false);
- }
+		case TextureFormat::FLOAT_UNSIGNED_INT_8:
+			DE_ASSERT(m_format.order == TextureFormat::DS);
+			// Fix: actually store the stencil value. Without this write the case
+			// fell straight to break and silently dropped the stencil; the matching
+			// readPixStencil reads byte 4 (after the 32-bit float depth).
+			pixelPtr[4] = convertSat<deUint8>((deUint32)stencil);
break;
case TextureFormat::FLOAT_UNSIGNED_INT_24_8_REV:
"UNSIGNED_INT_1010102_REV",
"UNSIGNED_INT_11F_11F_10F_REV",
"UNSIGNED_INT_999_E5_REV",
+ "UNSIGNED_INT_16_8",
"UNSIGNED_INT_24_8",
"SIGNED_INT8",
"SIGNED_INT16",
"UNSIGNED_INT32",
"HALF_FLOAT",
"FLOAT",
+ "FLOAT_UNSIGNED_INT_8",
"FLOAT_UNSIGNED_INT_24_8_REV"
};
UNSIGNED_INT_1010102_REV,
UNSIGNED_INT_11F_11F_10F_REV,
UNSIGNED_INT_999_E5_REV,
+ UNSIGNED_INT_16_8,
UNSIGNED_INT_24_8,
SIGNED_INT8,
SIGNED_INT16,
UNSIGNED_INT32,
HALF_FLOAT,
FLOAT,
+ FLOAT_UNSIGNED_INT_8,
FLOAT_UNSIGNED_INT_24_8_REV,
CHANNELTYPE_LAST
bool isCombinedDepthStencilType (TextureFormat::ChannelType type)
{
// make sure to update this if type table is updated
- DE_STATIC_ASSERT(TextureFormat::CHANNELTYPE_LAST == 27);
+ DE_STATIC_ASSERT(TextureFormat::CHANNELTYPE_LAST == 29);
- return type == TextureFormat::UNSIGNED_INT_24_8 ||
+ return type == TextureFormat::UNSIGNED_INT_16_8 ||
+ type == TextureFormat::UNSIGNED_INT_24_8 ||
+ type == TextureFormat::FLOAT_UNSIGNED_INT_8 ||
type == TextureFormat::FLOAT_UNSIGNED_INT_24_8_REV;
}
TextureChannelClass getTextureChannelClass (TextureFormat::ChannelType channelType)
{
// make sure this table is updated if format table is updated
- DE_STATIC_ASSERT(TextureFormat::CHANNELTYPE_LAST == 27);
+ DE_STATIC_ASSERT(TextureFormat::CHANNELTYPE_LAST == 29);
switch (channelType)
{
case TextureFormat::UNSIGNED_INT_1010102_REV: return TEXTURECHANNELCLASS_UNSIGNED_INTEGER;
case TextureFormat::UNSIGNED_INT_11F_11F_10F_REV: return TEXTURECHANNELCLASS_FLOATING_POINT;
case TextureFormat::UNSIGNED_INT_999_E5_REV: return TEXTURECHANNELCLASS_FLOATING_POINT;
+ case TextureFormat::UNSIGNED_INT_16_8: return TEXTURECHANNELCLASS_LAST; //!< packed unorm16-uint8
case TextureFormat::UNSIGNED_INT_24_8: return TEXTURECHANNELCLASS_LAST; //!< packed unorm24-uint8
case TextureFormat::SIGNED_INT8: return TEXTURECHANNELCLASS_SIGNED_INTEGER;
case TextureFormat::SIGNED_INT16: return TEXTURECHANNELCLASS_SIGNED_INTEGER;
case TextureFormat::UNSIGNED_INT32: return TEXTURECHANNELCLASS_UNSIGNED_INTEGER;
case TextureFormat::HALF_FLOAT: return TEXTURECHANNELCLASS_FLOATING_POINT;
case TextureFormat::FLOAT: return TEXTURECHANNELCLASS_FLOATING_POINT;
+ case TextureFormat::FLOAT_UNSIGNED_INT_8: return TEXTURECHANNELCLASS_LAST; //!< packed float32-uint8
case TextureFormat::FLOAT_UNSIGNED_INT_24_8_REV: return TEXTURECHANNELCLASS_LAST; //!< packed float32-pad24-uint8
default: return TEXTURECHANNELCLASS_LAST;
}
static Vec2 getFloatChannelValueRange (TextureFormat::ChannelType channelType)
{
// make sure this table is updated if format table is updated
- DE_STATIC_ASSERT(TextureFormat::CHANNELTYPE_LAST == 27);
+ DE_STATIC_ASSERT(TextureFormat::CHANNELTYPE_LAST == 29);
float cMin = 0.0f;
float cMax = 0.0f;
static IVec4 getChannelBitDepth (TextureFormat::ChannelType channelType)
{
// make sure this table is updated if format table is updated
- DE_STATIC_ASSERT(TextureFormat::CHANNELTYPE_LAST == 27);
+ DE_STATIC_ASSERT(TextureFormat::CHANNELTYPE_LAST == 29);
switch (channelType)
{
case TextureFormat::UNSIGNED_INT24: return IVec4(24);
case TextureFormat::UNSIGNED_INT32: return IVec4(32);
case TextureFormat::UNSIGNED_INT_1010102_REV: return IVec4(10,10,10,2);
+ case TextureFormat::UNSIGNED_INT_16_8: return IVec4(16,8,0,0);
case TextureFormat::UNSIGNED_INT_24_8: return IVec4(24,8,0,0);
case TextureFormat::HALF_FLOAT: return IVec4(16);
case TextureFormat::FLOAT: return IVec4(32);
case TextureFormat::UNSIGNED_INT_11F_11F_10F_REV: return IVec4(11,11,10,0);
case TextureFormat::UNSIGNED_INT_999_E5_REV: return IVec4(9,9,9,0);
+ case TextureFormat::FLOAT_UNSIGNED_INT_8: return IVec4(32,8,0,0);
case TextureFormat::FLOAT_UNSIGNED_INT_24_8_REV: return IVec4(32,8,0,0);
default:
DE_ASSERT(false);
static IVec4 getChannelMantissaBitDepth (TextureFormat::ChannelType channelType)
{
// make sure this table is updated if format table is updated
- DE_STATIC_ASSERT(TextureFormat::CHANNELTYPE_LAST == 27);
+ DE_STATIC_ASSERT(TextureFormat::CHANNELTYPE_LAST == 29);
switch (channelType)
{
case TextureFormat::UNSIGNED_INT24:
case TextureFormat::UNSIGNED_INT32:
case TextureFormat::UNSIGNED_INT_1010102_REV:
+ case TextureFormat::UNSIGNED_INT_16_8:
case TextureFormat::UNSIGNED_INT_24_8:
case TextureFormat::UNSIGNED_INT_999_E5_REV:
return getChannelBitDepth(channelType);
case TextureFormat::HALF_FLOAT: return IVec4(10);
case TextureFormat::FLOAT: return IVec4(23);
case TextureFormat::UNSIGNED_INT_11F_11F_10F_REV: return IVec4(6,6,5,0);
+ case TextureFormat::FLOAT_UNSIGNED_INT_8: return IVec4(23,8,0,0);
case TextureFormat::FLOAT_UNSIGNED_INT_24_8_REV: return IVec4(23,8,0,0);
default:
DE_ASSERT(false);
static AccessType toSamplerAccess (const AccessType& baseAccess, Sampler::DepthStencilMode mode)
{
// make sure to update this if type table is updated
- DE_STATIC_ASSERT(TextureFormat::CHANNELTYPE_LAST == 27);
+ DE_STATIC_ASSERT(TextureFormat::CHANNELTYPE_LAST == 29);
if (!isCombinedDepthStencilType(baseAccess.getFormat().type))
return baseAccess;
else
{
#if (DE_ENDIANNESS == DE_LITTLE_ENDIAN)
- const deUint32 uint32ByteOffsetBits0To8 = 0; //!< least significant byte in the lowest address
- const deUint32 uint32ByteOffset8To32 = 1;
+ const deUint32 uint24ByteOffsetBits0To8 = 0; //!< least significant byte in the lowest address
+ const deUint32 uint24ByteOffsetBits8To24 = 1;
+ const deUint32 uint32ByteOffsetBits0To8 = 0;
+ const deUint32 uint32ByteOffsetBits8To32 = 1;
#else
- const deUint32 uint32ByteOffsetBits0To8 = 3; //!< least significant byte in the highest address
- const deUint32 uint32ByteOffset8To32 = 0;
+ const deUint32 uint24ByteOffsetBits0To8 = 2; //!< least significant byte in the highest address
+ const deUint32 uint24ByteOffsetBits8To24 = 0;
+ const deUint32 uint32ByteOffsetBits0To8 = 3;
+ const deUint32 uint32ByteOffsetBits8To32 = 0;
#endif
// Sampled channel must exist
// combined formats have multiple channel classes, detect on sampler settings
switch (baseAccess.getFormat().type)
{
+ case TextureFormat::FLOAT_UNSIGNED_INT_8:
+ {
+ if (mode == Sampler::MODE_DEPTH)
+ {
+ // select the float component
+ return AccessType(TextureFormat(TextureFormat::D, TextureFormat::FLOAT),
+ baseAccess.getSize(),
+ baseAccess.getPitch(),
+ baseAccess.getDataPtr());
+ }
+ else if (mode == Sampler::MODE_STENCIL)
+ {
+ // select the uint 8 component
+ return AccessType(TextureFormat(TextureFormat::S, TextureFormat::UNSIGNED_INT8),
+ baseAccess.getSize(),
+ baseAccess.getPitch(),
+ addOffset(baseAccess.getDataPtr(), 4));
+ }
+ else
+ {
+ // unknown sampler mode
+ DE_ASSERT(false);
+ return AccessType();
+ }
+ }
+
case TextureFormat::FLOAT_UNSIGNED_INT_24_8_REV:
{
if (mode == Sampler::MODE_DEPTH)
}
}
+ case TextureFormat::UNSIGNED_INT_16_8:
+ {
+ if (mode == Sampler::MODE_DEPTH)
+ {
+ // select the unorm16 component
+ return AccessType(TextureFormat(TextureFormat::D, TextureFormat::UNORM_INT16),
+ baseAccess.getSize(),
+ baseAccess.getPitch(),
+ addOffset(baseAccess.getDataPtr(), uint24ByteOffsetBits8To24));
+ }
+ else if (mode == Sampler::MODE_STENCIL)
+ {
+ // select the uint 8 component
+ return AccessType(TextureFormat(TextureFormat::S, TextureFormat::UNSIGNED_INT8),
+ baseAccess.getSize(),
+ baseAccess.getPitch(),
+ addOffset(baseAccess.getDataPtr(), uint24ByteOffsetBits0To8));
+ }
+ else
+ {
+ // unknown sampler mode
+ DE_ASSERT(false);
+ return AccessType();
+ }
+ }
+
case TextureFormat::UNSIGNED_INT_24_8:
{
if (mode == Sampler::MODE_DEPTH)
return AccessType(TextureFormat(TextureFormat::D, TextureFormat::UNORM_INT24),
baseAccess.getSize(),
baseAccess.getPitch(),
- addOffset(baseAccess.getDataPtr(), uint32ByteOffset8To32));
+ addOffset(baseAccess.getDataPtr(), uint32ByteOffsetBits8To32));
}
else if (mode == Sampler::MODE_STENCIL)
{
endif ()
add_library(tcutil-platform STATIC ${TCUTIL_PLATFORM_SRCS})
+
+# Add vkutil to the deps before tcutil so that it picks up the c++11 dependencies
+target_link_libraries(tcutil-platform vkutil)
+
target_link_libraries(tcutil-platform tcutil ${TCUTIL_PLATFORM_LIBS})
# Always link to glutil as some platforms such as Win32 always support GL
#include "deUniquePtr.hpp"
#include "gluPlatform.hpp"
+#include "vkPlatform.hpp"
#include "tcuX11.hpp"
+#include "tcuFunctionLibrary.hpp"
#if defined (DEQP_SUPPORT_GLX)
# include "tcuX11GlxPlatform.hpp"
}
};
+// Wraps the system Vulkan loader: opens libvulkan.so dynamically and exposes
+// a vk::PlatformDriver built on top of it. The driver presumably resolves the
+// platform-level entry points via the function library — see vkPlatform.hpp.
+class VulkanLibrary : public vk::Library
+{
+public:
+	VulkanLibrary (void)
+		: m_library	("libvulkan.so")
+		, m_driver	(m_library)
+	{
+	}
+
+	const vk::PlatformInterface& getPlatformInterface (void) const
+	{
+		return m_driver;
+	}
+
+private:
+	// \note Declaration order matters: m_driver is constructed from, and must
+	//       be destroyed before, m_library.
+	const tcu::DynamicFunctionLibrary	m_library;
+	const vk::PlatformDriver			m_driver;
+};
+
+// X11 Vulkan platform: hands out VulkanLibrary instances (caller owns them).
+class X11VulkanPlatform : public vk::Platform
+{
+public:
+	vk::Library* createLibrary (void) const
+	{
+		return new VulkanLibrary();
+	}
+};
+
class X11Platform : public tcu::Platform
{
public:
- X11Platform (void);
- bool processEvents (void) { return !m_eventState.getQuitFlag(); }
- const glu::Platform& getGLPlatform (void) const { return m_glPlatform; }
+ X11Platform (void);
+ bool processEvents (void) { return !m_eventState.getQuitFlag(); }
+ const glu::Platform& getGLPlatform (void) const { return m_glPlatform; }
#if defined (DEQP_SUPPORT_EGL)
- const eglu::Platform& getEGLPlatform (void) const { return m_eglPlatform; }
+ const eglu::Platform& getEGLPlatform (void) const { return m_eglPlatform; }
#endif // DEQP_SUPPORT_EGL
+ const vk::Platform& getVulkanPlatform (void) const { return m_vkPlatform; }
+
private:
EventState m_eventState;
#if defined (DEQP_SUPPORT_EGL)
x11::egl::Platform m_eglPlatform;
#endif // DEQP_SPPORT_EGL
X11GLPlatform m_glPlatform;
+ X11VulkanPlatform m_vkPlatform;
};
X11Platform::X11Platform (void)
#include "egluUtil.hpp"
#include "eglwLibrary.hpp"
#include "eglwEnums.hpp"
+#include "tcuFunctionLibrary.hpp"
// Assume no call translation is needed
#include <android/native_window.h>
return new NativeDisplay();
}
+// Vulkan
+
+// vk::Library implementation for Android: opens the system Vulkan library
+// ("libvulkan.so") at runtime through tcu::DynamicFunctionLibrary and exposes
+// its entry points via a vk::PlatformDriver.
+class VulkanLibrary : public vk::Library
+{
+public:
+	VulkanLibrary (void)
+		: m_library	("libvulkan.so")
+		, m_driver	(m_library)
+	{
+	}
+
+	const vk::PlatformInterface&	getPlatformInterface (void) const
+	{
+		return m_driver;
+	}
+
+private:
+	// Declaration order matters: m_driver is constructed from m_library in the
+	// initializer list above, so m_library must be declared (and built) first.
+	const tcu::DynamicFunctionLibrary	m_library;
+	const vk::PlatformDriver			m_driver;
+};
+
+
// Platform
Platform::Platform (void)
return true;
}
+// vk::Platform hook: hands ownership of a freshly allocated VulkanLibrary
+// (wrapping the dynamically loaded libvulkan.so) to the caller.
+vk::Library* Platform::createLibrary (void) const
+{
+	return new VulkanLibrary();
+}
+
} // Android
} // tcu
#include "tcuPlatform.hpp"
#include "egluPlatform.hpp"
#include "gluPlatform.hpp"
+#include "vkPlatform.hpp"
#include "tcuAndroidWindow.hpp"
namespace tcu
namespace Android
{
-class Platform : public tcu::Platform, private eglu::Platform, private glu::Platform
+class Platform : public tcu::Platform, private eglu::Platform, private glu::Platform, private vk::Platform
{
public:
							Platform				(void);
	virtual const glu::Platform&	getGLPlatform			(void) const { return static_cast<const glu::Platform&>(*this); }
	virtual const eglu::Platform&	getEGLPlatform			(void) const { return static_cast<const eglu::Platform&>(*this); }
+	virtual const vk::Platform&		getVulkanPlatform		(void) const { return static_cast<const vk::Platform&>(*this); }
	WindowRegistry&					getWindowRegistry		(void) { return m_windowRegistry; }
+	// Out-of-line vk::Platform hook for loading the Vulkan driver library.
+	vk::Library*					createLibrary			(void) const;
+
private:
	WindowRegistry					m_windowRegistry;
};
#include "tcuNullRenderContext.hpp"
#include "egluNativeDisplay.hpp"
#include "eglwLibrary.hpp"
+#include "vkNullDriver.hpp"
namespace tcu
{
{
}
+// Null-platform vk::Platform hook: instead of loading a real driver, returns
+// the stub implementation from vkNullDriver. Ownership passes to the caller.
+vk::Library* Platform::createLibrary (void) const
+{
+	return vk::createNullDriver();
+}
+
} // null
} // tcu
#include "tcuPlatform.hpp"
#include "gluPlatform.hpp"
#include "egluPlatform.hpp"
+#include "vkPlatform.hpp"
namespace tcu
{
namespace null
{
-class Platform : public tcu::Platform, private glu::Platform, private eglu::Platform
+class Platform : public tcu::Platform, private glu::Platform, private eglu::Platform, private vk::Platform
{
public:
-									Platform			(void);
-	virtual							~Platform			(void);
+										Platform			(void);
+	virtual								~Platform			(void);
+
+	virtual const glu::Platform&		getGLPlatform		(void) const { return static_cast<const glu::Platform&>(*this); }
+	virtual const eglu::Platform&		getEGLPlatform		(void) const { return static_cast<const eglu::Platform&>(*this); }
+	virtual const vk::Platform&			getVulkanPlatform	(void) const { return static_cast<const vk::Platform&>(*this); }
+
+private:
+	// vk::Platform hook; the null platform's implementation returns the stub
+	// driver rather than loading a shared library.
+	virtual vk::Library*				createLibrary		(void) const;
-	virtual const glu::Platform&	getGLPlatform		(void) const { return static_cast<const glu::Platform&>(*this); }
-	virtual const eglu::Platform&	getEGLPlatform		(void) const { return static_cast<const eglu::Platform&>(*this); }
};
} // null
#include "tcuWin32Platform.hpp"
#include "tcuWGLContextFactory.hpp"
+#include "tcuFunctionLibrary.hpp"
#if defined(DEQP_SUPPORT_EGL)
# include "tcuWin32EGLNativeDisplayFactory.hpp"
namespace tcu
{
+// vk::Library implementation for Win32: opens the system Vulkan library
+// ("vulkan.dll") at runtime through tcu::DynamicFunctionLibrary and exposes
+// its entry points via a vk::PlatformDriver.
+class VulkanLibrary : public vk::Library
+{
+public:
+	VulkanLibrary (void)
+		: m_library	("vulkan.dll")
+		, m_driver	(m_library)
+	{
+	}
+
+	const vk::PlatformInterface&	getPlatformInterface (void) const
+	{
+		return m_driver;
+	}
+
+private:
+	// Declaration order matters: m_driver is constructed from m_library in the
+	// initializer list above, so m_library must be declared (and built) first.
+	const tcu::DynamicFunctionLibrary	m_library;
+	const vk::PlatformDriver			m_driver;
+};
+
+
Win32Platform::Win32Platform (void)
: m_instance(GetModuleHandle(NULL))
{
return true;
}
+// vk::Platform hook: hands ownership of a freshly allocated VulkanLibrary
+// (wrapping the dynamically loaded vulkan.dll) to the caller.
+vk::Library* Win32Platform::createLibrary (void) const
+{
+	return new VulkanLibrary();
+}
+
} // tcu
// Create platform
#include "tcuDefs.hpp"
#include "tcuPlatform.hpp"
#include "gluPlatform.hpp"
+#include "vkPlatform.hpp"
#include "tcuWin32API.h"
#if defined(DEQP_SUPPORT_EGL)
namespace tcu
{
-class Win32Platform : public tcu::Platform, private glu::Platform
+class Win32Platform : public tcu::Platform, private glu::Platform, private vk::Platform
#if defined(DEQP_SUPPORT_EGL)
				  , private eglu::Platform
#endif
	const eglu::Platform&	getEGLPlatform	(void) const { return static_cast<const eglu::Platform&>(*this); }
#endif
+	// Vulkan support: the platform object itself serves as the vk::Platform.
+	const vk::Platform&		getVulkanPlatform (void) const { return static_cast<const vk::Platform&>(*this); }
+
private:
+	// vk::Platform hook; implemented out of line (loads vulkan.dll).
+	vk::Library*			createLibrary	(void) const;
+
	HINSTANCE				m_instance;
};
self.binName = binName
MODULES = [
- Module("dE-IT", "internal", "de-internal-tests"),
- Module("dEQP-EGL", "egl", "deqp-egl"),
- Module("dEQP-GLES2", "gles2", "deqp-gles2"),
- Module("dEQP-GLES3", "gles3", "deqp-gles3"),
- Module("dEQP-GLES31", "gles31", "deqp-gles31"),
+ Module("dE-IT", "internal", "de-internal-tests"),
+ Module("dEQP-EGL", "egl", "deqp-egl"),
+ Module("dEQP-GLES2", "gles2", "deqp-gles2"),
+ Module("dEQP-GLES3", "gles3", "deqp-gles3"),
+ Module("dEQP-GLES31", "gles31", "deqp-gles31"),
+ Module("dEQP-VK", "../external/vulkancts/modules/vulkan", "deqp-vk"),
]
DEFAULT_BUILD_DIR = os.path.join(tempfile.gettempdir(), "deqp-caselists", "{targetName}-{buildType}")