--- /dev/null
+#
+# Copyright 2015 Google Inc.
+#
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+#
+
+# GYP for building the Vulkan gpu backend
+{
+ 'variables': {
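+    # gyp only lets 'conditions' in this scope test a variable if it was first
+    # defined in a nested 'variables' dict, hence the inner block and the
+    # re-export just below it.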
+ 'variables':
+ {
+ 'vulkan_merged_into_skia%': '0',
+ },
+ 'vulkan_merged_into_skia%': '<(vulkan_merged_into_skia)',
+ 'conditions' : [
+ [ 'vulkan_merged_into_skia == 1', {
+ 'skia_gyp_path%': '../gyp',
+ 'skia_root_path%': '../',
+ 'vulkan_third_party_path%': '..\\third_party',
+ }, {
+ 'skia_gyp_path%': '../skia/gyp',
+ 'skia_root_path%': '../skia',
+ 'vulkan_third_party_path%': '..\\..\\third_party',
+ }],
+ ],
+ },
+ 'target_defaults': {
+ 'defines': [
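+      # VK_PROTOTYPES makes vulkan.h declare prototypes for the core entry points.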
+ 'VK_PROTOTYPES',
+ ],
+ 'conditions': [
+ ['skia_os == "win"', {
+ 'all_dependent_settings': {
+ 'msvs_settings': {
+ 'VCLinkerTool': {
+ 'AdditionalDependencies': [
+ 'vulkan-1.lib',
+ 'shaderc.lib',
+ 'shaderc_util.lib',
+ 'glslang.lib',
+ 'OSDependent.lib',
+ 'OGLCompiler.lib',
+ 'SPIRV-Tools.lib',
+ 'SPIRV.lib',
+ ],
+ },
+ },
+ },
+ 'link_settings': {
+ 'configurations': {
+ 'Debug': {
+ 'msvs_settings': {
+ 'VCLinkerTool': {
+ 'AdditionalLibraryDirectories': [
+ '<(vulkan_third_party_path)\\vulkan\\',
+ '<(vulkan_third_party_path)\\shaderc\\Debug\\',
+ ],
+ },
+ },
+ },
+ 'Release': {
+ 'msvs_settings': {
+ 'VCLinkerTool': {
+ 'AdditionalLibraryDirectories': [
+ '<(vulkan_third_party_path)\\vulkan\\',
+ '<(vulkan_third_party_path)\\shaderc\\Release\\',
+ ],
+ },
+ },
+ },
+ 'Debug_x64': {
+ 'msvs_settings': {
+ 'VCLinkerTool': {
+ 'AdditionalLibraryDirectories': [
+ '<(vulkan_third_party_path)\\vulkan\\',
+ '<(vulkan_third_party_path)\\shaderc\\Debug\\',
+ ],
+ },
+ },
+ },
+ 'Release_x64': {
+ 'msvs_settings': {
+ 'VCLinkerTool': {
+ 'AdditionalLibraryDirectories': [
+ '<(vulkan_third_party_path)\\vulkan\\',
+ '<(vulkan_third_party_path)\\shaderc\\Release\\',
+ ],
+ },
+ },
+ },
+ },
+ },
+ }],
+ ['skia_os != "win"', {
+ 'sources/': [ ['exclude', '_win.(h|cpp)$'],
+ ],
+ }],
+ ['skia_os != "mac"', {
+ 'sources/': [ ['exclude', '_mac.(h|cpp|m|mm)$'],
+ ],
+ }],
+ ['skia_os != "linux" and skia_os != "chromeos"', {
+ 'sources/': [ ['exclude', '_glx.(h|cpp)$'],
+ ],
+ }],
+ ['skia_os != "ios"', {
+ 'sources/': [ ['exclude', '_iOS.(h|cpp|m|mm)$'],
+ ],
+ }],
+ ['skia_os != "android"', {
+ 'sources/': [ ['exclude', '_android.(h|cpp)$'],
+ ],
+ }],
+ ['skia_os != "nacl"', {
+ 'sources/': [ ['exclude', '_nacl.(h|cpp)$'],
+ ],
+ }],
+ ['skia_os == "nacl" or skia_egl == 0', {
+ 'sources/': [ ['exclude', '_egl.(h|cpp)$'],
+ ],
+ }],
+ ['skia_os == "android"', {
+ 'sources/': [ ['exclude', 'GrGLCreateNativeInterface_egl.cpp'],
+ ],
+ }],
+ ['skia_egl == 1', {
+ 'sources/': [ ['exclude', '_glx.(h|cpp)$'],
+ ],
+ }],
+ # nullify the targets in this gyp file if skia_gpu is 0
+ [ 'skia_gpu == 0', {
+ 'sources/': [
+ ['exclude', '.*'],
+ ],
+ 'defines/': [
+ ['exclude', '.*'],
+ ],
+ 'include_dirs/': [
+ ['exclude', '.*'],
+ ],
+ 'link_settings': {
+ 'libraries/': [
+ ['exclude', '.*'],
+ ],
+ },
+ 'direct_dependent_settings': {
+ 'defines/': [
+ ['exclude', '.*'],
+ ],
+ 'include_dirs/': [
+ ['exclude', '.*'],
+ ],
+ },
+ }],
+ ],
+ 'direct_dependent_settings': {
+ 'conditions': [
+ [ 'skia_os == "win"', {
+ 'defines': [
+ 'GR_GL_FUNCTION_TYPE=__stdcall',
+ ],
+ }],
+ ],
+ 'include_dirs': [
+ '../include/gpu',
+ '../third_party/'
+ ],
+ },
+ },
+ 'targets': [
+ {
+ 'target_name': 'skgpu_vk',
+ 'product_name': 'skia_skgpu_vk',
+ 'type': 'static_library',
+ 'standalone_static_library': 1,
+ 'dependencies': [
+ '<(skia_gyp_path)/core.gyp:*',
+ '<(skia_gyp_path)/utils.gyp:utils',
+ '<(skia_gyp_path)/etc1.gyp:libetc1',
+ '<(skia_gyp_path)/ktx.gyp:libSkKTX',
+ ],
+ 'includes': [
+ 'gpuVk.gypi',
+ ],
+ 'include_dirs': [
+ '../include/gpu',
+ '../src/gpu',
+ '../third_party',
+ '<(skia_root_path)/include/gpu',
+ '<(skia_root_path)/include/private',
+ '<(skia_root_path)/src/core',
+ '<(skia_root_path)/src/gpu',
+ '<(skia_root_path)/src/image/',
+ ],
+ 'sources': [
+ '<@(skgpu_vk_sources)',
+ 'gpuVk.gypi', # Makes the gypi appear in IDEs (but does not modify the build).
+ ],
+ },
+ ],
+}
--- /dev/null
+#
+# Copyright 2015 Google Inc.
+#
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+#
+
+# Include this gypi to include all Vulkan 'gpu' files
+# The source paths below are relative to the directory of the gyp file that
+# includes this gypi, so no extra path variables need to be defined.
+#
+{
+ 'variables': {
+ 'skgpu_vk_sources': [
+ '../include/gpu/vk/GrVkInterface.h',
+ '../src/gpu/vk/GrVkBuffer.cpp',
+ '../src/gpu/vk/GrVkBuffer.h',
+ '../src/gpu/vk/GrVkCaps.cpp',
+ '../src/gpu/vk/GrVkCaps.h',
+ '../src/gpu/vk/GrVkCommandBuffer.cpp',
+ '../src/gpu/vk/GrVkCommandBuffer.h',
+ '../src/gpu/vk/GrVkDescriptorPool.cpp',
+ '../src/gpu/vk/GrVkDescriptorPool.h',
+ '../src/gpu/vk/GrVkFramebuffer.cpp',
+ '../src/gpu/vk/GrVkFramebuffer.h',
+ '../src/gpu/vk/GrVkGpu.cpp',
+ '../src/gpu/vk/GrVkGpu.h',
+ '../src/gpu/vk/GrVkImage.cpp',
+ '../src/gpu/vk/GrVkImage.h',
+ '../src/gpu/vk/GrVkImageView.cpp',
+ '../src/gpu/vk/GrVkImageView.h',
+ '../src/gpu/vk/GrVkIndexBuffer.cpp',
+ '../src/gpu/vk/GrVkIndexBuffer.h',
+ '../src/gpu/vk/GrVkInterface.cpp',
+ '../src/gpu/vk/GrVkMemory.cpp',
+ '../src/gpu/vk/GrVkMemory.h',
+ '../src/gpu/vk/GrVkPipeline.cpp',
+ '../src/gpu/vk/GrVkPipeline.h',
+ '../src/gpu/vk/GrVkProgram.cpp',
+ '../src/gpu/vk/GrVkProgram.h',
+ '../src/gpu/vk/GrVkProgramBuilder.cpp',
+ '../src/gpu/vk/GrVkProgramBuilder.h',
+ '../src/gpu/vk/GrVkProgramDataManager.cpp',
+ '../src/gpu/vk/GrVkProgramDataManager.h',
+ '../src/gpu/vk/GrVkProgramDesc.cpp',
+ '../src/gpu/vk/GrVkProgramDesc.h',
+ '../src/gpu/vk/GrVkRenderPass.cpp',
+ '../src/gpu/vk/GrVkRenderPass.h',
+ '../src/gpu/vk/GrVkRenderTarget.cpp',
+ '../src/gpu/vk/GrVkRenderTarget.h',
+ '../src/gpu/vk/GrVkResource.h',
+ '../src/gpu/vk/GrVkResourceProvider.cpp',
+ '../src/gpu/vk/GrVkResourceProvider.h',
+ '../src/gpu/vk/GrVkSampler.cpp',
+ '../src/gpu/vk/GrVkSampler.h',
+ '../src/gpu/vk/GrVkStencilAttachment.cpp',
+ '../src/gpu/vk/GrVkStencilAttachment.h',
+ '../src/gpu/vk/GrVkTexture.cpp',
+ '../src/gpu/vk/GrVkTexture.h',
+ '../src/gpu/vk/GrVkTextureRenderTarget.cpp',
+ '../src/gpu/vk/GrVkTextureRenderTarget.h',
+ '../src/gpu/vk/GrVkTransferBuffer.cpp',
+ '../src/gpu/vk/GrVkTransferBuffer.h',
+ '../src/gpu/vk/GrVkUniformBuffer.cpp',
+ '../src/gpu/vk/GrVkUniformBuffer.h',
+ '../src/gpu/vk/GrVkUniformHandler.cpp',
+ '../src/gpu/vk/GrVkUniformHandler.h',
+ '../src/gpu/vk/GrVkUtil.cpp',
+ '../src/gpu/vk/GrVkUtil.h',
+ '../src/gpu/vk/GrVkVaryingHandler.cpp',
+ '../src/gpu/vk/GrVkVaryingHandler.h',
+ '../src/gpu/vk/GrVkVertexBuffer.cpp',
+ '../src/gpu/vk/GrVkVertexBuffer.h',
+
+# '../testfiles/vktest.cpp',
+ ],
+ },
+}
'<(skia_include_path)/views/SkOSWindow_Unix.h',
'<(skia_include_path)/views/SkOSWindow_Win.h',
'<(skia_include_path)/views/SkWindow.h',
+ '<(skia_include_path)/gpu/vk',
],
},
'include_dirs': [
--- /dev/null
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrVkInterface_DEFINED
+#define GrVkInterface_DEFINED
+
+#include "SkRefCnt.h"
+
+#include "vulkan/vulkan.h"
+
+////////////////////////////////////////////////////////////////////////////////
+
+/**
+ * The default interface is returned by GrVkCreateInterface. This function's
+ * implementation is platform-specific.
+ */
+
+struct GrVkInterface;
+
+/**
+ * Creates a GrVkInterface.
+ */
+const GrVkInterface* GrVkCreateInterface(VkInstance instance);
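+
+/**
+ * Minimal usage sketch (assumes the client has already created a VkInstance;
+ * error handling elided):
+ *
+ *   const GrVkInterface* interface = GrVkCreateInterface(instance);
+ *   SkASSERT(interface && interface->validate());
+ *   // All Vulkan calls are then routed through fFunctions, e.g.
+ *   //   interface->fFunctions.fDeviceWaitIdle(device);
+ */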
+
+
+/**
+ * GrContext uses the following interface to make all calls into Vulkan. When a
+ * GrContext is created it is given a GrVkInterface. All functions that should be
+ * available based on the Vulkan version must be non-NULL or GrContext creation
+ * will fail. This can be tested with the validate() method.
+ */
+struct SK_API GrVkInterface : public SkRefCnt {
+private:
+ // simple wrapper class that exists only to initialize a pointer to NULL
+ template <typename FNPTR_TYPE> class VkPtr {
+ public:
+ VkPtr() : fPtr(NULL) {}
+        VkPtr& operator=(FNPTR_TYPE ptr) { fPtr = ptr; return *this; }
+ operator FNPTR_TYPE() const { return fPtr; }
+ private:
+ FNPTR_TYPE fPtr;
+ };
+
+ typedef SkRefCnt INHERITED;
+
+public:
+ GrVkInterface();
+
+ // Validates that the GrVkInterface supports its advertised standard. This means the necessary
+    // function pointers have been initialized for the Vulkan version.
+ bool validate() const;
+
+ /**
+ * The function pointers are in a struct so that we can have a compiler generated assignment
+ * operator.
+ */
+ struct Functions {
+ VkPtr<PFN_vkCreateInstance> fCreateInstance;
+ VkPtr<PFN_vkDestroyInstance> fDestroyInstance;
+ VkPtr<PFN_vkEnumeratePhysicalDevices> fEnumeratePhysicalDevices;
+ VkPtr<PFN_vkGetPhysicalDeviceFeatures> fGetPhysicalDeviceFeatures;
+ VkPtr<PFN_vkGetPhysicalDeviceFormatProperties> fGetPhysicalDeviceFormatProperties;
+ VkPtr<PFN_vkGetPhysicalDeviceImageFormatProperties> fGetPhysicalDeviceImageFormatProperties;
+ VkPtr<PFN_vkGetPhysicalDeviceProperties> fGetPhysicalDeviceProperties;
+ VkPtr<PFN_vkGetPhysicalDeviceQueueFamilyProperties> fGetPhysicalDeviceQueueFamilyProperties;
+ VkPtr<PFN_vkGetPhysicalDeviceMemoryProperties> fGetPhysicalDeviceMemoryProperties;
+ VkPtr<PFN_vkCreateDevice> fCreateDevice;
+ VkPtr<PFN_vkDestroyDevice> fDestroyDevice;
+ VkPtr<PFN_vkEnumerateInstanceExtensionProperties> fEnumerateInstanceExtensionProperties;
+ VkPtr<PFN_vkEnumerateDeviceExtensionProperties> fEnumerateDeviceExtensionProperties;
+ VkPtr<PFN_vkEnumerateInstanceLayerProperties> fEnumerateInstanceLayerProperties;
+ VkPtr<PFN_vkEnumerateDeviceLayerProperties> fEnumerateDeviceLayerProperties;
+ VkPtr<PFN_vkGetDeviceQueue> fGetDeviceQueue;
+ VkPtr<PFN_vkQueueSubmit> fQueueSubmit;
+ VkPtr<PFN_vkQueueWaitIdle> fQueueWaitIdle;
+ VkPtr<PFN_vkDeviceWaitIdle> fDeviceWaitIdle;
+ VkPtr<PFN_vkAllocateMemory> fAllocateMemory;
+ VkPtr<PFN_vkFreeMemory> fFreeMemory;
+ VkPtr<PFN_vkMapMemory> fMapMemory;
+ VkPtr<PFN_vkUnmapMemory> fUnmapMemory;
+ VkPtr<PFN_vkFlushMappedMemoryRanges> fFlushMappedMemoryRanges;
+ VkPtr<PFN_vkInvalidateMappedMemoryRanges> fInvalidateMappedMemoryRanges;
+ VkPtr<PFN_vkGetDeviceMemoryCommitment> fGetDeviceMemoryCommitment;
+ VkPtr<PFN_vkBindBufferMemory> fBindBufferMemory;
+ VkPtr<PFN_vkBindImageMemory> fBindImageMemory;
+ VkPtr<PFN_vkGetBufferMemoryRequirements> fGetBufferMemoryRequirements;
+ VkPtr<PFN_vkGetImageMemoryRequirements> fGetImageMemoryRequirements;
+ VkPtr<PFN_vkGetImageSparseMemoryRequirements> fGetImageSparseMemoryRequirements;
+ VkPtr<PFN_vkGetPhysicalDeviceSparseImageFormatProperties> fGetPhysicalDeviceSparseImageFormatProperties;
+ VkPtr<PFN_vkQueueBindSparse> fQueueBindSparse;
+ VkPtr<PFN_vkCreateFence> fCreateFence;
+ VkPtr<PFN_vkDestroyFence> fDestroyFence;
+ VkPtr<PFN_vkResetFences> fResetFences;
+ VkPtr<PFN_vkGetFenceStatus> fGetFenceStatus;
+ VkPtr<PFN_vkWaitForFences> fWaitForFences;
+ VkPtr<PFN_vkCreateSemaphore> fCreateSemaphore;
+ VkPtr<PFN_vkDestroySemaphore> fDestroySemaphore;
+ VkPtr<PFN_vkCreateEvent> fCreateEvent;
+ VkPtr<PFN_vkDestroyEvent> fDestroyEvent;
+ VkPtr<PFN_vkGetEventStatus> fGetEventStatus;
+ VkPtr<PFN_vkSetEvent> fSetEvent;
+ VkPtr<PFN_vkResetEvent> fResetEvent;
+ VkPtr<PFN_vkCreateQueryPool> fCreateQueryPool;
+ VkPtr<PFN_vkDestroyQueryPool> fDestroyQueryPool;
+ VkPtr<PFN_vkGetQueryPoolResults> fGetQueryPoolResults;
+ VkPtr<PFN_vkCreateBuffer> fCreateBuffer;
+ VkPtr<PFN_vkDestroyBuffer> fDestroyBuffer;
+ VkPtr<PFN_vkCreateBufferView> fCreateBufferView;
+ VkPtr<PFN_vkDestroyBufferView> fDestroyBufferView;
+ VkPtr<PFN_vkCreateImage> fCreateImage;
+ VkPtr<PFN_vkDestroyImage> fDestroyImage;
+ VkPtr<PFN_vkGetImageSubresourceLayout> fGetImageSubresourceLayout;
+ VkPtr<PFN_vkCreateImageView> fCreateImageView;
+ VkPtr<PFN_vkDestroyImageView> fDestroyImageView;
+ VkPtr<PFN_vkCreateShaderModule> fCreateShaderModule;
+ VkPtr<PFN_vkDestroyShaderModule> fDestroyShaderModule;
+ VkPtr<PFN_vkCreatePipelineCache> fCreatePipelineCache;
+ VkPtr<PFN_vkDestroyPipelineCache> fDestroyPipelineCache;
+ VkPtr<PFN_vkGetPipelineCacheData> fGetPipelineCacheData;
+ VkPtr<PFN_vkMergePipelineCaches> fMergePipelineCaches;
+ VkPtr<PFN_vkCreateGraphicsPipelines> fCreateGraphicsPipelines;
+ VkPtr<PFN_vkCreateComputePipelines> fCreateComputePipelines;
+ VkPtr<PFN_vkDestroyPipeline> fDestroyPipeline;
+ VkPtr<PFN_vkCreatePipelineLayout> fCreatePipelineLayout;
+ VkPtr<PFN_vkDestroyPipelineLayout> fDestroyPipelineLayout;
+ VkPtr<PFN_vkCreateSampler> fCreateSampler;
+ VkPtr<PFN_vkDestroySampler> fDestroySampler;
+ VkPtr<PFN_vkCreateDescriptorSetLayout> fCreateDescriptorSetLayout;
+ VkPtr<PFN_vkDestroyDescriptorSetLayout> fDestroyDescriptorSetLayout;
+ VkPtr<PFN_vkCreateDescriptorPool> fCreateDescriptorPool;
+ VkPtr<PFN_vkDestroyDescriptorPool> fDestroyDescriptorPool;
+ VkPtr<PFN_vkResetDescriptorPool> fResetDescriptorPool;
+ VkPtr<PFN_vkAllocateDescriptorSets> fAllocateDescriptorSets;
+ VkPtr<PFN_vkFreeDescriptorSets> fFreeDescriptorSets;
+ VkPtr<PFN_vkUpdateDescriptorSets> fUpdateDescriptorSets;
+ VkPtr<PFN_vkCreateFramebuffer> fCreateFramebuffer;
+ VkPtr<PFN_vkDestroyFramebuffer> fDestroyFramebuffer;
+ VkPtr<PFN_vkCreateRenderPass> fCreateRenderPass;
+ VkPtr<PFN_vkDestroyRenderPass> fDestroyRenderPass;
+ VkPtr<PFN_vkGetRenderAreaGranularity> fGetRenderAreaGranularity;
+ VkPtr<PFN_vkCreateCommandPool> fCreateCommandPool;
+ VkPtr<PFN_vkDestroyCommandPool> fDestroyCommandPool;
+ VkPtr<PFN_vkResetCommandPool> fResetCommandPool;
+ VkPtr<PFN_vkAllocateCommandBuffers> fAllocateCommandBuffers;
+ VkPtr<PFN_vkFreeCommandBuffers> fFreeCommandBuffers;
+ VkPtr<PFN_vkBeginCommandBuffer> fBeginCommandBuffer;
+ VkPtr<PFN_vkEndCommandBuffer> fEndCommandBuffer;
+ VkPtr<PFN_vkResetCommandBuffer> fResetCommandBuffer;
+ VkPtr<PFN_vkCmdBindPipeline> fCmdBindPipeline;
+ VkPtr<PFN_vkCmdSetViewport> fCmdSetViewport;
+ VkPtr<PFN_vkCmdSetScissor> fCmdSetScissor;
+ VkPtr<PFN_vkCmdSetLineWidth> fCmdSetLineWidth;
+ VkPtr<PFN_vkCmdSetDepthBias> fCmdSetDepthBias;
+ VkPtr<PFN_vkCmdSetBlendConstants> fCmdSetBlendConstants;
+ VkPtr<PFN_vkCmdSetDepthBounds> fCmdSetDepthBounds;
+ VkPtr<PFN_vkCmdSetStencilCompareMask> fCmdSetStencilCompareMask;
+ VkPtr<PFN_vkCmdSetStencilWriteMask> fCmdSetStencilWriteMask;
+ VkPtr<PFN_vkCmdSetStencilReference> fCmdSetStencilReference;
+ VkPtr<PFN_vkCmdBindDescriptorSets> fCmdBindDescriptorSets;
+ VkPtr<PFN_vkCmdBindIndexBuffer> fCmdBindIndexBuffer;
+ VkPtr<PFN_vkCmdBindVertexBuffers> fCmdBindVertexBuffers;
+ VkPtr<PFN_vkCmdDraw> fCmdDraw;
+ VkPtr<PFN_vkCmdDrawIndexed> fCmdDrawIndexed;
+ VkPtr<PFN_vkCmdDrawIndirect> fCmdDrawIndirect;
+ VkPtr<PFN_vkCmdDrawIndexedIndirect> fCmdDrawIndexedIndirect;
+ VkPtr<PFN_vkCmdDispatch> fCmdDispatch;
+ VkPtr<PFN_vkCmdDispatchIndirect> fCmdDispatchIndirect;
+ VkPtr<PFN_vkCmdCopyBuffer> fCmdCopyBuffer;
+ VkPtr<PFN_vkCmdCopyImage> fCmdCopyImage;
+ VkPtr<PFN_vkCmdBlitImage> fCmdBlitImage;
+ VkPtr<PFN_vkCmdCopyBufferToImage> fCmdCopyBufferToImage;
+ VkPtr<PFN_vkCmdCopyImageToBuffer> fCmdCopyImageToBuffer;
+ VkPtr<PFN_vkCmdUpdateBuffer> fCmdUpdateBuffer;
+ VkPtr<PFN_vkCmdFillBuffer> fCmdFillBuffer;
+ VkPtr<PFN_vkCmdClearColorImage> fCmdClearColorImage;
+ VkPtr<PFN_vkCmdClearDepthStencilImage> fCmdClearDepthStencilImage;
+ VkPtr<PFN_vkCmdClearAttachments> fCmdClearAttachments;
+ VkPtr<PFN_vkCmdResolveImage> fCmdResolveImage;
+ VkPtr<PFN_vkCmdSetEvent> fCmdSetEvent;
+ VkPtr<PFN_vkCmdResetEvent> fCmdResetEvent;
+ VkPtr<PFN_vkCmdWaitEvents> fCmdWaitEvents;
+ VkPtr<PFN_vkCmdPipelineBarrier> fCmdPipelineBarrier;
+ VkPtr<PFN_vkCmdBeginQuery> fCmdBeginQuery;
+ VkPtr<PFN_vkCmdEndQuery> fCmdEndQuery;
+ VkPtr<PFN_vkCmdResetQueryPool> fCmdResetQueryPool;
+ VkPtr<PFN_vkCmdWriteTimestamp> fCmdWriteTimestamp;
+ VkPtr<PFN_vkCmdCopyQueryPoolResults> fCmdCopyQueryPoolResults;
+ VkPtr<PFN_vkCmdPushConstants> fCmdPushConstants;
+ VkPtr<PFN_vkCmdBeginRenderPass> fCmdBeginRenderPass;
+ VkPtr<PFN_vkCmdNextSubpass> fCmdNextSubpass;
+ VkPtr<PFN_vkCmdEndRenderPass> fCmdEndRenderPass;
+ VkPtr<PFN_vkCmdExecuteCommands> fCmdExecuteCommands;
+ VkPtr<PFN_vkDestroySurfaceKHR> fDestroySurfaceKHR;
+ VkPtr<PFN_vkGetPhysicalDeviceSurfaceSupportKHR> fGetPhysicalDeviceSurfaceSupportKHR;
+ VkPtr<PFN_vkGetPhysicalDeviceSurfaceCapabilitiesKHR> fGetPhysicalDeviceSurfaceCapabilitiesKHR;
+ VkPtr<PFN_vkGetPhysicalDeviceSurfaceFormatsKHR> fGetPhysicalDeviceSurfaceFormatsKHR;
+ VkPtr<PFN_vkGetPhysicalDeviceSurfacePresentModesKHR> fGetPhysicalDeviceSurfacePresentModesKHR;
+ VkPtr<PFN_vkCreateSwapchainKHR> fCreateSwapchainKHR;
+ VkPtr<PFN_vkDestroySwapchainKHR> fDestroySwapchainKHR;
+ VkPtr<PFN_vkGetSwapchainImagesKHR> fGetSwapchainImagesKHR;
+ VkPtr<PFN_vkAcquireNextImageKHR> fAcquireNextImageKHR;
+ VkPtr<PFN_vkQueuePresentKHR> fQueuePresentKHR;
+ VkPtr<PFN_vkGetPhysicalDeviceDisplayPropertiesKHR> fGetPhysicalDeviceDisplayPropertiesKHR;
+ VkPtr<PFN_vkGetPhysicalDeviceDisplayPlanePropertiesKHR> fGetPhysicalDeviceDisplayPlanePropertiesKHR;
+ VkPtr<PFN_vkGetDisplayPlaneSupportedDisplaysKHR> fGetDisplayPlaneSupportedDisplaysKHR;
+ VkPtr<PFN_vkGetDisplayModePropertiesKHR> fGetDisplayModePropertiesKHR;
+ VkPtr<PFN_vkCreateDisplayModeKHR> fCreateDisplayModeKHR;
+ VkPtr<PFN_vkGetDisplayPlaneCapabilitiesKHR> fGetDisplayPlaneCapabilitiesKHR;
+ VkPtr<PFN_vkCreateDisplayPlaneSurfaceKHR> fCreateDisplayPlaneSurfaceKHR;
+ VkPtr<PFN_vkCreateSharedSwapchainsKHR> fCreateSharedSwapchainsKHR;
+ } fFunctions;
+};
+
+#endif
--- /dev/null
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "GrVkBuffer.h"
+#include "GrVkGpu.h"
+#include "GrVkMemory.h"
+#include "GrVkUtil.h"
+
+#define VK_CALL(GPU, X) GR_VK_CALL(GPU->vkInterface(), X)
+
+#ifdef SK_DEBUG
+#define VALIDATE() this->validate()
+#else
+#define VALIDATE() do {} while(false)
+#endif
+
+const GrVkBuffer::Resource* GrVkBuffer::Create(const GrVkGpu* gpu, const Desc& desc) {
+ VkBuffer buffer;
+ VkDeviceMemory alloc;
+
+ // create the buffer object
+ VkBufferCreateInfo bufInfo;
+ memset(&bufInfo, 0, sizeof(VkBufferCreateInfo));
+ bufInfo.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
+ bufInfo.flags = 0;
+ bufInfo.size = desc.fSizeInBytes;
+ switch (desc.fType) {
+ case kVertex_Type:
+ bufInfo.usage = VK_BUFFER_USAGE_VERTEX_BUFFER_BIT;
+ break;
+ case kIndex_Type:
+ bufInfo.usage = VK_BUFFER_USAGE_INDEX_BUFFER_BIT;
+ break;
+ case kUniform_Type:
+ bufInfo.usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT;
+ break;
+ case kCopyRead_Type:
+ bufInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT;
+ break;
+ case kCopyWrite_Type:
+ bufInfo.usage = VK_BUFFER_USAGE_TRANSFER_DST_BIT;
+ break;
+    }
+ bufInfo.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
+ bufInfo.queueFamilyIndexCount = 0;
+ bufInfo.pQueueFamilyIndices = nullptr;
+
+ VkResult err;
+ err = VK_CALL(gpu, CreateBuffer(gpu->device(), &bufInfo, nullptr, &buffer));
+ if (err) {
+ return nullptr;
+ }
+
+ VkMemoryPropertyFlags requiredMemProps = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT |
+ VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
+
+ if (!GrVkMemory::AllocAndBindBufferMemory(gpu,
+ buffer,
+ requiredMemProps,
+ &alloc)) {
+ VK_CALL(gpu, DestroyBuffer(gpu->device(), buffer, nullptr));
+ return nullptr;
+ }
+
+ const GrVkBuffer::Resource* resource = new GrVkBuffer::Resource(buffer, alloc);
+ if (!resource) {
+ VK_CALL(gpu, DestroyBuffer(gpu->device(), buffer, nullptr));
+ VK_CALL(gpu, FreeMemory(gpu->device(), alloc, nullptr));
+ return nullptr;
+ }
+
+ return resource;
+}
+
+
+void GrVkBuffer::addMemoryBarrier(const GrVkGpu* gpu,
+ VkAccessFlags srcAccessMask,
+                                  VkAccessFlags dstAccessMask,
+ VkPipelineStageFlags srcStageMask,
+ VkPipelineStageFlags dstStageMask,
+ bool byRegion) const {
+ VkBufferMemoryBarrier bufferMemoryBarrier = {
+ VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER, // sType
+ NULL, // pNext
+ srcAccessMask, // srcAccessMask
+        dstAccessMask,                           // dstAccessMask
+ VK_QUEUE_FAMILY_IGNORED, // srcQueueFamilyIndex
+ VK_QUEUE_FAMILY_IGNORED, // dstQueueFamilyIndex
+ this->buffer(), // buffer
+ 0, // offset
+ fDesc.fSizeInBytes, // size
+ };
+
+ // TODO: restrict to area of buffer we're interested in
+ gpu->addBufferMemoryBarrier(srcStageMask, dstStageMask, byRegion, &bufferMemoryBarrier);
+}
+
+void GrVkBuffer::Resource::freeGPUData(const GrVkGpu* gpu) const {
+ SkASSERT(fBuffer);
+ SkASSERT(fAlloc);
+ VK_CALL(gpu, DestroyBuffer(gpu->device(), fBuffer, nullptr));
+ VK_CALL(gpu, FreeMemory(gpu->device(), fAlloc, nullptr));
+}
+
+void GrVkBuffer::vkRelease(const GrVkGpu* gpu) {
+ VALIDATE();
+ fResource->unref(gpu);
+ fResource = nullptr;
+ fMapPtr = nullptr;
+ VALIDATE();
+}
+
+void GrVkBuffer::vkAbandon() {
+ fResource->unrefAndAbandon();
+ fMapPtr = nullptr;
+ VALIDATE();
+}
+
+void* GrVkBuffer::vkMap(const GrVkGpu* gpu) {
+ VALIDATE();
+ SkASSERT(!this->vkIsMapped());
+
+ VkResult err = VK_CALL(gpu, MapMemory(gpu->device(), alloc(), 0, VK_WHOLE_SIZE, 0, &fMapPtr));
+ if (err) {
+ fMapPtr = nullptr;
+ }
+
+ VALIDATE();
+ return fMapPtr;
+}
+
+void GrVkBuffer::vkUnmap(const GrVkGpu* gpu) {
+ VALIDATE();
+ SkASSERT(this->vkIsMapped());
+
+ VK_CALL(gpu, UnmapMemory(gpu->device(), alloc()));
+
+ fMapPtr = nullptr;
+}
+
+bool GrVkBuffer::vkIsMapped() const {
+ VALIDATE();
+ return SkToBool(fMapPtr);
+}
+
+bool GrVkBuffer::vkUpdateData(const GrVkGpu* gpu, const void* src, size_t srcSizeInBytes) {
+ SkASSERT(!this->vkIsMapped());
+ VALIDATE();
+ if (srcSizeInBytes > fDesc.fSizeInBytes) {
+ return false;
+ }
+
+ void* mapPtr;
+ VkResult err = VK_CALL(gpu, MapMemory(gpu->device(), alloc(), 0, srcSizeInBytes, 0, &mapPtr));
+
+ if (VK_SUCCESS != err) {
+ return false;
+ }
+
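+    // No explicit flush is needed afterwards: Create() allocates the buffer
+    // memory with VK_MEMORY_PROPERTY_HOST_COHERENT_BIT.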
+ memcpy(mapPtr, src, srcSizeInBytes);
+
+ VK_CALL(gpu, UnmapMemory(gpu->device(), alloc()));
+
+ return true;
+}
+
+void GrVkBuffer::validate() const {
+ SkASSERT(!fResource || kVertex_Type == fDesc.fType || kIndex_Type == fDesc.fType
+ || kCopyRead_Type == fDesc.fType || kCopyWrite_Type == fDesc.fType
+ || kUniform_Type == fDesc.fType);
+}
+
--- /dev/null
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrVkBuffer_DEFINED
+#define GrVkBuffer_DEFINED
+
+#include "vk/GrVkInterface.h"
+#include "GrVkResource.h"
+
+class GrVkGpu;
+
+/**
+ * This class serves as the base of GrVk*Buffer classes. It was written to avoid code
+ * duplication in those classes.
+ */
+class GrVkBuffer : public SkNoncopyable {
+public:
+ ~GrVkBuffer() {
+ // either release or abandon should have been called by the owner of this object.
+ SkASSERT(!fResource);
+ }
+
+ VkBuffer buffer() const { return fResource->fBuffer; }
+ VkDeviceMemory alloc() const { return fResource->fAlloc; }
+ const GrVkResource* resource() const { return fResource; }
+ size_t size() const { return fDesc.fSizeInBytes; }
+
+ void addMemoryBarrier(const GrVkGpu* gpu,
+ VkAccessFlags srcAccessMask,
+ VkAccessFlags dstAccessMask,
+ VkPipelineStageFlags srcStageMask,
+ VkPipelineStageFlags dstStageMask,
+ bool byRegion) const;
+
+ enum Type {
+ kVertex_Type,
+ kIndex_Type,
+ kUniform_Type,
+ kCopyRead_Type,
+ kCopyWrite_Type,
+ };
+
+protected:
+ struct Desc {
+ size_t fSizeInBytes;
+ Type fType; // vertex buffer, index buffer, etc.
+ bool fDynamic;
+ };
+
+ class Resource : public GrVkResource {
+ public:
+ Resource(VkBuffer buf, VkDeviceMemory alloc) : INHERITED(), fBuffer(buf), fAlloc(alloc) {}
+
+ VkBuffer fBuffer;
+ VkDeviceMemory fAlloc;
+ private:
+ void freeGPUData(const GrVkGpu* gpu) const;
+
+ typedef GrVkResource INHERITED;
+ };
+
+ // convenience routine for raw buffer creation
+ static const Resource* Create(const GrVkGpu* gpu,
+ const Desc& descriptor);
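+
+    // Typical subclass flow, sketched (assumes a live GrVkGpu* gpu and a
+    // kVertex_Type buffer; error handling elided):
+    //   Desc desc = { sizeInBytes, kVertex_Type, false /*fDynamic*/ };
+    //   const Resource* resource = Create(gpu, desc);  // may return nullptr
+    //   ... pass (desc, resource) to the subclass constructor ...
+    //   void* ptr = this->vkMap(gpu);  /* write */  this->vkUnmap(gpu);
+    //   this->vkRelease(gpu);          // owner must release or abandon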
+
+ GrVkBuffer(const Desc& desc, const GrVkBuffer::Resource* resource)
+ : fDesc(desc), fResource(resource), fMapPtr(nullptr) {
+ }
+
+ void* vkMap(const GrVkGpu* gpu);
+ void vkUnmap(const GrVkGpu* gpu);
+ bool vkUpdateData(const GrVkGpu* gpu, const void* src, size_t srcSizeInBytes);
+
+ void vkAbandon();
+ void vkRelease(const GrVkGpu* gpu);
+
+private:
+ void validate() const;
+ bool vkIsMapped() const;
+
+ Desc fDesc;
+ const Resource* fResource;
+ void* fMapPtr;
+
+};
+
+#endif
--- /dev/null
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "GrVkCaps.h"
+
+#include "GrVkUtil.h"
+#include "glsl/GrGLSLCaps.h"
+#include "vk/GrVkInterface.h"
+
+GrVkCaps::GrVkCaps(const GrContextOptions& contextOptions, const GrVkInterface* vkInterface,
+ VkPhysicalDevice physDev) : INHERITED(contextOptions) {
+ /**************************************************************************
+     * GrCaps fields
+ **************************************************************************/
+ fMipMapSupport = false; //TODO: figure this out
+ fNPOTTextureTileSupport = false; //TODO: figure this out
+ fTwoSidedStencilSupport = false; //TODO: figure this out
+ fStencilWrapOpsSupport = false; //TODO: figure this out
+ fDiscardRenderTargetSupport = false; //TODO: figure this out
+ fReuseScratchTextures = true; //TODO: figure this out
+ fGpuTracingSupport = false; //TODO: figure this out
+ fCompressedTexSubImageSupport = false; //TODO: figure this out
+ fOversizedStencilSupport = false; //TODO: figure this out
+
+ fUseDrawInsteadOfClear = false; //TODO: figure this out
+
+ fMapBufferFlags = kNone_MapFlags; //TODO: figure this out
+ fGeometryBufferMapThreshold = SK_MaxS32; //TODO: figure this out
+
+ fMaxRenderTargetSize = 4096; // minimum required by spec
+ fMaxTextureSize = 4096; // minimum required by spec
+ fMaxColorSampleCount = 4; // minimum required by spec
+ fMaxStencilSampleCount = 4; // minimum required by spec
+
+
+ fShaderCaps.reset(new GrGLSLCaps(contextOptions));
+
+ /**************************************************************************
+ * GrVkCaps fields
+ **************************************************************************/
+ fMaxSampledTextures = 16; // Spec requires a minimum of 16 sampled textures per stage
+
+ this->init(contextOptions, vkInterface, physDev);
+}
+
+void GrVkCaps::init(const GrContextOptions& contextOptions, const GrVkInterface* vkInterface,
+ VkPhysicalDevice physDev) {
+
+ this->initGLSLCaps(vkInterface, physDev);
+ this->initConfigTexturableTable(vkInterface, physDev);
+ this->initConfigRenderableTable(vkInterface, physDev);
+ this->initStencilFormats(vkInterface, physDev);
+
+ VkPhysicalDeviceProperties properties;
+ GR_VK_CALL(vkInterface, GetPhysicalDeviceProperties(physDev, &properties));
+
+    // We could actually query and get a max size for each config, however maxImageDimension2D will
+ // give the minimum max size across all configs. So for simplicity we will use that for now.
+ fMaxRenderTargetSize = properties.limits.maxImageDimension2D;
+ fMaxTextureSize = properties.limits.maxImageDimension2D;
+
+ this->initSampleCount(properties);
+
+ fMaxSampledTextures = SkTMin(properties.limits.maxPerStageDescriptorSampledImages,
+ properties.limits.maxPerStageDescriptorSamplers);
+
+ this->applyOptionsOverrides(contextOptions);
+ // need to friend GrVkCaps in GrGLSLCaps.h
+ // GrGLSLCaps* glslCaps = static_cast<GrGLSLCaps*>(fShaderCaps.get());
+ // glslCaps->applyOptionsOverrides(contextOptions);
+}
+
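+// Returns the largest sample count N such that every count from 2 through N is
+// reported in 'flags'; 0 means only single-sample rendering is supported.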
+static int get_max_sample_count(VkSampleCountFlags flags) {
+ SkASSERT(flags & VK_SAMPLE_COUNT_1_BIT);
+ if (!(flags & VK_SAMPLE_COUNT_2_BIT)) {
+ return 0;
+ }
+ if (!(flags & VK_SAMPLE_COUNT_4_BIT)) {
+ return 2;
+ }
+ if (!(flags & VK_SAMPLE_COUNT_8_BIT)) {
+ return 4;
+ }
+ if (!(flags & VK_SAMPLE_COUNT_16_BIT)) {
+ return 8;
+ }
+ if (!(flags & VK_SAMPLE_COUNT_32_BIT)) {
+ return 16;
+ }
+ if (!(flags & VK_SAMPLE_COUNT_64_BIT)) {
+ return 32;
+ }
+ return 64;
+}
+
+void GrVkCaps::initSampleCount(const VkPhysicalDeviceProperties& properties) {
+ VkSampleCountFlags colorSamples = properties.limits.framebufferColorSampleCounts;
+ VkSampleCountFlags stencilSamples = properties.limits.framebufferStencilSampleCounts;
+
+ fMaxColorSampleCount = get_max_sample_count(colorSamples);
+ fMaxStencilSampleCount = get_max_sample_count(stencilSamples);
+}
+
+void GrVkCaps::initGLSLCaps(const GrVkInterface* interface, VkPhysicalDevice physDev) {
+ GrGLSLCaps* glslCaps = static_cast<GrGLSLCaps*>(fShaderCaps.get());
+ // TODO: actually figure out a correct version here
+ glslCaps->fVersionDeclString = "#version 140\n";
+
+    // fConfigOutputSwizzle will default to RGBA so we only need to set it for alpha-only configs.
+ for (int i = 0; i < kGrPixelConfigCnt; ++i) {
+ GrPixelConfig config = static_cast<GrPixelConfig>(i);
+ if (GrPixelConfigIsAlphaOnly(config)) {
+ glslCaps->fConfigTextureSwizzle[i] = GrSwizzle::RRRR();
+ glslCaps->fConfigOutputSwizzle[i] = GrSwizzle::AAAA();
+ } else {
+ glslCaps->fConfigTextureSwizzle[i] = GrSwizzle::RGBA();
+ }
+ }
+}
+
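+// Vulkan reports format support separately for linear- and optimal-tiled images
+// (VkFormatProperties::linearTilingFeatures / optimalTilingFeatures), so each
+// helper below returns one bool per tiling mode.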
+static void format_supported_for_feature(const GrVkInterface* interface,
+ VkPhysicalDevice physDev,
+ VkFormat format,
+ VkFormatFeatureFlagBits featureBit,
+ bool* linearSupport,
+ bool* optimalSupport) {
+ VkFormatProperties props;
+ memset(&props, 0, sizeof(VkFormatProperties));
+ GR_VK_CALL(interface, GetPhysicalDeviceFormatProperties(physDev, format, &props));
+ *linearSupport = SkToBool(props.linearTilingFeatures & featureBit);
+ *optimalSupport = SkToBool(props.optimalTilingFeatures & featureBit);
+}
+
+static void config_supported_for_feature(const GrVkInterface* interface,
+ VkPhysicalDevice physDev,
+ GrPixelConfig config,
+ VkFormatFeatureFlagBits featureBit,
+ bool* linearSupport,
+ bool* optimalSupport) {
+ VkFormat format;
+ if (!GrPixelConfigToVkFormat(config, &format)) {
+ *linearSupport = false;
+ *optimalSupport = false;
+ return;
+ }
+ format_supported_for_feature(interface, physDev, format, featureBit,
+ linearSupport, optimalSupport);
+}
+
+// Currently just assuming that if something can be rendered to without MSAA it also works for MSAA
+#define SET_CONFIG_IS_RENDERABLE(config) \
+ config_supported_for_feature(interface, \
+ physDev, \
+ config, \
+ VK_FORMAT_FEATURE_COLOR_ATTACHMENT_BLEND_BIT, \
+ &fConfigLinearRenderSupport[config][kNo_MSAA], \
+ &fConfigRenderSupport[config][kNo_MSAA] ); \
+ fConfigRenderSupport[config][kYes_MSAA] = fConfigRenderSupport[config][kNo_MSAA]; \
+ fConfigLinearRenderSupport[config][kYes_MSAA] = fConfigLinearRenderSupport[config][kNo_MSAA];
+
+
+void GrVkCaps::initConfigRenderableTable(const GrVkInterface* interface, VkPhysicalDevice physDev) {
+ enum {
+ kNo_MSAA = 0,
+ kYes_MSAA = 1,
+ };
+
+ // Base render support
+ SET_CONFIG_IS_RENDERABLE(kAlpha_8_GrPixelConfig);
+ SET_CONFIG_IS_RENDERABLE(kRGB_565_GrPixelConfig);
+ SET_CONFIG_IS_RENDERABLE(kRGBA_4444_GrPixelConfig);
+ SET_CONFIG_IS_RENDERABLE(kRGBA_8888_GrPixelConfig);
+ SET_CONFIG_IS_RENDERABLE(kBGRA_8888_GrPixelConfig);
+
+ SET_CONFIG_IS_RENDERABLE(kSRGBA_8888_GrPixelConfig);
+
+ // Float render support
+ SET_CONFIG_IS_RENDERABLE(kRGBA_float_GrPixelConfig);
+ SET_CONFIG_IS_RENDERABLE(kRGBA_half_GrPixelConfig);
+ SET_CONFIG_IS_RENDERABLE(kAlpha_half_GrPixelConfig);
+}
+
+#define SET_CONFIG_IS_TEXTURABLE(config) \
+ config_supported_for_feature(interface, \
+ physDev, \
+ config, \
+ VK_FORMAT_FEATURE_SAMPLED_IMAGE_BIT, \
+ &fConfigLinearTextureSupport[config], \
+ &fConfigTextureSupport[config]);
+
+void GrVkCaps::initConfigTexturableTable(const GrVkInterface* interface, VkPhysicalDevice physDev) {
+ // Base texture support
+ SET_CONFIG_IS_TEXTURABLE(kAlpha_8_GrPixelConfig);
+ SET_CONFIG_IS_TEXTURABLE(kRGB_565_GrPixelConfig);
+ SET_CONFIG_IS_TEXTURABLE(kRGBA_4444_GrPixelConfig);
+ SET_CONFIG_IS_TEXTURABLE(kRGBA_8888_GrPixelConfig);
+ SET_CONFIG_IS_TEXTURABLE(kBGRA_8888_GrPixelConfig);
+
+ SET_CONFIG_IS_TEXTURABLE(kIndex_8_GrPixelConfig);
+ SET_CONFIG_IS_TEXTURABLE(kSRGBA_8888_GrPixelConfig);
+
+ // Compressed texture support
+ SET_CONFIG_IS_TEXTURABLE(kETC1_GrPixelConfig);
+ SET_CONFIG_IS_TEXTURABLE(kLATC_GrPixelConfig);
+ SET_CONFIG_IS_TEXTURABLE(kR11_EAC_GrPixelConfig);
+ SET_CONFIG_IS_TEXTURABLE(kASTC_12x12_GrPixelConfig);
+
+ // Float texture support
+ SET_CONFIG_IS_TEXTURABLE(kRGBA_float_GrPixelConfig);
+ SET_CONFIG_IS_TEXTURABLE(kRGBA_half_GrPixelConfig);
+ SET_CONFIG_IS_TEXTURABLE(kAlpha_half_GrPixelConfig);
+}
+
+#define SET_CONFIG_CAN_STENCIL(config) \
+ bool SK_MACRO_APPEND_LINE(linearSupported); \
+ bool SK_MACRO_APPEND_LINE(optimalSupported); \
+ format_supported_for_feature(interface, \
+ physDev, \
+ config.fInternalFormat, \
+ VK_FORMAT_FEATURE_DEPTH_STENCIL_ATTACHMENT_BIT, \
+ &SK_MACRO_APPEND_LINE(linearSupported), \
+ &SK_MACRO_APPEND_LINE(optimalSupported)); \
+ if (SK_MACRO_APPEND_LINE(linearSupported)) fLinearStencilFormats.push_back(config); \
+ if (SK_MACRO_APPEND_LINE(optimalSupported)) fStencilFormats.push_back(config);
+
+void GrVkCaps::initStencilFormats(const GrVkInterface* interface, VkPhysicalDevice physDev) {
+ // Build up list of legal stencil formats (though perhaps not supported on
+ // the particular gpu/driver) from most preferred to least.
+
+ static const StencilFormat
+ // internal Format stencil bits total bits packed?
+ gS8 = { VK_FORMAT_S8_UINT, 8, 8, false },
+ gD24S8 = { VK_FORMAT_D24_UNORM_S8_UINT, 8, 32, true };
+
+ // I'm simply assuming that these two will be supported since they are used in example code.
+    // TODO: Actually figure this out
+ SET_CONFIG_CAN_STENCIL(gS8);
+ SET_CONFIG_CAN_STENCIL(gD24S8);
+}
+
--- /dev/null
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrVkCaps_DEFINED
+#define GrVkCaps_DEFINED
+
+#include "GrCaps.h"
+#include "GrVkStencilAttachment.h"
+#include "vulkan/vulkan.h"
+
+struct GrVkInterface;
+class GrGLSLCaps;
+
+/**
+ * Stores some capabilities of a Vk backend.
+ */
+class GrVkCaps : public GrCaps {
+public:
+ typedef GrVkStencilAttachment::Format StencilFormat;
+
+ /**
+     * Creates a GrVkCaps and initializes it by querying the device properties through
+     * vkInterface.
+ */
+ GrVkCaps(const GrContextOptions& contextOptions, const GrVkInterface* vkInterface,
+ VkPhysicalDevice device);
+
+ bool isConfigTexturable(GrPixelConfig config) const override {
+ SkASSERT(kGrPixelConfigCnt > config);
+ return fConfigTextureSupport[config];
+ }
+
+ bool isConfigRenderable(GrPixelConfig config, bool withMSAA) const override {
+ SkASSERT(kGrPixelConfigCnt > config);
+ return fConfigRenderSupport[config][withMSAA];
+ }
+
+ bool isConfigRenderableLinearly(GrPixelConfig config, bool withMSAA) const {
+ SkASSERT(kGrPixelConfigCnt > config);
+ return fConfigLinearRenderSupport[config][withMSAA];
+ }
+
+    bool isConfigTexturableLinearly(GrPixelConfig config) const {
+ SkASSERT(kGrPixelConfigCnt > config);
+ return fConfigLinearTextureSupport[config];
+ }
+
+ /**
+ * Gets an array of legal stencil formats. These formats are not guaranteed to be supported by
+     * the driver but are legal VkFormats.
+ */
+ const SkTArray<StencilFormat, true>& stencilFormats() const {
+ return fStencilFormats;
+ }
+
+ /**
+     * Gets an array of legal stencil formats for linearly-tiled images. These formats are not
+     * guaranteed to be supported by the driver but are legal VkFormats.
+ */
+ const SkTArray<StencilFormat, true>& linearStencilFormats() const {
+ return fLinearStencilFormats;
+ }
+
+ /**
+     * Returns the max number of sampled textures we can use in a program. This number is the min
+     * of max samplers and max sampled images. This number is technically the max sampled textures we
+ * can have per stage, but we'll use it for the whole program since for now we only do texture
+ * lookups in the fragment shader.
+ */
+ int maxSampledTextures() const {
+ return fMaxSampledTextures;
+ }
+
+
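+    // reinterpret_cast is used because GrGLSLCaps is only forward-declared in this header.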
+ GrGLSLCaps* glslCaps() const { return reinterpret_cast<GrGLSLCaps*>(fShaderCaps.get()); }
+
+private:
+ void init(const GrContextOptions& contextOptions, const GrVkInterface* vkInterface,
+ VkPhysicalDevice device);
+ void initSampleCount(const VkPhysicalDeviceProperties& properties);
+ void initGLSLCaps(const GrVkInterface* interface, VkPhysicalDevice physDev);
+ void initConfigRenderableTable(const GrVkInterface* interface, VkPhysicalDevice physDev);
+ void initConfigTexturableTable(const GrVkInterface* interface, VkPhysicalDevice physDev);
+ void initStencilFormats(const GrVkInterface* interface, VkPhysicalDevice physDev);
+
+
+ bool fConfigTextureSupport[kGrPixelConfigCnt];
+ // For Vulkan we track whether a config is supported linearly (without need for swizzling)
+ bool fConfigLinearTextureSupport[kGrPixelConfigCnt];
+
+ // The first entry for each config is without msaa and the second is with.
+ bool fConfigRenderSupport[kGrPixelConfigCnt][2];
+ // The first entry for each config is without msaa and the second is with.
+ bool fConfigLinearRenderSupport[kGrPixelConfigCnt][2];
+
+ SkTArray<StencilFormat, true> fLinearStencilFormats;
+ SkTArray<StencilFormat, true> fStencilFormats;
+
+ int fMaxSampledTextures;
+
+ typedef GrCaps INHERITED;
+};
+
+#endif
--- /dev/null
+/*
+* Copyright 2015 Google Inc.
+*
+* Use of this source code is governed by a BSD-style license that can be
+* found in the LICENSE file.
+*/
+
+#include "GrVkCommandBuffer.h"
+
+#include "GrVkFramebuffer.h"
+#include "GrVkImageView.h"
+#include "GrVkRenderPass.h"
+#include "GrVkRenderTarget.h"
+#include "GrVkProgram.h"
+#include "GrVkTransferBuffer.h"
+#include "GrVkUtil.h"
+
+GrVkCommandBuffer* GrVkCommandBuffer::Create(const GrVkGpu* gpu, VkCommandPool cmdPool) {
+ const VkCommandBufferAllocateInfo cmdInfo = {
+ VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO, // sType
+ NULL, // pNext
+ cmdPool, // commandPool
+ VK_COMMAND_BUFFER_LEVEL_PRIMARY, // level
+        1                                                // commandBufferCount
+ };
+
+ VkCommandBuffer cmdBuffer;
+ VkResult err = GR_VK_CALL(gpu->vkInterface(), AllocateCommandBuffers(gpu->device(),
+ &cmdInfo,
+ &cmdBuffer));
+ if (err) {
+ return nullptr;
+ }
+ return new GrVkCommandBuffer(cmdBuffer);
+}
+
+GrVkCommandBuffer::~GrVkCommandBuffer() {
+ // Should have ended any render pass we're in the middle of
+ SkASSERT(!fActiveRenderPass);
+}
+
+void GrVkCommandBuffer::invalidateState() {
+ fBoundVertexBuffer = 0;
+ fBoundVertexBufferIsValid = false;
+ fBoundIndexBuffer = 0;
+ fBoundIndexBufferIsValid = false;
+}
+
+void GrVkCommandBuffer::freeGPUData(const GrVkGpu* gpu) const {
+ SkASSERT(!fIsActive);
+ SkASSERT(!fActiveRenderPass);
+ for (int i = 0; i < fTrackedResources.count(); ++i) {
+ fTrackedResources[i]->unref(gpu);
+ }
+
+ // Destroy the fence, if any
+ if (VK_NULL_HANDLE != fSubmitFence) {
+ GR_VK_CALL(gpu->vkInterface(), DestroyFence(gpu->device(), fSubmitFence, nullptr));
+ }
+
+ GR_VK_CALL(gpu->vkInterface(), FreeCommandBuffers(gpu->device(), gpu->cmdPool(),
+ 1, &fCmdBuffer));
+}
+
+void GrVkCommandBuffer::abandonSubResources() const {
+ for (int i = 0; i < fTrackedResources.count(); ++i) {
+ fTrackedResources[i]->unrefAndAbandon();
+ }
+}
+
+void GrVkCommandBuffer::begin(const GrVkGpu* gpu) {
+ SkASSERT(!fIsActive);
+ VkCommandBufferBeginInfo cmdBufferBeginInfo;
+ memset(&cmdBufferBeginInfo, 0, sizeof(VkCommandBufferBeginInfo));
+ cmdBufferBeginInfo.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO;
+ cmdBufferBeginInfo.pNext = nullptr;
+ cmdBufferBeginInfo.flags = VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT;
+ cmdBufferBeginInfo.pInheritanceInfo = nullptr;
+
+ GR_VK_CALL_ERRCHECK(gpu->vkInterface(), BeginCommandBuffer(fCmdBuffer,
+ &cmdBufferBeginInfo));
+ fIsActive = true;
+}
+
+void GrVkCommandBuffer::end(const GrVkGpu* gpu) {
+ SkASSERT(fIsActive);
+ SkASSERT(!fActiveRenderPass);
+ GR_VK_CALL_ERRCHECK(gpu->vkInterface(), EndCommandBuffer(fCmdBuffer));
+ this->invalidateState();
+ fIsActive = false;
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+void GrVkCommandBuffer::beginRenderPass(const GrVkGpu* gpu,
+ const GrVkRenderPass* renderPass,
+ const GrVkRenderTarget& target) {
+ SkASSERT(fIsActive);
+ SkASSERT(!fActiveRenderPass);
+ VkRenderPassBeginInfo beginInfo;
+ VkSubpassContents contents;
+ renderPass->getBeginInfo(target, &beginInfo, &contents);
+ GR_VK_CALL(gpu->vkInterface(), CmdBeginRenderPass(fCmdBuffer, &beginInfo, contents));
+ fActiveRenderPass = renderPass;
+ this->addResource(renderPass);
+ target.addResources(*this);
+}
+
+void GrVkCommandBuffer::endRenderPass(const GrVkGpu* gpu) {
+ SkASSERT(fIsActive);
+ SkASSERT(fActiveRenderPass);
+ GR_VK_CALL(gpu->vkInterface(), CmdEndRenderPass(fCmdBuffer));
+ fActiveRenderPass = nullptr;
+}
+
+void GrVkCommandBuffer::submitToQueue(const GrVkGpu* gpu, VkQueue queue, GrVkGpu::SyncQueue sync) {
+ SkASSERT(!fIsActive);
+
+ VkResult err;
+ VkFenceCreateInfo fenceInfo;
+ memset(&fenceInfo, 0, sizeof(VkFenceCreateInfo));
+ fenceInfo.sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO;
+ err = GR_VK_CALL(gpu->vkInterface(), CreateFence(gpu->device(), &fenceInfo, nullptr,
+ &fSubmitFence));
+ SkASSERT(!err);
+
+ VkSubmitInfo submitInfo;
+ memset(&submitInfo, 0, sizeof(VkSubmitInfo));
+ submitInfo.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
+ submitInfo.pNext = nullptr;
+ submitInfo.waitSemaphoreCount = 0;
+ submitInfo.pWaitSemaphores = nullptr;
+ submitInfo.commandBufferCount = 1;
+ submitInfo.pCommandBuffers = &fCmdBuffer;
+ submitInfo.signalSemaphoreCount = 0;
+ submitInfo.pSignalSemaphores = nullptr;
+ GR_VK_CALL_ERRCHECK(gpu->vkInterface(), QueueSubmit(queue, 1, &submitInfo, fSubmitFence));
+
+ if (GrVkGpu::kForce_SyncQueue == sync) {
+ err = GR_VK_CALL(gpu->vkInterface(),
+ WaitForFences(gpu->device(), 1, &fSubmitFence, true, UINT64_MAX));
+ if (VK_TIMEOUT == err) {
+ SkDebugf("Fence failed to signal: %d\n", err);
+ SkFAIL("failing");
+ }
+ SkASSERT(!err);
+
+ // Destroy the fence
+ GR_VK_CALL(gpu->vkInterface(), DestroyFence(gpu->device(), fSubmitFence, nullptr));
+ fSubmitFence = VK_NULL_HANDLE;
+ }
+}
+
+bool GrVkCommandBuffer::finished(const GrVkGpu* gpu) const {
+ if (VK_NULL_HANDLE == fSubmitFence) {
+ return true;
+ }
+
+ VkResult err = GR_VK_CALL(gpu->vkInterface(), GetFenceStatus(gpu->device(), fSubmitFence));
+ switch (err) {
+ case VK_SUCCESS:
+ return true;
+
+ case VK_NOT_READY:
+ return false;
+
+ default:
+ SkDebugf("Error getting fence status: %d\n", err);
+ SkFAIL("failing");
+ break;
+ }
+
+ return false;
+}
+
+////////////////////////////////////////////////////////////////////////////////
+// CommandBuffer commands
+////////////////////////////////////////////////////////////////////////////////
+
+void GrVkCommandBuffer::pipelineBarrier(const GrVkGpu* gpu,
+ VkPipelineStageFlags srcStageMask,
+ VkPipelineStageFlags dstStageMask,
+ bool byRegion,
+ BarrierType barrierType,
+ void* barrier) const {
+ SkASSERT(fIsActive);
+ VkDependencyFlags dependencyFlags = byRegion ? VK_DEPENDENCY_BY_REGION_BIT : 0;
+
+ switch (barrierType) {
+ case kMemory_BarrierType: {
+ const VkMemoryBarrier* barrierPtr = reinterpret_cast<VkMemoryBarrier*>(barrier);
+ GR_VK_CALL(gpu->vkInterface(), CmdPipelineBarrier(fCmdBuffer, srcStageMask,
+ dstStageMask, dependencyFlags,
+ 1, barrierPtr,
+ 0, nullptr,
+ 0, nullptr));
+ break;
+ }
+
+ case kBufferMemory_BarrierType: {
+ const VkBufferMemoryBarrier* barrierPtr =
+ reinterpret_cast<VkBufferMemoryBarrier*>(barrier);
+ GR_VK_CALL(gpu->vkInterface(), CmdPipelineBarrier(fCmdBuffer, srcStageMask,
+ dstStageMask, dependencyFlags,
+ 0, nullptr,
+ 1, barrierPtr,
+ 0, nullptr));
+ break;
+ }
+
+ case kImageMemory_BarrierType: {
+ const VkImageMemoryBarrier* barrierPtr =
+ reinterpret_cast<VkImageMemoryBarrier*>(barrier);
+ GR_VK_CALL(gpu->vkInterface(), CmdPipelineBarrier(fCmdBuffer, srcStageMask,
+ dstStageMask, dependencyFlags,
+ 0, nullptr,
+ 0, nullptr,
+ 1, barrierPtr));
+ break;
+ }
+ }
+}
+
+void GrVkCommandBuffer::copyImage(const GrVkGpu* gpu,
+ GrVkImage* srcImage,
+ VkImageLayout srcLayout,
+ GrVkImage* dstImage,
+ VkImageLayout dstLayout,
+ uint32_t copyRegionCount,
+ const VkImageCopy* copyRegions) {
+ SkASSERT(fIsActive);
+ SkASSERT(!fActiveRenderPass);
+ this->addResource(srcImage->resource());
+ this->addResource(dstImage->resource());
+ GR_VK_CALL(gpu->vkInterface(), CmdCopyImage(fCmdBuffer,
+ srcImage->textureImage(),
+ srcLayout,
+ dstImage->textureImage(),
+ dstLayout,
+ copyRegionCount,
+ copyRegions));
+}
+
+void GrVkCommandBuffer::copyImageToBuffer(const GrVkGpu* gpu,
+ GrVkImage* srcImage,
+ VkImageLayout srcLayout,
+ GrVkTransferBuffer* dstBuffer,
+ uint32_t copyRegionCount,
+ const VkBufferImageCopy* copyRegions) {
+ SkASSERT(fIsActive);
+ SkASSERT(!fActiveRenderPass);
+ this->addResource(srcImage->resource());
+ this->addResource(dstBuffer->resource());
+ GR_VK_CALL(gpu->vkInterface(), CmdCopyImageToBuffer(fCmdBuffer,
+ srcImage->textureImage(),
+ srcLayout,
+ dstBuffer->buffer(),
+ copyRegionCount,
+ copyRegions));
+}
+
+void GrVkCommandBuffer::copyBufferToImage(const GrVkGpu* gpu,
+ GrVkTransferBuffer* srcBuffer,
+ GrVkImage* dstImage,
+ VkImageLayout dstLayout,
+ uint32_t copyRegionCount,
+ const VkBufferImageCopy* copyRegions) {
+ SkASSERT(fIsActive);
+ SkASSERT(!fActiveRenderPass);
+ this->addResource(srcBuffer->resource());
+ this->addResource(dstImage->resource());
+ GR_VK_CALL(gpu->vkInterface(), CmdCopyBufferToImage(fCmdBuffer,
+ srcBuffer->buffer(),
+ dstImage->textureImage(),
+ dstLayout,
+ copyRegionCount,
+ copyRegions));
+}
+
+void GrVkCommandBuffer::clearColorImage(const GrVkGpu* gpu,
+ GrVkImage* image,
+ const VkClearColorValue* color,
+ uint32_t subRangeCount,
+ const VkImageSubresourceRange* subRanges) {
+ SkASSERT(fIsActive);
+ SkASSERT(!fActiveRenderPass);
+ this->addResource(image->resource());
+ GR_VK_CALL(gpu->vkInterface(), CmdClearColorImage(fCmdBuffer,
+ image->textureImage(),
+ image->currentLayout(),
+ color,
+ subRangeCount,
+ subRanges));
+}
+
+void GrVkCommandBuffer::clearAttachments(const GrVkGpu* gpu,
+ int numAttachments,
+ const VkClearAttachment* attachments,
+ int numRects,
+ const VkClearRect* clearRects) const {
+ SkASSERT(fIsActive);
+ SkASSERT(fActiveRenderPass);
+ SkASSERT(numAttachments > 0);
+ SkASSERT(numRects > 0);
+#ifdef SK_DEBUG
+ for (int i = 0; i < numAttachments; ++i) {
+ if (attachments[i].aspectMask == VK_IMAGE_ASPECT_COLOR_BIT) {
+ uint32_t testIndex;
+ SkAssertResult(fActiveRenderPass->colorAttachmentIndex(&testIndex));
+ SkASSERT(testIndex == attachments[i].colorAttachment);
+ }
+ }
+#endif
+ GR_VK_CALL(gpu->vkInterface(), CmdClearAttachments(fCmdBuffer,
+ numAttachments,
+ attachments,
+ numRects,
+ clearRects));
+}
+
+void GrVkCommandBuffer::bindDescriptorSets(const GrVkGpu* gpu,
+ GrVkProgram* program,
+ VkPipelineLayout layout,
+ uint32_t firstSet,
+ uint32_t setCount,
+ const VkDescriptorSet* descriptorSets,
+ uint32_t dynamicOffsetCount,
+ const uint32_t* dynamicOffsets) {
+ SkASSERT(fIsActive);
+ GR_VK_CALL(gpu->vkInterface(), CmdBindDescriptorSets(fCmdBuffer,
+ VK_PIPELINE_BIND_POINT_GRAPHICS,
+ layout,
+ firstSet,
+ setCount,
+ descriptorSets,
+ dynamicOffsetCount,
+ dynamicOffsets));
+ program->addUniformResources(*this);
+}
+
+void GrVkCommandBuffer::drawIndexed(const GrVkGpu* gpu,
+ uint32_t indexCount,
+ uint32_t instanceCount,
+ uint32_t firstIndex,
+ int32_t vertexOffset,
+ uint32_t firstInstance) const {
+ SkASSERT(fIsActive);
+ SkASSERT(fActiveRenderPass);
+ GR_VK_CALL(gpu->vkInterface(), CmdDrawIndexed(fCmdBuffer,
+ indexCount,
+ instanceCount,
+ firstIndex,
+ vertexOffset,
+ firstInstance));
+}
+
+void GrVkCommandBuffer::draw(const GrVkGpu* gpu,
+ uint32_t vertexCount,
+ uint32_t instanceCount,
+ uint32_t firstVertex,
+ uint32_t firstInstance) const {
+ SkASSERT(fIsActive);
+ SkASSERT(fActiveRenderPass);
+ GR_VK_CALL(gpu->vkInterface(), CmdDraw(fCmdBuffer,
+ vertexCount,
+ instanceCount,
+ firstVertex,
+ firstInstance));
+}
--- /dev/null
+/*
+* Copyright 2015 Google Inc.
+*
+* Use of this source code is governed by a BSD-style license that can be
+* found in the LICENSE file.
+*/
+
+#ifndef GrVkCommandBuffer_DEFINED
+#define GrVkCommandBuffer_DEFINED
+
+#include "GrVkGpu.h"
+#include "GrVkPipeline.h"
+#include "GrVkResource.h"
+#include "GrVkUtil.h"
+#include "vulkan/vulkan.h"
+
+class GrVkRenderPass;
+class GrVkRenderTarget;
+class GrVkTransferBuffer;
+
+class GrVkCommandBuffer : public GrVkResource {
+public:
+ static GrVkCommandBuffer* Create(const GrVkGpu* gpu, VkCommandPool cmdPool);
+ ~GrVkCommandBuffer() override;
+
+ void begin(const GrVkGpu* gpu);
+ void end(const GrVkGpu* gpu);
+
+ void invalidateState();
+
+ // Begins render pass on this command buffer. The framebuffer from GrVkRenderTarget will be used
+ // in the render pass.
+ void beginRenderPass(const GrVkGpu* gpu,
+ const GrVkRenderPass* renderPass,
+ const GrVkRenderTarget& target);
+ void endRenderPass(const GrVkGpu* gpu);
+
+ void submitToQueue(const GrVkGpu* gpu, VkQueue queue, GrVkGpu::SyncQueue sync);
+ bool finished(const GrVkGpu* gpu) const;
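+
+    // Lifecycle sketch (assumes a valid GrVkGpu and a queue; kForce_SyncQueue
+    // makes submitToQueue block on the submit fence):
+    //   GrVkCommandBuffer* cb = GrVkCommandBuffer::Create(gpu, gpu->cmdPool());
+    //   cb->begin(gpu);
+    //   ... record copies/clears/draws ...
+    //   cb->end(gpu);
+    //   cb->submitToQueue(gpu, queue, GrVkGpu::kForce_SyncQueue);
+    //   SkASSERT(cb->finished(gpu));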
+
+ ////////////////////////////////////////////////////////////////////////////
+ // CommandBuffer State/Object bindings
+ ////////////////////////////////////////////////////////////////////////////
+#if 0
+ void bindPipeline(const GrVkGpu* gpu) const;
+ void bindDynamicState(const GrVkGpu* gpu) const;
+ void bindDescriptorSet(const GrVkGpu* gpu) const;
+#endif
+
+ ////////////////////////////////////////////////////////////////////////////
+ // CommandBuffer commands
+ ////////////////////////////////////////////////////////////////////////////
+ enum BarrierType {
+ kMemory_BarrierType,
+ kBufferMemory_BarrierType,
+ kImageMemory_BarrierType
+ };
+
+ void pipelineBarrier(const GrVkGpu* gpu,
+ VkPipelineStageFlags srcStageMask,
+ VkPipelineStageFlags dstStageMask,
+ bool byRegion,
+ BarrierType barrierType,
+ void* barrier) const;
+
+ void bindVertexBuffer(GrVkGpu* gpu, GrVkVertexBuffer* vbuffer) {
+ VkBuffer vkBuffer = vbuffer->buffer();
+ if (!fBoundVertexBufferIsValid || vkBuffer != fBoundVertexBuffer) {
+ VkDeviceSize offset = 0;
+ GR_VK_CALL(gpu->vkInterface(), CmdBindVertexBuffers(fCmdBuffer,
+ 0,
+ 1,
+ &vkBuffer,
+ &offset));
+ fBoundVertexBufferIsValid = true;
+ fBoundVertexBuffer = vkBuffer;
+ addResource(vbuffer->resource());
+ }
+ }
+
+ void bindIndexBuffer(GrVkGpu* gpu, GrVkIndexBuffer* ibuffer) {
+ VkBuffer vkBuffer = ibuffer->buffer();
+ if (!fBoundIndexBufferIsValid || vkBuffer != fBoundIndexBuffer) {
+ GR_VK_CALL(gpu->vkInterface(), CmdBindIndexBuffer(fCmdBuffer,
+ vkBuffer,
+ 0,
+ VK_INDEX_TYPE_UINT16));
+ fBoundIndexBufferIsValid = true;
+ fBoundIndexBuffer = vkBuffer;
+ addResource(ibuffer->resource());
+ }
+ }
+
+ void bindPipeline(const GrVkGpu* gpu, const GrVkPipeline* pipeline) {
+ GR_VK_CALL(gpu->vkInterface(), CmdBindPipeline(fCmdBuffer,
+ VK_PIPELINE_BIND_POINT_GRAPHICS,
+ pipeline->pipeline()));
+ addResource(pipeline);
+ }
+
+ void bindDescriptorSets(const GrVkGpu* gpu,
+ GrVkProgram*,
+ VkPipelineLayout layout,
+ uint32_t firstSet,
+ uint32_t setCount,
+ const VkDescriptorSet* descriptorSets,
+ uint32_t dynamicOffsetCount,
+ const uint32_t* dynamicOffsets);
+
+ // Commands that only work outside of a render pass
+ void clearColorImage(const GrVkGpu* gpu,
+ GrVkImage* image,
+ const VkClearColorValue* color,
+ uint32_t subRangeCount,
+ const VkImageSubresourceRange* subRanges);
+
+ void copyImage(const GrVkGpu* gpu,
+ GrVkImage* srcImage,
+ VkImageLayout srcLayout,
+ GrVkImage* dstImage,
+ VkImageLayout dstLayout,
+ uint32_t copyRegionCount,
+ const VkImageCopy* copyRegions);
+
+ void copyImageToBuffer(const GrVkGpu* gpu,
+ GrVkImage* srcImage,
+ VkImageLayout srcLayout,
+ GrVkTransferBuffer* dstBuffer,
+ uint32_t copyRegionCount,
+ const VkBufferImageCopy* copyRegions);
+
+ void copyBufferToImage(const GrVkGpu* gpu,
+ GrVkTransferBuffer* srcBuffer,
+ GrVkImage* dstImage,
+ VkImageLayout dstLayout,
+ uint32_t copyRegionCount,
+ const VkBufferImageCopy* copyRegions);
+
+ // Commands that only work inside of a render pass
+ void clearAttachments(const GrVkGpu* gpu,
+ int numAttachments,
+ const VkClearAttachment* attachments,
+ int numRects,
+ const VkClearRect* clearRects) const;
+
+ void drawIndexed(const GrVkGpu* gpu,
+ uint32_t indexCount,
+ uint32_t instanceCount,
+ uint32_t firstIndex,
+ int32_t vertexOffset,
+ uint32_t firstInstance) const;
+
+ void draw(const GrVkGpu* gpu,
+ uint32_t vertexCount,
+ uint32_t instanceCount,
+ uint32_t firstVertex,
+ uint32_t firstInstance) const;
+
+ // Add ref-counted resource that will be tracked and released when this
+ // command buffer finishes execution
+ void addResource(const GrVkResource* resource) {
+ resource->ref();
+ fTrackedResources.push_back(resource);
+ }
+
+private:
+ static const int kInitialTrackedResourcesCount = 32;
+
+ explicit GrVkCommandBuffer(VkCommandBuffer cmdBuffer)
+ : fTrackedResources(kInitialTrackedResourcesCount)
+ , fCmdBuffer(cmdBuffer)
+ , fSubmitFence(VK_NULL_HANDLE)
+ , fBoundVertexBufferIsValid(false)
+ , fBoundIndexBufferIsValid(false)
+ , fIsActive(false)
+ , fActiveRenderPass(nullptr) {
+ this->invalidateState();
+ }
+
+ void freeGPUData(const GrVkGpu* gpu) const override;
+ void abandonSubResources() const override;
+
+ SkTArray<const GrVkResource*, true> fTrackedResources;
+
+ VkCommandBuffer fCmdBuffer;
+ VkFence fSubmitFence;
+
+ VkBuffer fBoundVertexBuffer;
+ bool fBoundVertexBufferIsValid;
+
+ VkBuffer fBoundIndexBuffer;
+ bool fBoundIndexBufferIsValid;
+
+    // Tracks whether we are in the middle of command buffer begin/end calls and thus can add new
+    // commands to the buffer.
+ bool fIsActive;
+
+ // Stores a pointer to the current active render pass (i.e. begin has been called but not end).
+    // A nullptr means there is no active render pass. The GrVkCommandBuffer does not own the render
+ // pass.
+ const GrVkRenderPass* fActiveRenderPass;
+};
+
+
+#endif
+
--- /dev/null
+/*
+* Copyright 2016 Google Inc.
+*
+* Use of this source code is governed by a BSD-style license that can be
+* found in the LICENSE file.
+*/
+
+#include "GrVkDescriptorPool.h"
+
+#include "GrVkGpu.h"
+#include "SkTemplates.h"
+
+
+GrVkDescriptorPool::GrVkDescriptorPool(const GrVkGpu* gpu, const DescriptorTypeCounts& typeCounts)
+ : INHERITED()
+ , fTypeCounts(typeCounts) {
+ uint32_t numPools = fTypeCounts.numPoolSizes();
+ SkAutoTDeleteArray<VkDescriptorPoolSize> poolSizes(new VkDescriptorPoolSize[numPools]);
+ int currentPool = 0;
+    // VK_DESCRIPTOR_TYPE_END_RANGE is the last valid descriptor type, so the bound is inclusive.
+    for (int i = VK_DESCRIPTOR_TYPE_BEGIN_RANGE; i <= VK_DESCRIPTOR_TYPE_END_RANGE; ++i) {
+ if (fTypeCounts.fDescriptorTypeCount[i]) {
+ VkDescriptorPoolSize& poolSize = poolSizes.get()[currentPool++];
+ poolSize.type = (VkDescriptorType)i;
+ poolSize.descriptorCount = fTypeCounts.fDescriptorTypeCount[i];
+ }
+ }
+ SkASSERT(currentPool == numPools);
+
+ VkDescriptorPoolCreateInfo createInfo;
+ memset(&createInfo, 0, sizeof(VkDescriptorPoolCreateInfo));
+ createInfo.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO;
+ createInfo.pNext = nullptr;
+ createInfo.flags = 0;
+ createInfo.maxSets = 2; // Currently we allow one set for samplers and one set for uniforms
+ createInfo.poolSizeCount = numPools;
+ createInfo.pPoolSizes = poolSizes.get();
+
+ GR_VK_CALL_ERRCHECK(gpu->vkInterface(), CreateDescriptorPool(gpu->device(),
+ &createInfo,
+ nullptr,
+ &fDescPool));
+}
+
+bool GrVkDescriptorPool::isCompatible(const DescriptorTypeCounts& typeCounts) const {
+ return fTypeCounts.isSuperSet(typeCounts);
+}
+
+void GrVkDescriptorPool::reset(const GrVkGpu* gpu) {
+ GR_VK_CALL_ERRCHECK(gpu->vkInterface(), ResetDescriptorPool(gpu->device(), fDescPool, 0));
+}
+
+void GrVkDescriptorPool::freeGPUData(const GrVkGpu* gpu) const {
+ // Destroying the VkDescriptorPool will automatically free and delete any VkDescriptorSets
+ // allocated from the pool.
+ GR_VK_CALL(gpu->vkInterface(), DestroyDescriptorPool(gpu->device(), fDescPool, nullptr));
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+uint32_t GrVkDescriptorPool::DescriptorTypeCounts::numPoolSizes() const {
+ uint32_t count = 0;
+    for (int i = VK_DESCRIPTOR_TYPE_BEGIN_RANGE; i <= VK_DESCRIPTOR_TYPE_END_RANGE; ++i) {
+ count += fDescriptorTypeCount[i] ? 1 : 0;
+ }
+ return count;
+}
+
+bool GrVkDescriptorPool::DescriptorTypeCounts::isSuperSet(const DescriptorTypeCounts& that) const {
+    for (int i = VK_DESCRIPTOR_TYPE_BEGIN_RANGE; i <= VK_DESCRIPTOR_TYPE_END_RANGE; ++i) {
+ if (that.fDescriptorTypeCount[i] > fDescriptorTypeCount[i]) {
+ return false;
+ }
+ }
+ return true;
+}
+
+void GrVkDescriptorPool::DescriptorTypeCounts::setTypeCount(VkDescriptorType type, uint8_t count) {
+ fDescriptorTypeCount[type] = count;
+}
--- /dev/null
+/*
+* Copyright 2016 Google Inc.
+*
+* Use of this source code is governed by a BSD-style license that can be
+* found in the LICENSE file.
+*/
+
+#ifndef GrVkDescriptorPool_DEFINED
+#define GrVkDescriptorPool_DEFINED
+
+#include "GrVkResource.h"
+
+#include "vulkan/vulkan.h"
+
+class GrVkGpu;
+
+class GrVkDescriptorPool : public GrVkResource {
+public:
+ class DescriptorTypeCounts {
+ public:
+ DescriptorTypeCounts() {
+ memset(fDescriptorTypeCount, 0, sizeof(fDescriptorTypeCount));
+ }
+
+ void setTypeCount(VkDescriptorType type, uint8_t count);
+ uint32_t numPoolSizes() const;
+
+ // Returns true if, for each i, that.fDescriptorTypeCount[i] <= fDescriptorTypeCount[i].
+ bool isSuperSet(const DescriptorTypeCounts& that) const;
+ private:
+ uint8_t fDescriptorTypeCount[VK_DESCRIPTOR_TYPE_RANGE_SIZE];
+
+ friend class GrVkDescriptorPool;
+ };
+
+ explicit GrVkDescriptorPool(const GrVkGpu* gpu, const DescriptorTypeCounts& typeCounts);
+
+ VkDescriptorPool descPool() const { return fDescPool; }
+
+ void reset(const GrVkGpu* gpu);
+
+ // Returns whether this descriptor pool could support the requested typeCounts, assuming it
+ // were fully reset and not in use by another draw.
+ bool isCompatible(const DescriptorTypeCounts& typeCounts) const;
+
+private:
+ void freeGPUData(const GrVkGpu* gpu) const override;
+
+ DescriptorTypeCounts fTypeCounts;
+ VkDescriptorPool fDescPool;
+
+ typedef GrVkResource INHERITED;
+};
+
+
+#endif
\ No newline at end of file
--- /dev/null
+/*
+* Copyright 2016 Google Inc.
+*
+* Use of this source code is governed by a BSD-style license that can be
+* found in the LICENSE file.
+*/
+
+#include "GrVkFramebuffer.h"
+
+#include "GrVkGpu.h"
+#include "GrVkImageView.h"
+#include "GrVkRenderPass.h"
+
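+// Bundles the given image views into a VkFramebuffer compatible with renderPass. The resolve and
+// stencil attachments may be null; a color attachment is required.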
+GrVkFramebuffer* GrVkFramebuffer::Create(GrVkGpu* gpu,
+ int width, int height,
+ const GrVkRenderPass* renderPass,
+ const GrVkImageView* colorAttachment,
+ const GrVkImageView* resolveAttachment,
+ const GrVkImageView* stencilAttachment) {
+ // At the very least we need a renderPass and a colorAttachment
+ SkASSERT(renderPass);
+ SkASSERT(colorAttachment);
+
+ VkImageView attachments[3];
+ attachments[0] = colorAttachment->imageView();
+ int numAttachments = 1;
+ if (resolveAttachment) {
+ attachments[numAttachments++] = resolveAttachment->imageView();
+ }
+ if (stencilAttachment) {
+ attachments[numAttachments++] = stencilAttachment->imageView();
+ }
+
+ VkFramebufferCreateInfo createInfo;
+ memset(&createInfo, 0, sizeof(VkFramebufferCreateInfo));
+ createInfo.sType = VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO;
+ createInfo.pNext = nullptr;
+ createInfo.flags = 0;
+ createInfo.renderPass = renderPass->vkRenderPass();
+ createInfo.attachmentCount = numAttachments;
+ createInfo.pAttachments = attachments;
+ createInfo.width = width;
+ createInfo.height = height;
+ createInfo.layers = 1;
+
+ VkFramebuffer framebuffer;
+ VkResult err = GR_VK_CALL(gpu->vkInterface(), CreateFramebuffer(gpu->device(),
+ &createInfo,
+ nullptr,
+ &framebuffer));
+ if (err) {
+ return nullptr;
+ }
+
+ return new GrVkFramebuffer(framebuffer);
+}
+
+void GrVkFramebuffer::freeGPUData(const GrVkGpu* gpu) const {
+ SkASSERT(fFramebuffer);
+ GR_VK_CALL(gpu->vkInterface(), DestroyFramebuffer(gpu->device(), fFramebuffer, nullptr));
+}
+
+
--- /dev/null
+/*
+* Copyright 2016 Google Inc.
+*
+* Use of this source code is governed by a BSD-style license that can be
+* found in the LICENSE file.
+*/
+
+#ifndef GrVkFramebuffer_DEFINED
+#define GrVkFramebuffer_DEFINED
+
+#include "GrTypes.h"
+
+#include "GrVkResource.h"
+
+#include "vulkan/vulkan.h"
+
+class GrVkGpu;
+class GrVkImageView;
+class GrVkRenderPass;
+
+class GrVkFramebuffer : public GrVkResource {
+public:
+ static GrVkFramebuffer* Create(GrVkGpu* gpu,
+ int width, int height,
+ const GrVkRenderPass* renderPass,
+ const GrVkImageView* colorAttachment,
+ const GrVkImageView* resolveAttachment,
+ const GrVkImageView* stencilAttachment);
+
+ VkFramebuffer framebuffer() const { return fFramebuffer; }
+
+private:
+ GrVkFramebuffer(VkFramebuffer framebuffer) : INHERITED(), fFramebuffer(framebuffer) {}
+
+ GrVkFramebuffer(const GrVkFramebuffer&);
+ GrVkFramebuffer& operator=(const GrVkFramebuffer&);
+
+ void freeGPUData(const GrVkGpu* gpu) const override;
+
+ VkFramebuffer fFramebuffer;
+
+ typedef GrVkResource INHERITED;
+};
+
+#endif
\ No newline at end of file
--- /dev/null
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "GrVkGpu.h"
+
+#include "GrContextOptions.h"
+#include "GrGeometryProcessor.h"
+#include "GrGpuResourceCacheAccess.h"
+#include "GrPipeline.h"
+#include "GrRenderTargetPriv.h"
+#include "GrSurfacePriv.h"
+#include "GrTexturePriv.h"
+#include "GrVertices.h"
+
+#include "GrVkCommandBuffer.h"
+#include "GrVkImage.h"
+#include "GrVkIndexBuffer.h"
+#include "GrVkMemory.h"
+#include "GrVkPipeline.h"
+#include "GrVkProgram.h"
+#include "GrVkProgramBuilder.h"
+#include "GrVkProgramDesc.h"
+#include "GrVkRenderPass.h"
+#include "GrVkResourceProvider.h"
+#include "GrVkTexture.h"
+#include "GrVkTextureRenderTarget.h"
+#include "GrVkTransferBuffer.h"
+#include "GrVkVertexBuffer.h"
+
+#include "SkConfig8888.h"
+
+#include "vk/GrVkInterface.h"
+
+#define VK_CALL(X) GR_VK_CALL(this->vkInterface(), X)
+#define VK_CALL_RET(RET, X) GR_VK_CALL_RET(this->vkInterface(), RET, X)
+#define VK_CALL_ERRCHECK(X) GR_VK_CALL_ERRCHECK(this->vkInterface(), X)
+
+////////////////////////////////////////////////////////////////////////////////
+// Stuff used to set up a GrVkGpu secretly for now.
+
+// For now the VkGpuCreate is using the same signature as GL. This is mostly for ease of
+// hiding this code from official Skia. In the end VkGpuCreate will not take a GrBackendContext
+// and most likely would take an optional device and queues to use.
+GrGpu* vk_gpu_create(GrBackendContext backendContext, const GrContextOptions& options,
+ GrContext* context) {
+ // Below is Vulkan setup code that would normally be done by a client, but which we do here
+ // for now for testing purposes.
+ VkPhysicalDevice physDev;
+ VkDevice device;
+ VkInstance inst;
+ VkResult err;
+
+ const VkApplicationInfo app_info = {
+ VK_STRUCTURE_TYPE_APPLICATION_INFO, // sType
+ nullptr, // pNext
+ "vktest", // pApplicationName
+ 0, // applicationVersion
+ "vktest", // pEngineName
+ 0, // engineVersion
+ VK_API_VERSION, // apiVersion
+ };
+ const VkInstanceCreateInfo instance_create = {
+ VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO, // sType
+ nullptr, // pNext
+ 0, // flags
+ &app_info, // pApplicationInfo
+ 0, // enabledLayerCount
+ nullptr, // ppEnabledLayerNames
+ 0, // enabledExtensionCount
+ nullptr, // ppEnabledExtensionNames
+ };
+ err = vkCreateInstance(&instance_create, nullptr, &inst);
+ if (err < 0) {
+ SkDebugf("vkCreateInstanced failed: %d\n", err);
+ SkFAIL("failing");
+ }
+
+ uint32_t gpuCount;
+ err = vkEnumeratePhysicalDevices(inst, &gpuCount, nullptr);
+ if (err) {
+ SkDebugf("vkEnumeratePhysicalDevices failed: %d\n", err);
+ SkFAIL("failing");
+ }
+ SkASSERT(gpuCount > 0);
+ // Just grab the first physical device instead of getting the whole array.
+ gpuCount = 1;
+ err = vkEnumeratePhysicalDevices(inst, &gpuCount, &physDev);
+ if (err) {
+ SkDebugf("vkEnumeratePhysicalDevices failed: %d\n", err);
+ SkFAIL("failing");
+ }
+
+ // query to get the initial queue props size
+ uint32_t queueCount;
+ vkGetPhysicalDeviceQueueFamilyProperties(physDev, &queueCount, nullptr);
+ SkASSERT(queueCount >= 1);
+
+ SkAutoMalloc queuePropsAlloc(queueCount * sizeof(VkQueueFamilyProperties));
+ // now get the actual queue props
+ VkQueueFamilyProperties* queueProps = (VkQueueFamilyProperties*)queuePropsAlloc.get();
+
+ vkGetPhysicalDeviceQueueFamilyProperties(physDev, &queueCount, queueProps);
+
+ // iterate to find the graphics queue
+ uint32_t graphicsQueueIndex = -1;
+ for (uint32_t i = 0; i < queueCount; i++) {
+ if (queueProps[i].queueFlags & VK_QUEUE_GRAPHICS_BIT) {
+ graphicsQueueIndex = i;
+ break;
+ }
+ }
+ SkASSERT(graphicsQueueIndex < queueCount);
+
+ float queuePriorities[1] = { 0.0 };
+ const VkDeviceQueueCreateInfo queueInfo = {
+ VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO, // sType
+ nullptr, // pNext
+ 0, // VkDeviceQueueCreateFlags
+ graphicsQueueIndex, // queueFamilyIndex
+ 1, // queueCount
+ queuePriorities, // pQueuePriorities
+ };
+ const VkDeviceCreateInfo deviceInfo = {
+ VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO, // sType
+ nullptr, // pNext
+ 0, // VkDeviceCreateFlags
+ 1, // queueCreateInfoCount
+ &queueInfo, // pQueueCreateInfos
+ 0, // enabledLayerCount
+ nullptr, // ppEnabledLayerNames
+ 0, // enabledExtensionCount
+ nullptr, // ppEnabledExtensionNames
+ nullptr // ppEnabledFeatures
+ };
+
+ err = vkCreateDevice(physDev, &deviceInfo, nullptr, &device);
+ if (err) {
+ SkDebugf("CreateDevice failed: %d\n", err);
+ SkFAIL("failing");
+ }
+
+ VkQueue queue;
+ vkGetDeviceQueue(device, graphicsQueueIndex, 0, &queue);
+
+ const VkCommandPoolCreateInfo cmdPoolInfo = {
+ VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO, // sType
+ nullptr, // pNext
+ 0, // CmdPoolCreateFlags
+ graphicsQueueIndex, // queueFamilyIndex
+ };
+
+ VkCommandPool cmdPool;
+ err = vkCreateCommandPool(device, &cmdPoolInfo, nullptr, &cmdPool);
+ if (err) {
+ SkDebugf("CreateCommandPool failed: %d\n", err);
+ SkFAIL("failing");
+ }
+
+ return new GrVkGpu(context, options, physDev, device, queue, cmdPool, inst);
+}
+
+////////////////////////////////////////////////////////////////////////////////
+
+GrVkGpu::GrVkGpu(GrContext* context, const GrContextOptions& options,
+ VkPhysicalDevice physDev, VkDevice device, VkQueue queue, VkCommandPool cmdPool,
+ VkInstance inst)
+ : INHERITED(context)
+ , fDevice(device)
+ , fQueue(queue)
+ , fCmdPool(cmdPool)
+ , fResourceProvider(this)
+ , fVkInstance(inst) {
+ fInterface.reset(GrVkCreateInterface(fVkInstance));
+ fCompiler = shaderc_compiler_initialize();
+
+ fVkCaps.reset(new GrVkCaps(options, fInterface, physDev));
+ fCaps.reset(SkRef(fVkCaps.get()));
+
+ fCurrentCmdBuffer = fResourceProvider.createCommandBuffer();
+ SkASSERT(fCurrentCmdBuffer);
+ fCurrentCmdBuffer->begin(this);
+ VK_CALL(GetPhysicalDeviceMemoryProperties(physDev, &fPhysDevMemProps));
+}
+
+GrVkGpu::~GrVkGpu() {
+ shaderc_compiler_release(fCompiler);
+ fCurrentCmdBuffer->end(this);
+ fCurrentCmdBuffer->unref(this);
+
+ // wait for all commands to finish
+ VK_CALL(QueueWaitIdle(fQueue));
+
+ // must call this just before we destroy the VkDevice
+ fResourceProvider.destroyResources();
+
+ VK_CALL(DestroyCommandPool(fDevice, fCmdPool, nullptr));
+ VK_CALL(DestroyDevice(fDevice, nullptr));
+ VK_CALL(DestroyInstance(fVkInstance, nullptr));
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
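+// Ends the current command buffer, hands it to the queue, and immediately begins a fresh one so
+// there is always an active buffer to record into. With kForce_SyncQueue this waits for the
+// submitted work to finish before returning.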
+void GrVkGpu::submitCommandBuffer(SyncQueue sync) {
+ SkASSERT(fCurrentCmdBuffer);
+ fCurrentCmdBuffer->end(this);
+
+ fCurrentCmdBuffer->submitToQueue(this, fQueue, sync);
+ fResourceProvider.checkCommandBuffers();
+
+ // Release old command buffer and create a new one
+ fCurrentCmdBuffer->unref(this);
+ fCurrentCmdBuffer = fResourceProvider.createCommandBuffer();
+ SkASSERT(fCurrentCmdBuffer);
+
+ fCurrentCmdBuffer->begin(this);
+}
+
+///////////////////////////////////////////////////////////////////////////////
+GrVertexBuffer* GrVkGpu::onCreateVertexBuffer(size_t size, bool dynamic) {
+ return GrVkVertexBuffer::Create(this, size, dynamic);
+}
+
+GrIndexBuffer* GrVkGpu::onCreateIndexBuffer(size_t size, bool dynamic) {
+ return GrVkIndexBuffer::Create(this, size, dynamic);
+}
+
+GrTransferBuffer* GrVkGpu::onCreateTransferBuffer(size_t size, TransferType type) {
+ GrVkBuffer::Type bufferType = kCpuToGpu_TransferType == type ? GrVkBuffer::kCopyRead_Type
+ : GrVkBuffer::kCopyWrite_Type;
+ return GrVkTransferBuffer::Create(this, size, bufferType);
+}
+
+////////////////////////////////////////////////////////////////////////////////
+bool GrVkGpu::onGetWritePixelsInfo(GrSurface* dstSurface, int width, int height,
+ GrPixelConfig srcConfig, DrawPreference* drawPreference,
+ WritePixelTempDrawInfo* tempDrawInfo) {
+ if (kIndex_8_GrPixelConfig == srcConfig || GrPixelConfigIsCompressed(dstSurface->config())) {
+ return false;
+ }
+
+ // Currently we don't handle draws, so if the caller wants/needs to do a draw we need to fail
+ if (kNoDraw_DrawPreference != *drawPreference) {
+ return false;
+ }
+
+ if (dstSurface->config() != srcConfig) {
+ // TODO: This should fall back to drawing or copying to change config of dstSurface to
+ // match that of srcConfig.
+ return false;
+ }
+
+ return true;
+}
+
+bool GrVkGpu::onWritePixels(GrSurface* surface,
+ int left, int top, int width, int height,
+ GrPixelConfig config, const void* buffer,
+ size_t rowBytes) {
+ GrVkTexture* vkTex = static_cast<GrVkTexture*>(surface->asTexture());
+ if (!vkTex) {
+ return false;
+ }
+
+ // We assume Vulkan doesn't do sRGB <-> linear conversions when reading and writing pixels.
+ if (GrPixelConfigIsSRGB(surface->config()) != GrPixelConfigIsSRGB(config)) {
+ return false;
+ }
+
+ bool success = false;
+ if (GrPixelConfigIsCompressed(vkTex->desc().fConfig)) {
+ // We check that config == desc.fConfig in GrGpu::getWritePixelsInfo()
+ SkASSERT(config == vkTex->desc().fConfig);
+ // TODO: add compressed texture support
+ // delete the following two lines and uncomment the two after that when ready
+ vkTex->unref();
+ return false;
+ //success = this->uploadCompressedTexData(vkTex->desc(), buffer, false, left, top, width,
+ // height);
+ } else {
+ bool linearTiling = vkTex->isLinearTiled();
+ if (linearTiling && VK_IMAGE_LAYOUT_PREINITIALIZED != vkTex->currentLayout()) {
+ // Need to change the layout to general in order to perform a host write
+ VkImageLayout layout = vkTex->currentLayout();
+ VkPipelineStageFlags srcStageMask = GrVkMemory::LayoutToPipelineStageFlags(layout);
+ VkPipelineStageFlags dstStageMask = VK_PIPELINE_STAGE_HOST_BIT;
+ VkAccessFlags srcAccessMask = GrVkMemory::LayoutToSrcAccessMask(layout);
+ VkAccessFlags dstAccessMask = VK_ACCESS_HOST_WRITE_BIT;
+ vkTex->setImageLayout(this,
+ VK_IMAGE_LAYOUT_GENERAL,
+ srcAccessMask,
+ dstAccessMask,
+ srcStageMask,
+ dstStageMask,
+ false);
+ }
+ success = this->uploadTexData(vkTex, left, top, width, height, config,
+ buffer, rowBytes);
+ }
+
+ if (success) {
+ vkTex->texturePriv().dirtyMipMaps(true);
+ return true;
+ }
+
+ return false;
+}
+
+bool GrVkGpu::uploadTexData(GrVkTexture* tex,
+ int left, int top, int width, int height,
+ GrPixelConfig dataConfig,
+ const void* data,
+ size_t rowBytes) {
+ SkASSERT(data);
+
+ // If we're uploading compressed data then we should be using uploadCompressedTexData
+ SkASSERT(!GrPixelConfigIsCompressed(dataConfig));
+
+ bool linearTiling = tex->isLinearTiled();
+
+ size_t bpp = GrBytesPerPixel(dataConfig);
+
+ const GrSurfaceDesc& desc = tex->desc();
+
+ if (!GrSurfacePriv::AdjustWritePixelParams(desc.fWidth, desc.fHeight, bpp, &left, &top,
+ &width, &height, &data, &rowBytes)) {
+ return false;
+ }
+ size_t trimRowBytes = width * bpp;
+
+ if (linearTiling) {
+ SkASSERT(VK_IMAGE_LAYOUT_PREINITIALIZED == tex->currentLayout() ||
+ VK_IMAGE_LAYOUT_GENERAL == tex->currentLayout());
+ const VkImageSubresource subres = {
+ VK_IMAGE_ASPECT_COLOR_BIT,
+ 0, // mipLevel
+ 0, // arraySlice
+ };
+ VkSubresourceLayout layout;
+ VkResult err;
+
+ const GrVkInterface* interface = this->vkInterface();
+
+ GR_VK_CALL(interface, GetImageSubresourceLayout(fDevice,
+ tex->textureImage(),
+ &subres,
+ &layout));
+
+ int texTop = kBottomLeft_GrSurfaceOrigin == desc.fOrigin ? tex->height() - top - height
+ : top;
+ VkDeviceSize offset = texTop*layout.rowPitch + left*bpp;
+ VkDeviceSize size = height*layout.rowPitch;
+ void* mapPtr;
+ err = GR_VK_CALL(interface, MapMemory(fDevice, tex->textureMemory(), offset, size, 0,
+ &mapPtr));
+ if (err) {
+ return false;
+ }
+
+ if (kBottomLeft_GrSurfaceOrigin == desc.fOrigin) {
+ // copy into buffer by rows
+ const char* srcRow = reinterpret_cast<const char*>(data);
+ char* dstRow = reinterpret_cast<char*>(mapPtr)+(height - 1)*layout.rowPitch;
+ for (int y = 0; y < height; y++) {
+ memcpy(dstRow, srcRow, trimRowBytes);
+ srcRow += rowBytes;
+ dstRow -= layout.rowPitch;
+ }
+ } else {
+ // If there is no padding on the src (rowBytes) or dst (layout.rowPitch) we can memcpy
+ if (trimRowBytes == rowBytes && trimRowBytes == layout.rowPitch) {
+ memcpy(mapPtr, data, trimRowBytes * height);
+ } else {
+ SkRectMemcpy(mapPtr, layout.rowPitch, data, rowBytes, trimRowBytes, height);
+ }
+ }
+
+ GR_VK_CALL(interface, UnmapMemory(fDevice, tex->textureMemory()));
+ } else {
+ GrVkTransferBuffer* transferBuffer =
+ GrVkTransferBuffer::Create(this, trimRowBytes * height, GrVkBuffer::kCopyRead_Type);
+
+ void* mapPtr = transferBuffer->map();
+
+ if (kBottomLeft_GrSurfaceOrigin == desc.fOrigin) {
+ // copy into buffer by rows
+ const char* srcRow = reinterpret_cast<const char*>(data);
+ char* dstRow = reinterpret_cast<char*>(mapPtr)+(height - 1)*trimRowBytes;
+ for (int y = 0; y < height; y++) {
+ memcpy(dstRow, srcRow, trimRowBytes);
+ srcRow += rowBytes;
+ dstRow -= trimRowBytes;
+ }
+ } else {
+ // If there is no padding on the src data rows, we can do a single memcpy
+ if (trimRowBytes == rowBytes) {
+ memcpy(mapPtr, data, trimRowBytes * height);
+ } else {
+ SkRectMemcpy(mapPtr, trimRowBytes, data, rowBytes, trimRowBytes, height);
+ }
+ }
+
+ transferBuffer->unmap();
+
+ // make sure the unmap has finished
+ transferBuffer->addMemoryBarrier(this,
+ VK_ACCESS_HOST_WRITE_BIT,
+ VK_ACCESS_TRANSFER_READ_BIT,
+ VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT,
+ VK_PIPELINE_STAGE_TRANSFER_BIT,
+ false);
+
+ // Set up copy region
+ bool flipY = kBottomLeft_GrSurfaceOrigin == tex->origin();
+ VkOffset3D offset = {
+ left,
+ flipY ? tex->height() - top - height : top,
+ 0
+ };
+
+ VkBufferImageCopy region;
+ memset(&region, 0, sizeof(VkBufferImageCopy));
+ region.bufferOffset = 0;
+ region.bufferRowLength = width;
+ region.bufferImageHeight = height;
+ region.imageSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1 };
+ region.imageOffset = offset;
+ region.imageExtent = { (uint32_t)width, (uint32_t)height, 1 };
+
+ // Change layout of our target so it can be copied to
+ VkImageLayout layout = tex->currentLayout();
+ VkPipelineStageFlags srcStageMask = GrVkMemory::LayoutToPipelineStageFlags(layout);
+ VkPipelineStageFlags dstStageMask = VK_PIPELINE_STAGE_TRANSFER_BIT;
+ VkAccessFlags srcAccessMask = GrVkMemory::LayoutToSrcAccessMask(layout);
+ VkAccessFlags dstAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT;
+ tex->setImageLayout(this,
+ VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
+ srcAccessMask,
+ dstAccessMask,
+ srcStageMask,
+ dstStageMask,
+ false);
+
+ // Copy the buffer to the image
+ fCurrentCmdBuffer->copyBufferToImage(this,
+ transferBuffer,
+ tex,
+ VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
+ 1,
+ &region);
+
+ // Submit the current command buffer to the Queue
+ this->submitCommandBuffer(kSkip_SyncQueue);
+
+ transferBuffer->unref();
+ }
+
+ return true;
+}
+
+////////////////////////////////////////////////////////////////////////////////
+GrTexture* GrVkGpu::onCreateTexture(const GrSurfaceDesc& desc, GrGpuResource::LifeCycle lifeCycle,
+ const void* srcData, size_t rowBytes) {
+ bool renderTarget = SkToBool(desc.fFlags & kRenderTarget_GrSurfaceFlag);
+
+ VkFormat pixelFormat;
+ if (!GrPixelConfigToVkFormat(desc.fConfig, &pixelFormat)) {
+ return nullptr;
+ }
+
+ if (!fVkCaps->isConfigTexturable(desc.fConfig)) {
+ return nullptr;
+ }
+
+ bool linearTiling = false;
+ if (SkToBool(desc.fFlags & kZeroCopy_GrSurfaceFlag)) {
+ if (fVkCaps->isConfigTexurableLinearly(desc.fConfig) &&
+ (!renderTarget || fVkCaps->isConfigRenderableLinearly(desc.fConfig, false))) {
+ linearTiling = true;
+ } else {
+ return nullptr;
+ }
+ }
+
+ VkImageUsageFlags usageFlags = VK_IMAGE_USAGE_SAMPLED_BIT;
+ if (renderTarget) {
+ usageFlags |= VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT;
+ }
+
+ // For now we will set both VK_IMAGE_USAGE_TRANSFER_DST_BIT and VK_IMAGE_USAGE_TRANSFER_SRC_BIT
+ // on every texture since we do not know whether or not we will be using this texture in some
+ // copy. Also this assumes, as is the current case, that all render targets in Vulkan are also
+ // textures. If we change this practice of setting both bits, we must make sure to set the dst
+ // bit if we are uploading srcData to the texture.
+ usageFlags |= VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT;
+
+ VkFlags memProps = (srcData && linearTiling) ? VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT :
+ VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
+
+ // This ImageDesc refers to the texture that will be read by the client. Thus even if msaa is
+ // requested, this ImageDesc describes the resolved texture. Therefore we always have samples set
+ // to 1.
+ GrVkImage::ImageDesc imageDesc;
+ imageDesc.fImageType = VK_IMAGE_TYPE_2D;
+ imageDesc.fFormat = pixelFormat;
+ imageDesc.fWidth = desc.fWidth;
+ imageDesc.fHeight = desc.fHeight;
+ imageDesc.fLevels = 1;
+ imageDesc.fSamples = 1;
+ imageDesc.fImageTiling = linearTiling ? VK_IMAGE_TILING_LINEAR : VK_IMAGE_TILING_OPTIMAL;
+ imageDesc.fUsageFlags = usageFlags;
+ imageDesc.fMemProps = memProps;
+
+ GrVkTexture* tex;
+ if (renderTarget) {
+ tex = GrVkTextureRenderTarget::CreateNewTextureRenderTarget(this, desc, lifeCycle,
+ imageDesc);
+ } else {
+ tex = GrVkTexture::CreateNewTexture(this, desc, lifeCycle, imageDesc);
+ }
+
+ if (!tex) {
+ return nullptr;
+ }
+
+ if (srcData) {
+ if (!this->uploadTexData(tex, 0, 0, desc.fWidth, desc.fHeight, desc.fConfig, srcData,
+ rowBytes)) {
+ tex->unref();
+ return nullptr;
+ }
+ }
+
+ return tex;
+}
+
+////////////////////////////////////////////////////////////////////////////////
+
+static GrSurfaceOrigin resolve_origin(GrSurfaceOrigin origin) {
+ // By default, all textures in Vk use TopLeft
+ if (kDefault_GrSurfaceOrigin == origin) {
+ return kTopLeft_GrSurfaceOrigin;
+ } else {
+ return origin;
+ }
+}
+
+GrTexture* GrVkGpu::onWrapBackendTexture(const GrBackendTextureDesc& desc,
+ GrWrapOwnership ownership) {
+ VkFormat format;
+ if (!GrPixelConfigToVkFormat(desc.fConfig, &format)) {
+ return nullptr;
+ }
+
+ if (0 == desc.fTextureHandle) {
+ return nullptr;
+ }
+
+ int maxSize = this->caps()->maxTextureSize();
+ if (desc.fWidth > maxSize || desc.fHeight > maxSize) {
+ return nullptr;
+ }
+
+ // TODO: determine what format Chrome will actually send us and turn it into a Resource
+ GrVkImage::Resource* imageRsrc = reinterpret_cast<GrVkImage::Resource*>(desc.fTextureHandle);
+
+ GrGpuResource::LifeCycle lifeCycle;
+ switch (ownership) {
+ case kAdopt_GrWrapOwnership:
+ lifeCycle = GrGpuResource::kAdopted_LifeCycle;
+ break;
+ case kBorrow_GrWrapOwnership:
+ lifeCycle = GrGpuResource::kBorrowed_LifeCycle;
+ break;
+ }
+
+ GrSurfaceDesc surfDesc;
+ // next line relies on GrBackendTextureDesc's flags matching GrTexture's
+ surfDesc.fFlags = (GrSurfaceFlags)desc.fFlags;
+ surfDesc.fWidth = desc.fWidth;
+ surfDesc.fHeight = desc.fHeight;
+ surfDesc.fConfig = desc.fConfig;
+ surfDesc.fSampleCnt = SkTMin(desc.fSampleCnt, this->caps()->maxSampleCount());
+ bool renderTarget = SkToBool(desc.fFlags & kRenderTarget_GrBackendTextureFlag);
+ // In GL, Chrome assumes all textures are BottomLeft
+ // In VK, we don't have this restriction
+ surfDesc.fOrigin = resolve_origin(desc.fOrigin);
+
+ GrVkTexture* texture = nullptr;
+ if (renderTarget) {
+ texture = GrVkTextureRenderTarget::CreateWrappedTextureRenderTarget(this, surfDesc,
+ lifeCycle, format,
+ imageRsrc);
+ } else {
+ texture = GrVkTexture::CreateWrappedTexture(this, surfDesc, lifeCycle, format, imageRsrc);
+ }
+ if (!texture) {
+ return nullptr;
+ }
+
+ return texture;
+}
+
+GrRenderTarget* GrVkGpu::onWrapBackendRenderTarget(const GrBackendRenderTargetDesc& wrapDesc,
+ GrWrapOwnership ownership) {
+
+ // TODO: determine what format Chrome will actually send us and turn it into a Resource
+ GrVkImage::Resource* imageRsrc =
+ reinterpret_cast<GrVkImage::Resource*>(wrapDesc.fRenderTargetHandle);
+
+ GrGpuResource::LifeCycle lifeCycle;
+ switch (ownership) {
+ case kAdopt_GrWrapOwnership:
+ lifeCycle = GrGpuResource::kAdopted_LifeCycle;
+ break;
+ case kBorrow_GrWrapOwnership:
+ lifeCycle = GrGpuResource::kBorrowed_LifeCycle;
+ break;
+ }
+
+ GrSurfaceDesc desc;
+ desc.fConfig = wrapDesc.fConfig;
+ desc.fFlags = kCheckAllocation_GrSurfaceFlag;
+ desc.fWidth = wrapDesc.fWidth;
+ desc.fHeight = wrapDesc.fHeight;
+ desc.fSampleCnt = SkTMin(wrapDesc.fSampleCnt, this->caps()->maxSampleCount());
+
+ desc.fOrigin = resolve_origin(wrapDesc.fOrigin);
+
+ GrVkRenderTarget* tgt = GrVkRenderTarget::CreateWrappedRenderTarget(this, desc,
+ lifeCycle, imageRsrc);
+ if (tgt && wrapDesc.fStencilBits) {
+ if (!this->createStencilAttachmentForRenderTarget(tgt, desc.fWidth, desc.fHeight)) {
+ tgt->unref();
+ return nullptr;
+ }
+ }
+ return tgt;
+}
+
+////////////////////////////////////////////////////////////////////////////////
+
+void GrVkGpu::bindGeometry(const GrPrimitiveProcessor& primProc,
+ const GrNonInstancedVertices& vertices) {
+ GrVkVertexBuffer* vbuf;
+ vbuf = (GrVkVertexBuffer*)vertices.vertexBuffer();
+ SkASSERT(vbuf);
+ SkASSERT(!vbuf->isMapped());
+
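+ // The client may have filled the buffer on the CPU via map/unmap, so make those host writes
+ // visible before the vertex input stage reads them.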
+ vbuf->addMemoryBarrier(this,
+ VK_ACCESS_HOST_WRITE_BIT,
+ VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT,
+ VK_PIPELINE_STAGE_HOST_BIT,
+ VK_PIPELINE_STAGE_VERTEX_INPUT_BIT,
+ false);
+
+ fCurrentCmdBuffer->bindVertexBuffer(this, vbuf);
+
+ if (vertices.isIndexed()) {
+ GrVkIndexBuffer* ibuf = (GrVkIndexBuffer*)vertices.indexBuffer();
+ SkASSERT(ibuf);
+ SkASSERT(!ibuf->isMapped());
+
+ ibuf->addMemoryBarrier(this,
+ VK_ACCESS_HOST_WRITE_BIT,
+ VK_ACCESS_INDEX_READ_BIT,
+ VK_PIPELINE_STAGE_HOST_BIT,
+ VK_PIPELINE_STAGE_VERTEX_INPUT_BIT,
+ false);
+
+ fCurrentCmdBuffer->bindIndexBuffer(this, ibuf);
+ }
+}
+
+void GrVkGpu::buildProgramDesc(GrProgramDesc* desc,
+ const GrPrimitiveProcessor& primProc,
+ const GrPipeline& pipeline) const {
+ if (!GrVkProgramDescBuilder::Build(desc, primProc, pipeline, *this->vkCaps().glslCaps())) {
+ SkDEBUGFAIL("Failed to generate GL program descriptor");
+ }
+}
+
+////////////////////////////////////////////////////////////////////////////////
+
+GrStencilAttachment* GrVkGpu::createStencilAttachmentForRenderTarget(const GrRenderTarget* rt,
+ int width,
+ int height) {
+ SkASSERT(rt->asTexture());
+ SkASSERT(width >= rt->width());
+ SkASSERT(height >= rt->height());
+
+ int samples = rt->numStencilSamples();
+
+ SkASSERT(this->vkCaps().stencilFormats().count());
+ const GrVkCaps::StencilFormat& sFmt = this->vkCaps().stencilFormats()[0];
+
+ GrVkStencilAttachment* stencil(GrVkStencilAttachment::Create(this,
+ GrGpuResource::kCached_LifeCycle,
+ width,
+ height,
+ samples,
+ sFmt));
+ fStats.incStencilAttachmentCreates();
+ return stencil;
+}
+
+////////////////////////////////////////////////////////////////////////////////
+
+GrBackendObject GrVkGpu::createTestingOnlyBackendTexture(void* srcData, int w, int h,
+ GrPixelConfig config) {
+
+ VkFormat pixelFormat;
+ if (!GrPixelConfigToVkFormat(config, &pixelFormat)) {
+ return 0;
+ }
+
+ bool linearTiling = false;
+ if (!fVkCaps->isConfigTexturable(config)) {
+ return 0;
+ }
+
+ if (fVkCaps->isConfigTexurableLinearly(config)) {
+ linearTiling = true;
+ }
+
+ // Currently this is not supported since it requires a copy which has not yet been implemented.
+ if (srcData && !linearTiling) {
+ return 0;
+ }
+
+ VkImageUsageFlags usageFlags = VK_IMAGE_USAGE_SAMPLED_BIT;
+ usageFlags |= VK_IMAGE_USAGE_TRANSFER_SRC_BIT;
+ usageFlags |= VK_IMAGE_USAGE_TRANSFER_DST_BIT;
+
+ VkFlags memProps = (srcData && linearTiling) ? VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT :
+ VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
+
+ // This ImageDesc refers to the texture that will be read by the client. Thus even if msaa is
+ // requested, this ImageDesc describes the resolved texture. Therefore we always have samples set
+ // to 1.
+ GrVkImage::ImageDesc imageDesc;
+ imageDesc.fImageType = VK_IMAGE_TYPE_2D;
+ imageDesc.fFormat = pixelFormat;
+ imageDesc.fWidth = w;
+ imageDesc.fHeight = h;
+ imageDesc.fLevels = 1;
+ imageDesc.fSamples = 1;
+ imageDesc.fImageTiling = linearTiling ? VK_IMAGE_TILING_LINEAR : VK_IMAGE_TILING_OPTIMAL;
+ imageDesc.fUsageFlags = usageFlags;
+ imageDesc.fMemProps = memProps;
+
+ const GrVkImage::Resource* imageRsrc = GrVkImage::CreateResource(this, imageDesc);
+ if (!imageRsrc) {
+ return 0;
+ }
+
+ if (srcData) {
+ if (linearTiling) {
+ const VkImageSubresource subres = {
+ VK_IMAGE_ASPECT_COLOR_BIT,
+ 0, // mipLevel
+ 0, // arraySlice
+ };
+ VkSubresourceLayout layout;
+ VkResult err;
+
+ const GrVkInterface* interface = this->vkInterface();
+
+ GR_VK_CALL(interface, GetImageSubresourceLayout(fDevice,
+ imageRsrc->fImage,
+ &subres,
+ &layout));
+
+ void* mapPtr;
+ err = GR_VK_CALL(interface, MapMemory(fDevice,
+ imageRsrc->fAlloc,
+ 0,
+ layout.rowPitch * h,
+ 0,
+ &mapPtr));
+ if (err) {
+ imageRsrc->unref(this);
+ return 0;
+ }
+
+ size_t bpp = GrBytesPerPixel(config);
+ size_t rowCopyBytes = bpp * w;
+ // If there is no padding on dst (layout.rowPitch) we can do a single memcpy.
+ // This assumes the srcData comes in with no padding.
+ if (rowCopyBytes == layout.rowPitch) {
+ memcpy(mapPtr, srcData, rowCopyBytes * h);
+ } else {
+ SkRectMemcpy(mapPtr, layout.rowPitch, srcData, rowCopyBytes, rowCopyBytes, h);
+ }
+ GR_VK_CALL(interface, UnmapMemory(fDevice, imageRsrc->fAlloc));
+ } else {
+ // TODO: Add support for copying to optimal tiling
+ SkASSERT(false);
+ }
+ }
+
+ return (GrBackendObject)imageRsrc;
+}
+
+bool GrVkGpu::isTestingOnlyBackendTexture(GrBackendObject id) const {
+ GrVkImage::Resource* backend = reinterpret_cast<GrVkImage::Resource*>(id);
+
+ if (backend && backend->fImage && backend->fAlloc) {
+ VkMemoryRequirements req;
+ memset(&req, 0, sizeof(req));
+ GR_VK_CALL(this->vkInterface(), GetImageMemoryRequirements(fDevice,
+ backend->fImage,
+ &req));
+ // TODO: find a better check
+ // This will probably fail with a different driver
+ return (req.size > 0) && (req.size <= 8192 * 8192);
+ }
+
+ return false;
+}
+
+void GrVkGpu::deleteTestingOnlyBackendTexture(GrBackendObject id, bool abandon) {
+ GrVkImage::Resource* backend = reinterpret_cast<GrVkImage::Resource*>(id);
+
+ if (backend) {
+ if (!abandon) {
+ backend->unref(this);
+ } else {
+ backend->unrefAndAbandon();
+ }
+ }
+}
+
+////////////////////////////////////////////////////////////////////////////////
+
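+// The three barrier helpers below simply record a vkCmdPipelineBarrier of the matching type into
+// the current command buffer.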
+void GrVkGpu::addMemoryBarrier(VkPipelineStageFlags srcStageMask,
+ VkPipelineStageFlags dstStageMask,
+ bool byRegion,
+ VkMemoryBarrier* barrier) const {
+ SkASSERT(fCurrentCmdBuffer);
+ fCurrentCmdBuffer->pipelineBarrier(this,
+ srcStageMask,
+ dstStageMask,
+ byRegion,
+ GrVkCommandBuffer::kMemory_BarrierType,
+ barrier);
+}
+
+void GrVkGpu::addBufferMemoryBarrier(VkPipelineStageFlags srcStageMask,
+ VkPipelineStageFlags dstStageMask,
+ bool byRegion,
+ VkBufferMemoryBarrier* barrier) const {
+ SkASSERT(fCurrentCmdBuffer);
+ fCurrentCmdBuffer->pipelineBarrier(this,
+ srcStageMask,
+ dstStageMask,
+ byRegion,
+ GrVkCommandBuffer::kBufferMemory_BarrierType,
+ barrier);
+}
+
+void GrVkGpu::addImageMemoryBarrier(VkPipelineStageFlags srcStageMask,
+ VkPipelineStageFlags dstStageMask,
+ bool byRegion,
+ VkImageMemoryBarrier* barrier) const {
+ SkASSERT(fCurrentCmdBuffer);
+ fCurrentCmdBuffer->pipelineBarrier(this,
+ srcStageMask,
+ dstStageMask,
+ byRegion,
+ GrVkCommandBuffer::kImageMemory_BarrierType,
+ barrier);
+}
+
+void GrVkGpu::finishDrawTarget() {
+ // Submit the current command buffer to the Queue
+ this->submitCommandBuffer(kSkip_SyncQueue);
+}
+
+void GrVkGpu::onClear(GrRenderTarget* target, const SkIRect& rect, GrColor color) {
+ // parent class should never let us get here with no RT
+ SkASSERT(target);
+
+ VkClearColorValue vkColor;
+ GrColorToRGBAFloat(color, vkColor.float32);
+
+ GrVkRenderTarget* vkRT = static_cast<GrVkRenderTarget*>(target);
+ VkImageLayout origDstLayout = vkRT->currentLayout();
+
+ if (rect.width() != target->width() || rect.height() != target->height()) {
+ VkAccessFlags srcAccessMask = GrVkMemory::LayoutToSrcAccessMask(origDstLayout);
+ VkAccessFlags dstAccessMask = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
+ VkPipelineStageFlags srcStageMask =
+ GrVkMemory::LayoutToPipelineStageFlags(vkRT->currentLayout());
+ VkPipelineStageFlags dstStageMask = VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT;
+ vkRT->setImageLayout(this,
+ VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL,
+ srcAccessMask,
+ dstAccessMask,
+ srcStageMask,
+ dstStageMask,
+ false);
+
+ VkClearRect clearRect;
+ clearRect.rect.offset = { rect.fLeft, rect.fTop };
+ clearRect.rect.extent = { (uint32_t)rect.width(), (uint32_t)rect.height() };
+ clearRect.baseArrayLayer = 0;
+ clearRect.layerCount = 1;
+
+ const GrVkRenderPass* renderPass = vkRT->simpleRenderPass();
+ SkASSERT(renderPass);
+ fCurrentCmdBuffer->beginRenderPass(this, renderPass, *vkRT);
+
+ uint32_t colorIndex;
+ SkAssertResult(renderPass->colorAttachmentIndex(&colorIndex));
+
+ VkClearAttachment attachment;
+ attachment.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
+ attachment.colorAttachment = colorIndex;
+ attachment.clearValue.color = vkColor;
+
+ fCurrentCmdBuffer->clearAttachments(this, 1, &attachment, 1, &clearRect);
+ fCurrentCmdBuffer->endRenderPass(this);
+ return;
+ }
+
+ VkPipelineStageFlags srcStageMask = GrVkMemory::LayoutToPipelineStageFlags(origDstLayout);
+ VkPipelineStageFlags dstStageMask = VK_PIPELINE_STAGE_TRANSFER_BIT;
+
+ VkAccessFlags srcAccessMask = GrVkMemory::LayoutToSrcAccessMask(origDstLayout);
+ VkAccessFlags dstAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT;
+
+ vkRT->setImageLayout(this,
+ VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
+ srcAccessMask,
+ dstAccessMask,
+ srcStageMask,
+ dstStageMask,
+ false);
+
+
+ VkImageSubresourceRange subRange;
+ memset(&subRange, 0, sizeof(VkImageSubresourceRange));
+ subRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
+ subRange.baseMipLevel = 0;
+ subRange.levelCount = 1;
+ subRange.baseArrayLayer = 0;
+ subRange.layerCount = 1;
+
+ // In the future we may not actually be doing this type of clear at all. If we are inside a
+ // render pass or doing a non full clear then we will use CmdClearColorAttachment. The more
+ // common use case will be clearing an attachment at the start of a render pass, in which case
+ // we will use the clear load ops.
+ fCurrentCmdBuffer->clearColorImage(this,
+ vkRT,
+ &vkColor,
+ 1, &subRange);
+}
+
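+// For now we only copy with vkCmdCopyImage when both surfaces are textures with matching config
+// and origin; copies involving msaa or non-texture render targets still need to be sorted out.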
+inline bool can_copy_image(const GrSurface* dst,
+ const GrSurface* src,
+ const GrVkGpu* gpu) {
+ if (src->asTexture() &&
+ dst->asTexture() &&
+ src->origin() == dst->origin() &&
+ src->config() == dst->config()) {
+ return true;
+ }
+
+ // How does msaa play into this? If a VkTexture is multisampled, are we copying the multisampled
+ // or the resolved image here?
+
+ return false;
+}
+
+void GrVkGpu::copySurfaceAsCopyImage(GrSurface* dst,
+ GrSurface* src,
+ const SkIRect& srcRect,
+ const SkIPoint& dstPoint) {
+ SkASSERT(can_copy_image(dst, src, this));
+
+ // Insert memory barriers to switch src and dst to transfer_source and transfer_dst layouts
+ GrVkTexture* dstTex = static_cast<GrVkTexture*>(dst->asTexture());
+ GrVkTexture* srcTex = static_cast<GrVkTexture*>(src->asTexture());
+
+ VkImageLayout origDstLayout = dstTex->currentLayout();
+ VkImageLayout origSrcLayout = srcTex->currentLayout();
+
+ VkPipelineStageFlags srcStageMask = GrVkMemory::LayoutToPipelineStageFlags(origDstLayout);
+ VkPipelineStageFlags dstStageMask = VK_PIPELINE_STAGE_TRANSFER_BIT;
+
+ // These flags are for flushing/invalidating caches and for the dst image it doesn't matter if
+ // the cache is flushed since it is only being written to.
+ VkAccessFlags srcAccessMask = GrVkMemory::LayoutToSrcAccessMask(origDstLayout);
+ VkAccessFlags dstAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT;
+
+ dstTex->setImageLayout(this,
+ VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
+ srcAccessMask,
+ dstAccessMask,
+ srcStageMask,
+ dstStageMask,
+ false);
+
+ srcStageMask = GrVkMemory::LayoutToPipelineStageFlags(origSrcLayout);
+ dstStageMask = VK_PIPELINE_STAGE_TRANSFER_BIT;
+
+ srcAccessMask = GrVkMemory::LayoutToSrcAccessMask(origSrcLayout);
+ dstAccessMask = VK_ACCESS_TRANSFER_READ_BIT;
+
+ srcTex->setImageLayout(this,
+ VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
+ srcAccessMask,
+ dstAccessMask,
+ srcStageMask,
+ dstStageMask,
+ false);
+
+ // Flip rect if necessary
+ SkIRect srcVkRect = srcRect;
+ int32_t dstY = dstPoint.fY;
+
+ if (kBottomLeft_GrSurfaceOrigin == src->origin()) {
+ SkASSERT(kBottomLeft_GrSurfaceOrigin == dst->origin());
+ srcVkRect.fTop = src->height() - srcRect.fBottom;
+ srcVkRect.fBottom = src->height() - srcRect.fTop;
+ dstY = dst->height() - dstPoint.fY - srcVkRect.height();
+ }
+
+ VkImageCopy copyRegion;
+ memset(&copyRegion, 0, sizeof(VkImageCopy));
+ copyRegion.srcSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1 };
+ copyRegion.srcOffset = { srcVkRect.fLeft, srcVkRect.fTop, 0 };
+ copyRegion.dstSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1 };
+ copyRegion.dstOffset = { dstPoint.fX, dstY, 0 };
+ copyRegion.extent = { (uint32_t)srcVkRect.width(), (uint32_t)srcVkRect.height(), 1 };
+
+ fCurrentCmdBuffer->copyImage(this,
+ srcTex,
+ VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
+ dstTex,
+ VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
+ 1,
+ &copyRegion);
+}
+
+inline bool can_copy_as_draw(const GrSurface* dst,
+ const GrSurface* src,
+ const GrVkGpu* gpu) {
+ return false;
+}
+
+void GrVkGpu::copySurfaceAsDraw(GrSurface* dst,
+ GrSurface* src,
+ const SkIRect& srcRect,
+ const SkIPoint& dstPoint) {
+ SkASSERT(false);
+}
+
+bool GrVkGpu::onCopySurface(GrSurface* dst,
+ GrSurface* src,
+ const SkIRect& srcRect,
+ const SkIPoint& dstPoint) {
+ if (can_copy_image(dst, src, this)) {
+ this->copySurfaceAsCopyImage(dst, src, srcRect, dstPoint);
+ return true;
+ }
+
+ if (can_copy_as_draw(dst, src, this)) {
+ this->copySurfaceAsDraw(dst, src, srcRect, dstPoint);
+ return true;
+ }
+
+ return false;
+}
+
+bool GrVkGpu::onGetReadPixelsInfo(GrSurface* srcSurface, int width, int height, size_t rowBytes,
+ GrPixelConfig readConfig, DrawPreference* drawPreference,
+ ReadPixelTempDrawInfo* tempDrawInfo) {
+ // Currently we don't handle draws, so if the caller wants/needs to do a draw we need to fail
+ if (kNoDraw_DrawPreference != *drawPreference) {
+ return false;
+ }
+
+ if (srcSurface->config() != readConfig) {
+ // TODO: This should fall back to drawing or copying to change config of srcSurface to match
+ // that of readConfig.
+ return false;
+ }
+
+ return true;
+}
+
+bool GrVkGpu::onReadPixels(GrSurface* surface,
+ int left, int top, int width, int height,
+ GrPixelConfig config,
+ void* buffer,
+ size_t rowBytes) {
+ VkFormat pixelFormat;
+ if (!GrPixelConfigToVkFormat(config, &pixelFormat)) {
+ return false;
+ }
+
+ GrVkTexture* tgt = static_cast<GrVkTexture*>(surface->asTexture());
+ if (!tgt) {
+ return false;
+ }
+
+ // Change layout of our target so it can be used as copy
+ VkImageLayout layout = tgt->currentLayout();
+ VkPipelineStageFlags srcStageMask = GrVkMemory::LayoutToPipelineStageFlags(layout);
+ VkPipelineStageFlags dstStageMask = VK_PIPELINE_STAGE_TRANSFER_BIT;
+ VkAccessFlags srcAccessMask = GrVkMemory::LayoutToSrcAccessMask(layout);
+ VkAccessFlags dstAccessMask = VK_ACCESS_TRANSFER_READ_BIT;
+ tgt->setImageLayout(this,
+ VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
+ srcAccessMask,
+ dstAccessMask,
+ srcStageMask,
+ dstStageMask,
+ false);
+
+ GrVkTransferBuffer* transferBuffer =
+ reinterpret_cast<GrVkTransferBuffer*>(this->createTransferBuffer(rowBytes * height,
+ kGpuToCpu_TransferType));
+
+ bool flipY = kBottomLeft_GrSurfaceOrigin == surface->origin();
+ VkOffset3D offset = {
+ left,
+ flipY ? surface->height() - top - height : top,
+ 0
+ };
+
+ // Copy the image to a buffer so we can map it to cpu memory
+ VkBufferImageCopy region;
+ memset(&region, 0, sizeof(VkBufferImageCopy));
+ region.bufferOffset = 0;
+ region.bufferRowLength = 0; // Forces RowLength to be imageExtent.width
+ region.bufferImageHeight = 0; // Forces height to be tightly packed. Only useful for 3d images.
+ region.imageSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1 };
+ region.imageOffset = offset;
+ region.imageExtent = { (uint32_t)width, (uint32_t)height, 1 };
+
+ fCurrentCmdBuffer->copyImageToBuffer(this,
+ tgt,
+ VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
+ transferBuffer,
+ 1,
+ &region);
+
+ // make sure the copy to buffer has finished
+ transferBuffer->addMemoryBarrier(this,
+ VK_ACCESS_TRANSFER_WRITE_BIT,
+ VK_ACCESS_HOST_READ_BIT,
+ VK_PIPELINE_STAGE_TRANSFER_BIT,
+ VK_PIPELINE_STAGE_HOST_BIT,
+ false);
+
+ // We need to submit the current command buffer to the Queue and make sure it finishes before
+ // we can copy the data out of the buffer.
+ this->submitCommandBuffer(kForce_SyncQueue);
+
+ void* mappedMemory = transferBuffer->map();
+
+ memcpy(buffer, mappedMemory, rowBytes*height);
+
+ transferBuffer->unmap();
+ transferBuffer->unref();
+
+ if (flipY) {
+ SkAutoSMalloc<32 * sizeof(GrColor)> scratch;
+ size_t tightRowBytes = GrBytesPerPixel(config) * width;
+ scratch.reset(tightRowBytes);
+ void* tmpRow = scratch.get();
+ // flip y in-place by rows
+ const int halfY = height >> 1;
+ char* top = reinterpret_cast<char*>(buffer);
+ char* bottom = top + (height - 1) * rowBytes;
+ for (int y = 0; y < halfY; y++) {
+ memcpy(tmpRow, top, tightRowBytes);
+ memcpy(top, bottom, tightRowBytes);
+ memcpy(bottom, tmpRow, tightRowBytes);
+ top += rowBytes;
+ bottom -= rowBytes;
+ }
+ }
+
+ return true;
+}
+
+void GrVkGpu::onDraw(const DrawArgs& args, const GrNonInstancedVertices& vertices) {
+ GrRenderTarget* rt = args.fPipeline->getRenderTarget();
+ GrVkRenderTarget* vkRT = static_cast<GrVkRenderTarget*>(rt);
+ const GrVkRenderPass* renderPass = vkRT->simpleRenderPass();
+ SkASSERT(renderPass);
+
+ GrVkProgram* program = GrVkProgramBuilder::CreateProgram(this, args,
+ vertices.primitiveType(),
+ *renderPass);
+
+ if (!program) {
+ return;
+ }
+
+ program->setData(this, *args.fPrimitiveProcessor, *args.fPipeline);
+
+ fCurrentCmdBuffer->beginRenderPass(this, renderPass, *vkRT);
+
+ program->bind(this, fCurrentCmdBuffer);
+
+ this->bindGeometry(*args.fPrimitiveProcessor, vertices);
+
+ // Change layout of our render target so it can be used as the color attachment
+ VkImageLayout layout = vkRT->currentLayout();
+ // Our color attachment is purely a destination and won't be read so don't need to flush or
+ // invalidate any caches
+ VkPipelineStageFlags srcStageMask = GrVkMemory::LayoutToPipelineStageFlags(layout);
+ VkPipelineStageFlags dstStageMask = VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT;
+ VkAccessFlags srcAccessMask = GrVkMemory::LayoutToSrcAccessMask(layout);
+ VkAccessFlags dstAccessMask = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
+ vkRT->setImageLayout(this,
+ VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL,
+ srcAccessMask,
+ dstAccessMask,
+ srcStageMask,
+ dstStageMask,
+ false);
+
+ if (vertices.isIndexed()) {
+ fCurrentCmdBuffer->drawIndexed(this,
+ vertices.indexCount(),
+ 1,
+ vertices.startIndex(),
+ vertices.startVertex(),
+ 0);
+ } else {
+ fCurrentCmdBuffer->draw(this, vertices.vertexCount(), 1, vertices.startVertex(), 0);
+ }
+
+ fCurrentCmdBuffer->endRenderPass(this);
+
+ // Technically we don't have to call this here (since there is a safety check in
+ // program::setData), but this will allow for quicker freeing of resources if the program sits
+ // in a cache for a while.
+ program->freeTempResources(this);
+ // This free will go away once we set up a program cache, and then the cache will be
+ // responsible for calling freeGPUResources.
+ program->freeGPUResources(this);
+ program->unref();
+
+#if SWAP_PER_DRAW
+ glFlush();
+#if defined(SK_BUILD_FOR_MAC)
+ aglSwapBuffers(aglGetCurrentContext());
+ int set_a_break_pt_here = 9;
+ aglSwapBuffers(aglGetCurrentContext());
+#elif defined(SK_BUILD_FOR_WIN32)
+ SwapBuf();
+ int set_a_break_pt_here = 9;
+ SwapBuf();
+#endif
+#endif
+}
+
--- /dev/null
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrVkGpu_DEFINED
+#define GrVkGpu_DEFINED
+
+#include "GrGpu.h"
+#include "GrGpuFactory.h"
+#include "GrVkCaps.h"
+#include "GrVkIndexBuffer.h"
+#include "GrVkProgram.h"
+#include "GrVkResourceProvider.h"
+#include "GrVkVertexBuffer.h"
+#include "GrVkUtil.h"
+
+#include "shaderc/shaderc.h"
+#include "vulkan/vulkan.h"
+
+class GrPipeline;
+class GrNonInstancedVertices;
+
+class GrVkBufferImpl;
+class GrVkCommandBuffer;
+class GrVkPipeline;
+class GrVkRenderPass;
+class GrVkTexture;
+struct GrVkInterface;
+
+class GrVkGpu : public GrGpu {
+public:
+ // Currently passing in the inst so that we can properly delete it when we are done.
+ // Normally this would be done by the client.
+ GrVkGpu(GrContext* context, const GrContextOptions& options,
+ VkPhysicalDevice physDev, VkDevice device, VkQueue queue, VkCommandPool cmdPool,
+ VkInstance inst);
+ ~GrVkGpu() override;
+
+ const GrVkInterface* vkInterface() const { return fInterface.get(); }
+ const GrVkCaps& vkCaps() const { return *fVkCaps; }
+
+ VkDevice device() const { return fDevice; }
+ VkQueue queue() const { return fQueue; }
+ VkCommandPool cmdPool() const { return fCmdPool; }
+ VkPhysicalDeviceMemoryProperties physicalDeviceMemoryProperties() const {
+ return fPhysDevMemProps;
+ }
+
+ GrVkResourceProvider& resourceProvider() { return fResourceProvider; }
+
+ enum SyncQueue {
+ kForce_SyncQueue,
+ kSkip_SyncQueue
+ };
+
+ bool onGetReadPixelsInfo(GrSurface* srcSurface, int readWidth, int readHeight, size_t rowBytes,
+ GrPixelConfig readConfig, DrawPreference*,
+ ReadPixelTempDrawInfo*) override;
+
+ bool onGetWritePixelsInfo(GrSurface* dstSurface, int width, int height,
+ GrPixelConfig srcConfig, DrawPreference*,
+ WritePixelTempDrawInfo*) override;
+
+ void buildProgramDesc(GrProgramDesc*, const GrPrimitiveProcessor&,
+ const GrPipeline&) const override;
+
+ void discard(GrRenderTarget*) override {
+ SkDebugf("discard not yet implemented for Vulkan\n");
+ }
+
+ bool onCopySurface(GrSurface* dst,
+ GrSurface* src,
+ const SkIRect& srcRect,
+ const SkIPoint& dstPoint) override;
+
+ bool initCopySurfaceDstDesc(const GrSurface* src, GrSurfaceDesc* desc) const override {
+ SkDebugf("initCopySurfaceDstDesc not yet implemented for Vulkan\n");
+ return false;
+ }
+
+ void xferBarrier(GrRenderTarget*, GrXferBarrierType) override {}
+
+ GrBackendObject createTestingOnlyBackendTexture(void* pixels, int w, int h,
+ GrPixelConfig config) override;
+ bool isTestingOnlyBackendTexture(GrBackendObject id) const override;
+ void deleteTestingOnlyBackendTexture(GrBackendObject id, bool abandonTexture) override;
+
+ GrStencilAttachment* createStencilAttachmentForRenderTarget(const GrRenderTarget*,
+ int width,
+ int height) override;
+
+ void clearStencil(GrRenderTarget* target) override {
+ SkDebugf("clearStencil not yet implemented for Vulkan\n");
+ }
+
+ void drawDebugWireRect(GrRenderTarget*, const SkIRect&, GrColor) override {
+ SkDebugf("drawDebugWireRect not yet implemented for Vulkan\n");
+ }
+
+ void addMemoryBarrier(VkPipelineStageFlags srcStageMask,
+ VkPipelineStageFlags dstStageMask,
+ bool byRegion,
+ VkMemoryBarrier* barrier) const;
+ void addBufferMemoryBarrier(VkPipelineStageFlags srcStageMask,
+ VkPipelineStageFlags dstStageMask,
+ bool byRegion,
+ VkBufferMemoryBarrier* barrier) const;
+ void addImageMemoryBarrier(VkPipelineStageFlags srcStageMask,
+ VkPipelineStageFlags dstStageMask,
+ bool byRegion,
+ VkImageMemoryBarrier* barrier) const;
+
+ shaderc_compiler_t shadercCompiler() const {
+ return fCompiler;
+ }
+
+ void finishDrawTarget() override;
+
+private:
+ void onResetContext(uint32_t resetBits) override {
+ SkDebugf("onResetContext not yet implemented for Vulkan\n");
+ }
+
+ GrTexture* onCreateTexture(const GrSurfaceDesc& desc, GrGpuResource::LifeCycle,
+ const void* srcData, size_t rowBytes) override;
+
+ GrTexture* onCreateCompressedTexture(const GrSurfaceDesc& desc, GrGpuResource::LifeCycle,
+ const void* srcData) override {
+ SkDebugf("onCreateCompressedTexture not yet implemented for Vulkan\n");
+ return nullptr;
+ }
+
+ GrTexture* onWrapBackendTexture(const GrBackendTextureDesc&, GrWrapOwnership) override;
+
+ GrRenderTarget* onWrapBackendRenderTarget(const GrBackendRenderTargetDesc&,
+ GrWrapOwnership) override;
+
+ GrVertexBuffer* onCreateVertexBuffer(size_t size, bool dynamic) override;
+ GrIndexBuffer* onCreateIndexBuffer(size_t size, bool dynamic) override;
+ GrTransferBuffer* onCreateTransferBuffer(size_t size, TransferType type) override;
+
+ void onClear(GrRenderTarget*, const SkIRect& rect, GrColor color) override;
+
+ void onClearStencilClip(GrRenderTarget*, const SkIRect& rect, bool insideClip) override {
+ SkDebugf("onClearStencilClip not yet implemented for Vulkan\n");
+ }
+
+ void onDraw(const DrawArgs&, const GrNonInstancedVertices&) override;
+
+ bool onReadPixels(GrSurface* surface,
+ int left, int top, int width, int height,
+ GrPixelConfig,
+ void* buffer,
+ size_t rowBytes) override;
+
+ bool onWritePixels(GrSurface* surface,
+ int left, int top, int width, int height,
+ GrPixelConfig config, const void* buffer, size_t rowBytes) override;
+
+ bool onTransferPixels(GrSurface*,
+ int left, int top, int width, int height,
+ GrPixelConfig config, GrTransferBuffer* buffer,
+ size_t offset, size_t rowBytes) override {
+ SkDebugf("onTransferPixels not yet implemented for Vulkan\n");
+ return false;
+ }
+
+ void onResolveRenderTarget(GrRenderTarget* target) override {
+ SkDebugf("onResolveRenderTarget not yet implemented for Vulkan\n");
+ }
+
+ // Bind vertex and index buffers
+ void bindGeometry(const GrPrimitiveProcessor&, const GrNonInstancedVertices&);
+
+ // Ends and submits the current command buffer to the queue and then creates a new command
+ // buffer and begins it. If sync is set to kForce_SyncQueue, the function will wait for all
+ // work in the queue to finish before returning.
+ void submitCommandBuffer(SyncQueue sync);
+
+ void copySurfaceAsCopyImage(GrSurface* dst,
+ GrSurface* src,
+ const SkIRect& srcRect,
+ const SkIPoint& dstPoint);
+
+ void copySurfaceAsDraw(GrSurface* dst,
+ GrSurface* src,
+ const SkIRect& srcRect,
+ const SkIPoint& dstPoint);
+
+ // helper for onCreateTexture and writeTexturePixels
+ bool uploadTexData(GrVkTexture* tex,
+ int left, int top, int width, int height,
+ GrPixelConfig dataConfig,
+ const void* data,
+ size_t rowBytes);
+
+ SkAutoTUnref<const GrVkInterface> fInterface;
+ SkAutoTUnref<GrVkCaps> fVkCaps;
+ VkPhysicalDeviceMemoryProperties fPhysDevMemProps;
+ VkDevice fDevice;
+ VkQueue fQueue; // for now, one queue
+ VkCommandPool fCmdPool;
+ GrVkCommandBuffer* fCurrentCmdBuffer;
+ GrVkResourceProvider fResourceProvider;
+
+ // Shaderc compiler used for compiling GLSL into SPIR-V. We only want to create the compiler
+ // once since there is significant overhead to the first compile of any compiler.
+ shaderc_compiler_t fCompiler;
+
+ // This is only for our current testing and building. The client should be holding on to the
+ // VkInstance.
+ VkInstance fVkInstance;
+
+ typedef GrGpu INHERITED;
+};
+
+#endif
--- /dev/null
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "GrVkGpu.h"
+#include "GrVkImage.h"
+#include "GrVkMemory.h"
+#include "GrVkUtil.h"
+
+#define VK_CALL(GPU, X) GR_VK_CALL(GPU->vkInterface(), X)
+
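+// Records an image memory barrier that transitions this image from fCurrentLayout to newLayout,
+// making the writes in srcAccessMask available to the accesses in dstAccessMask between the two
+// pipeline stage masks.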
+void GrVkImage::setImageLayout(const GrVkGpu* gpu, VkImageLayout newLayout,
+ VkAccessFlags srcAccessMask,
+ VkAccessFlags dstAccessMask,
+ VkPipelineStageFlags srcStageMask,
+ VkPipelineStageFlags dstStageMask,
+ bool byRegion) {
+ SkASSERT(VK_IMAGE_LAYOUT_UNDEFINED != newLayout && VK_IMAGE_LAYOUT_PREINITIALIZED != newLayout);
+ // Is this reasonable? Could someone want to keep the same layout but use the masks to force
+ // a barrier on certain things?
+ if (newLayout == fCurrentLayout) {
+ return;
+ }
+
+ VkImageMemoryBarrier imageMemoryBarrier = {
+ VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER, // sType
+ NULL, // pNext
+ srcAccessMask, // srcAccessMask
+ dstAccessMask, // dstAccessMask
+ fCurrentLayout, // oldLayout
+ newLayout, // newLayout
+ VK_QUEUE_FAMILY_IGNORED, // srcQueueFamilyIndex
+ VK_QUEUE_FAMILY_IGNORED, // dstQueueFamilyIndex
+ fResource->fImage, // image
+ { VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, 1 } // subresourceRange
+ };
+
+ // TODO: restrict to area of image we're interested in
+ gpu->addImageMemoryBarrier(srcStageMask, dstStageMask, byRegion, &imageMemoryBarrier);
+
+ fCurrentLayout = newLayout;
+}
+
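+// Creates the VkImage and binds device memory to it per imageDesc. Linearly tiled images start
+// out VK_IMAGE_LAYOUT_PREINITIALIZED so the host can write texels directly; optimally tiled
+// images start out VK_IMAGE_LAYOUT_UNDEFINED.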
+const GrVkImage::Resource* GrVkImage::CreateResource(const GrVkGpu* gpu,
+ const ImageDesc& imageDesc) {
+ VkImage image = 0;
+ VkDeviceMemory alloc;
+
+ VkResult err;
+
+ VkImageLayout initialLayout = (VK_IMAGE_TILING_LINEAR == imageDesc.fImageTiling)
+ ? VK_IMAGE_LAYOUT_PREINITIALIZED
+ : VK_IMAGE_LAYOUT_UNDEFINED;
+
+ // Create Image
+ VkSampleCountFlagBits vkSamples;
+ if (!GrSampleCountToVkSampleCount(imageDesc.fSamples, &vkSamples)) {
+ return nullptr;
+ }
+ const VkImageCreateInfo imageCreateInfo = {
+ VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO, // sType
+ NULL, // pNext
+ 0, // VkImageCreateFlags
+ imageDesc.fImageType, // VkImageType
+ imageDesc.fFormat, // VkFormat
+ { imageDesc.fWidth, imageDesc.fHeight, 1 }, // VkExtent3D
+ imageDesc.fLevels, // mipLevels
+ 1, // arrayLayers
+ vkSamples, // samples
+ imageDesc.fImageTiling, // VkImageTiling
+ imageDesc.fUsageFlags, // VkImageUsageFlags
+ VK_SHARING_MODE_EXCLUSIVE, // VkSharingMode
+ 0, // queueFamilyIndexCount
+ nullptr, // pQueueFamilyIndices
+ initialLayout // initialLayout
+ };
+
+ err = VK_CALL(gpu, CreateImage(gpu->device(), &imageCreateInfo, nullptr, &image));
+ SkASSERT(!err);
+
+ if (!GrVkMemory::AllocAndBindImageMemory(gpu, image, imageDesc.fMemProps, &alloc)) {
+ VK_CALL(gpu, DestroyImage(gpu->device(), image, nullptr));
+ return nullptr;
+ }
+
+ GrVkImage::Resource::Flags flags =
+ (VK_IMAGE_TILING_LINEAR == imageDesc.fImageTiling) ? Resource::kLinearTiling_Flag
+ : Resource::kNo_Flags;
+
+ return (new GrVkImage::Resource(image, alloc, flags));
+}
+
+GrVkImage::~GrVkImage() {
+ // should have been released or abandoned first
+ SkASSERT(!fResource);
+}
+
+void GrVkImage::releaseImage(const GrVkGpu* gpu) {
+ if (fResource) {
+ fResource->unref(gpu);
+ fResource = nullptr;
+ }
+}
+
+void GrVkImage::abandonImage() {
+ if (fResource) {
+ fResource->unrefAndAbandon();
+ fResource = nullptr;
+ }
+}
+
+void GrVkImage::Resource::freeGPUData(const GrVkGpu* gpu) const {
+ VK_CALL(gpu, DestroyImage(gpu->device(), fImage, nullptr));
+ VK_CALL(gpu, FreeMemory(gpu->device(), fAlloc, nullptr));
+}
\ No newline at end of file
--- /dev/null
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrVkImage_DEFINED
+#define GrVkImage_DEFINED
+
+#include "GrVkResource.h"
+#include "SkTypes.h"
+
+#include "vulkan/vulkan.h"
+
+class GrVkGpu;
+
+class GrVkImage : SkNoncopyable {
+public:
+ // unlike GrVkBuffer, this needs to be public so GrVkStencilAttachment can use it
+ class Resource : public GrVkResource {
+ public:
+ enum Flags {
+ kNo_Flags = 0,
+ kLinearTiling_Flag = 0x01
+ };
+
+ VkImage fImage;
+ VkDeviceMemory fAlloc;
+ Flags fFlags;
+
+ Resource() : INHERITED(), fImage(VK_NULL_HANDLE), fAlloc(VK_NULL_HANDLE), fFlags(kNo_Flags) {}
+
+ Resource(VkImage image, VkDeviceMemory alloc, Flags flags)
+ : fImage(image), fAlloc(alloc), fFlags(flags) {}
+
+ ~Resource() override {}
+ private:
+ void freeGPUData(const GrVkGpu* gpu) const override;
+
+ typedef GrVkResource INHERITED;
+ };
+
+
+ GrVkImage(const Resource* imageResource) : fResource(imageResource) {
+ if (imageResource->fFlags & Resource::kLinearTiling_Flag) {
+ fCurrentLayout = VK_IMAGE_LAYOUT_PREINITIALIZED;
+ } else {
+ fCurrentLayout = VK_IMAGE_LAYOUT_UNDEFINED;
+ }
+ imageResource->ref();
+ }
+
+ virtual ~GrVkImage();
+
+ VkImage textureImage() const { return fResource->fImage; }
+ VkDeviceMemory textureMemory() const { return fResource->fAlloc; }
+ const Resource* resource() const { return fResource; }
+ bool isLinearTiled() const {
+ return SkToBool(fResource->fFlags & Resource::kLinearTiling_Flag);
+ }
+
+ VkImageLayout currentLayout() const { return fCurrentLayout; }
+
+ void setImageLayout(const GrVkGpu* gpu, VkImageLayout newLayout,
+ VkAccessFlags srcAccessMask,
+ VkAccessFlags dstAccessMask,
+ VkPipelineStageFlags srcStageMask,
+ VkPipelineStageFlags dstStageMask,
+ bool byRegion);
+
+ struct ImageDesc {
+ VkImageType fImageType;
+ VkFormat fFormat;
+ uint32_t fWidth;
+ uint32_t fHeight;
+ uint32_t fLevels;
+ uint32_t fSamples;
+ VkImageTiling fImageTiling;
+ VkImageUsageFlags fUsageFlags;
+ VkFlags fMemProps;
+
+ ImageDesc()
+ : fImageType(VK_IMAGE_TYPE_2D)
+ , fFormat(VK_FORMAT_UNDEFINED)
+ , fWidth(0)
+ , fHeight(0)
+ , fLevels(1)
+ , fSamples(1)
+ , fImageTiling(VK_IMAGE_TILING_OPTIMAL)
+ , fUsageFlags(0)
+ , fMemProps(VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT) {}
+ };
+
+ static const Resource* CreateResource(const GrVkGpu* gpu, const ImageDesc& imageDesc);
+
+protected:
+
+ void releaseImage(const GrVkGpu* gpu);
+ void abandonImage();
+
+ const Resource* fResource;
+
+ VkImageLayout fCurrentLayout;
+
+};
+
+#endif
--- /dev/null
+/*
+* Copyright 2016 Google Inc.
+*
+* Use of this source code is governed by a BSD-style license that can be
+* found in the LICENSE file.
+*/
+
+#include "GrVkImageView.h"
+#include "GrVkGpu.h"
+#include "GrVkUtil.h"
+
+const GrVkImageView* GrVkImageView::Create(GrVkGpu* gpu, VkImage image, VkFormat format,
+ Type viewType) {
+ VkImageView imageView;
+
+ // Create the VkImageView
+ VkImageViewCreateInfo viewInfo = {
+ VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO, // sType
+ nullptr, // pNext
+ 0, // flags
+ image, // image
+ VK_IMAGE_VIEW_TYPE_2D, // viewType
+ format, // format
+ { VK_COMPONENT_SWIZZLE_R, VK_COMPONENT_SWIZZLE_G,
+ VK_COMPONENT_SWIZZLE_B, VK_COMPONENT_SWIZZLE_A }, // components
+ { VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, 1 }, // subresourceRange
+ };
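+ // subresourceRange above is { aspectMask, baseMipLevel, levelCount, baseArrayLayer,
+ // layerCount }: a single mip level and a single array layer.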
+ if (kStencil_Type == viewType) {
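+ // Only the stencil aspect is sampled from a stencil view, so the color swizzles are
+ // meaningless here and are forced to zero.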
+ viewInfo.components.r = VK_COMPONENT_SWIZZLE_ZERO;
+ viewInfo.components.g = VK_COMPONENT_SWIZZLE_ZERO;
+ viewInfo.components.b = VK_COMPONENT_SWIZZLE_ZERO;
+ viewInfo.components.a = VK_COMPONENT_SWIZZLE_ZERO;
+ viewInfo.subresourceRange.aspectMask = VK_IMAGE_ASPECT_STENCIL_BIT;
+ }
+
+ VkResult err = GR_VK_CALL(gpu->vkInterface(), CreateImageView(gpu->device(), &viewInfo,
+ nullptr, &imageView));
+ if (err) {
+ return nullptr;
+ }
+
+ return new GrVkImageView(imageView);
+}
+
+void GrVkImageView::freeGPUData(const GrVkGpu* gpu) const {
+ GR_VK_CALL(gpu->vkInterface(), DestroyImageView(gpu->device(), fImageView, nullptr));
+}
+
--- /dev/null
+/*
+* Copyright 2016 Google Inc.
+*
+* Use of this source code is governed by a BSD-style license that can be
+* found in the LICENSE file.
+*/
+
+#ifndef GrVkImageView_DEFINED
+#define GrVkImageView_DEFINED
+
+#include "GrTypes.h"
+
+#include "GrVkResource.h"
+
+#include "vulkan/vulkan.h"
+
+class GrVkImageView : public GrVkResource {
+public:
+ enum Type {
+ kColor_Type,
+ kStencil_Type
+ };
+
+ static const GrVkImageView* Create(GrVkGpu* gpu, VkImage image, VkFormat format, Type viewType);
+
+ VkImageView imageView() const { return fImageView; }
+
+private:
+ GrVkImageView(VkImageView imageView) : INHERITED(), fImageView(imageView) {}
+
+ GrVkImageView(const GrVkImageView&);
+ GrVkImageView& operator=(const GrVkImageView&);
+
+ void freeGPUData(const GrVkGpu* gpu) const override;
+
+ VkImageView fImageView;
+
+ typedef GrVkResource INHERITED;
+};
+
+#endif
\ No newline at end of file
--- /dev/null
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "GrVkIndexBuffer.h"
+#include "GrVkGpu.h"
+
+GrVkIndexBuffer::GrVkIndexBuffer(GrVkGpu* gpu, const GrVkBuffer::Desc& desc,
+ const GrVkBuffer::Resource* bufferResource)
+ : INHERITED(gpu, desc.fSizeInBytes, desc.fDynamic, false)
+ , GrVkBuffer(desc, bufferResource) {
+ this->registerWithCache();
+}
+
+GrVkIndexBuffer* GrVkIndexBuffer::Create(GrVkGpu* gpu, size_t size, bool dynamic) {
+ GrVkBuffer::Desc desc;
+ desc.fDynamic = dynamic;
+ desc.fType = GrVkBuffer::kIndex_Type;
+ desc.fSizeInBytes = size;
+
+ const GrVkBuffer::Resource* bufferResource = GrVkBuffer::Create(gpu, desc);
+ if (!bufferResource) {
+ return nullptr;
+ }
+
+ GrVkIndexBuffer* buffer = new GrVkIndexBuffer(gpu, desc, bufferResource);
+ if (!buffer) {
+ bufferResource->unref(gpu);
+ }
+ return buffer;
+}
+
+void GrVkIndexBuffer::onRelease() {
+ if (!this->wasDestroyed()) {
+ this->vkRelease(this->getVkGpu());
+ }
+
+ INHERITED::onRelease();
+}
+
+void GrVkIndexBuffer::onAbandon() {
+ this->vkAbandon();
+ INHERITED::onAbandon();
+}
+
+void* GrVkIndexBuffer::onMap() {
+ if (!this->wasDestroyed()) {
+ return this->vkMap(this->getVkGpu());
+ } else {
+ return nullptr;
+ }
+}
+
+void GrVkIndexBuffer::onUnmap() {
+ if (!this->wasDestroyed()) {
+ this->vkUnmap(this->getVkGpu());
+ }
+}
+
+bool GrVkIndexBuffer::onUpdateData(const void* src, size_t srcSizeInBytes) {
+ if (!this->wasDestroyed()) {
+ return this->vkUpdateData(this->getVkGpu(), src, srcSizeInBytes);
+ } else {
+ return false;
+ }
+}
+
+GrVkGpu* GrVkIndexBuffer::getVkGpu() const {
+ SkASSERT(!this->wasDestroyed());
+ return static_cast<GrVkGpu*>(this->getGpu());
+}
+
--- /dev/null
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrVkIndexBuffer_DEFINED
+#define GrVkIndexBuffer_DEFINED
+
+#include "GrIndexBuffer.h"
+#include "GrVkBuffer.h"
+#include "vk/GrVkInterface.h"
+
+class GrVkGpu;
+
+class GrVkIndexBuffer : public GrIndexBuffer, public GrVkBuffer {
+
+public:
+ static GrVkIndexBuffer* Create(GrVkGpu* gpu, size_t size, bool dynamic);
+
+protected:
+ void onAbandon() override;
+ void onRelease() override;
+
+private:
+ GrVkIndexBuffer(GrVkGpu* gpu, const GrVkBuffer::Desc& desc,
+ const GrVkBuffer::Resource* resource);
+
+ void* onMap() override;
+ void onUnmap() override;
+ bool onUpdateData(const void* src, size_t srcSizeInBytes) override;
+
+ GrVkGpu* getVkGpu() const;
+
+ typedef GrIndexBuffer INHERITED;
+};
+
+#endif
--- /dev/null
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "vk/GrVkInterface.h"
+
+GrVkInterface::GrVkInterface() {
+}
+
+#define GET_PROC(F) functions->f ## F = (PFN_vk ## F) vkGetInstanceProcAddr(instance, "vk" #F)
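+// For example, GET_PROC(CreateDevice) expands to:
+// functions->fCreateDevice = (PFN_vkCreateDevice) vkGetInstanceProcAddr(instance, "vkCreateDevice");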
+
+const GrVkInterface* GrVkCreateInterface(VkInstance instance) {
+
+ GrVkInterface* interface = new GrVkInterface();
+ GrVkInterface::Functions* functions = &interface->fFunctions;
+
+ GET_PROC(CreateInstance);
+ GET_PROC(DestroyInstance);
+ GET_PROC(EnumeratePhysicalDevices);
+ GET_PROC(GetPhysicalDeviceFeatures);
+ GET_PROC(GetPhysicalDeviceFormatProperties);
+ GET_PROC(GetPhysicalDeviceImageFormatProperties);
+ GET_PROC(GetPhysicalDeviceProperties);
+ GET_PROC(GetPhysicalDeviceQueueFamilyProperties);
+ GET_PROC(GetPhysicalDeviceMemoryProperties);
+ GET_PROC(CreateDevice);
+ GET_PROC(DestroyDevice);
+ GET_PROC(EnumerateInstanceExtensionProperties);
+ GET_PROC(EnumerateDeviceExtensionProperties);
+ GET_PROC(EnumerateInstanceLayerProperties);
+ GET_PROC(EnumerateDeviceLayerProperties);
+ GET_PROC(GetDeviceQueue);
+ GET_PROC(QueueSubmit);
+ GET_PROC(QueueWaitIdle);
+ GET_PROC(DeviceWaitIdle);
+ GET_PROC(AllocateMemory);
+ GET_PROC(FreeMemory);
+ GET_PROC(MapMemory);
+ GET_PROC(UnmapMemory);
+ GET_PROC(FlushMappedMemoryRanges);
+ GET_PROC(InvalidateMappedMemoryRanges);
+ GET_PROC(GetDeviceMemoryCommitment);
+ GET_PROC(BindBufferMemory);
+ GET_PROC(BindImageMemory);
+ GET_PROC(GetBufferMemoryRequirements);
+ GET_PROC(GetImageMemoryRequirements);
+ GET_PROC(GetImageSparseMemoryRequirements);
+ GET_PROC(GetPhysicalDeviceSparseImageFormatProperties);
+ GET_PROC(QueueBindSparse);
+ GET_PROC(CreateFence);
+ GET_PROC(DestroyFence);
+ GET_PROC(ResetFences);
+ GET_PROC(GetFenceStatus);
+ GET_PROC(WaitForFences);
+ GET_PROC(CreateSemaphore);
+ GET_PROC(DestroySemaphore);
+ GET_PROC(CreateEvent);
+ GET_PROC(DestroyEvent);
+ GET_PROC(GetEventStatus);
+ GET_PROC(SetEvent);
+ GET_PROC(ResetEvent);
+ GET_PROC(CreateQueryPool);
+ GET_PROC(DestroyQueryPool);
+ GET_PROC(GetQueryPoolResults);
+ GET_PROC(CreateBuffer);
+ GET_PROC(DestroyBuffer);
+ GET_PROC(CreateBufferView);
+ GET_PROC(DestroyBufferView);
+ GET_PROC(CreateImage);
+ GET_PROC(DestroyImage);
+ GET_PROC(GetImageSubresourceLayout);
+ GET_PROC(CreateImageView);
+ GET_PROC(DestroyImageView);
+ GET_PROC(CreateShaderModule);
+ GET_PROC(DestroyShaderModule);
+ GET_PROC(CreatePipelineCache);
+ GET_PROC(DestroyPipelineCache);
+ GET_PROC(GetPipelineCacheData);
+ GET_PROC(MergePipelineCaches);
+ GET_PROC(CreateGraphicsPipelines);
+ GET_PROC(CreateComputePipelines);
+ GET_PROC(DestroyPipeline);
+ GET_PROC(CreatePipelineLayout);
+ GET_PROC(DestroyPipelineLayout);
+ GET_PROC(CreateSampler);
+ GET_PROC(DestroySampler);
+ GET_PROC(CreateDescriptorSetLayout);
+ GET_PROC(DestroyDescriptorSetLayout);
+ GET_PROC(CreateDescriptorPool);
+ GET_PROC(DestroyDescriptorPool);
+ GET_PROC(ResetDescriptorPool);
+ GET_PROC(AllocateDescriptorSets);
+ GET_PROC(FreeDescriptorSets);
+ GET_PROC(UpdateDescriptorSets);
+ GET_PROC(CreateFramebuffer);
+ GET_PROC(DestroyFramebuffer);
+ GET_PROC(CreateRenderPass);
+ GET_PROC(DestroyRenderPass);
+ GET_PROC(GetRenderAreaGranularity);
+ GET_PROC(CreateCommandPool);
+ GET_PROC(DestroyCommandPool);
+ GET_PROC(ResetCommandPool);
+ GET_PROC(AllocateCommandBuffers);
+ GET_PROC(FreeCommandBuffers);
+ GET_PROC(BeginCommandBuffer);
+ GET_PROC(EndCommandBuffer);
+ GET_PROC(ResetCommandBuffer);
+ GET_PROC(CmdBindPipeline);
+ GET_PROC(CmdSetViewport);
+ GET_PROC(CmdSetScissor);
+ GET_PROC(CmdSetLineWidth);
+ GET_PROC(CmdSetDepthBias);
+ GET_PROC(CmdSetBlendConstants);
+ GET_PROC(CmdSetDepthBounds);
+ GET_PROC(CmdSetStencilCompareMask);
+ GET_PROC(CmdSetStencilWriteMask);
+ GET_PROC(CmdSetStencilReference);
+ GET_PROC(CmdBindDescriptorSets);
+ GET_PROC(CmdBindIndexBuffer);
+ GET_PROC(CmdBindVertexBuffers);
+ GET_PROC(CmdDraw);
+ GET_PROC(CmdDrawIndexed);
+ GET_PROC(CmdDrawIndirect);
+ GET_PROC(CmdDrawIndexedIndirect);
+ GET_PROC(CmdDispatch);
+ GET_PROC(CmdDispatchIndirect);
+ GET_PROC(CmdCopyBuffer);
+ GET_PROC(CmdCopyImage);
+ GET_PROC(CmdBlitImage);
+ GET_PROC(CmdCopyBufferToImage);
+ GET_PROC(CmdCopyImageToBuffer);
+ GET_PROC(CmdUpdateBuffer);
+ GET_PROC(CmdFillBuffer);
+ GET_PROC(CmdClearColorImage);
+ GET_PROC(CmdClearDepthStencilImage);
+ GET_PROC(CmdClearAttachments);
+ GET_PROC(CmdResolveImage);
+ GET_PROC(CmdSetEvent);
+ GET_PROC(CmdResetEvent);
+ GET_PROC(CmdWaitEvents);
+ GET_PROC(CmdPipelineBarrier);
+ GET_PROC(CmdBeginQuery);
+ GET_PROC(CmdEndQuery);
+ GET_PROC(CmdResetQueryPool);
+ GET_PROC(CmdWriteTimestamp);
+ GET_PROC(CmdCopyQueryPoolResults);
+ GET_PROC(CmdPushConstants);
+ GET_PROC(CmdBeginRenderPass);
+ GET_PROC(CmdNextSubpass);
+ GET_PROC(CmdEndRenderPass);
+ GET_PROC(CmdExecuteCommands);
+ GET_PROC(DestroySurfaceKHR);
+ GET_PROC(GetPhysicalDeviceSurfaceSupportKHR);
+ GET_PROC(GetPhysicalDeviceSurfaceCapabilitiesKHR);
+ GET_PROC(GetPhysicalDeviceSurfaceFormatsKHR);
+ GET_PROC(GetPhysicalDeviceSurfacePresentModesKHR);
+ GET_PROC(CreateSwapchainKHR);
+ GET_PROC(DestroySwapchainKHR);
+ GET_PROC(GetSwapchainImagesKHR);
+ GET_PROC(AcquireNextImageKHR);
+ GET_PROC(QueuePresentKHR);
+ GET_PROC(GetPhysicalDeviceDisplayPropertiesKHR);
+ GET_PROC(GetPhysicalDeviceDisplayPlanePropertiesKHR);
+ GET_PROC(GetDisplayPlaneSupportedDisplaysKHR);
+ GET_PROC(GetDisplayModePropertiesKHR);
+ GET_PROC(CreateDisplayModeKHR);
+ GET_PROC(GetDisplayPlaneCapabilitiesKHR);
+ GET_PROC(CreateDisplayPlaneSurfaceKHR);
+ GET_PROC(CreateSharedSwapchainsKHR);
+
+ return interface;
+}
+
+#define RETURN_FALSE_INTERFACE \
+ if (kIsDebug) { SkDebugf("%s:%d GrVkInterface::validate() failed.\n", __FILE__, __LINE__); } \
+ return false;
+
+bool GrVkInterface::validate() const {
+ // functions that are always required
+ if (NULL == fFunctions.fCreateInstance ||
+ NULL == fFunctions.fDestroyInstance ||
+ NULL == fFunctions.fEnumeratePhysicalDevices ||
+ NULL == fFunctions.fGetPhysicalDeviceFeatures ||
+ NULL == fFunctions.fGetPhysicalDeviceFormatProperties ||
+ NULL == fFunctions.fGetPhysicalDeviceImageFormatProperties ||
+ NULL == fFunctions.fGetPhysicalDeviceProperties ||
+ NULL == fFunctions.fGetPhysicalDeviceQueueFamilyProperties ||
+ NULL == fFunctions.fGetPhysicalDeviceMemoryProperties ||
+ NULL == fFunctions.fCreateDevice ||
+ NULL == fFunctions.fDestroyDevice ||
+ NULL == fFunctions.fEnumerateInstanceExtensionProperties ||
+ NULL == fFunctions.fEnumerateDeviceExtensionProperties ||
+ NULL == fFunctions.fEnumerateInstanceLayerProperties ||
+ NULL == fFunctions.fEnumerateDeviceLayerProperties ||
+ NULL == fFunctions.fGetDeviceQueue ||
+ NULL == fFunctions.fQueueSubmit ||
+ NULL == fFunctions.fQueueWaitIdle ||
+ NULL == fFunctions.fDeviceWaitIdle ||
+ NULL == fFunctions.fAllocateMemory ||
+ NULL == fFunctions.fFreeMemory ||
+ NULL == fFunctions.fMapMemory ||
+ NULL == fFunctions.fUnmapMemory ||
+ NULL == fFunctions.fFlushMappedMemoryRanges ||
+ NULL == fFunctions.fInvalidateMappedMemoryRanges ||
+ NULL == fFunctions.fGetDeviceMemoryCommitment ||
+ NULL == fFunctions.fBindBufferMemory ||
+ NULL == fFunctions.fBindImageMemory ||
+ NULL == fFunctions.fGetBufferMemoryRequirements ||
+ NULL == fFunctions.fGetImageMemoryRequirements ||
+ NULL == fFunctions.fGetImageSparseMemoryRequirements ||
+ NULL == fFunctions.fGetPhysicalDeviceSparseImageFormatProperties ||
+ NULL == fFunctions.fQueueBindSparse ||
+ NULL == fFunctions.fCreateFence ||
+ NULL == fFunctions.fDestroyFence ||
+ NULL == fFunctions.fResetFences ||
+ NULL == fFunctions.fGetFenceStatus ||
+ NULL == fFunctions.fWaitForFences ||
+ NULL == fFunctions.fCreateSemaphore ||
+ NULL == fFunctions.fDestroySemaphore ||
+ NULL == fFunctions.fCreateEvent ||
+ NULL == fFunctions.fDestroyEvent ||
+ NULL == fFunctions.fGetEventStatus ||
+ NULL == fFunctions.fSetEvent ||
+ NULL == fFunctions.fResetEvent ||
+ NULL == fFunctions.fCreateQueryPool ||
+ NULL == fFunctions.fDestroyQueryPool ||
+ NULL == fFunctions.fGetQueryPoolResults ||
+ NULL == fFunctions.fCreateBuffer ||
+ NULL == fFunctions.fDestroyBuffer ||
+ NULL == fFunctions.fCreateBufferView ||
+ NULL == fFunctions.fDestroyBufferView ||
+ NULL == fFunctions.fCreateImage ||
+ NULL == fFunctions.fDestroyImage ||
+ NULL == fFunctions.fGetImageSubresourceLayout ||
+ NULL == fFunctions.fCreateImageView ||
+ NULL == fFunctions.fDestroyImageView ||
+ NULL == fFunctions.fCreateShaderModule ||
+ NULL == fFunctions.fDestroyShaderModule ||
+ NULL == fFunctions.fCreatePipelineCache ||
+ NULL == fFunctions.fDestroyPipelineCache ||
+ NULL == fFunctions.fGetPipelineCacheData ||
+ NULL == fFunctions.fMergePipelineCaches ||
+ NULL == fFunctions.fCreateGraphicsPipelines ||
+ NULL == fFunctions.fCreateComputePipelines ||
+ NULL == fFunctions.fDestroyPipeline ||
+ NULL == fFunctions.fCreatePipelineLayout ||
+ NULL == fFunctions.fDestroyPipelineLayout ||
+ NULL == fFunctions.fCreateSampler ||
+ NULL == fFunctions.fDestroySampler ||
+ NULL == fFunctions.fCreateDescriptorSetLayout ||
+ NULL == fFunctions.fDestroyDescriptorSetLayout ||
+ NULL == fFunctions.fCreateDescriptorPool ||
+ NULL == fFunctions.fDestroyDescriptorPool ||
+ NULL == fFunctions.fResetDescriptorPool ||
+ NULL == fFunctions.fAllocateDescriptorSets ||
+ NULL == fFunctions.fFreeDescriptorSets ||
+ NULL == fFunctions.fUpdateDescriptorSets ||
+ NULL == fFunctions.fCreateFramebuffer ||
+ NULL == fFunctions.fDestroyFramebuffer ||
+ NULL == fFunctions.fCreateRenderPass ||
+ NULL == fFunctions.fDestroyRenderPass ||
+ NULL == fFunctions.fGetRenderAreaGranularity ||
+ NULL == fFunctions.fCreateCommandPool ||
+ NULL == fFunctions.fDestroyCommandPool ||
+ NULL == fFunctions.fResetCommandPool ||
+ NULL == fFunctions.fAllocateCommandBuffers ||
+ NULL == fFunctions.fFreeCommandBuffers ||
+ NULL == fFunctions.fBeginCommandBuffer ||
+ NULL == fFunctions.fEndCommandBuffer ||
+ NULL == fFunctions.fResetCommandBuffer ||
+ NULL == fFunctions.fCmdBindPipeline ||
+ NULL == fFunctions.fCmdSetViewport ||
+ NULL == fFunctions.fCmdSetScissor ||
+ NULL == fFunctions.fCmdSetLineWidth ||
+ NULL == fFunctions.fCmdSetDepthBias ||
+ NULL == fFunctions.fCmdSetBlendConstants ||
+ NULL == fFunctions.fCmdSetDepthBounds ||
+ NULL == fFunctions.fCmdSetStencilCompareMask ||
+ NULL == fFunctions.fCmdSetStencilWriteMask ||
+ NULL == fFunctions.fCmdSetStencilReference ||
+ NULL == fFunctions.fCmdBindDescriptorSets ||
+ NULL == fFunctions.fCmdBindIndexBuffer ||
+ NULL == fFunctions.fCmdBindVertexBuffers ||
+ NULL == fFunctions.fCmdDraw ||
+ NULL == fFunctions.fCmdDrawIndexed ||
+ NULL == fFunctions.fCmdDrawIndirect ||
+ NULL == fFunctions.fCmdDrawIndexedIndirect ||
+ NULL == fFunctions.fCmdDispatch ||
+ NULL == fFunctions.fCmdDispatchIndirect ||
+ NULL == fFunctions.fCmdCopyBuffer ||
+ NULL == fFunctions.fCmdCopyImage ||
+ NULL == fFunctions.fCmdBlitImage ||
+ NULL == fFunctions.fCmdCopyBufferToImage ||
+ NULL == fFunctions.fCmdCopyImageToBuffer ||
+ NULL == fFunctions.fCmdUpdateBuffer ||
+ NULL == fFunctions.fCmdFillBuffer ||
+ NULL == fFunctions.fCmdClearColorImage ||
+ NULL == fFunctions.fCmdClearDepthStencilImage ||
+ NULL == fFunctions.fCmdClearAttachments ||
+ NULL == fFunctions.fCmdResolveImage ||
+ NULL == fFunctions.fCmdSetEvent ||
+ NULL == fFunctions.fCmdResetEvent ||
+ NULL == fFunctions.fCmdWaitEvents ||
+ NULL == fFunctions.fCmdPipelineBarrier ||
+ NULL == fFunctions.fCmdBeginQuery ||
+ NULL == fFunctions.fCmdEndQuery ||
+ NULL == fFunctions.fCmdResetQueryPool ||
+ NULL == fFunctions.fCmdWriteTimestamp ||
+ NULL == fFunctions.fCmdCopyQueryPoolResults ||
+ NULL == fFunctions.fCmdPushConstants ||
+ NULL == fFunctions.fCmdBeginRenderPass ||
+ NULL == fFunctions.fCmdNextSubpass ||
+ NULL == fFunctions.fCmdEndRenderPass ||
+ NULL == fFunctions.fCmdExecuteCommands ||
+ NULL == fFunctions.fDestroySurfaceKHR ||
+ NULL == fFunctions.fGetPhysicalDeviceSurfaceSupportKHR ||
+ NULL == fFunctions.fGetPhysicalDeviceSurfaceCapabilitiesKHR ||
+ NULL == fFunctions.fGetPhysicalDeviceSurfaceFormatsKHR ||
+ NULL == fFunctions.fGetPhysicalDeviceSurfacePresentModesKHR ||
+ NULL == fFunctions.fCreateSwapchainKHR ||
+ NULL == fFunctions.fDestroySwapchainKHR ||
+ NULL == fFunctions.fGetSwapchainImagesKHR ||
+ NULL == fFunctions.fAcquireNextImageKHR ||
+ NULL == fFunctions.fQueuePresentKHR ||
+ NULL == fFunctions.fGetPhysicalDeviceDisplayPropertiesKHR ||
+ NULL == fFunctions.fGetPhysicalDeviceDisplayPlanePropertiesKHR ||
+ NULL == fFunctions.fGetDisplayPlaneSupportedDisplaysKHR ||
+ NULL == fFunctions.fGetDisplayModePropertiesKHR ||
+ NULL == fFunctions.fCreateDisplayModeKHR ||
+ NULL == fFunctions.fGetDisplayPlaneCapabilitiesKHR ||
+ NULL == fFunctions.fCreateDisplayPlaneSurfaceKHR ||
+ NULL == fFunctions.fCreateSharedSwapchainsKHR) {
+ return false;
+ }
+ return true;
+}
+
--- /dev/null
+/*
+* Copyright 2015 Google Inc.
+*
+* Use of this source code is governed by a BSD-style license that can be
+* found in the LICENSE file.
+*/
+
+#include "GrVkMemory.h"
+
+#include "GrVkGpu.h"
+#include "GrVkUtil.h"
+
+static bool get_valid_memory_type_index(VkPhysicalDeviceMemoryProperties physDevMemProps,
+ uint32_t typeBits,
+ VkMemoryPropertyFlags requestedMemFlags,
+ uint32_t* typeIndex) {
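+ // VkMemoryRequirements::memoryTypeBits has bit i set iff memory type i can back the
+ // resource; pick the first such type that also supports every requested property flag.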
+ uint32_t checkBit = 1;
+ for (uint32_t i = 0; i < 32; ++i) {
+ if (typeBits & checkBit) {
+ uint32_t supportedFlags = physDevMemProps.memoryTypes[i].propertyFlags &
+ requestedMemFlags;
+ if (supportedFlags == requestedMemFlags) {
+ *typeIndex = i;
+ return true;
+ }
+ }
+ checkBit <<= 1;
+ }
+ return false;
+}
+
+static bool alloc_device_memory(const GrVkGpu* gpu,
+ VkMemoryRequirements* memReqs,
+ const VkMemoryPropertyFlags flags,
+ VkDeviceMemory* memory) {
+ uint32_t typeIndex;
+ if (!get_valid_memory_type_index(gpu->physicalDeviceMemoryProperties(),
+ memReqs->memoryTypeBits,
+ flags,
+ &typeIndex)) {
+ return false;
+ }
+
+ VkMemoryAllocateInfo allocInfo = {
+ VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO, // sType
+ nullptr, // pNext
+ memReqs->size, // allocationSize
+ typeIndex, // memoryTypeIndex
+ };
+
+ VkResult err = GR_VK_CALL(gpu->vkInterface(), AllocateMemory(gpu->device(),
+ &allocInfo,
+ nullptr,
+ memory));
+ if (err) {
+ return false;
+ }
+ return true;
+}
+
+bool GrVkMemory::AllocAndBindBufferMemory(const GrVkGpu* gpu,
+ VkBuffer buffer,
+ const VkMemoryPropertyFlags flags,
+ VkDeviceMemory* memory) {
+ const GrVkInterface* interface = gpu->vkInterface();
+ VkDevice device = gpu->device();
+
+ VkMemoryRequirements memReqs;
+ GR_VK_CALL(interface, GetBufferMemoryRequirements(device, buffer, &memReqs));
+
+ if (!alloc_device_memory(gpu, &memReqs, flags, memory)) {
+ return false;
+ }
+
+ // Bind the memory to the buffer
+ VkResult err = GR_VK_CALL(interface, BindBufferMemory(device, buffer, *memory, 0));
+ if (err) {
+ GR_VK_CALL(interface, FreeMemory(device, *memory, nullptr));
+ return false;
+ }
+ return true;
+}
+
+bool GrVkMemory::AllocAndBindImageMemory(const GrVkGpu* gpu,
+ VkImage image,
+ const VkMemoryPropertyFlags flags,
+ VkDeviceMemory* memory) {
+ const GrVkInterface* interface = gpu->vkInterface();
+ VkDevice device = gpu->device();
+
+ VkMemoryRequirements memReqs;
+ GR_VK_CALL(interface, GetImageMemoryRequirements(device, image, &memReqs));
+
+ if (!alloc_device_memory(gpu, &memReqs, flags, memory)) {
+ return false;
+ }
+
+ // Bind the memory to the image
+ VkResult err = GR_VK_CALL(interface, BindImageMemory(device, image, *memory, 0));
+ if (err) {
+ GR_VK_CALL(interface, FreeMemory(device, *memory, nullptr));
+ return false;
+ }
+ return true;
+}
+
+VkPipelineStageFlags GrVkMemory::LayoutToPipelineStageFlags(const VkImageLayout layout) {
+ if (VK_IMAGE_LAYOUT_GENERAL == layout) {
+ return VK_PIPELINE_STAGE_ALL_COMMANDS_BIT;
+ } else if (VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL == layout ||
+ VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL == layout) {
+ return VK_PIPELINE_STAGE_TRANSFER_BIT;
+ } else if (VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL == layout ||
+ VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL == layout ||
+ VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL == layout ||
+ VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL == layout) {
+ return VK_PIPELINE_STAGE_ALL_COMMANDS_BIT;
+ } else if (VK_IMAGE_LAYOUT_PREINITIALIZED == layout) {
+ return VK_PIPELINE_STAGE_HOST_BIT;
+ }
+
+ SkASSERT(VK_IMAGE_LAYOUT_UNDEFINED == layout);
+ return VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT;
+}
+
+VkAccessFlags GrVkMemory::LayoutToSrcAccessMask(const VkImageLayout layout) {
+ // Currently we assume we will never be doing any explicit shader writes (this doesn't
+ // include color attachment or depth/stencil writes). So we will ignore the
+ // VK_MEMORY_OUTPUT_SHADER_WRITE_BIT.
+
+ // We can only directly access the host memory if we are in the preinitialized or general
+ // layout and the image is linear.
+ // TODO: Add a check for linear here so we are not always adding host access to general,
+ // and we should only be in preinitialized if we are linear.
+ VkAccessFlags flags = 0;
+ if (VK_IMAGE_LAYOUT_GENERAL == layout) {
+ flags = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT |
+ VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT |
+ VK_ACCESS_TRANSFER_WRITE_BIT |
+ VK_ACCESS_HOST_WRITE_BIT | VK_ACCESS_HOST_READ_BIT;
+ } else if (VK_IMAGE_LAYOUT_PREINITIALIZED == layout) {
+ flags = VK_ACCESS_HOST_WRITE_BIT | VK_ACCESS_HOST_READ_BIT;
+ } else if (VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL == layout) {
+ flags = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
+ } else if (VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL == layout) {
+ flags = VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT;
+ } else if (VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL == layout) {
+ flags = VK_ACCESS_TRANSFER_WRITE_BIT;
+ } else if (VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL == layout) {
+ flags = VK_ACCESS_TRANSFER_READ_BIT;
+ } else if (VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL == layout) {
+ flags = VK_ACCESS_SHADER_READ_BIT;
+ }
+ return flags;
+}
+
--- /dev/null
+/*
+* Copyright 2015 Google Inc.
+*
+* Use of this source code is governed by a BSD-style license that can be
+* found in the LICENSE file.
+*/
+
+#ifndef GrVkMemory_DEFINED
+#define GrVkMemory_DEFINED
+
+#include "vulkan/vulkan.h"
+
+class GrVkGpu;
+
+namespace GrVkMemory {
+ /**
+ * Allocates Vulkan device memory for the given object and binds the object to it.
+ * Returns true if the allocation succeeded.
+ */
+ bool AllocAndBindBufferMemory(const GrVkGpu* gpu,
+ VkBuffer buffer,
+ const VkMemoryPropertyFlags flags,
+ VkDeviceMemory* memory);
+
+ bool AllocAndBindImageMemory(const GrVkGpu* gpu,
+ VkImage image,
+ const VkMemoryPropertyFlags flags,
+ VkDeviceMemory* memory);
+
+ VkPipelineStageFlags LayoutToPipelineStageFlags(const VkImageLayout layout);
+
+ VkAccessFlags LayoutToSrcAccessMask(const VkImageLayout layout);
+}
+
+#endif
\ No newline at end of file
--- /dev/null
+/*
+* Copyright 2016 Google Inc.
+*
+* Use of this source code is governed by a BSD-style license that can be
+* found in the LICENSE file.
+*/
+
+#include "GrVkPipeline.h"
+
+#include "GrGeometryProcessor.h"
+#include "GrPipeline.h"
+
+#include "GrVkGpu.h"
+#include "GrVkProgramDesc.h"
+#include "GrVkRenderTarget.h"
+#include "GrVkUtil.h"
+
+static inline VkFormat attrib_type_to_vkformat(GrVertexAttribType type) {
+ SkASSERT(type >= 0 && type < kGrVertexAttribTypeCount);
+ static const VkFormat kFormats[kGrVertexAttribTypeCount] = {
+ VK_FORMAT_R32_SFLOAT, // kFloat_GrVertexAttribType
+ VK_FORMAT_R32G32_SFLOAT, // kVec2f_GrVertexAttribType
+ VK_FORMAT_R32G32B32_SFLOAT, // kVec3f_GrVertexAttribType
+ VK_FORMAT_R32G32B32A32_SFLOAT, // kVec4f_GrVertexAttribType
+ VK_FORMAT_R8_UNORM, // kUByte_GrVertexAttribType
+ VK_FORMAT_R8G8B8A8_UNORM, // kVec4ub_GrVertexAttribType
+ VK_FORMAT_R16G16_SSCALED, // kVec2s_GrVertexAttribType
+ };
+ GR_STATIC_ASSERT(0 == kFloat_GrVertexAttribType);
+ GR_STATIC_ASSERT(1 == kVec2f_GrVertexAttribType);
+ GR_STATIC_ASSERT(2 == kVec3f_GrVertexAttribType);
+ GR_STATIC_ASSERT(3 == kVec4f_GrVertexAttribType);
+ GR_STATIC_ASSERT(4 == kUByte_GrVertexAttribType);
+ GR_STATIC_ASSERT(5 == kVec4ub_GrVertexAttribType);
+ GR_STATIC_ASSERT(6 == kVec2s_GrVertexAttribType);
+ GR_STATIC_ASSERT(SK_ARRAY_COUNT(kFormats) == kGrVertexAttribTypeCount);
+ return kFormats[type];
+}
+
+static void setup_vertex_input_state(const GrPrimitiveProcessor& primProc,
+ VkPipelineVertexInputStateCreateInfo* vertexInputInfo,
+ VkVertexInputBindingDescription* bindingDesc,
+ int maxBindingDescCount,
+ VkVertexInputAttributeDescription* attributeDesc,
+ int maxAttributeDescCount) {
+ // for now we have only one vertex buffer and one binding
+ memset(bindingDesc, 0, sizeof(VkVertexInputBindingDescription));
+ bindingDesc->binding = 0;
+ bindingDesc->stride = (uint32_t)primProc.getVertexStride();
+ bindingDesc->inputRate = VK_VERTEX_INPUT_RATE_VERTEX;
+
+ // setup attribute descriptions
+ int vaCount = primProc.numAttribs();
+ SkASSERT(vaCount < maxAttributeDescCount);
+ if (vaCount > 0) {
+ size_t offset = 0;
+ for (int attribIndex = 0; attribIndex < vaCount; attribIndex++) {
+ const GrGeometryProcessor::Attribute& attrib = primProc.getAttrib(attribIndex);
+ GrVertexAttribType attribType = attrib.fType;
+
+ VkVertexInputAttributeDescription& vkAttrib = attributeDesc[attribIndex];
+ vkAttrib.location = attribIndex; // for now assume location = attribIndex
+ vkAttrib.binding = 0; // for now only one vertex buffer & binding
+ vkAttrib.format = attrib_type_to_vkformat(attribType);
+ vkAttrib.offset = static_cast<uint32_t>(offset);
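+ // Note: Attribute::fOffset holds the attribute's (aligned) size, not its offset, so
+ // the running total below yields each attribute's byte offset within the vertex.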
+ offset += attrib.fOffset;
+ }
+ }
+
+ memset(vertexInputInfo, 0, sizeof(VkPipelineVertexInputStateCreateInfo));
+ vertexInputInfo->sType = VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO;
+ vertexInputInfo->pNext = nullptr;
+ vertexInputInfo->flags = 0;
+ vertexInputInfo->vertexBindingDescriptionCount = 1;
+ vertexInputInfo->pVertexBindingDescriptions = bindingDesc;
+ vertexInputInfo->vertexAttributeDescriptionCount = vaCount;
+ vertexInputInfo->pVertexAttributeDescriptions = attributeDesc;
+}
+
+static void setup_input_assembly_state(GrPrimitiveType primitiveType,
+ VkPipelineInputAssemblyStateCreateInfo* inputAssemblyInfo) {
+ static const VkPrimitiveTopology gPrimitiveType2VkTopology[] = {
+ VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST,
+ VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP,
+ VK_PRIMITIVE_TOPOLOGY_TRIANGLE_FAN,
+ VK_PRIMITIVE_TOPOLOGY_POINT_LIST,
+ VK_PRIMITIVE_TOPOLOGY_LINE_LIST,
+ VK_PRIMITIVE_TOPOLOGY_LINE_STRIP
+ };
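+ // Indexed by GrPrimitiveType; assumes the enum order is triangles, triangle strip,
+ // triangle fan, points, lines, line strip.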
+
+ memset(inputAssemblyInfo, 0, sizeof(VkPipelineInputAssemblyStateCreateInfo));
+ inputAssemblyInfo->sType = VK_STRUCTURE_TYPE_PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO;
+ inputAssemblyInfo->pNext = nullptr;
+ inputAssemblyInfo->flags = 0;
+ inputAssemblyInfo->primitiveRestartEnable = VK_FALSE;
+ inputAssemblyInfo->topology = gPrimitiveType2VkTopology[primitiveType];
+}
+
+static VkStencilOp stencil_op_to_vk_stencil_op(GrStencilOp op) {
+ static const VkStencilOp gTable[] = {
+ VK_STENCIL_OP_KEEP, // kKeep_StencilOp
+ VK_STENCIL_OP_REPLACE, // kReplace_StencilOp
+ VK_STENCIL_OP_INCREMENT_AND_WRAP, // kIncWrap_StencilOp
+ VK_STENCIL_OP_INCREMENT_AND_CLAMP, // kIncClamp_StencilOp
+ VK_STENCIL_OP_DECREMENT_AND_WRAP, // kDecWrap_StencilOp
+ VK_STENCIL_OP_DECREMENT_AND_CLAMP, // kDecClamp_StencilOp
+ VK_STENCIL_OP_ZERO, // kZero_StencilOp
+ VK_STENCIL_OP_INVERT, // kInvert_StencilOp
+ };
+ GR_STATIC_ASSERT(SK_ARRAY_COUNT(gTable) == kStencilOpCount);
+ GR_STATIC_ASSERT(0 == kKeep_StencilOp);
+ GR_STATIC_ASSERT(1 == kReplace_StencilOp);
+ GR_STATIC_ASSERT(2 == kIncWrap_StencilOp);
+ GR_STATIC_ASSERT(3 == kIncClamp_StencilOp);
+ GR_STATIC_ASSERT(4 == kDecWrap_StencilOp);
+ GR_STATIC_ASSERT(5 == kDecClamp_StencilOp);
+ GR_STATIC_ASSERT(6 == kZero_StencilOp);
+ GR_STATIC_ASSERT(7 == kInvert_StencilOp);
+ SkASSERT((unsigned)op < kStencilOpCount);
+ return gTable[op];
+}
+
+static VkCompareOp stencil_func_to_vk_compare_op(GrStencilFunc basicFunc) {
+ static const VkCompareOp gTable[] = {
+ VK_COMPARE_OP_ALWAYS, // kAlways_StencilFunc
+ VK_COMPARE_OP_NEVER, // kNever_StencilFunc
+ VK_COMPARE_OP_GREATER, // kGreater_StencilFunc
+ VK_COMPARE_OP_GREATER_OR_EQUAL, // kGEqual_StencilFunc
+ VK_COMPARE_OP_LESS, // kLess_StencilFunc
+ VK_COMPARE_OP_LESS_OR_EQUAL, // kLEqual_StencilFunc,
+ VK_COMPARE_OP_EQUAL, // kEqual_StencilFunc,
+ VK_COMPARE_OP_NOT_EQUAL, // kNotEqual_StencilFunc,
+ };
+ GR_STATIC_ASSERT(SK_ARRAY_COUNT(gTable) == kBasicStencilFuncCount);
+ GR_STATIC_ASSERT(0 == kAlways_StencilFunc);
+ GR_STATIC_ASSERT(1 == kNever_StencilFunc);
+ GR_STATIC_ASSERT(2 == kGreater_StencilFunc);
+ GR_STATIC_ASSERT(3 == kGEqual_StencilFunc);
+ GR_STATIC_ASSERT(4 == kLess_StencilFunc);
+ GR_STATIC_ASSERT(5 == kLEqual_StencilFunc);
+ GR_STATIC_ASSERT(6 == kEqual_StencilFunc);
+ GR_STATIC_ASSERT(7 == kNotEqual_StencilFunc);
+ SkASSERT((unsigned)basicFunc < kBasicStencilFuncCount);
+
+ return gTable[basicFunc];
+}
+
+static void setup_depth_stencil_state(const GrVkGpu* gpu,
+ const GrStencilSettings& stencilSettings,
+ VkPipelineDepthStencilStateCreateInfo* stencilInfo) {
+ memset(stencilInfo, 0, sizeof(VkPipelineDepthStencilStateCreateInfo));
+ stencilInfo->sType = VK_STRUCTURE_TYPE_PIPELINE_DEPTH_STENCIL_STATE_CREATE_INFO;
+ stencilInfo->pNext = nullptr;
+ stencilInfo->flags = 0;
+ // set depth testing defaults
+ stencilInfo->depthTestEnable = VK_FALSE;
+ stencilInfo->depthWriteEnable = VK_FALSE;
+ stencilInfo->depthCompareOp = VK_COMPARE_OP_ALWAYS;
+ stencilInfo->depthBoundsTestEnable = VK_FALSE;
+ stencilInfo->stencilTestEnable = !stencilSettings.isDisabled();
+ if (!stencilSettings.isDisabled()) {
+ // Set front face
+ GrStencilSettings::Face face = GrStencilSettings::kFront_Face;
+ stencilInfo->front.failOp = stencil_op_to_vk_stencil_op(stencilSettings.failOp(face));
+ stencilInfo->front.passOp = stencil_op_to_vk_stencil_op(stencilSettings.passOp(face));
+ stencilInfo->front.depthFailOp = stencilInfo->front.failOp;
+ stencilInfo->front.compareOp = stencil_func_to_vk_compare_op(stencilSettings.func(face));
+ stencilInfo->front.compareMask = stencilSettings.funcMask(face);
+ stencilInfo->front.writeMask = 0;
+ stencilInfo->front.reference = 0;
+
+ // Set back face
+ face = GrStencilSettings::kBack_Face;
+ stencilInfo->back.failOp = stencil_op_to_vk_stencil_op(stencilSettings.failOp(face));
+ stencilInfo->back.passOp = stencil_op_to_vk_stencil_op(stencilSettings.passOp(face));
+ stencilInfo->back.depthFailOp = stencilInfo->back.failOp;
+ stencilInfo->back.compareOp = stencil_func_to_vk_compare_op(stencilSettings.func(face));
+ stencilInfo->back.compareMask = stencilSettings.funcMask(face);
+ stencilInfo->back.writeMask = 0;
+ stencilInfo->back.reference = 0;
+ }
+ stencilInfo->minDepthBounds = 0.0f;
+ stencilInfo->maxDepthBounds = 1.0f;
+}
+
+static void setup_viewport_scissor_state(const GrVkGpu* gpu,
+ const GrPipeline& pipeline,
+ const GrVkRenderTarget* vkRT,
+ VkPipelineViewportStateCreateInfo* viewportInfo,
+ VkViewport* viewport,
+ VkRect2D* scissor) {
+ memset(viewportInfo, 0, sizeof(VkPipelineViewportStateCreateInfo));
+ viewportInfo->sType = VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_STATE_CREATE_INFO;
+ viewportInfo->pNext = nullptr;
+ viewportInfo->flags = 0;
+
+ viewport->x = 0.0f;
+ viewport->y = 0.0f;
+ viewport->width = SkIntToScalar(vkRT->width());
+ viewport->height = SkIntToScalar(vkRT->height());
+ viewport->minDepth = 0.0f;
+ viewport->maxDepth = 1.0f;
+ viewportInfo->viewportCount = 1;
+ viewportInfo->pViewports = viewport;
+
+ const GrScissorState& scissorState = pipeline.getScissorState();
+ if (scissorState.enabled() &&
+ !scissorState.rect().contains(0, 0, vkRT->width(), vkRT->height())) {
+ // This all assumes the scissorState has previously been clipped to the device space render
+ // target.
+ scissor->offset.x = scissorState.rect().fLeft;
+ scissor->extent.width = scissorState.rect().width();
+ if (kTopLeft_GrSurfaceOrigin == vkRT->origin()) {
+ scissor->offset.y = scissorState.rect().fTop;
+ } else {
+ SkASSERT(kBottomLeft_GrSurfaceOrigin == vkRT->origin());
+ scissor->offset.y = vkRT->height() - scissorState.rect().fBottom;
+ }
+ scissor->extent.height = scissorState.rect().height();
+
+ viewportInfo->scissorCount = 1;
+ viewportInfo->pScissors = scissor;
+ SkASSERT(scissor->offset.x >= 0);
+ SkASSERT(scissor->offset.x + scissor->extent.width <= (uint32_t)vkRT->width());
+ SkASSERT(scissor->offset.y >= 0);
+ SkASSERT(scissor->offset.y + scissor->extent.height <= (uint32_t)vkRT->height());
+ } else {
+ scissor->extent.width = vkRT->width();
+ scissor->extent.height = vkRT->height();
+ scissor->offset.x = 0;
+ scissor->offset.y = 0;
+ viewportInfo->scissorCount = 1;
+ viewportInfo->pScissors = scissor;
+ }
+ SkASSERT(viewportInfo->viewportCount == viewportInfo->scissorCount);
+}
+
+static void setup_multisample_state(const GrPipeline& pipeline,
+ VkPipelineMultisampleStateCreateInfo* multisampleInfo) {
+ memset(multisampleInfo, 0, sizeof(VkPipelineMultisampleStateCreateInfo));
+ multisampleInfo->sType = VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO;
+ multisampleInfo->pNext = nullptr;
+ multisampleInfo->flags = 0;
+ int numSamples = pipeline.getRenderTarget()->numColorSamples();
+ SkAssertResult(GrSampleCountToVkSampleCount(numSamples,
+ &multisampleInfo->rasterizationSamples));
+ multisampleInfo->sampleShadingEnable = VK_FALSE;
+ multisampleInfo->minSampleShading = 0;
+ multisampleInfo->pSampleMask = nullptr;
+ multisampleInfo->alphaToCoverageEnable = VK_FALSE;
+ multisampleInfo->alphaToOneEnable = VK_FALSE;
+}
+
+static VkBlendFactor blend_coeff_to_vk_blend(GrBlendCoeff coeff) {
+ static const VkBlendFactor gTable[] = {
+ VK_BLEND_FACTOR_ZERO, // kZero_GrBlendCoeff
+ VK_BLEND_FACTOR_ONE, // kOne_GrBlendCoeff
+ VK_BLEND_FACTOR_SRC_COLOR, // kSC_GrBlendCoeff
+ VK_BLEND_FACTOR_ONE_MINUS_SRC_COLOR, // kISC_GrBlendCoeff
+ VK_BLEND_FACTOR_DST_COLOR, // kDC_GrBlendCoeff
+ VK_BLEND_FACTOR_ONE_MINUS_DST_COLOR, // kIDC_GrBlendCoeff
+ VK_BLEND_FACTOR_SRC_ALPHA, // kSA_GrBlendCoeff
+ VK_BLEND_FACTOR_ONE_MINUS_SRC_ALPHA, // kISA_GrBlendCoeff
+ VK_BLEND_FACTOR_DST_ALPHA, // kDA_GrBlendCoeff
+ VK_BLEND_FACTOR_ONE_MINUS_DST_ALPHA, // kIDA_GrBlendCoeff
+ VK_BLEND_FACTOR_CONSTANT_COLOR, // kConstC_GrBlendCoeff
+ VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_COLOR, // kIConstC_GrBlendCoeff
+ VK_BLEND_FACTOR_CONSTANT_ALPHA, // kConstA_GrBlendCoeff
+ VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_ALPHA, // kIConstA_GrBlendCoeff
+ VK_BLEND_FACTOR_SRC1_COLOR, // kS2C_GrBlendCoeff
+ VK_BLEND_FACTOR_ONE_MINUS_SRC1_COLOR, // kIS2C_GrBlendCoeff
+ VK_BLEND_FACTOR_SRC1_ALPHA, // kS2A_GrBlendCoeff
+ VK_BLEND_FACTOR_ONE_MINUS_SRC1_ALPHA, // kIS2A_GrBlendCoeff
+ };
+ GR_STATIC_ASSERT(SK_ARRAY_COUNT(gTable) == kGrBlendCoeffCnt);
+ GR_STATIC_ASSERT(0 == kZero_GrBlendCoeff);
+ GR_STATIC_ASSERT(1 == kOne_GrBlendCoeff);
+ GR_STATIC_ASSERT(2 == kSC_GrBlendCoeff);
+ GR_STATIC_ASSERT(3 == kISC_GrBlendCoeff);
+ GR_STATIC_ASSERT(4 == kDC_GrBlendCoeff);
+ GR_STATIC_ASSERT(5 == kIDC_GrBlendCoeff);
+ GR_STATIC_ASSERT(6 == kSA_GrBlendCoeff);
+ GR_STATIC_ASSERT(7 == kISA_GrBlendCoeff);
+ GR_STATIC_ASSERT(8 == kDA_GrBlendCoeff);
+ GR_STATIC_ASSERT(9 == kIDA_GrBlendCoeff);
+ GR_STATIC_ASSERT(10 == kConstC_GrBlendCoeff);
+ GR_STATIC_ASSERT(11 == kIConstC_GrBlendCoeff);
+ GR_STATIC_ASSERT(12 == kConstA_GrBlendCoeff);
+ GR_STATIC_ASSERT(13 == kIConstA_GrBlendCoeff);
+ GR_STATIC_ASSERT(14 == kS2C_GrBlendCoeff);
+ GR_STATIC_ASSERT(15 == kIS2C_GrBlendCoeff);
+ GR_STATIC_ASSERT(16 == kS2A_GrBlendCoeff);
+ GR_STATIC_ASSERT(17 == kIS2A_GrBlendCoeff);
+
+ SkASSERT((unsigned)coeff < kGrBlendCoeffCnt);
+ return gTable[coeff];
+}
+
+static VkBlendOp blend_equation_to_vk_blend_op(GrBlendEquation equation) {
+ static const VkBlendOp gTable[] = {
+ VK_BLEND_OP_ADD, // kAdd_GrBlendEquation
+ VK_BLEND_OP_SUBTRACT, // kSubtract_GrBlendEquation
+ VK_BLEND_OP_REVERSE_SUBTRACT, // kReverseSubtract_GrBlendEquation
+ };
+ GR_STATIC_ASSERT(SK_ARRAY_COUNT(gTable) == kFirstAdvancedGrBlendEquation);
+ GR_STATIC_ASSERT(0 == kAdd_GrBlendEquation);
+ GR_STATIC_ASSERT(1 == kSubtract_GrBlendEquation);
+ GR_STATIC_ASSERT(2 == kReverseSubtract_GrBlendEquation);
+
+ SkASSERT((unsigned)equation < kFirstAdvancedGrBlendEquation);
+ return gTable[equation];
+}
+
+static bool blend_coeff_refs_constant(GrBlendCoeff coeff) {
+ static const bool gCoeffReferencesBlendConst[] = {
+ false,
+ false,
+ false,
+ false,
+ false,
+ false,
+ false,
+ false,
+ false,
+ false,
+ true,
+ true,
+ true,
+ true,
+
+ // extended blend coeffs
+ false,
+ false,
+ false,
+ false,
+ };
+ GR_STATIC_ASSERT(kGrBlendCoeffCnt == SK_ARRAY_COUNT(gCoeffReferencesBlendConst));
+ // Individual enum asserts already made in blend_coeff_to_vk_blend
+ return gCoeffReferencesBlendConst[coeff];
+}
+
+static void setup_color_blend_state(const GrVkGpu* gpu,
+ const GrPipeline& pipeline,
+ VkPipelineColorBlendStateCreateInfo* colorBlendInfo,
+ VkPipelineColorBlendAttachmentState* attachmentState) {
+ GrXferProcessor::BlendInfo blendInfo;
+ pipeline.getXferProcessor().getBlendInfo(&blendInfo);
+
+ GrBlendEquation equation = blendInfo.fEquation;
+ GrBlendCoeff srcCoeff = blendInfo.fSrcBlend;
+ GrBlendCoeff dstCoeff = blendInfo.fDstBlend;
+ bool blendOff = (kAdd_GrBlendEquation == equation || kSubtract_GrBlendEquation == equation) &&
+ kOne_GrBlendCoeff == srcCoeff && kZero_GrBlendCoeff == dstCoeff;
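+ // (src * 1) + (dst * 0) == src for both add and subtract, so blending can be
+ // disabled outright in this case.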
+
+ memset(attachmentState, 0, sizeof(VkPipelineColorBlendAttachmentState));
+ attachmentState->blendEnable = !blendOff;
+ if (!blendOff) {
+ attachmentState->srcColorBlendFactor = blend_coeff_to_vk_blend(srcCoeff);
+ attachmentState->dstColorBlendFactor = blend_coeff_to_vk_blend(dstCoeff);
+ attachmentState->colorBlendOp = blend_equation_to_vk_blend_op(equation);
+ attachmentState->srcAlphaBlendFactor = blend_coeff_to_vk_blend(srcCoeff);
+ attachmentState->dstAlphaBlendFactor = blend_coeff_to_vk_blend(dstCoeff);
+ attachmentState->alphaBlendOp = blend_equation_to_vk_blend_op(equation);
+ }
+ attachmentState->colorWriteMask = VK_COLOR_COMPONENT_R_BIT | VK_COLOR_COMPONENT_G_BIT |
+ VK_COLOR_COMPONENT_B_BIT | VK_COLOR_COMPONENT_A_BIT;
+
+ memset(colorBlendInfo, 0, sizeof(VkPipelineColorBlendStateCreateInfo));
+ colorBlendInfo->sType = VK_STRUCTURE_TYPE_PIPELINE_COLOR_BLEND_STATE_CREATE_INFO;
+ colorBlendInfo->pNext = nullptr;
+ colorBlendInfo->flags = 0;
+ colorBlendInfo->logicOpEnable = VK_FALSE;
+ colorBlendInfo->attachmentCount = 1;
+ colorBlendInfo->pAttachments = attachmentState;
+ if (blend_coeff_refs_constant(srcCoeff) || blend_coeff_refs_constant(dstCoeff)) {
+ GrColorToRGBAFloat(blendInfo.fBlendConstant, colorBlendInfo->blendConstants);
+ }
+}
+
+static VkCullModeFlags draw_face_to_vk_cull_mode(GrPipelineBuilder::DrawFace drawFace) {
+ // Assumes that we've set the front face to be ccw
+ static const VkCullModeFlags gTable[] = {
+ VK_CULL_MODE_NONE, // kBoth_DrawFace
+ VK_CULL_MODE_BACK_BIT, // kCCW_DrawFace, cull back face
+ VK_CULL_MODE_FRONT_BIT, // kCW_DrawFace, cull front face
+ };
+ GR_STATIC_ASSERT(0 == GrPipelineBuilder::kBoth_DrawFace);
+ GR_STATIC_ASSERT(1 == GrPipelineBuilder::kCCW_DrawFace);
+ GR_STATIC_ASSERT(2 == GrPipelineBuilder::kCW_DrawFace);
+ SkASSERT((unsigned)drawFace <= 2);
+
+ return gTable[drawFace];
+}
+
+static void setup_raster_state(const GrVkGpu* gpu,
+ const GrPipeline& pipeline,
+ VkPipelineRasterizationStateCreateInfo* rasterInfo) {
+ memset(rasterInfo, 0, sizeof(VkPipelineRasterizationStateCreateInfo));
+ rasterInfo->sType = VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_STATE_CREATE_INFO;
+ rasterInfo->pNext = nullptr;
+ rasterInfo->flags = 0;
+ rasterInfo->depthClampEnable = VK_FALSE;
+ rasterInfo->rasterizerDiscardEnable = VK_FALSE;
+ rasterInfo->polygonMode = VK_POLYGON_MODE_FILL;
+ rasterInfo->cullMode = draw_face_to_vk_cull_mode(pipeline.getDrawFace());
+ rasterInfo->frontFace = VK_FRONT_FACE_COUNTER_CLOCKWISE;
+ rasterInfo->depthBiasEnable = VK_FALSE;
+ rasterInfo->depthBiasConstantFactor = 0.0f;
+ rasterInfo->depthBiasClamp = 0.0f;
+ rasterInfo->depthBiasSlopeFactor = 0.0f;
+ rasterInfo->lineWidth = 1.0f;
+}
+
+static void setup_dynamic_state(const GrVkGpu* gpu,
+ const GrPipeline& pipeline,
+ VkPipelineDynamicStateCreateInfo* dynamicInfo) {
+ memset(dynamicInfo, 0, sizeof(VkPipelineDynamicStateCreateInfo));
+ dynamicInfo->sType = VK_STRUCTURE_TYPE_PIPELINE_DYNAMIC_STATE_CREATE_INFO;
+ // TODO: mask out any state we might want to set dynamically
+ dynamicInfo->dynamicStateCount = 0;
+}
+
+GrVkPipeline* GrVkPipeline::Create(GrVkGpu* gpu, const GrPipeline& pipeline,
+ const GrPrimitiveProcessor& primProc,
+ VkPipelineShaderStageCreateInfo* shaderStageInfo,
+ int shaderStageCount,
+ GrPrimitiveType primitiveType,
+ const GrVkRenderPass& renderPass,
+ VkPipelineLayout layout) {
+ VkPipelineVertexInputStateCreateInfo vertexInputInfo;
+ VkVertexInputBindingDescription bindingDesc;
+ // TODO: allocate this based on VkPhysicalDeviceLimits::maxVertexInputAttributes
+ static const int kMaxVertexAttributes = 16;
+ static VkVertexInputAttributeDescription attributeDesc[kMaxVertexAttributes];
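+ // Note: this function-local static array is shared scratch space, which assumes
+ // pipelines are created from a single thread at a time.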
+ setup_vertex_input_state(primProc, &vertexInputInfo, &bindingDesc, 1,
+ attributeDesc, kMaxVertexAttributes);
+
+ VkPipelineInputAssemblyStateCreateInfo inputAssemblyInfo;
+ setup_input_assembly_state(primitiveType, &inputAssemblyInfo);
+
+ VkPipelineDepthStencilStateCreateInfo depthStencilInfo;
+ setup_depth_stencil_state(gpu, pipeline.getStencil(), &depthStencilInfo);
+
+ GrRenderTarget* rt = pipeline.getRenderTarget();
+ GrVkRenderTarget* vkRT = static_cast<GrVkRenderTarget*>(rt);
+ VkPipelineViewportStateCreateInfo viewportInfo;
+ VkViewport viewport;
+ VkRect2D scissor;
+ setup_viewport_scissor_state(gpu, pipeline, vkRT, &viewportInfo, &viewport, &scissor);
+
+ VkPipelineMultisampleStateCreateInfo multisampleInfo;
+ setup_multisample_state(pipeline, &multisampleInfo);
+
+ // We will only have one color attachment per pipeline.
+ VkPipelineColorBlendAttachmentState attachmentStates[1];
+ VkPipelineColorBlendStateCreateInfo colorBlendInfo;
+ setup_color_blend_state(gpu, pipeline, &colorBlendInfo, attachmentStates);
+
+ VkPipelineRasterizationStateCreateInfo rasterInfo;
+ setup_raster_state(gpu, pipeline, &rasterInfo);
+
+ VkPipelineDynamicStateCreateInfo dynamicInfo;
+ setup_dynamic_state(gpu, pipeline, &dynamicInfo);
+
+ VkGraphicsPipelineCreateInfo pipelineCreateInfo;
+ memset(&pipelineCreateInfo, 0, sizeof(VkGraphicsPipelineCreateInfo));
+ pipelineCreateInfo.sType = VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO;
+ pipelineCreateInfo.pNext = nullptr;
+ pipelineCreateInfo.flags = 0;
+ pipelineCreateInfo.stageCount = shaderStageCount;
+ pipelineCreateInfo.pStages = shaderStageInfo;
+ pipelineCreateInfo.pVertexInputState = &vertexInputInfo;
+ pipelineCreateInfo.pInputAssemblyState = &inputAssemblyInfo;
+ pipelineCreateInfo.pTessellationState = nullptr;
+ pipelineCreateInfo.pViewportState = &viewportInfo;
+ pipelineCreateInfo.pRasterizationState = &rasterInfo;
+ pipelineCreateInfo.pMultisampleState = &multisampleInfo;
+ pipelineCreateInfo.pDepthStencilState = &depthStencilInfo;
+ pipelineCreateInfo.pColorBlendState = &colorBlendInfo;
+ pipelineCreateInfo.pDynamicState = &dynamicInfo;
+ pipelineCreateInfo.layout = layout;
+ pipelineCreateInfo.renderPass = renderPass.vkRenderPass();
+ pipelineCreateInfo.subpass = 0;
+ pipelineCreateInfo.basePipelineHandle = VK_NULL_HANDLE;
+ pipelineCreateInfo.basePipelineIndex = -1;
+
+ VkPipeline vkPipeline;
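+ // The second parameter to CreateGraphicsPipelines is the VkPipelineCache; passing
+ // nullptr here means no cache is used and every pipeline is built from scratch.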
+ VkResult err = GR_VK_CALL(gpu->vkInterface(), CreateGraphicsPipelines(gpu->device(),
+ nullptr, 1,
+ &pipelineCreateInfo,
+ nullptr, &vkPipeline));
+ if (err) {
+ return nullptr;
+ }
+
+ return new GrVkPipeline(vkPipeline);
+}
+
+void GrVkPipeline::freeGPUData(const GrVkGpu* gpu) const {
+ GR_VK_CALL(gpu->vkInterface(), DestroyPipeline(gpu->device(), fPipeline, nullptr));
+}
+
--- /dev/null
+/*
+* Copyright 2016 Google Inc.
+*
+* Use of this source code is governed by a BSD-style license that can be
+* found in the LICENSE file.
+*/
+
+#ifndef GrVkPipeline_DEFINED
+#define GrVkPipeline_DEFINED
+
+#include "GrTypes.h"
+
+#include "GrVkResource.h"
+
+#include "vulkan/vulkan.h"
+
+class GrNonInstancedVertices;
+class GrPipeline;
+class GrPrimitiveProcessor;
+class GrVkGpu;
+class GrVkRenderPass;
+
+class GrVkPipeline : public GrVkResource {
+public:
+ static GrVkPipeline* Create(GrVkGpu* gpu,
+ const GrPipeline& pipeline,
+ const GrPrimitiveProcessor& primProc,
+ VkPipelineShaderStageCreateInfo* shaderStageInfo,
+ int shaderStageCount,
+ GrPrimitiveType primitiveType,
+ const GrVkRenderPass& renderPass,
+ VkPipelineLayout layout);
+
+ VkPipeline pipeline() const { return fPipeline; }
+
+private:
+ GrVkPipeline(VkPipeline pipeline) : INHERITED(), fPipeline(pipeline) {}
+
+ GrVkPipeline(const GrVkPipeline&);
+ GrVkPipeline& operator=(const GrVkPipeline&);
+
+ void freeGPUData(const GrVkGpu* gpu) const override;
+
+ VkPipeline fPipeline;
+
+ typedef GrVkResource INHERITED;
+};
+
+#endif
--- /dev/null
+/*
+* Copyright 2016 Google Inc.
+*
+* Use of this source code is governed by a BSD-style license that can be
+* found in the LICENSE file.
+*/
+
+#include "GrVkProgram.h"
+
+#include "GrPipeline.h"
+#include "GrVkCommandBuffer.h"
+#include "GrVkDescriptorPool.h"
+#include "GrVkGpu.h"
+#include "GrVkImageView.h"
+#include "GrVkMemory.h"
+#include "GrVkPipeline.h"
+#include "GrVkSampler.h"
+#include "GrVkTexture.h"
+#include "GrVkUniformBuffer.h"
+#include "glsl/GrGLSLFragmentProcessor.h"\r
+#include "glsl/GrGLSLGeometryProcessor.h"\r
+#include "glsl/GrGLSLXferProcessor.h"
+
+GrVkProgram::GrVkProgram(GrVkGpu* gpu,
+ GrVkPipeline* pipeline,
+ VkPipelineLayout layout,
+ VkDescriptorSetLayout dsLayout[2],
+ GrVkDescriptorPool* descriptorPool,
+ VkDescriptorSet descriptorSets[2],
+ const BuiltinUniformHandles& builtinUniformHandles,
+ const UniformInfoArray& uniforms,
+ uint32_t vertexUniformSize,
+ uint32_t fragmentUniformSize,
+ uint32_t numSamplers,
+ GrGLSLPrimitiveProcessor* geometryProcessor,
+ GrGLSLXferProcessor* xferProcessor,
+ const GrGLSLFragProcs& fragmentProcessors)
+ : fDescriptorPool(descriptorPool)
+ , fPipeline(pipeline)
+ , fPipelineLayout(layout)
+ , fBuiltinUniformHandles(builtinUniformHandles)
+ , fGeometryProcessor(geometryProcessor)
+ , fXferProcessor(xferProcessor)
+ , fFragmentProcessors(fragmentProcessors)
+ , fProgramDataManager(uniforms, vertexUniformSize, fragmentUniformSize) {
+ fSamplers.setReserve(numSamplers);
+ fTextureViews.setReserve(numSamplers);
+ fTextures.setReserve(numSamplers);
+
+ memcpy(fDSLayout, dsLayout, 2 * sizeof(VkDescriptorSetLayout));
+ memcpy(fDescriptorSets, descriptorSets, 2 * sizeof(VkDescriptorSet));
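+ // Layout convention assumed throughout: fDescriptorSets[GrVkUniformHandler::kSamplerDescSet]
+ // holds the combined image samplers (see writeSamplers) and fDescriptorSets[1] holds the
+ // vertex/fragment uniform buffers (see writeUniformBuffers).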
+
+ fVertexUniformBuffer.reset(GrVkUniformBuffer::Create(gpu, vertexUniformSize, true));
+ fFragmentUniformBuffer.reset(GrVkUniformBuffer::Create(gpu, fragmentUniformSize, true));
+
+#ifdef SK_DEBUG
+ fNumSamplers = numSamplers;
+#endif
+}
+
+GrVkProgram::~GrVkProgram() {
+ // Must have freed all GPU resources before this is destroyed
+ SkASSERT(!fPipeline);
+ SkASSERT(!fDescriptorPool);
+ SkASSERT(!fPipelineLayout);
+ SkASSERT(!fDSLayout[0]);
+ SkASSERT(!fDSLayout[1]);
+ SkASSERT(!fSamplers.count());
+ SkASSERT(!fTextureViews.count());
+ SkASSERT(!fTextures.count());
+}
+
+void GrVkProgram::freeTempResources(const GrVkGpu* gpu) {
+ for (int i = 0; i < fSamplers.count(); ++i) {
+ fSamplers[i]->unref(gpu);
+ }
+ fSamplers.rewind();
+
+ for (int i = 0; i < fTextureViews.count(); ++i) {
+ fTextureViews[i]->unref(gpu);
+ }
+ fTextureViews.rewind();
+
+ for (int i = 0; i < fTextures.count(); ++i) {
+ fTextures[i]->unref(gpu);
+ }
+ fTextures.rewind();
+}
+
+void GrVkProgram::freeGPUResources(const GrVkGpu* gpu) {
+ if (fPipeline) {
+ fPipeline->unref(gpu);
+ fPipeline = nullptr;
+ }
+ if (fDescriptorPool) {
+ fDescriptorPool->unref(gpu);
+ fDescriptorPool = nullptr;
+ }
+ if (fPipelineLayout) {
+ GR_VK_CALL(gpu->vkInterface(), DestroyPipelineLayout(gpu->device(),
+ fPipelineLayout,
+ nullptr));
+ fPipelineLayout = nullptr;
+ }
+
+ if (fDSLayout[0]) {
+ GR_VK_CALL(gpu->vkInterface(), DestroyDescriptorSetLayout(gpu->device(), fDSLayout[0],
+ nullptr));
+ fDSLayout[0] = nullptr;
+ }
+ if (fDSLayout[1]) {
+ GR_VK_CALL(gpu->vkInterface(), DestroyDescriptorSetLayout(gpu->device(), fDSLayout[1],
+ nullptr));
+ fDSLayout[1] = nullptr;
+ }
+
+ if (fVertexUniformBuffer) {
+ fVertexUniformBuffer->release(gpu);
+ }
+
+ if (fFragmentUniformBuffer) {
+ fFragmentUniformBuffer->release(gpu);
+ }
+ this->freeTempResources(gpu);
+}
+
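+// Unlike freeGPUResources above, abandoning assumes the device is already gone, so the
+// handles are dropped via unrefAndAbandon without making any Vulkan destroy calls.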
+void GrVkProgram::abandonGPUResources() {
+ fPipeline->unrefAndAbandon();
+ fPipeline = nullptr;
+ fDescriptorPool->unrefAndAbandon();
+ fDescriptorPool = nullptr;
+ fPipelineLayout = nullptr;
+ fDSLayout[0] = nullptr;
+ fDSLayout[1] = nullptr;
+
+ fVertexUniformBuffer->abandon();
+ fFragmentUniformBuffer->abandon();
+
+ for (int i = 0; i < fSamplers.count(); ++i) {
+ fSamplers[i]->unrefAndAbandon();
+ }
+ fSamplers.rewind();
+
+ for (int i = 0; i < fTextureViews.count(); ++i) {
+ fTextureViews[i]->unrefAndAbandon();
+ }
+ fTextureViews.rewind();
+
+ for (int i = 0; i < fTextures.count(); ++i) {
+ fTextures[i]->unrefAndAbandon();
+ }
+ fTextures.rewind();
+}
+
+static void append_texture_bindings(const GrProcessor& processor,
+ SkTArray<const GrTextureAccess*>* textureBindings) {
+ if (int numTextures = processor.numTextures()) {
+ const GrTextureAccess** bindings = textureBindings->push_back_n(numTextures);
+ int i = 0;
+ do {
+ bindings[i] = &processor.textureAccess(i);
+ } while (++i < numTextures);
+ }
+}
+
+void GrVkProgram::setData(const GrVkGpu* gpu,
+ const GrPrimitiveProcessor& primProc,
+ const GrPipeline& pipeline) {
+ // This is here to protect against someone calling setData multiple times in a row without
+ // freeing the temp resources in between.
+ this->freeTempResources(gpu);
+
+ this->setRenderTargetState(pipeline);
+
+ SkSTArray<8, const GrTextureAccess*> textureBindings;
+
+ fGeometryProcessor->setData(fProgramDataManager, primProc);
+ append_texture_bindings(primProc, &textureBindings);
+
+ for (int i = 0; i < fFragmentProcessors.count(); ++i) {
+ const GrFragmentProcessor& processor = pipeline.getFragmentProcessor(i);
+ fFragmentProcessors[i]->setData(fProgramDataManager, processor);
+ fGeometryProcessor->setTransformData(primProc, fProgramDataManager, i,
+ processor.coordTransforms());
+ append_texture_bindings(processor, &textureBindings);
+ }
+
+ fXferProcessor->setData(fProgramDataManager, pipeline.getXferProcessor());
+ append_texture_bindings(pipeline.getXferProcessor(), &textureBindings);
+
+ this->writeUniformBuffers(gpu);
+
+ this->writeSamplers(gpu, textureBindings);
+}
+
+void GrVkProgram::writeUniformBuffers(const GrVkGpu* gpu) {
+ fProgramDataManager.uploadUniformBuffers(gpu, fVertexUniformBuffer, fFragmentUniformBuffer);
+
+ VkWriteDescriptorSet descriptorWrites[2];
+ memset(descriptorWrites, 0, 2 * sizeof(VkWriteDescriptorSet));
+
+ uint32_t firstUniformWrite = 0;
+ uint32_t uniformBindingUpdateCount = 0;
+
+ // Vertex Uniform Buffer
+ if (fVertexUniformBuffer.get()) {
+ ++uniformBindingUpdateCount;
+ VkDescriptorBufferInfo vertBufferInfo;
+ memset(&vertBufferInfo, 0, sizeof(VkDescriptorBufferInfo));
+ vertBufferInfo.buffer = fVertexUniformBuffer->buffer();
+ vertBufferInfo.offset = 0;
+ vertBufferInfo.range = fVertexUniformBuffer->size();
+
+ descriptorWrites[0].sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
+ descriptorWrites[0].pNext = nullptr;
+ descriptorWrites[0].dstSet = fDescriptorSets[1];
+ descriptorWrites[0].dstBinding = GrVkUniformHandler::kVertexBinding;
+ descriptorWrites[0].dstArrayElement = 0;
+ descriptorWrites[0].descriptorCount = 1;
+ descriptorWrites[0].descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER;
+ descriptorWrites[0].pImageInfo = nullptr;
+ descriptorWrites[0].pBufferInfo = &vertBufferInfo;
+ descriptorWrites[0].pTexelBufferView = nullptr;
+ }
+
+ // Fragment Uniform Buffer
+ if (fFragmentUniformBuffer.get()) {
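+ // If the vertex uniform buffer was absent, descriptorWrites[0] was never filled in,
+ // so the batched update below has to start at index 1.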
+ if (0 == uniformBindingUpdateCount) {
+ firstUniformWrite = 1;
+ }
+ ++uniformBindingUpdateCount;
+ VkDescriptorBufferInfo fragBufferInfo;
+ memset(&fragBufferInfo, 0, sizeof(VkDescriptorBufferInfo));
+ fragBufferInfo.buffer = fFragmentUniformBuffer->buffer();
+ fragBufferInfo.offset = 0;
+ fragBufferInfo.range = fFragmentUniformBuffer->size();
+
+ descriptorWrites[1].sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
+ descriptorWrites[1].pNext = nullptr;
+ descriptorWrites[1].dstSet = fDescriptorSets[1];
+ descriptorWrites[1].dstBinding = GrVkUniformHandler::kFragBinding;
+ descriptorWrites[1].dstArrayElement = 0;
+ descriptorWrites[1].descriptorCount = 1;
+ descriptorWrites[1].descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER;
+ descriptorWrites[1].pImageInfo = nullptr;
+ descriptorWrites[1].pBufferInfo = &fragBufferInfo;
+ descriptorWrites[1].pTexelBufferView = nullptr;
+ }
+
+ if (uniformBindingUpdateCount) {
+ GR_VK_CALL(gpu->vkInterface(), UpdateDescriptorSets(gpu->device(),
+ uniformBindingUpdateCount,
+ &descriptorWrites[firstUniformWrite],
+ 0, nullptr));
+ }
+}
+
+void GrVkProgram::writeSamplers(const GrVkGpu* gpu,
+ const SkTArray<const GrTextureAccess*>& textureBindings) {
+ SkASSERT(fNumSamplers == textureBindings.count());
+
+ for (int i = 0; i < textureBindings.count(); ++i) {
+ fSamplers.push(GrVkSampler::Create(gpu, *textureBindings[i]));
+
+ GrVkTexture* texture = static_cast<GrVkTexture*>(textureBindings[i]->getTexture());
+
+ const GrVkImage::Resource* textureResource = texture->resource();
+ textureResource->ref();
+ fTextures.push(textureResource);
+
+ const GrVkImageView* textureView = texture->textureView();
+ textureView->ref();
+ fTextureViews.push(textureView);
+
+ // Change texture layout so it can be read in shader
+ VkImageLayout layout = texture->currentLayout();
+ VkPipelineStageFlags srcStageMask = GrVkMemory::LayoutToPipelineStageFlags(layout);
+ VkPipelineStageFlags dstStageMask = VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT;
+ VkAccessFlags srcAccessMask = GrVkMemory::LayoutToSrcAccessMask(layout);
+ VkAccessFlags dstAccessMask = VK_ACCESS_SHADER_READ_BIT;
+ texture->setImageLayout(gpu,
+ VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL,
+ srcAccessMask,
+ dstAccessMask,
+ srcStageMask,
+ dstStageMask,
+ false);
+
+ VkDescriptorImageInfo imageInfo;
+ memset(&imageInfo, 0, sizeof(VkDescriptorImageInfo));
+ imageInfo.sampler = fSamplers[i]->sampler();
+ imageInfo.imageView = texture->textureView()->imageView();
+ imageInfo.imageLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;
+
+ VkWriteDescriptorSet writeInfo;
+ memset(&writeInfo, 0, sizeof(VkWriteDescriptorSet));
+ writeInfo.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
+ writeInfo.pNext = nullptr;
+ writeInfo.dstSet = fDescriptorSets[GrVkUniformHandler::kSamplerDescSet];
+ writeInfo.dstBinding = i;
+ writeInfo.dstArrayElement = 0;
+ writeInfo.descriptorCount = 1;
+ writeInfo.descriptorType = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER;
+ writeInfo.pImageInfo = &imageInfo;
+ writeInfo.pBufferInfo = nullptr;
+ writeInfo.pTexelBufferView = nullptr;
+
+ GR_VK_CALL(gpu->vkInterface(), UpdateDescriptorSets(gpu->device(),
+ 1,
+ &writeInfo,
+ 0,
+ nullptr));
+ }
+}
+
+void GrVkProgram::setRenderTargetState(const GrPipeline& pipeline) {
+ // Load the RT height uniform if it is needed to y-flip gl_FragCoord.
+ if (fBuiltinUniformHandles.fRTHeightUni.isValid() &&
+ fRenderTargetState.fRenderTargetSize.fHeight != pipeline.getRenderTarget()->height()) {
+ fProgramDataManager.set1f(fBuiltinUniformHandles.fRTHeightUni,
+ SkIntToScalar(pipeline.getRenderTarget()->height()));
+ }
+
+ // set RT adjustment
+ const GrRenderTarget* rt = pipeline.getRenderTarget();
+ SkISize size;
+ size.set(rt->width(), rt->height());
+ SkASSERT(fBuiltinUniformHandles.fRTAdjustmentUni.isValid());
+ if (fRenderTargetState.fRenderTargetOrigin != rt->origin() ||
+ fRenderTargetState.fRenderTargetSize != size) {
+ fRenderTargetState.fRenderTargetSize = size;
+ fRenderTargetState.fRenderTargetOrigin = rt->origin();
+
+ float rtAdjustmentVec[4];
+ fRenderTargetState.getRTAdjustmentVec(rtAdjustmentVec);
+ fProgramDataManager.set4fv(fBuiltinUniformHandles.fRTAdjustmentUni, 1, rtAdjustmentVec);
+ }
+}
+
+void GrVkProgram::bind(const GrVkGpu* gpu, GrVkCommandBuffer* commandBuffer) {
+ commandBuffer->bindPipeline(gpu, fPipeline);
+ commandBuffer->bindDescriptorSets(gpu, this, fPipelineLayout, 0, 2, fDescriptorSets, 0,
+ nullptr);
+}
+
+void GrVkProgram::addUniformResources(GrVkCommandBuffer& commandBuffer) {
+ commandBuffer.addResource(fDescriptorPool);
+ if (fVertexUniformBuffer.get()) {
+ commandBuffer.addResource(fVertexUniformBuffer->resource());
+ }
+ if (fFragmentUniformBuffer.get()) {
+ commandBuffer.addResource(fFragmentUniformBuffer->resource());
+ }
+ for (int i = 0; i < fSamplers.count(); ++i) {
+ commandBuffer.addResource(fSamplers[i]);
+ }
+
+ for (int i = 0; i < fTextureViews.count(); ++i) {
+ commandBuffer.addResource(fTextureViews[i]);
+ }
+
+ for (int i = 0; i < fTextures.count(); ++i) {
+ commandBuffer.addResource(fTextures[i]);
+ }
+}
--- /dev/null
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#ifndef GrVkProgram_DEFINED
+#define GrVkProgram_DEFINED
+
+#include "GrVkImage.h"
+#include "GrVkProgramDesc.h"
+#include "GrVkProgramDataManager.h"
+#include "glsl/GrGLSLProgramBuilder.h"
+
+#include "vulkan/vulkan.h"
+
+class GrPipeline;
+class GrVkCommandBuffer;
+class GrVkDescriptorPool;
+class GrVkGpu;
+class GrVkImageView;
+class GrVkPipeline;
+class GrVkSampler;
+class GrVkUniformBuffer;
+
+class GrVkProgram : public SkRefCnt {
+public:
+ typedef GrGLSLProgramBuilder::BuiltinUniformHandles BuiltinUniformHandles;
+
+ ~GrVkProgram();
+
+ GrVkPipeline* vkPipeline() const { return fPipeline; }
+
+ void setData(const GrVkGpu*, const GrPrimitiveProcessor&, const GrPipeline&);
+
+ void bind(const GrVkGpu* gpu, GrVkCommandBuffer* commandBuffer);
+
+ void addUniformResources(GrVkCommandBuffer&);
+
+ void freeGPUResources(const GrVkGpu* gpu);
+
+ // This releases resources that only a given instance of a GrVkProgram needs to hold onto and
+ // that don't need to survive across new uses of the program.
+ void freeTempResources(const GrVkGpu* gpu);
+
+ void abandonGPUResources();
+
+private:
+ typedef GrVkProgramDataManager::UniformInfoArray UniformInfoArray;
+ typedef GrGLSLProgramDataManager::UniformHandle UniformHandle;
+
+ GrVkProgram(GrVkGpu* gpu,
+ GrVkPipeline* pipeline,
+ VkPipelineLayout layout,
+ VkDescriptorSetLayout dsLayout[2],
+ GrVkDescriptorPool* descriptorPool,
+ VkDescriptorSet descriptorSets[2],
+ const BuiltinUniformHandles& builtinUniformHandles,
+ const UniformInfoArray& uniforms,
+ uint32_t vertexUniformSize,
+ uint32_t fragmentUniformSize,
+ uint32_t numSamplers,
+ GrGLSLPrimitiveProcessor* geometryProcessor,
+ GrGLSLXferProcessor* xferProcessor,
+ const GrGLSLFragProcs& fragmentProcessors);
+
+ void writeUniformBuffers(const GrVkGpu* gpu);
+
+ void writeSamplers(const GrVkGpu* gpu, const SkTArray<const GrTextureAccess*>& textureBindings);
+
+ /**
+ * We use the RT's size and origin to adjust from Skia device space to OpenGL normalized device
+ * space and to make device space positions have the correct origin for processors that require
+ * them.
+ */
+ struct RenderTargetState {
+ SkISize fRenderTargetSize;
+ GrSurfaceOrigin fRenderTargetOrigin;
+
+ RenderTargetState() { this->invalidate(); }
+ void invalidate() {
+ fRenderTargetSize.fWidth = -1;
+ fRenderTargetSize.fHeight = -1;
+ fRenderTargetOrigin = (GrSurfaceOrigin)-1;
+ }
+
+ /**
+ * Gets a vec4 that adjusts the position from Skia device coords to GL's normalized device
+ * coords. Assuming the transformed position, pos, is a homogeneous vec3, the vec, v, is
+ * applied as such:
+ * pos.x = dot(v.xy, pos.xz)
+ * pos.y = dot(v.zw, pos.yz)
+ */
+ void getRTAdjustmentVec(float* destVec) {
+ destVec[0] = 2.f / fRenderTargetSize.fWidth;
+ destVec[1] = -1.f;
+ if (kBottomLeft_GrSurfaceOrigin == fRenderTargetOrigin) {
+ destVec[2] = -2.f / fRenderTargetSize.fHeight;
+ destVec[3] = 1.f;
+ } else {
+ destVec[2] = 2.f / fRenderTargetSize.fHeight;
+ destVec[3] = -1.f;
+ }
+ }
+ };
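+ // For example (illustrative numbers, not from the original source): a 100x50 render target
+ // with a top-left origin gives v = (0.02, -1, 0.04, -1), so a device-space x of 50 maps to
+ // dot(v.xy, (50, 1)) = 0 in normalized device coords.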
+
+ // Helper for setData() that sets the view matrix and loads the render target height uniform
+ void setRenderTargetState(const GrPipeline&);
+
+
+ // GrVkResources
+ GrVkDescriptorPool* fDescriptorPool;
+ GrVkPipeline* fPipeline;
+
+ // Used for binding DescriptorSets to the command buffer but does not need to survive during
+ // command buffer execution. Thus it does not need to be a GrVkResource.
+ VkPipelineLayout fPipelineLayout;
+
+ // The first set (index 0) will be used for samplers and the second set (index 1) will be
+ // used for uniform buffers.
+ // The DSLayouts are only needed for allocating the descriptor sets and must survive until after
+ // the descriptor sets have been updated. Thus the lifetime of the layouts will just be the life
+ // of the GrVkProgram.
+ VkDescriptorSetLayout fDSLayout[2];
+ // The DescriptorSets need to survive until the gpu has finished all draws that use them.
+ // However, they will only be freed by the descriptor pool. Thus by simply keeping the
+ // descriptor pool alive through the draw, the descriptor sets will also stay alive. Thus we do
+ // not need a GrVkResource version of VkDescriptorSet.
+ VkDescriptorSet fDescriptorSets[2];
+
+ SkAutoTDelete<GrVkUniformBuffer> fVertexUniformBuffer;
+ SkAutoTDelete<GrVkUniformBuffer> fFragmentUniformBuffer;
+
+ // GrVkResources used for sampling textures
+ SkTDArray<GrVkSampler*> fSamplers;
+ SkTDArray<const GrVkImageView*> fTextureViews;
+ SkTDArray<const GrVkImage::Resource*> fTextures;
+
+ // Tracks the current render target uniforms stored in the vertex buffer.
+ RenderTargetState fRenderTargetState;
+ BuiltinUniformHandles fBuiltinUniformHandles;
+
+ // Processors in the program
+ SkAutoTDelete<GrGLSLPrimitiveProcessor> fGeometryProcessor;
+ SkAutoTDelete<GrGLSLXferProcessor> fXferProcessor;
+ GrGLSLFragProcs fFragmentProcessors;
+
+ GrVkProgramDataManager fProgramDataManager;
+
+#ifdef SK_DEBUG
+ int fNumSamplers;
+#endif
+
+ friend class GrVkProgramBuilder;
+};
+
+#endif
--- /dev/null
+/*
+* Copyright 2016 Google Inc.
+*
+* Use of this source code is governed by a BSD-style license that can be
+* found in the LICENSE file.
+*/
+
+#include "vk/GrVkProgramBuilder.h"
+
+#include "vk/GrVkGpu.h"
+#include "vk/GrVkRenderPass.h"
+#include "vk/GrVkProgram.h"
+
+GrVkProgram* GrVkProgramBuilder::CreateProgram(GrVkGpu* gpu,
+ const DrawArgs& args,
+ GrPrimitiveType primitiveType,
+ const GrVkRenderPass& renderPass) {
+ // Create a builder. This will be handed off to effects so they can use it to add
+ // uniforms, varyings, textures, etc.
+ GrVkProgramBuilder builder(gpu, args);
+
+ GrGLSLExpr4 inputColor;
+ GrGLSLExpr4 inputCoverage;
+
+ if (!builder.emitAndInstallProcs(&inputColor,
+ &inputCoverage,
+ gpu->vkCaps().maxSampledTextures())) {
+ builder.cleanupFragmentProcessors();
+ return nullptr;
+ }
+
+ return builder.finalize(args, primitiveType, renderPass);
+}
+
+GrVkProgramBuilder::GrVkProgramBuilder(GrVkGpu* gpu, const DrawArgs& args)
+ : INHERITED(args)
+ , fGpu(gpu)
+ , fVaryingHandler(this)
+ , fUniformHandler(this) {
+}
+
+const GrCaps* GrVkProgramBuilder::caps() const {
+ return fGpu->caps();
+}
+const GrGLSLCaps* GrVkProgramBuilder::glslCaps() const {
+ return fGpu->vkCaps().glslCaps();
+}
+
+void GrVkProgramBuilder::finalizeFragmentOutputColor(GrGLSLShaderVar& outputColor) {
+ outputColor.setLayoutQualifier("location = 0");
+}
+
+void GrVkProgramBuilder::emitSamplers(const GrProcessor& processor,
+ GrGLSLTextureSampler::TextureSamplerArray* outSamplers) {
+ int numTextures = processor.numTextures();
+ UniformHandle* localSamplerUniforms = fSamplerUniforms.push_back_n(numTextures);
+ SkString name;
+ for (int t = 0; t < numTextures; ++t) {
+ name.printf("%d", t);
+ localSamplerUniforms[t] =
+ fUniformHandler.addUniform(kFragment_GrShaderFlag,
+ kSampler2D_GrSLType, kDefault_GrSLPrecision,
+ name.c_str());
+ outSamplers->emplace_back(localSamplerUniforms[t], processor.textureAccess(t));
+ }
+}
+
+static VkShaderStageFlags visibility_to_vk_stage_flags(uint32_t visibility) {
+ VkShaderStageFlags flags = 0;
+
+ if (visibility & kVertex_GrShaderFlag) {
+ flags |= VK_SHADER_STAGE_VERTEX_BIT;
+ }
+ if (visibility & kGeometry_GrShaderFlag) {
+ flags |= VK_SHADER_STAGE_GEOMETRY_BIT;
+ }
+ if (visibility & kFragment_GrShaderFlag) {
+ flags |= VK_SHADER_STAGE_FRAGMENT_BIT;
+ }
+ return flags;
+}
+
+static shaderc_shader_kind vk_shader_stage_to_shaderc_kind(VkShaderStageFlagBits stage) {
+ if (VK_SHADER_STAGE_VERTEX_BIT == stage) {
+ return shaderc_glsl_vertex_shader;
+ }
+ SkASSERT(VK_SHADER_STAGE_FRAGMENT_BIT == stage);
+ return shaderc_glsl_fragment_shader;
+}
+
+bool GrVkProgramBuilder::CreateVkShaderModule(const GrVkGpu* gpu,
+ VkShaderStageFlagBits stage,
+ const GrGLSLShaderBuilder& builder,
+ VkShaderModule* shaderModule,
+ VkPipelineShaderStageCreateInfo* stageInfo) {
+ SkString shaderString;
+ for (int i = 0; i < builder.fCompilerStrings.count(); ++i) {
+ if (builder.fCompilerStrings[i]) {
+ shaderString.append(builder.fCompilerStrings[i]);
+ shaderString.append("\n");
+ }
+ }
+
+ shaderc_compiler_t compiler = gpu->shadercCompiler();
+
+ shaderc_compile_options_t options = shaderc_compile_options_initialize();
+ shaderc_compile_options_set_forced_version_profile(options, 140, shaderc_profile_none);
+
+ shaderc_shader_kind shadercStage = vk_shader_stage_to_shaderc_kind(stage);
+ shaderc_spv_module_t module = shaderc_compile_into_spv(compiler,
+ shaderString.c_str(),
+ shaderString.size(),
+ shadercStage,
+ "shader",
+ "main",
+ options);
+ shaderc_compile_options_release(options);
+#ifdef SK_DEBUG
+ if (shaderc_module_get_num_errors(module)) {
+ SkDebugf("%s\n", shaderString.c_str());
+ SkDebugf("%s\n", shaderc_module_get_error_message(module));
+ return false;
+ }
+#endif
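+ // Note that shaderc compilation errors are only detected here in debug builds; a release
+ // build continues with whatever module shaderc returned.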
+
+ VkShaderModuleCreateInfo moduleCreateInfo;
+ memset(&moduleCreateInfo, 0, sizeof(VkShaderModuleCreateInfo));
+ moduleCreateInfo.sType = VK_STRUCTURE_TYPE_SHADER_MODULE_CREATE_INFO;
+ moduleCreateInfo.pNext = nullptr;
+ moduleCreateInfo.flags = 0;
+ moduleCreateInfo.codeSize = shaderc_module_get_length(module);
+ moduleCreateInfo.pCode = (const uint32_t*)shaderc_module_get_bytes(module);
+
+ VkResult err = GR_VK_CALL(gpu->vkInterface(), CreateShaderModule(gpu->device(),
+ &moduleCreateInfo,
+ nullptr,
+ shaderModule));
+ shaderc_module_release(module);
+ if (err) {
+ return false;
+ }
+
+ memset(stageInfo, 0, sizeof(VkPipelineShaderStageCreateInfo));
+ stageInfo->sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO;
+ stageInfo->pNext = nullptr;
+ stageInfo->flags = 0;
+ stageInfo->stage = stage;
+ stageInfo->module = *shaderModule;
+ stageInfo->pName = "main";
+ stageInfo->pSpecializationInfo = nullptr;
+
+ return true;
+}
+
+GrVkProgram* GrVkProgramBuilder::finalize(const DrawArgs& args,
+ GrPrimitiveType primitiveType,
+ const GrVkRenderPass& renderPass) {
+ VkDescriptorSetLayout dsLayout[2];
+ VkPipelineLayout pipelineLayout;
+ VkShaderModule vertShaderModule;
+ VkShaderModule fragShaderModule;
+
+ uint32_t numSamplers = fSamplerUniforms.count();
+
+ SkAutoTDeleteArray<VkDescriptorSetLayoutBinding> dsSamplerBindings(
+ new VkDescriptorSetLayoutBinding[numSamplers]);
+ for (uint32_t i = 0; i < numSamplers; ++i) {
+ UniformHandle uniHandle = fSamplerUniforms[i];
+ GrVkUniformHandler::UniformInfo uniformInfo = fUniformHandler.getUniformInfo(uniHandle);
+ SkASSERT(kSampler2D_GrSLType == uniformInfo.fVariable.getType());
+ SkASSERT(0 == uniformInfo.fSetNumber);
+ SkASSERT(uniformInfo.fBinding == i);
+ dsSamplerBindings[i].binding = uniformInfo.fBinding;
+ dsSamplerBindings[i].descriptorType = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER;
+ dsSamplerBindings[i].descriptorCount = 1;
+ dsSamplerBindings[i].stageFlags = visibility_to_vk_stage_flags(uniformInfo.fVisibility);
+ dsSamplerBindings[i].pImmutableSamplers = nullptr;
+ }
+
+ VkDescriptorSetLayoutCreateInfo dsSamplerLayoutCreateInfo;
+ memset(&dsSamplerLayoutCreateInfo, 0, sizeof(VkDescriptorSetLayoutCreateInfo));
+ dsSamplerLayoutCreateInfo.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO;
+ dsSamplerLayoutCreateInfo.pNext = nullptr;
+ dsSamplerLayoutCreateInfo.flags = 0;
+ dsSamplerLayoutCreateInfo.bindingCount = fSamplerUniforms.count();
+ // Setting to nullptr fixes an error in the param checker validation layer. Even though
+ // bindingCount is 0 (which is valid), it still tries to validate pBindings unless it is null.
+ dsSamplerLayoutCreateInfo.pBindings = fSamplerUniforms.count() ? dsSamplerBindings.get() :
+ nullptr;
+
+ GR_VK_CALL_ERRCHECK(fGpu->vkInterface(),
+ CreateDescriptorSetLayout(fGpu->device(),
+ &dsSamplerLayoutCreateInfo,
+ nullptr,
+ &dsLayout[GrVkUniformHandler::kSamplerDescSet]));
+
+ // Create Uniform Buffer Descriptor
+ // We always attach uniform buffers to descriptor set 1. The vertex uniform buffer will have
+ // binding 0 and the fragment binding 1.
+ VkDescriptorSetLayoutBinding dsUniBindings[2];
+ memset(&dsUniBindings, 0, 2 * sizeof(VkDescriptorSetLayoutBinding));
+ dsUniBindings[0].binding = GrVkUniformHandler::kVertexBinding;
+ dsUniBindings[0].descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER;
+ dsUniBindings[0].descriptorCount = fUniformHandler.hasVertexUniforms() ? 1 : 0;
+ dsUniBindings[0].stageFlags = VK_SHADER_STAGE_VERTEX_BIT;
+ dsUniBindings[0].pImmutableSamplers = nullptr;
+ dsUniBindings[1].binding = GrVkUniformHandler::kFragBinding;
+ dsUniBindings[1].descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER;
+ dsUniBindings[1].descriptorCount = fUniformHandler.hasFragmentUniforms() ? 1 : 0;
+ dsUniBindings[1].stageFlags = VK_SHADER_STAGE_FRAGMENT_BIT;
+ dsUniBindings[1].pImmutableSamplers = nullptr;
+
+ VkDescriptorSetLayoutCreateInfo dsUniformLayoutCreateInfo;
+ memset(&dsUniformLayoutCreateInfo, 0, sizeof(VkDescriptorSetLayoutCreateInfo));
+ dsUniformLayoutCreateInfo.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO;
+ dsUniformLayoutCreateInfo.pNext = nullptr;
+ dsUniformLayoutCreateInfo.flags = 0;
+ dsUniformLayoutCreateInfo.bindingCount = 2;
+ dsUniformLayoutCreateInfo.pBindings = dsUniBindings;
+
+ GR_VK_CALL_ERRCHECK(fGpu->vkInterface(), CreateDescriptorSetLayout(
+ fGpu->device(),
+ &dsUniformLayoutCreateInfo,
+ nullptr,
+ &dsLayout[GrVkUniformHandler::kUniformBufferDescSet]));
+
+ // Create the VkPipelineLayout
+ VkPipelineLayoutCreateInfo layoutCreateInfo;
+ memset(&layoutCreateInfo, 0, sizeof(VkPipelineLayoutCreateInfo));
+ layoutCreateInfo.sType = VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO;
+ layoutCreateInfo.pNext = nullptr;
+ layoutCreateInfo.flags = 0;
+ layoutCreateInfo.setLayoutCount = 2;
+ layoutCreateInfo.pSetLayouts = dsLayout;
+ layoutCreateInfo.pushConstantRangeCount = 0;
+ layoutCreateInfo.pPushConstantRanges = nullptr;
+
+ GR_VK_CALL_ERRCHECK(fGpu->vkInterface(), CreatePipelineLayout(fGpu->device(),
+ &layoutCreateInfo,
+ nullptr,
+ &pipelineLayout));
+
+ // We need to enable the following extensions so that the compiler can correctly make SPIR-V
+ // from our GLSL shaders.
+ fVS.extensions().appendf("#extension GL_ARB_separate_shader_objects : enable\n");
+ fFS.extensions().appendf("#extension GL_ARB_separate_shader_objects : enable\n");
+ fVS.extensions().appendf("#extension GL_ARB_shading_language_420pack : enable\n");
+ fFS.extensions().appendf("#extension GL_ARB_shading_language_420pack : enable\n");
+
+ this->finalizeShaders();
+
+ VkPipelineShaderStageCreateInfo shaderStageInfo[2];
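+ // Each CreateVkShaderModule call below is wrapped in SkAssertResult, so a failure only trips
+ // an assert in debug builds; release builds still evaluate the call but ignore its result.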
+ SkAssertResult(CreateVkShaderModule(fGpu,
+ VK_SHADER_STAGE_VERTEX_BIT,
+ fVS,
+ &vertShaderModule,
+ &shaderStageInfo[0]));
+
+ SkAssertResult(CreateVkShaderModule(fGpu,
+ VK_SHADER_STAGE_FRAGMENT_BIT,
+ fFS,
+ &fragShaderModule,
+ &shaderStageInfo[1]));
+
+ GrVkResourceProvider& resourceProvider = fGpu->resourceProvider();
+ GrVkPipeline* pipeline = resourceProvider.createPipeline(*args.fPipeline,
+ *args.fPrimitiveProcessor,
+ shaderStageInfo,
+ 2,
+ primitiveType,
+ renderPass,
+ pipelineLayout);
+ GR_VK_CALL(fGpu->vkInterface(), DestroyShaderModule(fGpu->device(), vertShaderModule,
+ nullptr));
+ GR_VK_CALL(fGpu->vkInterface(), DestroyShaderModule(fGpu->device(), fragShaderModule,
+ nullptr));
+
+ if (!pipeline) {
+ GR_VK_CALL(fGpu->vkInterface(), DestroyPipelineLayout(fGpu->device(), pipelineLayout,
+ nullptr));
+ GR_VK_CALL(fGpu->vkInterface(), DestroyDescriptorSetLayout(fGpu->device(), dsLayout[0],
+ nullptr));
+ GR_VK_CALL(fGpu->vkInterface(), DestroyDescriptorSetLayout(fGpu->device(), dsLayout[1],
+ nullptr));
+ return nullptr;
+ }
+
+
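+ // Size the descriptor pool for the two uniform-buffer descriptors (vertex and fragment) plus
+ // one combined image/sampler descriptor per texture.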
+ GrVkDescriptorPool::DescriptorTypeCounts typeCounts;
+ typeCounts.setTypeCount(VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, 2);
+ SkASSERT(numSamplers < 256);
+ typeCounts.setTypeCount(VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER, (uint8_t)numSamplers);
+ GrVkDescriptorPool* descriptorPool =
+ fGpu->resourceProvider().findOrCreateCompatibleDescriptorPool(typeCounts);
+
+ VkDescriptorSetAllocateInfo dsAllocateInfo;
+ memset(&dsAllocateInfo, 0, sizeof(VkDescriptorSetAllocateInfo));
+ dsAllocateInfo.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO;
+ dsAllocateInfo.pNext = nullptr;
+ dsAllocateInfo.descriptorPool = descriptorPool->descPool();
+ dsAllocateInfo.descriptorSetCount = 2;
+ dsAllocateInfo.pSetLayouts = dsLayout;
+
+ VkDescriptorSet descriptorSets[2];
+ GR_VK_CALL_ERRCHECK(fGpu->vkInterface(), AllocateDescriptorSets(fGpu->device(),
+ &dsAllocateInfo,
+ descriptorSets));
+
+ return new GrVkProgram(fGpu,
+ pipeline,
+ pipelineLayout,
+ dsLayout,
+ descriptorPool,
+ descriptorSets,
+ fUniformHandles,
+ fUniformHandler.fUniforms,
+ fUniformHandler.fCurrentVertexUBOOffset,
+ fUniformHandler.fCurrentFragmentUBOOffset,
+ numSamplers,
+ fGeometryProcessor,
+ fXferProcessor,
+ fFragmentProcessors);
+}
--- /dev/null
+/*
+* Copyright 2016 Google Inc.
+*
+* Use of this source code is governed by a BSD-style license that can be
+* found in the LICENSE file.
+*/
+
+#ifndef GrVkProgramBuilder_DEFINED
+#define GrVkProgramBuilder_DEFINED
+
+#include "glsl/GrGLSLProgramBuilder.h"
+
+#include "GrPipeline.h"
+#include "vk/GrVkUniformHandler.h"
+#include "vk/GrVkVaryingHandler.h"
+
+#include "shaderc/shaderc.h"
+#include "vulkan/vulkan.h"
+
+class GrVkGpu;
+class GrVkRenderPass;
+class GrVkProgram;
+
+class GrVkProgramBuilder : public GrGLSLProgramBuilder {
+public:
+ /** Generates a shader program.
+ *
+ * The program implements what is specified in the stages given as input.
+ * After successful generation, the builder result objects are available
+ * to be used.
+ * @return the created program, or nullptr if generation failed.
+ */
+ static GrVkProgram* CreateProgram(GrVkGpu*,
+ const DrawArgs&,
+ GrPrimitiveType,
+ const GrVkRenderPass& renderPass);
+
+ const GrCaps* caps() const override;
+ const GrGLSLCaps* glslCaps() const override;
+
+ GrVkGpu* gpu() const { return fGpu; }
+
+ void finalizeFragmentOutputColor(GrGLSLShaderVar& outputColor) override;
+
+private:
+ GrVkProgramBuilder(GrVkGpu*, const DrawArgs&);
+
+ void emitSamplers(const GrProcessor&,
+ GrGLSLTextureSampler::TextureSamplerArray* outSamplers) override;
+
+ GrVkProgram* finalize(const DrawArgs& args,
+ GrPrimitiveType primitiveType,
+ const GrVkRenderPass& renderPass);
+
+ static bool CreateVkShaderModule(const GrVkGpu* gpu,
+ VkShaderStageFlagBits stage,
+ const GrGLSLShaderBuilder& builder,
+ VkShaderModule* shaderModule,
+ VkPipelineShaderStageCreateInfo* stageInfo);
+
+ GrGLSLUniformHandler* uniformHandler() override { return &fUniformHandler; }
+ const GrGLSLUniformHandler* uniformHandler() const override { return &fUniformHandler; }
+ GrGLSLVaryingHandler* varyingHandler() override { return &fVaryingHandler; }
+
+ GrVkGpu* fGpu;
+ GrVkVaryingHandler fVaryingHandler;
+ GrVkUniformHandler fUniformHandler;
+
+ SkTArray<UniformHandle> fSamplerUniforms;
+
+ typedef GrGLSLProgramBuilder INHERITED;
+};
+
+#endif
\ No newline at end of file
--- /dev/null
+/*
+* Copyright 2016 Google Inc.
+*
+* Use of this source code is governed by a BSD-style license that can be
+* found in the LICENSE file.
+*/
+
+#include "GrVkProgramDataManager.h"
+
+#include "GrVkGpu.h"
+#include "GrVkUniformBuffer.h"
+
+GrVkProgramDataManager::GrVkProgramDataManager(const UniformInfoArray& uniforms,
+ uint32_t vertexUniformSize,
+ uint32_t fragmentUniformSize)
+ : fVertexUniformSize(vertexUniformSize)
+ , fFragmentUniformSize(fragmentUniformSize) {
+ fVertexUniformData.reset(vertexUniformSize);
+ fFragmentUniformData.reset(fragmentUniformSize);
+ int count = uniforms.count();
+ fUniforms.push_back_n(count);
+ // We must add uniforms in the same order as the UniformInfoArray so that UniformHandles already
+ // owned by other objects will still match up here.
+ for (int i = 0; i < count; i++) {
+ Uniform& uniform = fUniforms[i];
+ const GrVkUniformHandler::UniformInfo uniformInfo = uniforms[i];
+ SkASSERT(GrGLSLShaderVar::kNonArray == uniformInfo.fVariable.getArrayCount() ||
+ uniformInfo.fVariable.getArrayCount() > 0);
+ SkDEBUGCODE(
+ uniform.fArrayCount = uniformInfo.fVariable.getArrayCount();
+ uniform.fType = uniformInfo.fVariable.getType();
+ );
+ uniform.fBinding = uniformInfo.fBinding;
+ uniform.fOffset = uniformInfo.fUBOffset;
+ SkDEBUGCODE(
+ uniform.fSetNumber = uniformInfo.fSetNumber;
+ );
+ }
+}
+
+void GrVkProgramDataManager::set1f(UniformHandle u, float v0) const {
+ const Uniform& uni = fUniforms[u.toIndex()];
+ SkASSERT(uni.fType == kFloat_GrSLType);
+ SkASSERT(GrGLSLShaderVar::kNonArray == uni.fArrayCount);
+ SkASSERT(GrVkUniformHandler::kUniformBufferDescSet == uni.fSetNumber);
+ void* buffer;
+ if (GrVkUniformHandler::kVertexBinding == uni.fBinding) {
+ buffer = fVertexUniformData.get();
+ } else {
+ SkASSERT(GrVkUniformHandler::kFragBinding == uni.fBinding);
+ buffer = fFragmentUniformData.get();
+ }
+ buffer = static_cast<char*>(buffer) + uni.fOffset;
+ SkASSERT(sizeof(float) == 4);
+ memcpy(buffer, &v0, sizeof(float));
+}
+
+void GrVkProgramDataManager::set1fv(UniformHandle u,
+ int arrayCount,
+ const float v[]) const {
+ const Uniform& uni = fUniforms[u.toIndex()];
+ SkASSERT(uni.fType == kFloat_GrSLType);
+ SkASSERT(arrayCount > 0);
+ SkASSERT(arrayCount <= uni.fArrayCount ||
+ (1 == arrayCount && GrGLSLShaderVar::kNonArray == uni.fArrayCount));
+ SkASSERT(GrVkUniformHandler::kUniformBufferDescSet == uni.fSetNumber);
+
+ void* buffer;
+ if (GrVkUniformHandler::kVertexBinding == uni.fBinding) {
+ buffer = fVertexUniformData.get();
+ } else {
+ SkASSERT(GrVkUniformHandler::kFragBinding == uni.fBinding);
+ buffer = fFragmentUniformData.get();
+ }
+ buffer = static_cast<char*>(buffer) + uni.fOffset;
+ SkASSERT(sizeof(float) == 4);
+ memcpy(buffer, v, arrayCount * sizeof(float));
+}
+
+void GrVkProgramDataManager::set2f(UniformHandle u, float v0, float v1) const {
+ const Uniform& uni = fUniforms[u.toIndex()];
+ SkASSERT(uni.fType == kVec2f_GrSLType);
+ SkASSERT(GrGLSLShaderVar::kNonArray == uni.fArrayCount);
+ SkASSERT(GrVkUniformHandler::kUniformBufferDescSet == uni.fSetNumber);
+ void* buffer;
+ if (GrVkUniformHandler::kVertexBinding == uni.fBinding) {
+ buffer = fVertexUniformData.get();
+ } else {
+ SkASSERT(GrVkUniformHandler::kFragBinding == uni.fBinding);
+ buffer = fFragmentUniformData.get();
+ }
+ buffer = static_cast<char*>(buffer) + uni.fOffset;
+ SkASSERT(sizeof(float) == 4);
+ float v[2] = { v0, v1 };
+ memcpy(buffer, v, 2 * sizeof(float));
+}
+
+void GrVkProgramDataManager::set2fv(UniformHandle u,
+ int arrayCount,
+ const float v[]) const {
+ const Uniform& uni = fUniforms[u.toIndex()];
+ SkASSERT(uni.fType == kVec2f_GrSLType);
+ SkASSERT(arrayCount > 0);
+ SkASSERT(arrayCount <= uni.fArrayCount ||
+ (1 == arrayCount && GrGLSLShaderVar::kNonArray == uni.fArrayCount));
+ SkASSERT(GrVkUniformHandler::kUniformBufferDescSet == uni.fSetNumber);
+
+ void* buffer;
+ if (GrVkUniformHandler::kVertexBinding == uni.fBinding) {
+ buffer = fVertexUniformData.get();
+ } else {
+ SkASSERT(GrVkUniformHandler::kFragBinding == uni.fBinding);
+ buffer = fFragmentUniformData.get();
+ }
+ buffer = static_cast<char*>(buffer) + uni.fOffset;
+ SkASSERT(sizeof(float) == 4);
+ memcpy(buffer, v, arrayCount * 2 * sizeof(float));
+}
+
+void GrVkProgramDataManager::set3f(UniformHandle u, float v0, float v1, float v2) const {
+ const Uniform& uni = fUniforms[u.toIndex()];
+ SkASSERT(uni.fType == kVec3f_GrSLType);
+ SkASSERT(GrGLSLShaderVar::kNonArray == uni.fArrayCount);
+ SkASSERT(GrVkUniformHandler::kUniformBufferDescSet == uni.fSetNumber);
+ void* buffer;
+ if (GrVkUniformHandler::kVertexBinding == uni.fBinding) {
+ buffer = fVertexUniformData.get();
+ } else {
+ SkASSERT(GrVkUniformHandler::kFragBinding == uni.fBinding);
+ buffer = fFragmentUniformData.get();
+ }
+ buffer = static_cast<char*>(buffer) + uni.fOffset;
+ SkASSERT(sizeof(float) == 4);
+ float v[3] = { v0, v1, v2 };
+ memcpy(buffer, v, 3 * sizeof(float));
+}
+
+void GrVkProgramDataManager::set3fv(UniformHandle u,
+ int arrayCount,
+ const float v[]) const {
+ const Uniform& uni = fUniforms[u.toIndex()];
+ SkASSERT(uni.fType == kVec3f_GrSLType);
+ SkASSERT(arrayCount > 0);
+ SkASSERT(arrayCount <= uni.fArrayCount ||
+ (1 == arrayCount && GrGLSLShaderVar::kNonArray == uni.fArrayCount));
+ SkASSERT(GrVkUniformHandler::kUniformBufferDescSet == uni.fSetNumber);
+
+ void* buffer;
+ if (GrVkUniformHandler::kVertexBinding == uni.fBinding) {
+ buffer = fVertexUniformData.get();
+ } else {
+ SkASSERT(GrVkUniformHandler::kFragBinding == uni.fBinding);
+ buffer = fFragmentUniformData.get();
+ }
+ buffer = static_cast<char*>(buffer) + uni.fOffset;
+ SkASSERT(sizeof(float) == 4);
+ memcpy(buffer, v, arrayCount * 3 * sizeof(float));
+}
+
+void GrVkProgramDataManager::set4f(UniformHandle u, float v0, float v1, float v2, float v3) const {
+ const Uniform& uni = fUniforms[u.toIndex()];
+ SkASSERT(uni.fType == kVec4f_GrSLType);
+ SkASSERT(GrGLSLShaderVar::kNonArray == uni.fArrayCount);
+ SkASSERT(GrVkUniformHandler::kUniformBufferDescSet == uni.fSetNumber);
+ void* buffer;
+ if (GrVkUniformHandler::kVertexBinding == uni.fBinding) {
+ buffer = fVertexUniformData.get();
+ } else {
+ SkASSERT(GrVkUniformHandler::kFragBinding == uni.fBinding);
+ buffer = fFragmentUniformData.get();
+ }
+ buffer = static_cast<char*>(buffer) + uni.fOffset;
+ SkASSERT(sizeof(float) == 4);
+ float v[4] = { v0, v1, v2, v3 };
+ memcpy(buffer, v, 4 * sizeof(float));
+}
+
+void GrVkProgramDataManager::set4fv(UniformHandle u,
+ int arrayCount,
+ const float v[]) const {
+ const Uniform& uni = fUniforms[u.toIndex()];
+ SkASSERT(uni.fType == kVec4f_GrSLType);
+ SkASSERT(arrayCount > 0);
+ SkASSERT(arrayCount <= uni.fArrayCount ||
+ (1 == arrayCount && GrGLSLShaderVar::kNonArray == uni.fArrayCount));
+ SkASSERT(GrVkUniformHandler::kUniformBufferDescSet == uni.fSetNumber);
+
+ void* buffer;
+ if (GrVkUniformHandler::kVertexBinding == uni.fBinding) {
+ buffer = fVertexUniformData.get();
+ } else {
+ SkASSERT(GrVkUniformHandler::kFragBinding == uni.fBinding);
+ buffer = fFragmentUniformData.get();
+ }
+ buffer = static_cast<char*>(buffer) + uni.fOffset;
+ SkASSERT(sizeof(float) == 4);
+ memcpy(buffer, v, arrayCount * 4 * sizeof(float));
+}
+
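+// Note on the matrix setters below: each mat3 column is stored with vec4 alignment in the
+// uniform buffer (matching std140 layout), so three floats are copied per column and the write
+// pointer then advances by four floats.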
+void GrVkProgramDataManager::setMatrix3f(UniformHandle u, const float matrix[]) const {
+ const Uniform& uni = fUniforms[u.toIndex()];
+ SkASSERT(uni.fType == kMat33f_GrSLType);
+ SkASSERT(GrGLSLShaderVar::kNonArray == uni.fArrayCount);
+ SkASSERT(GrVkUniformHandler::kUniformBufferDescSet == uni.fSetNumber);
+ void* buffer;
+ if (GrVkUniformHandler::kVertexBinding == uni.fBinding) {
+ buffer = fVertexUniformData.get();
+ } else {
+ SkASSERT(GrVkUniformHandler::kFragBinding == uni.fBinding);
+ buffer = fFragmentUniformData.get();
+ }
+
+ SkASSERT(sizeof(float) == 4);
+ buffer = static_cast<char*>(buffer) + uni.fOffset;
+ memcpy(buffer, &matrix[0], 3 * sizeof(float));
+ buffer = static_cast<char*>(buffer) + 4 * sizeof(float);
+ memcpy(buffer, &matrix[3], 3 * sizeof(float));
+ buffer = static_cast<char*>(buffer) + 4 * sizeof(float);
+ memcpy(buffer, &matrix[6], 3 * sizeof(float));
+}
+
+void GrVkProgramDataManager::setMatrix3fv(UniformHandle u,
+ int arrayCount,
+ const float matrices[]) const {
+ const Uniform& uni = fUniforms[u.toIndex()];
+ SkASSERT(uni.fType == kMat33f_GrSLType);
+ SkASSERT(arrayCount > 0);
+ SkASSERT(arrayCount <= uni.fArrayCount ||
+ (1 == arrayCount && GrGLSLShaderVar::kNonArray == uni.fArrayCount));
+ SkASSERT(GrVkUniformHandler::kUniformBufferDescSet == uni.fSetNumber);
+
+ void* buffer;
+ if (GrVkUniformHandler::kVertexBinding == uni.fBinding) {
+ buffer = fVertexUniformData.get();
+ } else {
+ SkASSERT(GrVkUniformHandler::kFragBinding == uni.fBinding);
+ buffer = fFragmentUniformData.get();
+ }
+ SkASSERT(sizeof(float) == 4);
+ buffer = static_cast<char*>(buffer) + uni.fOffset;
+ for (int i = 0; i < arrayCount; ++i) {
+ const float* matrix = &matrices[9 * i];
+ memcpy(buffer, &matrix[0], 3 * sizeof(float));
+ buffer = static_cast<char*>(buffer) + 4 * sizeof(float);
+ memcpy(buffer, &matrix[3], 3 * sizeof(float));
+ buffer = static_cast<char*>(buffer) + 4 * sizeof(float);
+ memcpy(buffer, &matrix[6], 3 * sizeof(float));
+ buffer = static_cast<char*>(buffer) + 4 * sizeof(float);
+ }
+}
+
+void GrVkProgramDataManager::setMatrix4f(UniformHandle u, const float matrix[]) const {
+ const Uniform& uni = fUniforms[u.toIndex()];
+ SkASSERT(uni.fType == kMat44f_GrSLType);
+ SkASSERT(GrGLSLShaderVar::kNonArray == uni.fArrayCount);
+ SkASSERT(GrVkUniformHandler::kUniformBufferDescSet == uni.fSetNumber);
+ void* buffer;
+ if (GrVkUniformHandler::kVertexBinding == uni.fBinding) {
+ buffer = fVertexUniformData.get();
+ } else {
+ SkASSERT(GrVkUniformHandler::kFragBinding == uni.fBinding);
+ buffer = fFragmentUniformData.get();
+ }
+ buffer = static_cast<char*>(buffer) + uni.fOffset;
+ SkASSERT(sizeof(float) == 4);
+ memcpy(buffer, matrix, 16 * sizeof(float));
+}
+
+void GrVkProgramDataManager::setMatrix4fv(UniformHandle u,
+ int arrayCount,
+ const float matrices[]) const {
+ const Uniform& uni = fUniforms[u.toIndex()];
+ SkASSERT(uni.fType == kMat44f_GrSLType);
+ SkASSERT(arrayCount > 0);
+ SkASSERT(arrayCount <= uni.fArrayCount ||
+ (1 == arrayCount && GrGLSLShaderVar::kNonArray == uni.fArrayCount));
+ SkASSERT(GrVkUniformHandler::kUniformBufferDescSet == uni.fSetNumber);
+
+ void* buffer;
+ if (GrVkUniformHandler::kVertexBinding == uni.fBinding) {
+ buffer = fVertexUniformData.get();
+ } else {
+ SkASSERT(GrVkUniformHandler::kFragBinding == uni.fBinding);
+ buffer = fFragmentUniformData.get();
+ }
+ buffer = static_cast<char*>(buffer) + uni.fOffset;
+ SkASSERT(sizeof(float) == 4);
+ memcpy(buffer, matrices, arrayCount * 16 * sizeof(float));
+}
+
+void GrVkProgramDataManager::uploadUniformBuffers(const GrVkGpu* gpu,
+ GrVkUniformBuffer* vertexBuffer,
+ GrVkUniformBuffer* fragmentBuffer) const {
+ if (vertexBuffer) {
+ vertexBuffer->addMemoryBarrier(gpu,
+ VK_ACCESS_UNIFORM_READ_BIT,
+ VK_ACCESS_HOST_WRITE_BIT,
+ VK_PIPELINE_STAGE_VERTEX_SHADER_BIT,
+ VK_PIPELINE_STAGE_HOST_BIT,
+ false);
+ SkAssertResult(vertexBuffer->updateData(gpu, fVertexUniformData.get(), fVertexUniformSize));
+ }
+
+ if (fragmentBuffer) {
+ fragmentBuffer->addMemoryBarrier(gpu,
+ VK_ACCESS_UNIFORM_READ_BIT,
+ VK_ACCESS_HOST_WRITE_BIT,
+ VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT,
+ VK_PIPELINE_STAGE_HOST_BIT,
+ false);
+ SkAssertResult(fragmentBuffer->updateData(gpu, fFragmentUniformData.get(),
+ fFragmentUniformSize));
+ }
+}
--- /dev/null
+/*
+* Copyright 2016 Google Inc.
+*
+* Use of this source code is governed by a BSD-style license that can be
+* found in the LICENSE file.
+*/
+
+#ifndef GrVkProgramDataManager_DEFINED
+#define GrVkProgramDataManager_DEFINED
+
+#include "glsl/GrGLSLProgramDataManager.h"
+
+#include "vk/GrVkUniformHandler.h"
+
+class GrVkGpu;
+class GrVkUniformBuffer;
+
+class GrVkProgramDataManager : public GrGLSLProgramDataManager {
+public:
+ typedef GrVkUniformHandler::UniformInfoArray UniformInfoArray;
+
+ GrVkProgramDataManager(const UniformInfoArray&,
+ uint32_t vertexUniformSize,
+ uint32_t fragmentUniformSize);
+
+ void set1f(UniformHandle, float v0) const override;
+ void set1fv(UniformHandle, int arrayCount, const float v[]) const override;
+ void set2f(UniformHandle, float, float) const override;
+ void set2fv(UniformHandle, int arrayCount, const float v[]) const override;
+ void set3f(UniformHandle, float, float, float) const override;
+ void set3fv(UniformHandle, int arrayCount, const float v[]) const override;
+ void set4f(UniformHandle, float, float, float, float) const override;
+ void set4fv(UniformHandle, int arrayCount, const float v[]) const override;
+ // matrices are column-major, the first two upload a single matrix, the latter two upload
+ // arrayCount matrices into a uniform array.
+ void setMatrix3f(UniformHandle, const float matrix[]) const override;
+ void setMatrix4f(UniformHandle, const float matrix[]) const override;
+ void setMatrix3fv(UniformHandle, int arrayCount, const float matrices[]) const override;
+ void setMatrix4fv(UniformHandle, int arrayCount, const float matrices[]) const override;
+
+ // for nvpr only
+ void setPathFragmentInputTransform(VaryingHandle u, int components,
+ const SkMatrix& matrix) const override {
+ SkFAIL("Only supported in NVPR, which is not in vulkan");
+ }
+
+ void uploadUniformBuffers(const GrVkGpu* gpu,
+ GrVkUniformBuffer* vertexBuffer,
+ GrVkUniformBuffer* fragmentBuffer) const;
+private:
+ struct Uniform {
+ uint32_t fBinding;
+ uint32_t fOffset;
+ SkDEBUGCODE(
+ GrSLType fType;
+ int fArrayCount;
+ uint32_t fSetNumber;
+ );
+ };
+
+ uint32_t fVertexUniformSize;
+ uint32_t fFragmentUniformSize;
+
+ SkTArray<Uniform, true> fUniforms;
+
+ mutable SkAutoMalloc fVertexUniformData;
+ mutable SkAutoMalloc fFragmentUniformData;
+};
+
+#endif
\ No newline at end of file
--- /dev/null
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#include "GrVkProgramDesc.h"
+
+#include "GrProcessor.h"
+#include "GrPipeline.h"
+#include "GrVkGpu.h"
+#include "GrVkUtil.h"
+#include "SkChecksum.h"
+#include "glsl/GrGLSLFragmentProcessor.h"
+#include "glsl/GrGLSLFragmentShaderBuilder.h"
+#include "glsl/GrGLSLCaps.h"
+
+#include "shaderc/shaderc.h"
+
+static void add_texture_key(GrProcessorKeyBuilder* b, const GrProcessor& proc,
+ const GrGLSLCaps& caps) {
+ int numTextures = proc.numTextures();
+ // Need one uint16_t per texture (for the swizzle key); two textures pack into each 32-bit word.
+ int word32Count = (numTextures + 1) / 2;
+ if (0 == word32Count) {
+ return;
+ }
+ uint16_t* k16 = SkTCast<uint16_t*>(b->add32n(word32Count));
+ for (int i = 0; i < numTextures; ++i) {
+ const GrTextureAccess& access = proc.textureAccess(i);
+ GrTexture* texture = access.getTexture();
+ k16[i] = SkToU16(caps.configTextureSwizzle(texture->config()).asKey());
+ }
+ // zero the last 16 bits if the number of textures is odd.
+ if (numTextures & 0x1) {
+ k16[numTextures] = 0;
+ }
+}
+
+/**
+* A function which emits a meta key into the key builder. This is required because shader code may
+* be dependent on properties of the effect that the effect itself doesn't use
+* in its key (e.g. the pixel format of textures used). So we create a meta-key for
+* every effect using this function. It is also responsible for inserting the effect's class ID
+* which must be different for every GrProcessor subclass. It can fail if an effect uses too many
+* transforms, etc, for the space allotted in the meta-key. NOTE, both FPs and GPs share this
+* function because it is hairy, though FPs do not have attribs, and GPs do not have transforms
+*/
+static bool gen_meta_key(const GrProcessor& proc,
+ const GrGLSLCaps& glslCaps,
+ uint32_t transformKey,
+ GrProcessorKeyBuilder* b) {
+ size_t processorKeySize = b->size();
+ uint32_t classID = proc.classID();
+
+ // Currently we allow 16 bits for the class id and the overall processor key size.
+ static const uint32_t kMetaKeyInvalidMask = ~((uint32_t)SK_MaxU16);
+ if ((processorKeySize | classID) & kMetaKeyInvalidMask) {
+ return false;
+ }
+
+ add_texture_key(b, proc, glslCaps);
+
+ uint32_t* key = b->add32n(2);
+ key[0] = (classID << 16) | SkToU32(processorKeySize);
+ key[1] = transformKey;
+ return true;
+}
+
+static bool gen_frag_proc_and_meta_keys(const GrPrimitiveProcessor& primProc,
+ const GrFragmentProcessor& fp,
+ const GrGLSLCaps& glslCaps,
+ GrProcessorKeyBuilder* b) {
+ for (int i = 0; i < fp.numChildProcessors(); ++i) {
+ if (!gen_frag_proc_and_meta_keys(primProc, fp.childProcessor(i), glslCaps, b)) {
+ return false;
+ }
+ }
+
+ fp.getGLSLProcessorKey(glslCaps, b);
+
+ return gen_meta_key(fp, glslCaps, primProc.getTransformKey(fp.coordTransforms(),
+ fp.numTransformsExclChildren()), b);
+}
+
+bool GrVkProgramDescBuilder::Build(GrProgramDesc* desc,
+ const GrPrimitiveProcessor& primProc,
+ const GrPipeline& pipeline,
+ const GrGLSLCaps& glslCaps) {
+ // The descriptor is used as a cache key. Thus when a field of the
+ // descriptor will not affect program generation (because of the attribute
+ // bindings in use or other descriptor field settings) it should be set
+ // to a canonical value to avoid duplicate programs with different keys.
+
+ GrVkProgramDesc* vkDesc = (GrVkProgramDesc*)desc;
+
+ GR_STATIC_ASSERT(0 == kProcessorKeysOffset % sizeof(uint32_t));
+ // Make room for everything up to the effect keys.
+ vkDesc->key().reset();
+ vkDesc->key().push_back_n(kProcessorKeysOffset);
+
+ GrProcessorKeyBuilder b(&vkDesc->key());
+
+ primProc.getGLSLProcessorKey(glslCaps, &b);
+ if (!gen_meta_key(primProc, glslCaps, 0, &b)) {
+ vkDesc->key().reset();
+ return false;
+ }
+
+ for (int i = 0; i < pipeline.numFragmentProcessors(); ++i) {
+ const GrFragmentProcessor& fp = pipeline.getFragmentProcessor(i);
+ if (!gen_frag_proc_and_meta_keys(primProc, fp, glslCaps, &b)) {
+ vkDesc->key().reset();
+ return false;
+ }
+ }
+
+ const GrXferProcessor& xp = pipeline.getXferProcessor();
+ xp.getGLSLProcessorKey(glslCaps, &b);
+ if (!gen_meta_key(xp, glslCaps, 0, &b)) {
+ vkDesc->key().reset();
+ return false;
+ }
+
+ // --------DO NOT MOVE HEADER ABOVE THIS LINE--------------------------------------------------
+ // Because header is a pointer into the dynamic array, we can't push any new data into the key
+ // below here.
+ KeyHeader* header = vkDesc->atOffset<KeyHeader, kHeaderOffset>();
+
+ // make sure any padding in the header is zeroed.
+ memset(header, 0, kHeaderSize);
+
+ if (pipeline.readsFragPosition()) {
+ header->fFragPosKey =
+ GrGLSLFragmentShaderBuilder::KeyForFragmentPosition(pipeline.getRenderTarget());
+ } else {
+ header->fFragPosKey = 0;
+ }
+
+ header->fOutputSwizzle =
+ glslCaps.configOutputSwizzle(pipeline.getRenderTarget()->config()).asKey();
+
+ if (pipeline.ignoresCoverage()) {
+ header->fIgnoresCoverage = 1;
+ } else {
+ header->fIgnoresCoverage = 0;
+ }
+
+ header->fSnapVerticesToPixelCenters = pipeline.snapVerticesToPixelCenters();
+ header->fColorEffectCnt = pipeline.numColorFragmentProcessors();
+ header->fCoverageEffectCnt = pipeline.numCoverageFragmentProcessors();
+ vkDesc->finalize();
+ return true;
+}
--- /dev/null
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrVkProgramDesc_DEFINED
+#define GrVkProgramDesc_DEFINED
+
+#include "GrColor.h"
+#include "GrProgramDesc.h"
+#include "GrGpu.h"
+#include "GrTypesPriv.h"
+
+#include "shaderc/shaderc.h"
+#include "vulkan/vulkan.h"
+
+class GrVkGpu;
+class GrVkProgramDescBuilder;
+
+class GrVkProgramDesc : public GrProgramDesc {
+private:
+ friend class GrVkProgramDescBuilder;
+};
+
+/**
+ * This class can be used to build a GrProgramDesc. It also provides helpers for accessing
+ * GL specific info in the header.
+ */
+class GrVkProgramDescBuilder {
+public:
+ typedef GrProgramDesc::KeyHeader KeyHeader;
+ // The key, stored in fKey, is composed of four parts (the first two are defined in the key itself):
+ // 1. uint32_t for total key length.
+ // 2. uint32_t for a checksum.
+ // 3. Header struct defined above.
+ // 4. Backend-specific information including per-processor keys and their key lengths.
+ // Each processor's key is a variable length array of uint32_t.
+ enum {
+ // Part 3.
+ kHeaderOffset = GrVkProgramDesc::kHeaderOffset,
+ kHeaderSize = SkAlign4(sizeof(KeyHeader)),
+ // Part 4.
+ // This is the offset into the backend-specific part of the key, which includes
+ // per-processor keys.
+ kProcessorKeysOffset = kHeaderOffset + kHeaderSize,
+ };
+
+ /**
+ * Builds a Vulkan-specific program descriptor
+ *
+ * @param GrProgramDesc The built and finalized descriptor
+ * @param GrPrimitiveProcessor The geometry processor
+ * @param GrPipeline The optimized drawstate. The descriptor will represent a program
+ * which this optstate can use to draw with. The optstate contains
+ * general draw information, as well as the specific color, geometry,
+ * and coverage stages which will be used to generate the program for
+ * this optstate.
+ * @param GrGLSLCaps The shader caps, used to output processor-specific parts of the
+ * descriptor.
+ **/
+ static bool Build(GrProgramDesc*,
+ const GrPrimitiveProcessor&,
+ const GrPipeline&,
+ const GrGLSLCaps&);
+};
+
+#endif
--- /dev/null
+/*
+* Copyright 2015 Google Inc.
+*
+* Use of this source code is governed by a BSD-style license that can be
+* found in the LICENSE file.
+*/
+
+#include "GrVkRenderPass.h"
+
+#include "GrVkFramebuffer.h"
+#include "GrVkGpu.h"
+#include "GrVkRenderTarget.h"
+#include "GrVkUtil.h"
+
+static void setup_simple_vk_attachment_description(VkAttachmentDescription* attachment,
+ VkFormat format,
+ uint32_t samples,
+ VkImageLayout layout) {
+ attachment->flags = 0;
+ attachment->format = format;
+ SkAssertResult(GrSampleCountToVkSampleCount(samples, &attachment->samples));
+ attachment->loadOp = VK_ATTACHMENT_LOAD_OP_LOAD;
+ attachment->storeOp = VK_ATTACHMENT_STORE_OP_STORE;
+ attachment->stencilLoadOp = VK_ATTACHMENT_LOAD_OP_LOAD;
+ attachment->stencilStoreOp = VK_ATTACHMENT_STORE_OP_STORE;
+ attachment->initialLayout = layout;
+ attachment->finalLayout = layout;
+}
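+// Note: the simple attachment description above uses LOAD/STORE for both the color/stencil load
+// and store ops, so beginning such a render pass preserves existing attachment contents rather
+// than clearing them.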
+
+void GrVkRenderPass::initSimple(const GrVkGpu* gpu, const GrVkRenderTarget& target) {
+ // Get attachment information from render target. This includes which attachments the render
+ // target has (color, resolve, stencil) and the attachments format and sample count.
+ target.getAttachmentsDescriptor(&fAttachmentsDescriptor, &fAttachmentFlags);
+
+ uint32_t numAttachments = fAttachmentsDescriptor.fAttachmentCount;
+ // Attachment descriptions to be set on the render pass
+ SkTArray<VkAttachmentDescription> attachments(numAttachments);
+ attachments.reset(numAttachments);
+ memset(attachments.begin(), 0, numAttachments*sizeof(VkAttachmentDescription));
+
+ // Refs to attachments on the render pass (as described by the VkAttachmentDescriptions above),
+ // that are used by the subpass.
+ VkAttachmentReference colorRef;
+ VkAttachmentReference resolveRef;
+ VkAttachmentReference stencilRef;
+ uint32_t currentAttachment = 0;
+
+ // Go through each of the attachment types (color, resolve, stencil) and set the necessary
+ // fields on the various Vk structs.
+ VkSubpassDescription subpassDesc;
+ memset(&subpassDesc, 0, sizeof(VkSubpassDescription));
+ subpassDesc.flags = 0;
+ subpassDesc.pipelineBindPoint = VK_PIPELINE_BIND_POINT_GRAPHICS;
+ subpassDesc.inputAttachmentCount = 0;
+ subpassDesc.pInputAttachments = nullptr;
+ if (fAttachmentFlags & kColor_AttachmentFlag) {
+ // set up color attachment
+ setup_simple_vk_attachment_description(&attachments[currentAttachment],
+ fAttachmentsDescriptor.fColor.fFormat,
+ fAttachmentsDescriptor.fColor.fSamples,
+ VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL);
+ // setup subpass use of attachment
+ colorRef.attachment = currentAttachment++;
+ colorRef.layout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;
+ subpassDesc.colorAttachmentCount = 1;
+ } else {
+ // I don't think there should ever be a time where we don't have a color attachment
+ SkASSERT(false);
+ colorRef.attachment = VK_ATTACHMENT_UNUSED;
+ colorRef.layout = VK_IMAGE_LAYOUT_UNDEFINED;
+ subpassDesc.colorAttachmentCount = 0;
+ }
+ subpassDesc.pColorAttachments = &colorRef;
+
+ if (fAttachmentFlags & kResolve_AttachmentFlag) {
+ // set up resolve attachment
+ setup_simple_vk_attachment_description(&attachments[currentAttachment],
+ fAttachmentsDescriptor.fResolve.fFormat,
+ fAttachmentsDescriptor.fResolve.fSamples,
+ VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL);
+ // setup subpass use of attachment
+ resolveRef.attachment = currentAttachment++;
+ // I'm really not sure what the layout should be for the resolve textures.
+ resolveRef.layout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;
+ subpassDesc.pResolveAttachments = &resolveRef;
+ } else {
+ subpassDesc.pResolveAttachments = nullptr;
+ }
+
+ if (fAttachmentFlags & kStencil_AttachmentFlag) {
+ // set up stencil attachment
+ setup_simple_vk_attachment_description(&attachments[currentAttachment],
+ fAttachmentsDescriptor.fStencil.fFormat,
+ fAttachmentsDescriptor.fStencil.fSamples,
+ VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL);
+ // setup subpass use of attachment
+ stencilRef.attachment = currentAttachment++;
+ stencilRef.layout = VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL;
+ } else {
+ stencilRef.attachment = VK_ATTACHMENT_UNUSED;
+ stencilRef.layout = VK_IMAGE_LAYOUT_UNDEFINED;
+ }
+ subpassDesc.pDepthStencilAttachment = &stencilRef;
+
+ subpassDesc.preserveAttachmentCount = 0;
+ subpassDesc.pPreserveAttachments = nullptr;
+
+ SkASSERT(numAttachments == currentAttachment);
+
+ // Create the VkRenderPass compatible with the attachment descriptions above
+ VkRenderPassCreateInfo createInfo;
+ memset(&createInfo, 0, sizeof(VkRenderPassCreateInfo));
+ createInfo.sType = VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO;
+ createInfo.pNext = nullptr;
+ createInfo.flags = 0;
+ createInfo.attachmentCount = numAttachments;
+ createInfo.pAttachments = attachments.begin();
+ createInfo.subpassCount = 1;
+ createInfo.pSubpasses = &subpassDesc;
+ createInfo.dependencyCount = 0;
+ createInfo.pDependencies = nullptr;
+
+ GR_VK_CALL_ERRCHECK(gpu->vkInterface(), CreateRenderPass(gpu->device(),
+ &createInfo,
+ nullptr,
+ &fRenderPass));
+}
+
+void GrVkRenderPass::freeGPUData(const GrVkGpu* gpu) const {
+ GR_VK_CALL(gpu->vkInterface(), DestroyRenderPass(gpu->device(), fRenderPass, nullptr));
+}
+
+// Works under the assumption that color attachment will always be the first attachment in our
+// attachment array if it exists.
+bool GrVkRenderPass::colorAttachmentIndex(uint32_t* index) const {
+ *index = 0;
+ if (fAttachmentFlags & kColor_AttachmentFlag) {
+ return true;
+ }
+ return false;
+}
+
+// Works under the assumption that resolve attachment will always be after the color attachment.
+bool GrVkRenderPass::resolveAttachmentIndex(uint32_t* index) const {
+ *index = 0;
+ if (fAttachmentFlags & kColor_AttachmentFlag) {
+ ++(*index);
+ }
+ if (fAttachmentFlags & kResolve_AttachmentFlag) {
+ return true;
+ }
+ return false;
+}
+
+// Works under the assumption that stencil attachment will always be after the color and resolve
+// attachment.
+bool GrVkRenderPass::stencilAttachmentIndex(uint32_t* index) const {
+ *index = 0;
+ if (fAttachmentFlags & kColor_AttachmentFlag) {
+ ++(*index);
+ }
+ if (fAttachmentFlags & kResolve_AttachmentFlag) {
+ ++(*index);
+ }
+ if (fAttachmentFlags & kStencil_AttachmentFlag) {
+ return true;
+ }
+ return false;
+}
+
+void GrVkRenderPass::getBeginInfo(const GrVkRenderTarget& target,
+ VkRenderPassBeginInfo* beginInfo,
+ VkSubpassContents* contents) const {
+ SkASSERT(this->isCompatible(target));
+
+ VkRect2D renderArea;
+ renderArea.offset = { 0, 0 };
+ renderArea.extent = { (uint32_t)target.width(), (uint32_t)target.height() };
+
+ memset(beginInfo, 0, sizeof(VkRenderPassBeginInfo));
+ beginInfo->sType = VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO;
+ beginInfo->pNext = nullptr;
+ beginInfo->renderPass = fRenderPass;
+ beginInfo->framebuffer = target.framebuffer()->framebuffer();
+ beginInfo->renderArea = renderArea;
+ beginInfo->clearValueCount = 0;
+ beginInfo->pClearValues = nullptr;
+
+ // Currently just assuming no secondary cmd buffers. This value will need to be updated if we
+ // have them.
+ *contents = VK_SUBPASS_CONTENTS_INLINE;
+}
+
+bool GrVkRenderPass::isCompatible(const GrVkRenderTarget& target) const {
+ AttachmentsDescriptor desc;
+ AttachmentFlags flags;
+ target.getAttachmentsDescriptor(&desc, &flags);
+
+ if (flags != fAttachmentFlags) {
+ return false;
+ }
+
+ if (fAttachmentFlags & kColor_AttachmentFlag) {
+ if (fAttachmentsDescriptor.fColor != desc.fColor) {
+ return false;
+ }
+ }
+ if (fAttachmentFlags & kResolve_AttachmentFlag) {
+ if (fAttachmentsDescriptor.fResolve != desc.fResolve) {
+ return false;
+ }
+ }
+ if (fAttachmentFlags & kStencil_AttachmentFlag) {
+ if (fAttachmentsDescriptor.fStencil != desc.fStencil) {
+ return false;
+ }
+ }
+
+ return true;
+}
--- /dev/null
+/*
+* Copyright 2015 Google Inc.
+*
+* Use of this source code is governed by a BSD-style license that can be
+* found in the LICENSE file.
+*/
+
+#ifndef GrVkRenderPass_DEFINED
+#define GrVkRenderPass_DEFINED
+
+#include "GrTypes.h"
+
+#include "GrVkResource.h"
+
+#include "vulkan/vulkan.h"
+
+class GrVkGpu;
+class GrVkRenderTarget;
+
+class GrVkRenderPass : public GrVkResource {
+public:
+ GrVkRenderPass() : INHERITED(), fRenderPass(VK_NULL_HANDLE) {}
+ void initSimple(const GrVkGpu* gpu, const GrVkRenderTarget& target);
+
+ struct AttachmentsDescriptor {
+ struct AttachmentDesc {
+ VkFormat fFormat;
+ int fSamples;
+ AttachmentDesc() : fFormat(VK_FORMAT_UNDEFINED), fSamples(0) {}
+ bool operator==(const AttachmentDesc& right) const {
+ return (fFormat == right.fFormat && fSamples == right.fSamples);
+ }
+ bool operator!=(const AttachmentDesc& right) const {
+ return !(*this == right);
+ }
+ };
+ AttachmentDesc fColor;
+ AttachmentDesc fResolve;
+ AttachmentDesc fStencil;
+ uint32_t fAttachmentCount;
+ };
+
+ enum AttachmentFlags {
+ kColor_AttachmentFlag = 0x1,
+ kResolve_AttachmentFlag = 0x2,
+ kStencil_AttachmentFlag = 0x4,
+ };
+ GR_DECL_BITFIELD_OPS_FRIENDS(AttachmentFlags);
+
+ // The following return the index of the render pass attachment array for the given attachment.
+ // If the render pass does not have the given attachment it will return false and not set the
+ // index value.
+ bool colorAttachmentIndex(uint32_t* index) const;
+ bool resolveAttachmentIndex(uint32_t* index) const;
+ bool stencilAttachmentIndex(uint32_t* index) const;
+
+ // Sets the VkRenderPassBeginInfo and VkSubpassContents needed to begin a render pass.
+ // TODO: In the future I expect this function will also take an optional render area instead of
+ // defaulting to the entire render target.
+ // TODO: Figure out if load clear values should be passed into this function or should be stored
+ // on the GrVkRenderPass at create time since we'll know at that point if we want to do a load
+ // clear.
+ void getBeginInfo(const GrVkRenderTarget& target,
+ VkRenderPassBeginInfo* beginInfo,
+ VkSubpassContents* contents) const;
+
+ // Returns whether or not the structure of a RenderTarget matches that of the VkRenderPass in
+ // this object. Specifically this compares that the number of attachments, format of
+ // attachments, and sample counts are all the same. This function is used in the creation of
+ // basic RenderPasses that can be used when creating a VkFrameBuffer object.
+ bool isCompatible(const GrVkRenderTarget& target) const;
+
+ VkRenderPass vkRenderPass() const { return fRenderPass; }
+
+private:
+ GrVkRenderPass(const GrVkRenderPass&);
+ GrVkRenderPass& operator=(const GrVkRenderPass&);
+
+ void freeGPUData(const GrVkGpu* gpu) const override;
+
+ VkRenderPass fRenderPass;
+ AttachmentFlags fAttachmentFlags;
+ AttachmentsDescriptor fAttachmentsDescriptor;
+
+ typedef GrVkResource INHERITED;
+};
+
+GR_MAKE_BITFIELD_OPS(GrVkRenderPass::AttachmentFlags);
+
+#endif
\ No newline at end of file
--- /dev/null
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "GrVkRenderTarget.h"
+
+#include "GrRenderTargetPriv.h"
+#include "GrVkCommandBuffer.h"
+#include "GrVkFramebuffer.h"
+#include "GrVkGpu.h"
+#include "GrVkImageView.h"
+#include "GrVkResourceProvider.h"
+#include "GrVkUtil.h"
+
+#define VK_CALL(GPU, X) GR_VK_CALL(GPU->vkInterface(), X)
+
+// We're virtually derived from GrSurface (via GrRenderTarget) so its
+// constructor must be explicitly called.
+GrVkRenderTarget::GrVkRenderTarget(GrVkGpu* gpu,
+ const GrSurfaceDesc& desc,
+ GrGpuResource::LifeCycle lifeCycle,
+ const GrVkImage::Resource* imageResource,
+ const GrVkImage::Resource* msaaResource,
+ const GrVkImageView* colorAttachmentView,
+ const GrVkImageView* resolveAttachmentView)
+ : GrSurface(gpu, lifeCycle, desc)
+ , GrVkImage(imageResource)
+ // for the moment we only support 1:1 color to stencil
+ , GrRenderTarget(gpu, lifeCycle, desc, kUnified_SampleConfig)
+ , fFramebuffer(nullptr)
+ , fColorAttachmentView(colorAttachmentView)
+ , fMSAAImageResource(msaaResource)
+ , fResolveAttachmentView(resolveAttachmentView)
+ , fCachedSimpleRenderPass(nullptr) {
+ SkASSERT(desc.fSampleCnt);
+ // The plus 1 is to account for the resolve texture.
+ fColorValuesPerPixel = desc.fSampleCnt + 1; // TODO: this still correct?
+ this->createFramebuffer(gpu);
+ this->registerWithCache();
+ msaaResource->ref();
+}
+
+// We're virtually derived from GrSurface (via GrRenderTarget) so its
+// constructor must be explicitly called.
+GrVkRenderTarget::GrVkRenderTarget(GrVkGpu* gpu,
+ const GrSurfaceDesc& desc,
+ GrGpuResource::LifeCycle lifeCycle,
+ const GrVkImage::Resource* imageResource,
+ const GrVkImage::Resource* msaaResource,
+ const GrVkImageView* colorAttachmentView,
+ const GrVkImageView* resolveAttachmentView,
+ Derived)
+ : GrSurface(gpu, lifeCycle, desc)
+ , GrVkImage(imageResource)
+ // for the moment we only support 1:1 color to stencil
+ , GrRenderTarget(gpu, lifeCycle, desc, kUnified_SampleConfig)
+ , fFramebuffer(nullptr)
+ , fColorAttachmentView(colorAttachmentView)
+ , fMSAAImageResource(msaaResource)
+ , fResolveAttachmentView(resolveAttachmentView)
+ , fCachedSimpleRenderPass(nullptr) {
+ SkASSERT(desc.fSampleCnt);
+ // The plus 1 is to account for the resolve texture.
+ fColorValuesPerPixel = desc.fSampleCnt + 1; // TODO: this still correct?
+ this->createFramebuffer(gpu);
+ msaaResource->ref();
+}
+
+// We're virtually derived from GrSurface (via GrRenderTarget) so its
+// constructor must be explicitly called.
+GrVkRenderTarget::GrVkRenderTarget(GrVkGpu* gpu,
+ const GrSurfaceDesc& desc,
+ GrGpuResource::LifeCycle lifeCycle,
+ const GrVkImage::Resource* imageResource,
+ const GrVkImageView* colorAttachmentView)
+ : GrSurface(gpu, lifeCycle, desc)
+ , GrVkImage(imageResource)
+ , GrRenderTarget(gpu, lifeCycle, desc, kUnified_SampleConfig)
+ , fFramebuffer(nullptr)
+ , fColorAttachmentView(colorAttachmentView)
+ , fMSAAImageResource(nullptr)
+ , fResolveAttachmentView(nullptr)
+ , fCachedSimpleRenderPass(nullptr) {
+ SkASSERT(!desc.fSampleCnt);
+ fColorValuesPerPixel = 1;
+ this->createFramebuffer(gpu);
+ this->registerWithCache();
+}
+
+// We're virtually derived from GrSurface (via GrRenderTarget) so its
+// constructor must be explicitly called.
+GrVkRenderTarget::GrVkRenderTarget(GrVkGpu* gpu,
+ const GrSurfaceDesc& desc,
+ GrGpuResource::LifeCycle lifeCycle,
+ const GrVkImage::Resource* imageResource,
+ const GrVkImageView* colorAttachmentView,
+ Derived)
+ : GrSurface(gpu, lifeCycle, desc)
+ , GrVkImage(imageResource)
+ , GrRenderTarget(gpu, lifeCycle, desc, kUnified_SampleConfig)
+ , fFramebuffer(nullptr)
+ , fColorAttachmentView(colorAttachmentView)
+ , fMSAAImageResource(nullptr)
+ , fResolveAttachmentView(nullptr)
+ , fCachedSimpleRenderPass(nullptr) {
+ SkASSERT(!desc.fSampleCnt);
+ fColorValuesPerPixel = 1;
+ this->createFramebuffer(gpu);
+}
+
+GrVkRenderTarget*
+GrVkRenderTarget::Create(GrVkGpu* gpu,
+ const GrSurfaceDesc& desc,
+ GrGpuResource::LifeCycle lifeCycle,
+ const GrVkImage::Resource* imageResource) {
+ VkFormat pixelFormat;
+ GrPixelConfigToVkFormat(desc.fConfig, &pixelFormat);
+
+ VkImage colorImage;
+
+ // create msaa surface if necessary
+ const GrVkImage::Resource* msaaResource = nullptr;
+ const GrVkImageView* resolveAttachmentView = nullptr;
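+ // When multisampled, we render into a dedicated MSAA image (it becomes the color
+ // attachment below) and resolve into the caller's image, so the resolve view is
+ // created against imageResource->fImage.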
+ if (desc.fSampleCnt) {
+ GrVkImage::ImageDesc msImageDesc;
+ msImageDesc.fImageType = VK_IMAGE_TYPE_2D;
+ msImageDesc.fFormat = pixelFormat;
+ msImageDesc.fWidth = desc.fWidth;
+ msImageDesc.fHeight = desc.fHeight;
+ msImageDesc.fLevels = 1;
+ msImageDesc.fSamples = desc.fSampleCnt;
+ msImageDesc.fImageTiling = VK_IMAGE_TILING_OPTIMAL;
+ msImageDesc.fUsageFlags = VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT;
+ msImageDesc.fMemProps = VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
+
+ msaaResource = GrVkImage::CreateResource(gpu, msImageDesc);
+
+ if (!msaaResource) {
+ return nullptr;
+ }
+
+ // Set color attachment image
+ colorImage = msaaResource->fImage;
+
+ // Create Resolve attachment view
+ resolveAttachmentView = GrVkImageView::Create(gpu, imageResource->fImage, pixelFormat,
+ GrVkImageView::kColor_Type);
+ if (!resolveAttachmentView) {
+ msaaResource->unref(gpu);
+ return nullptr;
+ }
+ } else {
+ // Set color attachment image
+ colorImage = imageResource->fImage;
+ }
+
+ // Get color attachment view
+ const GrVkImageView* colorAttachmentView = GrVkImageView::Create(gpu, colorImage, pixelFormat,
+ GrVkImageView::kColor_Type);
+ if (!colorAttachmentView) {
+ if (msaaResource) {
+ resolveAttachmentView->unref(gpu);
+ msaaResource->unref(gpu);
+ }
+ return nullptr;
+ }
+
+ GrVkRenderTarget* vkRT;
+ if (msaaResource) {
+ vkRT = new GrVkRenderTarget(gpu, desc, lifeCycle, imageResource, msaaResource,
+ colorAttachmentView, resolveAttachmentView);
+ msaaResource->unref(gpu);
+ } else {
+ vkRT = new GrVkRenderTarget(gpu, desc, lifeCycle, imageResource,
+ colorAttachmentView);
+ }
+
+ return vkRT;
+}
+
+GrVkRenderTarget*
+GrVkRenderTarget::CreateNewRenderTarget(GrVkGpu* gpu,
+ const GrSurfaceDesc& desc,
+ GrGpuResource::LifeCycle lifeCycle,
+ const GrVkImage::ImageDesc& imageDesc) {
+ SkASSERT(imageDesc.fUsageFlags & VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT);
+
+ const GrVkImage::Resource* imageResource = GrVkImage::CreateResource(gpu, imageDesc);
+ if (!imageResource) {
+ return nullptr;
+ }
+
+ GrVkRenderTarget* rt = GrVkRenderTarget::Create(gpu, desc, lifeCycle, imageResource);
+ // Create() will increment the refCount of the image resource if it succeeds
+ imageResource->unref(gpu);
+
+ return rt;
+}
+
+GrVkRenderTarget*
+GrVkRenderTarget::CreateWrappedRenderTarget(GrVkGpu* gpu,
+ const GrSurfaceDesc& desc,
+ GrGpuResource::LifeCycle lifeCycle,
+ const GrVkImage::Resource* imageResource) {
+ SkASSERT(imageResource);
+
+ // Note: we assume the caller will unref the imageResource
+ // Create() will increment the refCount, and we'll unref when we're done with it
+ return GrVkRenderTarget::Create(gpu, desc, lifeCycle, imageResource);
+}
+
+bool GrVkRenderTarget::completeStencilAttachment() {
+ this->createFramebuffer(this->getVkGpu());
+ return true;
+}
+
+void GrVkRenderTarget::createFramebuffer(GrVkGpu* gpu) {
+ if (fFramebuffer) {
+ fFramebuffer->unref(gpu);
+ }
+ if (fCachedSimpleRenderPass) {
+ fCachedSimpleRenderPass->unref(gpu);
+ }
+
+ // Vulkan requires us to create a compatible renderpass before we can create our framebuffer,
+ // so we use this to get a (cached) basic renderpass, only for creation.
+ fCachedSimpleRenderPass = gpu->resourceProvider().findOrCreateCompatibleRenderPass(*this);
+
+ // Stencil attachment view is stored in the base RT stencil attachment
+ const GrVkImageView* stencilView = this->stencilAttachmentView();
+ fFramebuffer = GrVkFramebuffer::Create(gpu, this->width(), this->height(),
+ fCachedSimpleRenderPass, fColorAttachmentView,
+ fResolveAttachmentView, stencilView);
+ SkASSERT(fFramebuffer);
+}
+
+void GrVkRenderTarget::getAttachmentsDescriptor(
+ GrVkRenderPass::AttachmentsDescriptor* desc,
+ GrVkRenderPass::AttachmentFlags* attachmentFlags) const {
+ int colorSamples = this->numColorSamples();
+ VkFormat colorFormat;
+ GrPixelConfigToVkFormat(this->config(), &colorFormat);
+ desc->fColor.fFormat = colorFormat;
+ desc->fColor.fSamples = colorSamples ? colorSamples : 1;
+ *attachmentFlags = GrVkRenderPass::kColor_AttachmentFlag;
+ uint32_t attachmentCount = 1;
+ if (colorSamples > 0) {
+ desc->fResolve.fFormat = colorFormat;
+ desc->fResolve.fSamples = 1;
+ *attachmentFlags |= GrVkRenderPass::kResolve_AttachmentFlag;
+ ++attachmentCount;
+ }
+
+ const GrStencilAttachment* stencil = this->renderTargetPriv().getStencilAttachment();
+ if (stencil) {
+ const GrVkStencilAttachment* vkStencil = static_cast<const GrVkStencilAttachment*>(stencil);
+ desc->fStencil.fFormat = vkStencil->vkFormat();
+ desc->fStencil.fSamples = vkStencil->numSamples() ? vkStencil->numSamples() : 1;
+ // Currently in Vulkan, stencil and color attachments must all have the same number of samples.
+ SkASSERT(desc->fColor.fSamples == desc->fStencil.fSamples);
+ *attachmentFlags |= GrVkRenderPass::kStencil_AttachmentFlag;
+ ++attachmentCount;
+ }
+ desc->fAttachmentCount = attachmentCount;
+}
+
+GrVkRenderTarget::~GrVkRenderTarget() {
+ // either release or abandon should have been called by the owner of this object.
+ SkASSERT(!fMSAAImageResource);
+ SkASSERT(!fResolveAttachmentView);
+ SkASSERT(!fColorAttachmentView);
+ SkASSERT(!fFramebuffer);
+ SkASSERT(!fCachedSimpleRenderPass);
+}
+
+void GrVkRenderTarget::addResources(GrVkCommandBuffer& commandBuffer) const {
+ commandBuffer.addResource(this->framebuffer());
+ commandBuffer.addResource(this->resource());
+ commandBuffer.addResource(this->colorAttachmentView());
+ if (this->msaaImageResource()) {
+ commandBuffer.addResource(this->msaaImageResource());
+ commandBuffer.addResource(this->resolveAttachmentView());
+ }
+ if (this->stencilImageResource()) {
+ commandBuffer.addResource(this->stencilImageResource());
+ commandBuffer.addResource(this->stencilAttachmentView());
+ }
+}
+
+void GrVkRenderTarget::releaseInternalObjects() {
+ GrVkGpu* gpu = this->getVkGpu();
+
+ if (fMSAAImageResource) {
+ fMSAAImageResource->unref(gpu);
+ fMSAAImageResource = nullptr;
+ }
+
+ if (fResolveAttachmentView) {
+ fResolveAttachmentView->unref(gpu);
+ fResolveAttachmentView = nullptr;
+ }
+ if (fColorAttachmentView) {
+ fColorAttachmentView->unref(gpu);
+ fColorAttachmentView = nullptr;
+ }
+ if (fFramebuffer) {
+ fFramebuffer->unref(gpu);
+ fFramebuffer = nullptr;
+ }
+ if (fCachedSimpleRenderPass) {
+ fCachedSimpleRenderPass->unref(gpu);
+ fCachedSimpleRenderPass = nullptr;
+ }
+}
+
+void GrVkRenderTarget::abandonInternalObjects() {
+ if (fMSAAImageResource) {
+ fMSAAImageResource->unrefAndAbandon();
+ fMSAAImageResource = nullptr;
+ }
+
+ if (fResolveAttachmentView) {
+ fResolveAttachmentView->unrefAndAbandon();
+ fResolveAttachmentView = nullptr;
+ }
+ if (fColorAttachmentView) {
+ fColorAttachmentView->unrefAndAbandon();
+ fColorAttachmentView = nullptr;
+ }
+ if (fFramebuffer) {
+ fFramebuffer->unrefAndAbandon();
+ fFramebuffer = nullptr;
+ }
+ if (fCachedSimpleRenderPass) {
+ fCachedSimpleRenderPass->unrefAndAbandon();
+ fCachedSimpleRenderPass = nullptr;
+ }
+}
+
+void GrVkRenderTarget::onRelease() {
+ this->releaseInternalObjects();
+ if (this->shouldFreeResources()) {
+ this->releaseImage(this->getVkGpu());
+ } else {
+ this->abandonImage();
+ }
+
+ GrRenderTarget::onRelease();
+}
+
+void GrVkRenderTarget::onAbandon() {
+ this->abandonInternalObjects();
+ this->abandonImage();
+ GrRenderTarget::onAbandon();
+}
+
+
+GrBackendObject GrVkRenderTarget::getRenderTargetHandle() const {
+ // Currently just passing back the pointer to the main Image::Resource as the handle
+ return (GrBackendObject)&fResource;
+}
+
+const GrVkImage::Resource* GrVkRenderTarget::stencilImageResource() const {
+ const GrStencilAttachment* stencil = this->renderTargetPriv().getStencilAttachment();
+ if (stencil) {
+ const GrVkStencilAttachment* vkStencil = static_cast<const GrVkStencilAttachment*>(stencil);
+ return vkStencil->imageResource();
+ }
+
+ return nullptr;
+}
+
+const GrVkImageView* GrVkRenderTarget::stencilAttachmentView() const {
+ const GrStencilAttachment* stencil = this->renderTargetPriv().getStencilAttachment();
+ if (stencil) {
+ const GrVkStencilAttachment* vkStencil = static_cast<const GrVkStencilAttachment*>(stencil);
+ return vkStencil->stencilView();
+ }
+
+ return nullptr;
+}
+
+
+GrVkGpu* GrVkRenderTarget::getVkGpu() const {
+ SkASSERT(!this->wasDestroyed());
+ return static_cast<GrVkGpu*>(this->getGpu());
+}
+
--- /dev/null
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#ifndef GrVkRenderTarget_DEFINED
+#define GrVkRenderTarget_DEFINED
+
+#include "GrVkImage.h"
+#include "GrRenderTarget.h"
+
+#include "GrVkRenderPass.h"
+
+class GrVkCommandBuffer;
+class GrVkFramebuffer;
+class GrVkGpu;
+class GrVkImageView;
+class GrVkStencilAttachment;
+
+#ifdef SK_BUILD_FOR_WIN
+// Windows gives bogus warnings about inheriting asTexture/asRenderTarget via dominance.
+#pragma warning(push)
+#pragma warning(disable: 4250)
+#endif
+
+class GrVkRenderTarget: public GrRenderTarget, public virtual GrVkImage {
+public:
+ static GrVkRenderTarget* CreateNewRenderTarget(GrVkGpu*, const GrSurfaceDesc&,
+ GrGpuResource::LifeCycle,
+ const GrVkImage::ImageDesc&);
+
+ static GrVkRenderTarget* CreateWrappedRenderTarget(GrVkGpu*, const GrSurfaceDesc&,
+ GrGpuResource::LifeCycle,
+ const GrVkImage::Resource* resource);
+
+ ~GrVkRenderTarget() override;
+
+ const GrVkFramebuffer* framebuffer() const { return fFramebuffer; }
+ const GrVkImageView* colorAttachmentView() const { return fColorAttachmentView; }
+ const GrVkImage::Resource* msaaImageResource() const { return fMSAAImageResource; }
+ const GrVkImageView* resolveAttachmentView() const { return fResolveAttachmentView; }
+ const GrVkImage::Resource* stencilImageResource() const;
+ const GrVkImageView* stencilAttachmentView() const;
+
+ const GrVkRenderPass* simpleRenderPass() const { return fCachedSimpleRenderPass; }
+
+ // override of GrRenderTarget
+ ResolveType getResolveType() const override {
+ return kCanResolve_ResolveType;
+ }
+
+ bool canAttemptStencilAttachment() const override {
+ return true;
+ }
+
+ GrBackendObject getRenderTargetHandle() const override;
+
+ // Fills in the attachments descriptor and flags; desc->fAttachmentCount gives the total.
+ void getAttachmentsDescriptor(GrVkRenderPass::AttachmentsDescriptor* desc,
+ GrVkRenderPass::AttachmentFlags* flags) const;
+
+ void addResources(GrVkCommandBuffer& commandBuffer) const;
+
+protected:
+ enum Derived { kDerived };
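+ // Constructors taking the Derived tag skip registerWithCache(); the deriving
+ // class (e.g. GrVkTextureRenderTarget) is expected to register itself exactly once.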
+
+ GrVkRenderTarget(GrVkGpu* gpu,
+ const GrSurfaceDesc& desc,
+ GrGpuResource::LifeCycle,
+ const GrVkImage::Resource* imageResource,
+ const GrVkImage::Resource* msaaImageResource,
+ const GrVkImageView* colorAttachmentView,
+ const GrVkImageView* resolveAttachmentView);
+
+ GrVkRenderTarget(GrVkGpu* gpu,
+ const GrSurfaceDesc& desc,
+ GrGpuResource::LifeCycle,
+ const GrVkImage::Resource* imageResource,
+ const GrVkImage::Resource* msaaImageResource,
+ const GrVkImageView* colorAttachmentView,
+ const GrVkImageView* resolveAttachmentView,
+ Derived);
+
+ GrVkRenderTarget(GrVkGpu* gpu,
+ const GrSurfaceDesc& desc,
+ GrGpuResource::LifeCycle,
+ const GrVkImage::Resource* imageResource,
+ const GrVkImageView* colorAttachmentView);
+
+ GrVkRenderTarget(GrVkGpu* gpu,
+ const GrSurfaceDesc& desc,
+ GrGpuResource::LifeCycle,
+ const GrVkImage::Resource* imageResource,
+ const GrVkImageView* colorAttachmentView,
+ Derived);
+
+ static GrVkRenderTarget* Create(GrVkGpu*, const GrSurfaceDesc&,
+ GrGpuResource::LifeCycle,
+ const GrVkImage::Resource* imageResource);
+
+ GrVkGpu* getVkGpu() const;
+
+ void onAbandon() override;
+ void onRelease() override;
+
+ // This accounts for the color buffer's memory, any MSAA samples, and the resolve surface.
+ size_t onGpuMemorySize() const override {
+ SkASSERT(kUnknown_GrPixelConfig != fDesc.fConfig);
+ SkASSERT(!GrPixelConfigIsCompressed(fDesc.fConfig));
+ size_t colorBytes = GrBytesPerPixel(fDesc.fConfig);
+ SkASSERT(colorBytes > 0);
+ return fColorValuesPerPixel * fDesc.fWidth * fDesc.fHeight * colorBytes;
+ }
+
+private:
+ bool completeStencilAttachment() override;
+
+ void createFramebuffer(GrVkGpu* gpu);
+
+ void releaseInternalObjects();
+ void abandonInternalObjects();
+
+ const GrVkFramebuffer* fFramebuffer;
+ const GrVkImageView* fColorAttachmentView;
+ const GrVkImage::Resource* fMSAAImageResource;
+ const GrVkImageView* fResolveAttachmentView;
+ int fColorValuesPerPixel;
+
+ // This is a cached pointer to a simple render pass. The render target should unref it
+ // once it is done with it.
+ const GrVkRenderPass* fCachedSimpleRenderPass;
+};
+
+#ifdef SK_BUILD_FOR_WIN
+#pragma warning(pop)
+#endif
+
+#endif
--- /dev/null
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrVkResource_DEFINED
+#define GrVkResource_DEFINED
+
+#include "SkAtomics.h"
+#include "SkTDynamicHash.h"
+#include "SkRandom.h"
+
+class GrVkGpu;
+
+// uncomment to enable tracing of resource refs
+//#ifdef SK_DEBUG
+//#define SK_TRACE_VK_RESOURCES
+//#endif
+
+/** \class GrVkResource
+
+ GrVkResource is the base class for Vulkan resources that may be shared by multiple
+ objects. When an existing owner wants to share a reference, it calls ref().
+ When an owner wants to release its reference, it calls unref(). When the
+ shared object's reference count goes to zero as the result of an unref()
+ call, its (virtual) destructor is called. It is an error for the
+ destructor to be called explicitly (or via the object going out of scope on
+ the stack or calling delete) if getRefCnt() > 1.
+
+ This is nearly identical to SkRefCntBase. The exceptions are that unref()
+ takes a GrVkGpu, and any derived classes must implement freeGPUData() and
+ possibly abandonSubResources().
+*/
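+
+/* A minimal usage sketch (hypothetical subclass, for illustration only):
+
+ class GrVkFence : public GrVkResource {
+ private:
+ void freeGPUData(const GrVkGpu* gpu) const override {
+ // destroy the VkFence through gpu->vkInterface()
+ }
+ VkFence fFence;
+ };
+
+ Owners share the object with ref() and release it with unref(gpu); the last
+ unref() calls freeGPUData() and then deletes the object.
+*/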
+
+class GrVkResource : SkNoncopyable {
+public:
+ // Simple refCount tracing, to ensure that everything ref'ed is unref'ed.
+#ifdef SK_TRACE_VK_RESOURCES
+ static const uint32_t& GetKey(const GrVkResource& r) { return r.fKey; }
+ static uint32_t Hash(const uint32_t& k) { return k; }
+ static SkTDynamicHash<GrVkResource, uint32_t> fTrace;
+ static SkRandom fRandom;
+#endif
+
+ /** Default construct, initializing the reference count to 1.
+ */
+ GrVkResource() : fRefCnt(1) {
+#ifdef SK_TRACE_VK_RESOURCES
+ fKey = fRandom.nextU();
+ fTrace.add(this);
+#endif
+ }
+
+ /** Destruct, asserting that the reference count is 1.
+ */
+ virtual ~GrVkResource() {
+#ifdef SK_DEBUG
+ SkASSERTF(fRefCnt == 1, "fRefCnt was %d", fRefCnt);
+ fRefCnt = 0; // illegal value, to catch us if we reuse after delete
+#endif
+ }
+
+#ifdef SK_DEBUG
+ /** Return the reference count. Use only for debugging. */
+ int32_t getRefCnt() const { return fRefCnt; }
+#endif
+
+ /** May return true if the caller is the only owner.
+ * Ensures that all previous owner's actions are complete.
+ */
+ bool unique() const {
+ if (1 == sk_atomic_load(&fRefCnt, sk_memory_order_acquire)) {
+ // The acquire barrier is only really needed if we return true. It
+ // prevents code conditioned on the result of unique() from running
+ // until previous owners are all totally done calling unref().
+ return true;
+ }
+ return false;
+ }
+
+ /** Increment the reference count.
+ Must be balanced by a call to unref() or unrefAndAbandon().
+ */
+ void ref() const {
+ SkASSERT(fRefCnt > 0);
+ (void)sk_atomic_fetch_add(&fRefCnt, +1, sk_memory_order_relaxed); // No barrier required.
+ }
+
+ /** Decrement the reference count. If the reference count is 1 before the
+ decrement, then delete the object. Note that if this is the case, then
+ the object needs to have been allocated via new, and not on the stack.
+ Any GPU data associated with this resource will be freed before it's deleted.
+ */
+ void unref(const GrVkGpu* gpu) const {
+ SkASSERT(fRefCnt > 0);
+ SkASSERT(gpu);
+ // A release here acts in place of all releases we "should" have been doing in ref().
+ if (1 == sk_atomic_fetch_add(&fRefCnt, -1, sk_memory_order_acq_rel)) {
+ // Like unique(), the acquire is only needed on success, to make sure
+ // code in internal_dispose() doesn't happen before the decrement.
+ this->internal_dispose(gpu);
+ }
+ }
+
+ /** Unref without freeing GPU data. Used only when we're abandoning the resource */
+ void unrefAndAbandon() const {
+ SkASSERT(fRefCnt > 0);
+ // A release here acts in place of all releases we "should" have been doing in ref().
+ if (1 == sk_atomic_fetch_add(&fRefCnt, -1, sk_memory_order_acq_rel)) {
+ // Like unique(), the acquire is only needed on success, to make sure
+ // code in internal_dispose() doesn't happen before the decrement.
+ this->internal_dispose();
+ }
+ }
+
+#ifdef SK_DEBUG
+ void validate() const {
+ SkASSERT(fRefCnt > 0);
+ }
+#endif
+
+private:
+ /** Must be implemented by any subclasses.
+ * Deletes any Vk data associated with this resource
+ */
+ virtual void freeGPUData(const GrVkGpu* gpu) const = 0;
+
+ /** Must be overridden by subclasses that themselves store GrVkResources.
+ * Will unrefAndAbandon those resources without deleting the underlying Vk data
+ */
+ virtual void abandonSubResources() const {}
+
+ /**
+ * Called when the ref count goes to 0. Will free Vk resources.
+ */
+ void internal_dispose(const GrVkGpu* gpu) const {
+ this->freeGPUData(gpu);
+#ifdef SK_TRACE_VK_RESOURCES
+ fTrace.remove(GetKey(*this));
+#endif
+ SkASSERT(0 == fRefCnt);
+ fRefCnt = 1;
+ delete this;
+ }
+
+ /**
+ * Internal_dispose without freeing Vk resources. Used when we've lost context.
+ */
+ void internal_dispose() const {
+ this->abandonSubResources();
+#ifdef SK_TRACE_VK_RESOURCES
+ fTrace.remove(GetKey(*this));
+#endif
+ SkASSERT(0 == fRefCnt);
+ fRefCnt = 1;
+ delete this;
+ }
+
+ mutable int32_t fRefCnt;
+#ifdef SK_TRACE_VK_RESOURCES
+ uint32_t fKey;
+#endif
+
+ typedef SkNoncopyable INHERITED;
+};
+
+
+#endif
--- /dev/null
+/*
+* Copyright 2016 Google Inc.
+*
+* Use of this source code is governed by a BSD-style license that can be
+* found in the LICENSE file.
+*/
+
+#include "GrVkResourceProvider.h"
+
+#include "GrVkCommandBuffer.h"
+#include "GrVkPipeline.h"
+#include "GrVkRenderPass.h"
+#include "GrVkUtil.h"
+
+#ifdef SK_TRACE_VK_RESOURCES
+SkTDynamicHash<GrVkResource, uint32_t> GrVkResource::fTrace;
+SkRandom GrVkResource::fRandom;
+#endif
+
+GrVkResourceProvider::GrVkResourceProvider(GrVkGpu* gpu) : fGpu(gpu) {
+}
+
+GrVkResourceProvider::~GrVkResourceProvider() {
+ SkASSERT(0 == fSimpleRenderPasses.count());
+}
+
+GrVkPipeline* GrVkResourceProvider::createPipeline(const GrPipeline& pipeline,
+ const GrPrimitiveProcessor& primProc,
+ VkPipelineShaderStageCreateInfo* shaderStageInfo,
+ int shaderStageCount,
+ GrPrimitiveType primitiveType,
+ const GrVkRenderPass& renderPass,
+ VkPipelineLayout layout) {
+
+ return GrVkPipeline::Create(fGpu, pipeline, primProc, shaderStageInfo, shaderStageCount,
+ primitiveType, renderPass, layout);
+}
+
+
+// To create framebuffers, we first need to create a simple RenderPass that is
+// only used for framebuffer creation. When we actually render we will create
+// RenderPasses as needed that are compatible with the framebuffer.
+const GrVkRenderPass*
+GrVkResourceProvider::findOrCreateCompatibleRenderPass(const GrVkRenderTarget& target) {
+ for (int i = 0; i < fSimpleRenderPasses.count(); ++i) {
+ GrVkRenderPass* renderPass = fSimpleRenderPasses[i];
+ if (renderPass->isCompatible(target)) {
+ renderPass->ref();
+ return renderPass;
+ }
+ }
+
+ GrVkRenderPass* renderPass = new GrVkRenderPass();
+ renderPass->initSimple(fGpu, target);
+ fSimpleRenderPasses.push_back(renderPass);
+ renderPass->ref();
+ return renderPass;
+}
+
+GrVkDescriptorPool* GrVkResourceProvider::findOrCreateCompatibleDescriptorPool(
+ const GrVkDescriptorPool::DescriptorTypeCounts& typeCounts) {
+ return new GrVkDescriptorPool(fGpu, typeCounts);
+}
+
+GrVkCommandBuffer* GrVkResourceProvider::createCommandBuffer() {
+ GrVkCommandBuffer* cmdBuffer = GrVkCommandBuffer::Create(fGpu, fGpu->cmdPool());
+ fActiveCommandBuffers.push_back(cmdBuffer);
+ cmdBuffer->ref();
+ return cmdBuffer;
+}
+
+void GrVkResourceProvider::checkCommandBuffers() {
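+ // Iterate in reverse: removeShuffle() swaps the last element into slot i, and
+ // walking backwards guarantees that element has already been examined.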
+ for (int i = fActiveCommandBuffers.count()-1; i >= 0; --i) {
+ if (fActiveCommandBuffers[i]->finished(fGpu)) {
+ fActiveCommandBuffers[i]->unref(fGpu);
+ fActiveCommandBuffers.removeShuffle(i);
+ }
+ }
+}
+
+void GrVkResourceProvider::destroyResources() {
+ // release our current command buffers
+ for (int i = 0; i < fActiveCommandBuffers.count(); ++i) {
+ SkASSERT(fActiveCommandBuffers[i]->finished(fGpu));
+ SkASSERT(fActiveCommandBuffers[i]->unique());
+ fActiveCommandBuffers[i]->unref(fGpu);
+ }
+ fActiveCommandBuffers.reset();
+
+ // loop over all render passes to make sure we destroy all the internal VkRenderPasses
+ for (int i = 0; i < fSimpleRenderPasses.count(); ++i) {
+ fSimpleRenderPasses[i]->unref(fGpu);
+ }
+ fSimpleRenderPasses.reset();
+
+#ifdef SK_TRACE_VK_RESOURCES
+ SkASSERT(0 == GrVkResource::fTrace.count());
+#endif
+
+}
+
+void GrVkResourceProvider::abandonResources() {
+ // release our current command buffers
+ for (int i = 0; i < fActiveCommandBuffers.count(); ++i) {
+ SkASSERT(fActiveCommandBuffers[i]->finished(fGpu));
+ fActiveCommandBuffers[i]->unrefAndAbandon();
+ }
+ fActiveCommandBuffers.reset();
+
+ for (int i = 0; i < fSimpleRenderPasses.count(); ++i) {
+ fSimpleRenderPasses[i]->unrefAndAbandon();
+ }
+ fSimpleRenderPasses.reset();
+
+#ifdef SK_TRACE_VK_RESOURCES
+ SkASSERT(0 == GrVkResource::fTrace.count());
+#endif
+}
\ No newline at end of file
--- /dev/null
+/*
+* Copyright 2016 Google Inc.
+*
+* Use of this source code is governed by a BSD-style license that can be
+* found in the LICENSE file.
+*/
+
+#ifndef GrVkResourceProvider_DEFINED
+#define GrVkResourceProvider_DEFINED
+
+#include "GrVkDescriptorPool.h"
+#include "GrVkResource.h"
+#include "GrVkUtil.h"
+#include "SkTArray.h"
+
+#include "vulkan/vulkan.h"
+
+class GrPipeline;
+class GrPrimitiveProcessor;
+class GrVkCommandBuffer;
+class GrVkGpu;
+class GrVkPipeline;
+class GrVkRenderPass;
+class GrVkRenderTarget;
+
+class GrVkResourceProvider {
+public:
+ GrVkResourceProvider(GrVkGpu* gpu);
+ ~GrVkResourceProvider();
+
+ GrVkPipeline* createPipeline(const GrPipeline& pipeline,
+ const GrPrimitiveProcessor& primProc,
+ VkPipelineShaderStageCreateInfo* shaderStageInfo,
+ int shaderStageCount,
+ GrPrimitiveType primitiveType,
+ const GrVkRenderPass& renderPass,
+ VkPipelineLayout layout);
+
+ // Finds or creates a simple render pass that matches the target, increments the refcount,
+ // and returns it.
+ const GrVkRenderPass* findOrCreateCompatibleRenderPass(const GrVkRenderTarget& target);
+
+ GrVkCommandBuffer* createCommandBuffer();
+ void checkCommandBuffers();
+
+ // Finds or creates a compatible GrVkDescriptorPool for the requested DescriptorTypeCounts.
+ // The refcount is incremented and a pointer returned.
+ // TODO: Currently this just creates a new descriptor pool without holding onto a ref itself,
+ // so we do not reuse pools. Reuse requires knowing whether another draw is currently using
+ // the GrVkDescriptorPool, the ability to reset pools, and the ability to purge pools out
+ // of our cache of GrVkDescriptorPools.
+ GrVkDescriptorPool* findOrCreateCompatibleDescriptorPool(
+ const GrVkDescriptorPool::DescriptorTypeCounts& typeCounts);
+
+ // Destroy any cached resources. To be called before destroying the VkDevice.
+ // The assumption is that all queues are idle and all command buffers are finished.
+ // For resource tracing to work properly, this should be called after unrefing all other
+ // resource usages.
+ void destroyResources();
+
+ // Abandon any cached resources. To be used when the context/VkDevice is lost.
+ // For resource tracing to work properly, this should be called after unrefing all other
+ // resource usages.
+ void abandonResources();
+
+private:
+ GrVkGpu* fGpu;
+
+ // Array of RenderPasses that only have a single color attachment, optional stencil attachment,
+ // optional resolve attachment, and only one subpass
+ SkSTArray<4, GrVkRenderPass*> fSimpleRenderPasses;
+
+ // Array of CommandBuffers that are currently in flight
+ SkSTArray<4, GrVkCommandBuffer*> fActiveCommandBuffers;
+};
+
+#endif
--- /dev/null
+/*
+* Copyright 2016 Google Inc.
+*
+* Use of this source code is governed by a BSD-style license that can be
+* found in the LICENSE file.
+*/
+
+#include "GrVkSampler.h"
+
+#include "GrTextureAccess.h"
+#include "GrVkGpu.h"
+
+static inline VkSamplerAddressMode tile_to_vk_sampler_address(SkShader::TileMode tm) {
+ static const VkSamplerAddressMode gWrapModes[] = {
+ VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE,
+ VK_SAMPLER_ADDRESS_MODE_REPEAT,
+ VK_SAMPLER_ADDRESS_MODE_MIRRORED_REPEAT
+ };
+ GR_STATIC_ASSERT(SkShader::kTileModeCount == SK_ARRAY_COUNT(gWrapModes));
+ GR_STATIC_ASSERT(0 == SkShader::kClamp_TileMode);
+ GR_STATIC_ASSERT(1 == SkShader::kRepeat_TileMode);
+ GR_STATIC_ASSERT(2 == SkShader::kMirror_TileMode);
+ return gWrapModes[tm];
+}
+
+GrVkSampler* GrVkSampler::Create(const GrVkGpu* gpu, const GrTextureAccess& textureAccess) {
+
+ // Indexed by GrTextureParams::FilterMode (assumed order: kNone, kBilerp, kMipMap);
+ // kMipMap maps to LINEAR here since true mipmapped sampling isn't wired up yet.
+ static VkFilter vkMinFilterModes[] = {
+ VK_FILTER_NEAREST,
+ VK_FILTER_LINEAR,
+ VK_FILTER_LINEAR
+ };
+ static VkFilter vkMagFilterModes[] = {
+ VK_FILTER_NEAREST,
+ VK_FILTER_LINEAR,
+ VK_FILTER_LINEAR
+ };
+
+ const GrTextureParams& params = textureAccess.getParams();
+
+ VkSamplerCreateInfo createInfo;
+ memset(&createInfo, 0, sizeof(VkSamplerCreateInfo));
+ createInfo.sType = VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO;
+ createInfo.pNext = nullptr;
+ createInfo.flags = 0;
+ createInfo.magFilter = vkMagFilterModes[params.filterMode()];
+ createInfo.minFilter = vkMinFilterModes[params.filterMode()];
+ createInfo.mipmapMode = VK_SAMPLER_MIPMAP_MODE_NEAREST;
+ createInfo.addressModeU = tile_to_vk_sampler_address(params.getTileModeX());
+ createInfo.addressModeV = tile_to_vk_sampler_address(params.getTileModeY());
+ createInfo.addressModeW = VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE; // Shouldn't matter
+ createInfo.mipLodBias = 0.0f;
+ createInfo.anisotropyEnable = VK_FALSE;
+ createInfo.maxAnisotropy = 1.0f;
+ createInfo.compareEnable = VK_FALSE;
+ createInfo.compareOp = VK_COMPARE_OP_NEVER;
+ createInfo.minLod = 0.0f;
+ createInfo.maxLod = 0.0f;
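+ // With minLod == maxLod == 0.0f, sampling is clamped to the base mip level, so a
+ // kMipMap filter currently behaves as plain bilinear.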
+ createInfo.borderColor = VK_BORDER_COLOR_FLOAT_TRANSPARENT_BLACK;
+ createInfo.unnormalizedCoordinates = VK_FALSE;
+
+ VkSampler sampler;
+ GR_VK_CALL_ERRCHECK(gpu->vkInterface(), CreateSampler(gpu->device(),
+ &createInfo,
+ nullptr,
+ &sampler));
+
+ return new GrVkSampler(sampler);
+}
+
+void GrVkSampler::freeGPUData(const GrVkGpu* gpu) const {
+ SkASSERT(fSampler);
+ GR_VK_CALL(gpu->vkInterface(), DestroySampler(gpu->device(), fSampler, nullptr));
+}
\ No newline at end of file
--- /dev/null
+/*
+* Copyright 2016 Google Inc.
+*
+* Use of this source code is governed by a BSD-style license that can be
+* found in the LICENSE file.
+*/
+
+#ifndef GrVkSampler_DEFINED
+#define GrVkSampler_DEFINED
+
+#include "GrVkResource.h"
+
+#include "vulkan/vulkan.h"
+
+class GrTextureAccess;
+class GrVkGpu;
+
+
+class GrVkSampler : public GrVkResource {
+public:
+ static GrVkSampler* Create(const GrVkGpu* gpu, const GrTextureAccess& textureAccess);
+
+ VkSampler sampler() const { return fSampler; }
+
+private:
+ GrVkSampler(VkSampler sampler) : INHERITED(), fSampler(sampler) {}
+
+ void freeGPUData(const GrVkGpu* gpu) const override;
+
+ VkSampler fSampler;
+
+ typedef GrVkResource INHERITED;
+};
+
+#endif
\ No newline at end of file
--- /dev/null
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "GrVkStencilAttachment.h"
+#include "GrVkGpu.h"
+#include "GrVkImage.h"
+#include "GrVkImageView.h"
+#include "GrVkUtil.h"
+
+#define VK_CALL(GPU, X) GR_VK_CALL(GPU->vkInterface(), X)
+
+GrVkStencilAttachment::GrVkStencilAttachment(GrVkGpu* gpu,
+ GrGpuResource::LifeCycle lifeCycle,
+ const Format& format,
+ const GrVkImage::ImageDesc& desc,
+ const GrVkImage::Resource* imageResource,
+ const GrVkImageView* stencilView)
+ : INHERITED(gpu, lifeCycle, desc.fWidth, desc.fHeight, format.fStencilBits, desc.fSamples)
+ , fFormat(format)
+ , fImageResource(imageResource)
+ , fStencilView(stencilView) {
+ this->registerWithCache();
+ imageResource->ref();
+ stencilView->ref();
+}
+
+GrVkStencilAttachment* GrVkStencilAttachment::Create(GrVkGpu* gpu,
+ GrGpuResource::LifeCycle lifeCycle,
+ int width,
+ int height,
+ int sampleCnt,
+ const Format& format) {
+ GrVkImage::ImageDesc imageDesc;
+ imageDesc.fImageType = VK_IMAGE_TYPE_2D;
+ imageDesc.fFormat = format.fInternalFormat;
+ imageDesc.fWidth = width;
+ imageDesc.fHeight = height;
+ imageDesc.fLevels = 1;
+ imageDesc.fSamples = sampleCnt;
+ imageDesc.fImageTiling = VK_IMAGE_TILING_OPTIMAL;
+ imageDesc.fUsageFlags = VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT;
+ imageDesc.fMemProps = VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
+
+ const GrVkImage::Resource* imageResource = GrVkImage::CreateResource(gpu, imageDesc);
+ if (!imageResource) {
+ return nullptr;
+ }
+
+ const GrVkImageView* imageView = GrVkImageView::Create(gpu, imageResource->fImage,
+ format.fInternalFormat,
+ GrVkImageView::kStencil_Type);
+ if (!imageView) {
+ imageResource->unref(gpu);
+ return nullptr;
+ }
+
+ GrVkStencilAttachment* stencil = new GrVkStencilAttachment(gpu, lifeCycle, format, imageDesc,
+ imageResource, imageView);
+ imageResource->unref(gpu);
+ imageView->unref(gpu);
+
+ return stencil;
+}
+
+GrVkStencilAttachment::~GrVkStencilAttachment() {
+ // should have been released or abandoned first
+ SkASSERT(!fImageResource);
+ SkASSERT(!fStencilView);
+}
+
+size_t GrVkStencilAttachment::onGpuMemorySize() const {
+ uint64_t size = this->width();
+ size *= this->height();
+ size *= fFormat.fTotalBits;
+ size *= SkTMax(1, this->numSamples());
+ return static_cast<size_t>(size / 8);
+}
+
+void GrVkStencilAttachment::onRelease() {
+ GrVkGpu* gpu = this->getVkGpu();
+
+ fImageResource->unref(gpu);
+ fImageResource = nullptr;
+
+ fStencilView->unref(gpu);
+ fStencilView = nullptr;
+ INHERITED::onRelease();
+}
+
+void GrVkStencilAttachment::onAbandon() {
+ fImageResource->unrefAndAbandon();
+ fImageResource = nullptr;
+ fStencilView->unrefAndAbandon();
+ fStencilView = nullptr;
+ INHERITED::onAbandon();
+}
+
+GrVkGpu* GrVkStencilAttachment::getVkGpu() const {
+ SkASSERT(!this->wasDestroyed());
+ return static_cast<GrVkGpu*>(this->getGpu());
+}
--- /dev/null
+/*
+* Copyright 2015 Google Inc.
+*
+* Use of this source code is governed by a BSD-style license that can be
+* found in the LICENSE file.
+*/
+
+#ifndef GrVkStencil_DEFINED
+#define GrVkStencil_DEFINED
+
+#include "GrStencilAttachment.h"
+#include "GrVkImage.h"
+#include "vulkan/vulkan.h"
+
+class GrVkImageView;
+class GrVkGpu;
+
+class GrVkStencilAttachment : public GrStencilAttachment {
+public:
+ struct Format {
+ VkFormat fInternalFormat;
+ int fStencilBits;
+ int fTotalBits;
+ bool fPacked;
+ };
+
+ static GrVkStencilAttachment* Create(GrVkGpu* gpu, GrGpuResource::LifeCycle lifeCycle,
+ int width, int height,
+ int sampleCnt, const Format& format);
+
+ ~GrVkStencilAttachment() override;
+
+ const GrVkImage::Resource* imageResource() const { return fImageResource; }
+ const GrVkImageView* stencilView() const { return fStencilView; }
+
+ VkFormat vkFormat() const { return fFormat.fInternalFormat; }
+
+protected:
+ void onRelease() override;
+ void onAbandon() override;
+
+private:
+ size_t onGpuMemorySize() const override;
+
+ GrVkStencilAttachment(GrVkGpu* gpu,
+ GrGpuResource::LifeCycle lifeCycle,
+ const Format& format,
+ const GrVkImage::ImageDesc&,
+ const GrVkImage::Resource*,
+ const GrVkImageView* stencilView);
+
+ GrVkGpu* getVkGpu() const;
+
+ Format fFormat;
+
+ const GrVkImage::Resource* fImageResource;
+ const GrVkImageView* fStencilView;
+
+ typedef GrStencilAttachment INHERITED;
+};
+
+#endif
--- /dev/null
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "GrVkTexture.h"
+#include "GrVkGpu.h"
+#include "GrVkImageView.h"
+#include "GrVkUtil.h"
+
+#define VK_CALL(GPU, X) GR_VK_CALL(GPU->vkInterface(), X)
+
+// Because this class is virtually derived from GrSurface we must explicitly call its constructor.
+GrVkTexture::GrVkTexture(GrVkGpu* gpu,
+ const GrSurfaceDesc& desc,
+ GrGpuResource::LifeCycle lifeCycle,
+ const GrVkImage::Resource* imageResource,
+ const GrVkImageView* view)
+ : GrSurface(gpu, lifeCycle, desc)
+ , GrVkImage(imageResource)
+ , INHERITED(gpu, lifeCycle, desc)
+ , fTextureView(view) {
+ this->registerWithCache();
+}
+
+// Because this class is virtually derived from GrSurface we must explicitly call its constructor.
+GrVkTexture::GrVkTexture(GrVkGpu* gpu,
+ const GrSurfaceDesc& desc,
+ GrGpuResource::LifeCycle lifeCycle,
+ const GrVkImage::Resource* imageResource,
+ const GrVkImageView* view,
+ Derived)
+ : GrSurface(gpu, lifeCycle, desc)
+ , GrVkImage(imageResource)
+ , INHERITED(gpu, lifeCycle, desc)
+ , fTextureView(view) {}
+
+
+GrVkTexture* GrVkTexture::Create(GrVkGpu* gpu,
+ const GrSurfaceDesc& desc,
+ GrGpuResource::LifeCycle lifeCycle,
+ VkFormat format,
+ const GrVkImage::Resource* imageResource) {
+ VkImage image = imageResource->fImage;
+ const GrVkImageView* imageView = GrVkImageView::Create(gpu, image, format,
+ GrVkImageView::kColor_Type);
+ if (!imageView) {
+ return nullptr;
+ }
+
+ return new GrVkTexture(gpu, desc, lifeCycle, imageResource, imageView);
+}
+
+GrVkTexture* GrVkTexture::CreateNewTexture(GrVkGpu* gpu, const GrSurfaceDesc& desc,
+ GrGpuResource::LifeCycle lifeCycle,
+ const GrVkImage::ImageDesc& imageDesc) {
+ SkASSERT(imageDesc.fUsageFlags & VK_IMAGE_USAGE_SAMPLED_BIT);
+
+ const GrVkImage::Resource* imageResource = GrVkImage::CreateResource(gpu, imageDesc);
+ if (!imageResource) {
+ return nullptr;
+ }
+
+ GrVkTexture* texture = Create(gpu, desc, lifeCycle, imageDesc.fFormat, imageResource);
+ // Create() will increment the refCount of the image resource if it succeeds
+ imageResource->unref(gpu);
+
+ return texture;
+}
+
+GrVkTexture* GrVkTexture::CreateWrappedTexture(GrVkGpu* gpu, const GrSurfaceDesc& desc,
+ GrGpuResource::LifeCycle lifeCycle,
+ VkFormat format,
+ const GrVkImage::Resource* imageResource) {
+ SkASSERT(imageResource);
+
+ // Note: we assume the caller will unref the imageResource
+ // Create() will increment the refCount, and we'll unref when we're done with it
+ return Create(gpu, desc, lifeCycle, format, imageResource);
+}
+
+GrVkTexture::~GrVkTexture() {
+ // either release or abandon should have been called by the owner of this object.
+ SkASSERT(!fTextureView);
+}
+
+void GrVkTexture::onRelease() {
+ // we create this and don't hand it off, so we should always destroy it
+ if (fTextureView) {
+ fTextureView->unref(this->getVkGpu());
+ fTextureView = nullptr;
+ }
+
+ if (this->shouldFreeResources()) {
+ this->releaseImage(this->getVkGpu());
+ } else {
+ this->abandonImage();
+ }
+
+ INHERITED::onRelease();
+}
+
+void GrVkTexture::onAbandon() {
+ if (fTextureView) {
+ fTextureView->unrefAndAbandon();
+ fTextureView = nullptr;
+ }
+
+ this->abandonImage();
+ INHERITED::onAbandon();
+}
+
+GrBackendObject GrVkTexture::getTextureHandle() const {
+ // Currently just passing back the pointer to the Resource as the handle
+ return (GrBackendObject)&fResource;
+}
+
+GrVkGpu* GrVkTexture::getVkGpu() const {
+ SkASSERT(!this->wasDestroyed());
+ return static_cast<GrVkGpu*>(this->getGpu());
+}
+
--- /dev/null
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrVkTexture_DEFINED
+#define GrVkTexture_DEFINED
+
+#include "GrGpu.h"
+#include "GrTexture.h"
+#include "GrVkImage.h"
+
+class GrVkGpu;
+class GrVkImageView;
+
+class GrVkTexture : public GrTexture, public virtual GrVkImage {
+public:
+ static GrVkTexture* CreateNewTexture(GrVkGpu*, const GrSurfaceDesc&,
+ GrGpuResource::LifeCycle,
+ const GrVkImage::ImageDesc&);
+
+
+ static GrVkTexture* CreateWrappedTexture(GrVkGpu*, const GrSurfaceDesc&,
+ GrGpuResource::LifeCycle,
+ VkFormat, const GrVkImage::Resource*);
+
+ ~GrVkTexture() override;
+
+ GrBackendObject getTextureHandle() const override;
+
+ void textureParamsModified() override {}
+
+ const GrVkImageView* textureView() const { return fTextureView; }
+
+protected:
+ enum Derived { kDerived };
+
+ GrVkTexture(GrVkGpu*, const GrSurfaceDesc&, GrGpuResource::LifeCycle,
+ const GrVkImage::Resource*, const GrVkImageView* imageView);
+
+ GrVkTexture(GrVkGpu*, const GrSurfaceDesc&, GrGpuResource::LifeCycle,
+ const GrVkImage::Resource*, const GrVkImageView* imageView, Derived);
+
+ static GrVkTexture* Create(GrVkGpu*, const GrSurfaceDesc&,
+ GrGpuResource::LifeCycle, VkFormat,
+ const GrVkImage::Resource* texImpl);
+
+ GrVkGpu* getVkGpu() const;
+
+ void onAbandon() override;
+ void onRelease() override;
+
+private:
+ const GrVkImageView* fTextureView;
+
+ typedef GrTexture INHERITED;
+};
+
+#endif
--- /dev/null
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "GrVkTextureRenderTarget.h"
+
+#include "GrRenderTargetPriv.h"
+#include "GrVkGpu.h"
+#include "GrVkImageView.h"
+#include "GrVkUtil.h"
+
+#define VK_CALL(GPU, X) GR_VK_CALL(GPU->vkInterface(), X)
+
+GrVkTextureRenderTarget*
+GrVkTextureRenderTarget::Create(GrVkGpu* gpu,
+ const GrSurfaceDesc& desc,
+ GrGpuResource::LifeCycle lifeCycle,
+ VkFormat format,
+ const GrVkImage::Resource* imageResource) {
+
+ VkImage image = imageResource->fImage;
+ // Create the texture ImageView
+ const GrVkImageView* imageView = GrVkImageView::Create(gpu, image, format,
+ GrVkImageView::kColor_Type);
+ if (!imageView) {
+ return nullptr;
+ }
+
+ VkFormat pixelFormat;
+ GrPixelConfigToVkFormat(desc.fConfig, &pixelFormat);
+
+ VkImage colorImage;
+
+ // create msaa surface if necessary
+ const GrVkImage::Resource* msaaImageResource = nullptr;
+ const GrVkImageView* resolveAttachmentView = nullptr;
+ if (desc.fSampleCnt) {
+ GrVkImage::ImageDesc msImageDesc;
+ msImageDesc.fImageType = VK_IMAGE_TYPE_2D;
+ msImageDesc.fFormat = pixelFormat;
+ msImageDesc.fWidth = desc.fWidth;
+ msImageDesc.fHeight = desc.fHeight;
+ msImageDesc.fLevels = 1;
+ msImageDesc.fSamples = desc.fSampleCnt;
+ msImageDesc.fImageTiling = VK_IMAGE_TILING_OPTIMAL;
+ msImageDesc.fUsageFlags = VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT;
+ msImageDesc.fMemProps = VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
+
+ msaaImageResource = GrVkImage::CreateResource(gpu, msImageDesc);
+
+ if (!msaaImageResource) {
+ imageView->unref(gpu);
+ return nullptr;
+ }
+
+ // Set color attachment image
+ colorImage = msaaImageResource->fImage;
+
+ // Create resolve attachment view if necessary.
+ // If the format matches, this is the same as the texture imageView.
+ if (pixelFormat == format) {
+ resolveAttachmentView = imageView;
+ resolveAttachmentView->ref();
+ } else {
+ resolveAttachmentView = GrVkImageView::Create(gpu, image, pixelFormat,
+ GrVkImageView::kColor_Type);
+ if (!resolveAttachmentView) {
+ msaaImageResource->unref(gpu);
+ imageView->unref(gpu);
+ return nullptr;
+ }
+ }
+ } else {
+ // Set color attachment image
+ colorImage = imageResource->fImage;
+ }
+
+ const GrVkImageView* colorAttachmentView;
+ // Get color attachment view.
+ // If the format matches and there's no multisampling,
+ // this is the same as the texture imageView
+ if (pixelFormat == format && !resolveAttachmentView) {
+ colorAttachmentView = imageView;
+ colorAttachmentView->ref();
+ } else {
+ colorAttachmentView = GrVkImageView::Create(gpu, colorImage, pixelFormat,
+ GrVkImageView::kColor_Type);
+ if (!colorAttachmentView) {
+ if (msaaImageResource) {
+ resolveAttachmentView->unref(gpu);
+ msaaImageResource->unref(gpu);
+ }
+ imageView->unref(gpu);
+ return nullptr;
+ }
+ }
+
+ GrVkTextureRenderTarget* texRT;
+ if (msaaImageResource) {
+ texRT = new GrVkTextureRenderTarget(gpu, desc, lifeCycle,
+ imageResource, imageView, msaaImageResource,
+ colorAttachmentView,
+ resolveAttachmentView);
+ msaaImageResource->unref(gpu);
+ } else {
+ texRT = new GrVkTextureRenderTarget(gpu, desc, lifeCycle,
+ imageResource, imageView,
+ colorAttachmentView);
+ }
+ return texRT;
+}
+
+GrVkTextureRenderTarget*
+GrVkTextureRenderTarget::CreateNewTextureRenderTarget(GrVkGpu* gpu,
+ const GrSurfaceDesc& desc,
+ GrGpuResource::LifeCycle lifeCycle,
+ const GrVkImage::ImageDesc& imageDesc) {
+ SkASSERT(imageDesc.fUsageFlags & VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT);
+ SkASSERT(imageDesc.fUsageFlags & VK_IMAGE_USAGE_SAMPLED_BIT);
+
+ const GrVkImage::Resource* imageRsrc = GrVkImage::CreateResource(gpu, imageDesc);
+
+ if (!imageRsrc) {
+ return nullptr;
+ }
+
+ GrVkTextureRenderTarget* trt = GrVkTextureRenderTarget::Create(gpu, desc, lifeCycle,
+ imageDesc.fFormat, imageRsrc);
+ // Create() will increment the refCount of the image resource if it succeeds
+ imageRsrc->unref(gpu);
+
+ return trt;
+}
+
+GrVkTextureRenderTarget*
+GrVkTextureRenderTarget::CreateWrappedTextureRenderTarget(GrVkGpu* gpu,
+ const GrSurfaceDesc& desc,
+ GrGpuResource::LifeCycle lifeCycle,
+ VkFormat format,
+ GrVkImage::Resource* imageRsrc) {
+ SkASSERT(imageRsrc);
+
+ // Note: we assume the caller will unref the imageResource
+ // Create() will increment the refCount, and we'll unref when we're done with it
+ return GrVkTextureRenderTarget::Create(gpu, desc, lifeCycle, format, imageRsrc);
+}
+
--- /dev/null
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#ifndef GrVkTextureRenderTarget_DEFINED
+#define GrVkTextureRenderTarget_DEFINED
+
+#include "GrVkTexture.h"
+#include "GrVkRenderTarget.h"
+#include "GrVkGpu.h"
+
+#ifdef SK_BUILD_FOR_WIN
+// Windows gives bogus warnings about inheriting asTexture/asRenderTarget via dominance.
+#pragma warning(push)
+#pragma warning(disable: 4250)
+#endif
+
+class GrVkImageView;
+
+class GrVkTextureRenderTarget: public GrVkTexture, public GrVkRenderTarget {
+public:
+ static GrVkTextureRenderTarget* CreateNewTextureRenderTarget(GrVkGpu*, const GrSurfaceDesc&,
+ GrGpuResource::LifeCycle,
+ const GrVkImage::ImageDesc&);
+
+ static GrVkTextureRenderTarget* CreateWrappedTextureRenderTarget(GrVkGpu*,
+ const GrSurfaceDesc&,
+ GrGpuResource::LifeCycle,
+ VkFormat,
+ GrVkImage::Resource*);
+
+protected:
+ void onAbandon() override {
+ GrVkRenderTarget::onAbandon();
+ GrVkTexture::onAbandon();
+ }
+
+ void onRelease() override {
+ GrVkRenderTarget::onRelease();
+ GrVkTexture::onRelease();
+ }
+
+private:
+ GrVkTextureRenderTarget(GrVkGpu* gpu,
+ const GrSurfaceDesc& desc,
+ GrGpuResource::LifeCycle lifeCycle,
+ const GrVkImage::Resource* imageResource,
+ const GrVkImageView* texView,
+ const GrVkImage::Resource* msaaResource,
+ const GrVkImageView* colorAttachmentView,
+ const GrVkImageView* resolveAttachmentView)
+ : GrSurface(gpu, lifeCycle, desc)
+ , GrVkImage(imageResource)
+ , GrVkTexture(gpu, desc, lifeCycle, imageResource, texView, GrVkTexture::kDerived)
+ , GrVkRenderTarget(gpu, desc, lifeCycle, imageResource, msaaResource, colorAttachmentView,
+ resolveAttachmentView, GrVkRenderTarget::kDerived) {
+ this->registerWithCache();
+ }
+
+ GrVkTextureRenderTarget(GrVkGpu* gpu,
+ const GrSurfaceDesc& desc,
+ GrGpuResource::LifeCycle lifeCycle,
+ const GrVkImage::Resource* imageResource,
+ const GrVkImageView* texView,
+ const GrVkImageView* colorAttachmentView)
+ : GrSurface(gpu, lifeCycle, desc)
+ , GrVkImage(imageResource)
+ , GrVkTexture(gpu, desc, lifeCycle, imageResource, texView, GrVkTexture::kDerived)
+ , GrVkRenderTarget(gpu, desc, lifeCycle, imageResource, colorAttachmentView,
+ GrVkRenderTarget::kDerived) {
+ this->registerWithCache();
+ }
+
+ static GrVkTextureRenderTarget* Create(GrVkGpu*, const GrSurfaceDesc&,
+ GrGpuResource::LifeCycle,
+ VkFormat format,
+ const GrVkImage::Resource* imageResource);
+
+ // GrVkRenderTarget accounts for the texture's memory and any MSAA renderbuffer's memory.
+ size_t onGpuMemorySize() const override {
+ return GrVkRenderTarget::onGpuMemorySize();
+ }
+};
+
+#ifdef SK_BUILD_FOR_WIN
+#pragma warning(pop)
+#endif
+
+#endif
--- /dev/null
+/*
+* Copyright 2015 Google Inc.
+*
+* Use of this source code is governed by a BSD-style license that can be
+* found in the LICENSE file.
+*/
+
+#include "GrVkTransferBuffer.h"
+#include "GrVkGpu.h"
+#include "SkTraceMemoryDump.h"
+
+
+GrVkTransferBuffer* GrVkTransferBuffer::Create(GrVkGpu* gpu, size_t size, GrVkBuffer::Type type) {
+ GrVkBuffer::Desc desc;
+ desc.fDynamic = false;
+ SkASSERT(GrVkBuffer::kCopyRead_Type == type || GrVkBuffer::kCopyWrite_Type == type);
+ desc.fType = type;
+ desc.fSizeInBytes = size;
+
+ const GrVkBuffer::Resource* bufferResource = GrVkBuffer::Create(gpu, desc);
+ if (!bufferResource) {
+ return nullptr;
+ }
+
+ GrVkTransferBuffer* buffer = new GrVkTransferBuffer(gpu, desc, bufferResource);
+ if (!buffer) {
+ bufferResource->unref(gpu);
+ }
+ return buffer;
+}
+
+GrVkTransferBuffer::GrVkTransferBuffer(GrVkGpu* gpu, const GrVkBuffer::Desc& desc,
+ const GrVkBuffer::Resource* bufferResource)
+ : INHERITED(gpu, desc.fSizeInBytes)
+ , GrVkBuffer(desc, bufferResource) {
+ this->registerWithCache();
+}
+
+void GrVkTransferBuffer::onRelease() {
+ if (!this->wasDestroyed()) {
+ this->vkRelease(this->getVkGpu());
+ }
+
+ INHERITED::onRelease();
+}
+
+void GrVkTransferBuffer::onAbandon() {
+ this->vkAbandon();
+ INHERITED::onAbandon();
+}
+
+void GrVkTransferBuffer::setMemoryBacking(SkTraceMemoryDump* traceMemoryDump,
+ const SkString& dumpName) const {
+ SkString buffer_id;
+ buffer_id.appendU64((uint64_t)this->buffer());
+ traceMemoryDump->setMemoryBacking(dumpName.c_str(), "vk_buffer",
+ buffer_id.c_str());
+}
--- /dev/null
+/*
+* Copyright 2015 Google Inc.
+*
+* Use of this source code is governed by a BSD-style license that can be
+* found in the LICENSE file.
+*/
+
+#ifndef GrVkTransferBuffer_DEFINED
+#define GrVkTransferBuffer_DEFINED
+
+#include "GrTransferBuffer.h"
+#include "GrVkBuffer.h"
+#include "vk/GrVkInterface.h"
+
+class GrVkGpu;
+
+class GrVkTransferBuffer : public GrTransferBuffer, public GrVkBuffer {
+
+public:
+ static GrVkTransferBuffer* Create(GrVkGpu* gpu, size_t size, GrVkBuffer::Type type);
+
+protected:
+ void onAbandon() override;
+ void onRelease() override;
+
+private:
+ GrVkTransferBuffer(GrVkGpu* gpu, const GrVkBuffer::Desc& desc,
+ const GrVkBuffer::Resource* resource);
+ void setMemoryBacking(SkTraceMemoryDump* traceMemoryDump,
+ const SkString& dumpName) const override;
+
+ void* onMap() override {
+ if (!this->wasDestroyed()) {
+ return this->vkMap(this->getVkGpu());
+ } else {
+ return nullptr;
+ }
+ }
+
+ void onUnmap() override {
+ if (!this->wasDestroyed()) {
+ this->vkUnmap(this->getVkGpu());
+ }
+ }
+
+ GrVkGpu* getVkGpu() const {
+ SkASSERT(!this->wasDestroyed());
+ return static_cast<GrVkGpu*>(this->getGpu());
+ }
+
+ typedef GrTransferBuffer INHERITED;
+};
+
+#endif
--- /dev/null
+/*
+* Copyright 2015 Google Inc.
+*
+* Use of this source code is governed by a BSD-style license that can be
+* found in the LICENSE file.
+*/
+
+#include "GrVkUniformBuffer.h"
+#include "GrVkGpu.h"
+
+
+GrVkUniformBuffer* GrVkUniformBuffer::Create(GrVkGpu* gpu, size_t size, bool dynamic) {
+ if (0 == size) {
+ return nullptr;
+ }
+ GrVkBuffer::Desc desc;
+ desc.fDynamic = dynamic;
+ desc.fType = GrVkBuffer::kUniform_Type;
+ desc.fSizeInBytes = size;
+
+ const GrVkBuffer::Resource* bufferResource = GrVkBuffer::Create(gpu, desc);
+ if (!bufferResource) {
+ return nullptr;
+ }
+
+ GrVkUniformBuffer* buffer = new GrVkUniformBuffer(desc, bufferResource);
+ if (!buffer) {
+ bufferResource->unref(gpu);
+ }
+ return buffer;
+}
\ No newline at end of file
--- /dev/null
+/*
+* Copyright 2015 Google Inc.
+*
+* Use of this source code is governed by a BSD-style license that can be
+* found in the LICENSE file.
+*/
+
+#ifndef GrVkUniformBuffer_DEFINED
+#define GrVkUniformBuffer_DEFINED
+
+#include "GrVkBuffer.h"
+#include "vk/GrVkInterface.h"
+
+class GrVkGpu;
+
+class GrVkUniformBuffer : public GrVkBuffer {
+
+public:
+ static GrVkUniformBuffer* Create(GrVkGpu* gpu, size_t size, bool dynamic);
+
+ void* map(const GrVkGpu* gpu) {
+ return this->vkMap(gpu);
+ }
+ void unmap(const GrVkGpu* gpu) {
+ this->vkUnmap(gpu);
+ }
+ bool updateData(const GrVkGpu* gpu, const void* src, size_t srcSizeInBytes) {
+ return this->vkUpdateData(gpu, src, srcSizeInBytes);
+ }
+ void release(const GrVkGpu* gpu) {
+ this->vkRelease(gpu);
+ }
+ void abandon() {
+ this->vkAbandon();
+ }
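+
+ // Typical per-draw flow (sketch): updateData() with the new uniform values, or
+ // map()/write/unmap(); call release() when finished, or abandon() on context loss.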
+
+private:
+ GrVkUniformBuffer(const GrVkBuffer::Desc& desc, const GrVkBuffer::Resource* resource)
+ : INHERITED(desc, resource) {
+ }
+
+ typedef GrVkBuffer INHERITED;
+};
+
+#endif
--- /dev/null
+/*
+* Copyright 2016 Google Inc.
+*
+* Use of this source code is governed by a BSD-style license that can be
+* found in the LICENSE file.
+*/
+
+#include "GrVkUniformHandler.h"
+#include "glsl/GrGLSLProgramBuilder.h"
+
+// To determine whether a current offset is aligned, we can just 'and' the lowest bits with the
+// alignment mask. A value of 0 means aligned, any other value is how many bytes past alignment we
+// are. This works since all alignments are powers of 2. The mask is always (alignment - 1).
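+// For example, a vec2 (mask 0x7) at offset 12: 12 & 0x7 == 4, i.e. 4 bytes past the
+// previous 8-byte boundary, so 4 more bytes of padding bring us to offset 16.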
+static uint32_t grsltype_to_alignment_mask(GrSLType type) {
+ SkASSERT(GrSLTypeIsFloatType(type));
+ static const uint32_t kAlignments[kGrSLTypeCount] = {
+ 0x0, // kVoid_GrSLType, should never return this
+ 0x3, // kFloat_GrSLType
+ 0x7, // kVec2f_GrSLType
+ 0xF, // kVec3f_GrSLType
+ 0xF, // kVec4f_GrSLType
+ 0xF, // kMat33f_GrSLType
+ 0xF, // kMat44f_GrSLType
+ 0x0, // Sampler2D_GrSLType, should never return this
+ 0x0, // SamplerExternal_GrSLType, should never return this
+ };
+ GR_STATIC_ASSERT(0 == kVoid_GrSLType);
+ GR_STATIC_ASSERT(1 == kFloat_GrSLType);
+ GR_STATIC_ASSERT(2 == kVec2f_GrSLType);
+ GR_STATIC_ASSERT(3 == kVec3f_GrSLType);
+ GR_STATIC_ASSERT(4 == kVec4f_GrSLType);
+ GR_STATIC_ASSERT(5 == kMat33f_GrSLType);
+ GR_STATIC_ASSERT(6 == kMat44f_GrSLType);
+ GR_STATIC_ASSERT(7 == kSampler2D_GrSLType);
+ GR_STATIC_ASSERT(8 == kSamplerExternal_GrSLType);
+ GR_STATIC_ASSERT(SK_ARRAY_COUNT(kAlignments) == kGrSLTypeCount);
+ return kAlignments[type];
+}
+
+// Given the current offset into the ubo, calculate the offset for the uniform we're trying to add
+// taking into consideration all alignment requirements. The uniformOffset is set to the offset for
+// the new uniform, and currentOffset is updated to be the offset to the end of the new uniform.
+static void get_ubo_aligned_offset(uint32_t* uniformOffset,
+ uint32_t* currentOffset,
+ GrSLType type,
+ int arrayCount) {
+ uint32_t alignmentMask = grsltype_to_alignment_mask(type);
+ uint32_t offsetDiff = *currentOffset & alignmentMask;
+ if (offsetDiff != 0) {
+ offsetDiff = alignmentMask - offsetDiff + 1;
+ }
+ *uniformOffset = *currentOffset + offsetDiff;
+ SkASSERT(sizeof(float) == 4);
+ // We use a 0 arrayCount to indicate it is not an array type but we still need to count the one
+ // object.
+ int count = arrayCount ? arrayCount : 1;
+ *currentOffset = *uniformOffset + count * (uint32_t)GrSLTypeSize(type);
+}
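+
+// Worked example (assuming GrSLTypeSize(kVec3f_GrSLType) == 12): adding a vec3 at
+// *currentOffset == 20 uses mask 0xF; offsetDiff = 20 & 0xF = 4, padded to
+// 0xF - 4 + 1 = 12, so *uniformOffset becomes 32 and *currentOffset 32 + 12 = 44.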
+
+GrGLSLUniformHandler::UniformHandle GrVkUniformHandler::internalAddUniformArray(
+ uint32_t visibility,
+ GrSLType type,
+ GrSLPrecision precision,
+ const char* name,
+ bool mangleName,
+ int arrayCount,
+ const char** outName) {
+ SkASSERT(name && strlen(name));
+ SkDEBUGCODE(static const uint32_t kVisibilityMask = kVertex_GrShaderFlag|kFragment_GrShaderFlag);
+ SkASSERT(0 == (~kVisibilityMask & visibility));
+ SkASSERT(0 != visibility);
+ SkASSERT(kDefault_GrSLPrecision == precision || GrSLTypeIsFloatType(type));
+
+ UniformInfo& uni = fUniforms.push_back();
+ uni.fVariable.setType(type);
+ // TODO: this is a bit hacky; let's think of a better way. Basically we need to be able to use
+ // the uniform view matrix name in the GP, and the GP is immutable, so it has to tell the PB
+ // exactly what name it wants to use for the uniform view matrix. If we prefix anything, the
+ // names will mismatch. I think the correct solution is to have all GPs that need the uniform
+ // view matrix upload it in their setData along with their regular uniforms.
+ char prefix = 'u';
+ if ('u' == name[0]) {
+ prefix = '\0';
+ }
+ fProgramBuilder->nameVariable(uni.fVariable.accessName(), prefix, name, mangleName);
+ uni.fVariable.setArrayCount(arrayCount);
+ // For now, assert that the visibility is either only vertex or only fragment.
+ SkASSERT(kVertex_GrShaderFlag == visibility || kFragment_GrShaderFlag == visibility);
+ uni.fVisibility = visibility;
+ uni.fVariable.setPrecision(precision);
+ if (GrSLTypeIsFloatType(type)) {
+ // When outputting the GLSL, only the outer uniform block will get the Uniform modifier. Thus
+ // we set the modifier to none for all uniforms declared inside the block.
+ uni.fVariable.setTypeModifier(GrGLSLShaderVar::kNone_TypeModifier);
+
+ uint32_t* currentOffset = kVertex_GrShaderFlag == visibility ? &fCurrentVertexUBOOffset
+ : &fCurrentFragmentUBOOffset;
+ get_ubo_aligned_offset(&uni.fUBOffset, currentOffset, type, arrayCount);
+ uni.fSetNumber = kUniformBufferDescSet;
+ uni.fBinding = kVertex_GrShaderFlag == visibility ? kVertexBinding : kFragBinding;
+
+ if (outName) {
+ *outName = uni.fVariable.c_str();
+ }
+ } else {
+ SkASSERT(type == kSampler2D_GrSLType);
+ uni.fVariable.setTypeModifier(GrGLSLShaderVar::kUniform_TypeModifier);
+
+ uni.fSetNumber = kSamplerDescSet;
+ uni.fBinding = fCurrentSamplerBinding++;
+ uni.fUBOffset = 0; // This value will be ignored, but initializing to avoid any errors.
+ SkString layoutQualifier;
+ layoutQualifier.appendf("set=%d, binding=%d", uni.fSetNumber, uni.fBinding);
+ uni.fVariable.setLayoutQualifier(layoutQualifier.c_str());
+ }
+
+ return GrGLSLUniformHandler::UniformHandle(fUniforms.count() - 1);
+}
+
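+// For a stage with at least one non-sampler uniform, the emitted block looks roughly
+// like the following (illustrative only; exact contents depend on the GLSL caps):
+//
+//     layout (set=1, binding=0) uniform vertexUniformBuffer
+//     {
+//         mediump float uCoverage;
+//     };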
+void GrVkUniformHandler::appendUniformDecls(GrShaderFlags visibility, SkString* out) const {
+ // Collects all the variables that will be placed inside the uniform buffer.
+ SkString uniformsString;
+ SkASSERT(kVertex_GrShaderFlag == visibility || kFragment_GrShaderFlag == visibility);
+ uint32_t uniformBinding = (visibility == kVertex_GrShaderFlag) ? kVertexBinding : kFragBinding;
+ for (int i = 0; i < fUniforms.count(); ++i) {
+ const UniformInfo& localUniform = fUniforms[i];
+ if (visibility == localUniform.fVisibility) {
+ if (GrSLTypeIsFloatType(localUniform.fVariable.getType())) {
+ SkASSERT(uniformBinding == localUniform.fBinding);
+ SkASSERT(kUniformBufferDescSet == localUniform.fSetNumber);
+ localUniform.fVariable.appendDecl(fProgramBuilder->glslCaps(), &uniformsString);
+ uniformsString.append(";\n");
+ } else {
+ SkASSERT(localUniform.fVariable.getType() == kSampler2D_GrSLType);
+ SkASSERT(kSamplerDescSet == localUniform.fSetNumber);
+ localUniform.fVariable.appendDecl(fProgramBuilder->glslCaps(), out);
+ out->append(";\n");
+ }
+ }
+ }
+ if (!uniformsString.isEmpty()) {
+ const char* stage = (visibility == kVertex_GrShaderFlag) ? "vertex" : "fragment";
+ out->appendf("layout (set=%d, binding=%d) uniform %sUniformBuffer\n{\n",
+ kUniformBufferDescSet, uniformBinding, stage);
+ out->appendf("%s\n};\n", uniformsString.c_str());
+ }
+}
\ No newline at end of file
--- /dev/null
+/*
+* Copyright 2016 Google Inc.
+*
+* Use of this source code is governed by a BSD-style license that can be
+* found in the LICENSE file.
+*/
+
+#ifndef GrVkUniformHandler_DEFINED
+#define GrVkUniformHandler_DEFINED
+
+#include "glsl/GrGLSLUniformHandler.h"
+
+#include "GrAllocator.h"
+#include "glsl/GrGLSLShaderVar.h"
+
+static const int kUniformsPerBlock = 8;
+
+class GrVkUniformHandler : public GrGLSLUniformHandler {
+public:
+ enum {
+ kSamplerDescSet = 0,
+ kUniformBufferDescSet = 1,
+ };
+ enum {
+ kVertexBinding = 0,
+ kFragBinding = 1,
+ };
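+ // i.e. all samplers live in descriptor set 0 with one binding each, while the
+ // vertex and fragment uniform buffers share set 1 at bindings 0 and 1.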
+
+ // fUBOffset is only valid if the GrSLType of the fVariable is not a sampler
+ struct UniformInfo {
+ GrGLSLShaderVar fVariable;
+ uint32_t fVisibility;
+ uint32_t fSetNumber;
+ uint32_t fBinding;
+ uint32_t fUBOffset;
+ };
+ typedef GrTAllocator<UniformInfo> UniformInfoArray;
+
+ const GrGLSLShaderVar& getUniformVariable(UniformHandle u) const override {
+ return fUniforms[u.toIndex()].fVariable;
+ }
+
+ const char* getUniformCStr(UniformHandle u) const override {
+ return this->getUniformVariable(u).c_str();
+ }
+
+private:
+ explicit GrVkUniformHandler(GrGLSLProgramBuilder* program)
+ : INHERITED(program)
+ , fUniforms(kUniformsPerBlock)
+ , fCurrentVertexUBOOffset(0)
+ , fCurrentFragmentUBOOffset(0)
+ , fCurrentSamplerBinding(0) {
+ }
+
+ UniformHandle internalAddUniformArray(uint32_t visibility,
+ GrSLType type,
+ GrSLPrecision precision,
+ const char* name,
+ bool mangleName,
+ int arrayCount,
+ const char** outName) override;
+
+ void appendUniformDecls(GrShaderFlags, SkString*) const override;
+
+ bool hasVertexUniforms() const { return fCurrentVertexUBOOffset > 0; }
+ bool hasFragmentUniforms() const { return fCurrentFragmentUBOOffset > 0; }
+
+
+ const UniformInfo& getUniformInfo(UniformHandle u) const {
+ return fUniforms[u.toIndex()];
+ }
+
+
+ UniformInfoArray fUniforms;
+ uint32_t fCurrentVertexUBOOffset;
+ uint32_t fCurrentFragmentUBOOffset;
+ uint32_t fCurrentSamplerBinding;
+
+ friend class GrVkProgramBuilder;
+
+ typedef GrGLSLUniformHandler INHERITED;
+};
+
+#endif
\ No newline at end of file
--- /dev/null
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "GrVkUtil.h"
+
+bool GrPixelConfigToVkFormat(GrPixelConfig config, VkFormat* format) {
+ VkFormat dontCare;
+ if (!format) {
+ format = &dontCare;
+ }
+
+ switch (config) {
+ case kRGBA_8888_GrPixelConfig:
+ *format = VK_FORMAT_R8G8B8A8_UNORM;
+ break;
+ case kBGRA_8888_GrPixelConfig:
+ *format = VK_FORMAT_B8G8R8A8_UNORM;
+ break;
+ case kSRGBA_8888_GrPixelConfig:
+ *format = VK_FORMAT_R8G8B8A8_SRGB;
+ break;
+ case kRGB_565_GrPixelConfig:
+ *format = VK_FORMAT_R5G6B5_UNORM_PACK16;
+ break;
+ case kRGBA_4444_GrPixelConfig:
+ *format = VK_FORMAT_R4G4B4A4_UNORM_PACK16;
+ break;
+ case kIndex_8_GrPixelConfig:
+ // No current Vulkan support for this config
+ return false;
+ case kAlpha_8_GrPixelConfig:
+ *format = VK_FORMAT_R8_UNORM;
+ break;
+ case kETC1_GrPixelConfig:
+ // Convert to ETC2, which is a superset of ETC1
+ *format = VK_FORMAT_ETC2_R8G8B8_UNORM_BLOCK;
+ break;
+ case kLATC_GrPixelConfig:
+ // No current Vulkan support for this config
+ return false;
+ case kR11_EAC_GrPixelConfig:
+ *format = VK_FORMAT_EAC_R11_UNORM_BLOCK;
+ break;
+ case kASTC_12x12_GrPixelConfig:
+ *format = VK_FORMAT_ASTC_12x12_UNORM_BLOCK;
+ break;
+ case kRGBA_float_GrPixelConfig:
+ *format = VK_FORMAT_R32G32B32A32_SFLOAT;
+ break;
+ case kRGBA_half_GrPixelConfig:
+ *format = VK_FORMAT_R16G16B16A16_SFLOAT;
+ break;
+ case kAlpha_half_GrPixelConfig:
+ *format = VK_FORMAT_R16_SFLOAT;
+ break;
+ default:
+ return false;
+ }
+ return true;
+}
+
+bool GrSampleCountToVkSampleCount(uint32_t samples, VkSampleCountFlagBits* vkSamples) {
+ switch (samples) {
+ case 0: // fall through
+ case 1:
+ *vkSamples = VK_SAMPLE_COUNT_1_BIT;
+ return true;
+ case 2:
+ *vkSamples = VK_SAMPLE_COUNT_2_BIT;
+ return true;
+ case 4:
+ *vkSamples = VK_SAMPLE_COUNT_4_BIT;
+ return true;
+ case 8:
+ *vkSamples = VK_SAMPLE_COUNT_8_BIT;
+ return true;
+ case 16:
+ *vkSamples = VK_SAMPLE_COUNT_16_BIT;
+ return true;
+ case 32:
+ *vkSamples = VK_SAMPLE_COUNT_32_BIT;
+ return true;
+ case 64:
+ *vkSamples = VK_SAMPLE_COUNT_64_BIT;
+ return true;
+ default:
+ return false;
+ }
+}
+
--- /dev/null
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrVkUtil_DEFINED
+#define GrVkUtil_DEFINED
+
+#include "GrColor.h"
+#include "GrTypes.h"
+#include "vk/GrVkInterface.h"
+
+#include "vulkan/vulkan.h"
+
+// makes a Vk call on the interface
+#define GR_VK_CALL(IFACE, X) (IFACE)->fFunctions.f##X
+// same as GR_VK_CALL but checks for success
+#ifdef SK_DEBUG
+#define GR_VK_CALL_ERRCHECK(IFACE, X) \
+ VkResult SK_MACRO_APPEND_LINE(ret) = GR_VK_CALL(IFACE, X); \
+ SkASSERT(VK_SUCCESS == SK_MACRO_APPEND_LINE(ret));
+#else
+#define GR_VK_CALL_ERRCHECK(IFACE, X) (void) GR_VK_CALL(IFACE, X);
+#endif
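+// Illustrative usage (hypothetical gpu/device variables):
+//     GR_VK_CALL(gpu->vkInterface(), DestroyBuffer(device, buffer, nullptr));
+//     GR_VK_CALL_ERRCHECK(gpu->vkInterface(), CreateBuffer(device, &info, nullptr, &buffer));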
+
+/**
+ * Returns the Vulkan texture format for the given GrPixelConfig.
+ */
+bool GrPixelConfigToVkFormat(GrPixelConfig config, VkFormat* format);
+
+bool GrSampleCountToVkSampleCount(uint32_t samples, VkSampleCountFlagBits* vkSamples);
+
+#endif
+
--- /dev/null
+/*
+* Copyright 2016 Google Inc.
+*
+* Use of this source code is governed by a BSD-style license that can be
+* found in the LICENSE file.
+*/
+
+#include "GrVkVaryingHandler.h"
+
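+// Each varying is assigned an explicit "location = N" layout qualifier in
+// declaration order within its array; e.g. the first three fragment inputs get
+// locations 0, 1 and 2 (illustrative).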
+void finalize_helper(GrVkVaryingHandler::VarArray& vars) {
+ for (int i = 0; i < vars.count(); ++i) {
+ SkString location;
+ location.appendf("location = %d", i);
+ vars[i].setLayoutQualifier(location.c_str());
+ }
+}
+
+void GrVkVaryingHandler::onFinalize() {
+ finalize_helper(fVertexInputs);
+ finalize_helper(fVertexOutputs);
+ finalize_helper(fGeomInputs);
+ finalize_helper(fGeomOutputs);
+ finalize_helper(fFragInputs);
+ finalize_helper(fFragOutputs);
+}
\ No newline at end of file
--- /dev/null
+/*
+* Copyright 2016 Google Inc.
+*
+* Use of this source code is governed by a BSD-style license that can be
+* found in the LICENSE file.
+*/
+
+#ifndef GrVkVaryingHandler_DEFINED
+#define GrVkVaryingHandler_DEFINED
+
+#include "glsl/GrGLSLVarying.h"
+
+class GrVkVaryingHandler : public GrGLSLVaryingHandler {
+public:
+ GrVkVaryingHandler(GrGLSLProgramBuilder* program) : INHERITED(program) {}
+
+ typedef GrGLSLVaryingHandler::VarArray VarArray;
+
+private:
+ void onFinalize() override;
+
+ friend class GrVkProgramBuilder;
+
+ typedef GrGLSLVaryingHandler INHERITED;
+};
+
+#endif
\ No newline at end of file
--- /dev/null
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "GrVkVertexBuffer.h"
+#include "GrVkGpu.h"
+
+GrVkVertexBuffer::GrVkVertexBuffer(GrVkGpu* gpu, const GrVkBuffer::Desc& desc,
+ const GrVkBuffer::Resource* bufferResource)
+ : INHERITED(gpu, desc.fSizeInBytes, desc.fDynamic, false)
+ , GrVkBuffer(desc, bufferResource) {
+ this->registerWithCache();
+}
+
+GrVkVertexBuffer* GrVkVertexBuffer::Create(GrVkGpu* gpu, size_t size, bool dynamic) {
+ GrVkBuffer::Desc desc;
+ desc.fDynamic = dynamic;
+ desc.fType = GrVkBuffer::kVertex_Type;
+ desc.fSizeInBytes = size;
+
+ const GrVkBuffer::Resource* bufferResource = GrVkBuffer::Create(gpu, desc);
+ if (!bufferResource) {
+ return nullptr;
+ }
+
+ GrVkVertexBuffer* buffer = new GrVkVertexBuffer(gpu, desc, bufferResource);
+ if (!buffer) {
+ bufferResource->unref(gpu);
+ }
+ return buffer;
+}
+
+void GrVkVertexBuffer::onRelease() {
+ if (!this->wasDestroyed()) {
+ this->vkRelease(this->getVkGpu());
+ }
+
+ INHERITED::onRelease();
+}
+
+void GrVkVertexBuffer::onAbandon() {
+ this->vkAbandon();
+ INHERITED::onAbandon();
+}
+
+void* GrVkVertexBuffer::onMap() {
+ if (!this->wasDestroyed()) {
+ return this->vkMap(this->getVkGpu());
+ } else {
+ return nullptr;
+ }
+}
+
+void GrVkVertexBuffer::onUnmap() {
+ if (!this->wasDestroyed()) {
+ this->vkUnmap(this->getVkGpu());
+ }
+}
+
+bool GrVkVertexBuffer::onUpdateData(const void* src, size_t srcSizeInBytes) {
+ if (!this->wasDestroyed()) {
+ return this->vkUpdateData(this->getVkGpu(), src, srcSizeInBytes);
+ } else {
+ return false;
+ }
+}
+
+GrVkGpu* GrVkVertexBuffer::getVkGpu() const {
+ SkASSERT(!this->wasDestroyed());
+ return static_cast<GrVkGpu*>(this->getGpu());
+}
+
--- /dev/null
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrVkVertexBuffer_DEFINED
+#define GrVkVertexBuffer_DEFINED
+
+#include "GrVertexBuffer.h"
+#include "GrVkBuffer.h"
+#include "vk/GrVkInterface.h"
+
+class GrVkGpu;
+
+class GrVkVertexBuffer : public GrVertexBuffer, public GrVkBuffer {
+public:
+ static GrVkVertexBuffer* Create(GrVkGpu* gpu, size_t size, bool dynamic);
+
+protected:
+ void onAbandon() override;
+ void onRelease() override;
+
+private:
+ GrVkVertexBuffer(GrVkGpu* gpu, const GrVkBuffer::Desc& desc,
+ const GrVkBuffer::Resource* resource);
+
+ void* onMap() override;
+ void onUnmap() override;
+ bool onUpdateData(const void* src, size_t srcSizeInBytes) override;
+
+ GrVkGpu* getVkGpu() const;
+
+ typedef GrVertexBuffer INHERITED;
+};
+
+#endif
--- /dev/null
+
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+// This is a GPU-backend-specific test. It relies on static initializers to work.
+
+#include "SkTypes.h"
+
+#if SK_SUPPORT_GPU && SK_ALLOW_STATIC_GLOBAL_INITIALIZERS && defined(SK_VULKAN)
+
+#include "GrContextFactory.h"
+#include "GrTest.h"
+#include "Test.h"
+#include "vk/GrVkGpu.h"
+
+bool does_full_buffer_contain_correct_color(GrColor* buffer,
+ GrColor clearColor,
+ GrPixelConfig config,
+ int width,
+ int height) {
+ GrColor matchColor;
+ if (kRGBA_8888_GrPixelConfig == config) {
+ matchColor = clearColor;
+ } else if (kBGRA_8888_GrPixelConfig == config) {
+ // Hack to swap the R and B components in the GrColor so that the comparison below works
+ matchColor = GrColorPackRGBA(GrColorUnpackB(clearColor),
+ GrColorUnpackG(clearColor),
+ GrColorUnpackR(clearColor),
+ GrColorUnpackA(clearColor));
+ } else {
+ // Currently only RGBA_8888 and BGRA_8888 configs are supported
+ return false;
+ }
+
+ for (int j = 0; j < height; ++j) {
+ for (int i = 0; i < width; ++i) {
+ if (buffer[j * width + i] != matchColor) {
+ return false;
+ }
+ }
+ }
+ return true;
+}
+
+void basic_clear_test(skiatest::Reporter* reporter, GrContext* context, GrPixelConfig config) {
+ GrVkGpu* gpu = static_cast<GrVkGpu*>(context->getGpu());
+ gpu->discard(NULL);
+ SkAutoTMalloc<GrColor> buffer(25);
+
+ GrSurfaceDesc surfDesc;
+ surfDesc.fFlags = kRenderTarget_GrSurfaceFlag;
+ surfDesc.fOrigin = kTopLeft_GrSurfaceOrigin;
+ surfDesc.fWidth = 5;
+ surfDesc.fHeight = 5;
+ surfDesc.fConfig = config;
+ surfDesc.fSampleCnt = 0;
+ GrTexture* tex = gpu->createTexture(surfDesc, false, nullptr, 0);
+ SkASSERT(tex);
+ SkASSERT(tex->asRenderTarget());
+ SkIRect rect = SkIRect::MakeWH(5, 5);
+
+ gpu->clear(rect, GrColor_TRANSPARENT_BLACK, tex->asRenderTarget());
+
+ gpu->readPixels(tex, 0, 0, 5, 5, config, (void*)buffer.get(), 0);
+
+ REPORTER_ASSERT(reporter, does_full_buffer_contain_correct_color(buffer.get(),
+ GrColor_TRANSPARENT_BLACK,
+ config,
+ 5,
+ 5));
+
+ gpu->clear(rect, GrColor_WHITE, tex->asRenderTarget());
+
+ gpu->readPixels(tex, 0, 0, 5, 5, config, (void*)buffer.get(), 0);
+
+ REPORTER_ASSERT(reporter, does_full_buffer_contain_correct_color(buffer.get(),
+ GrColor_WHITE,
+ config,
+ 5,
+ 5));
+
+ GrColor myColor = GrColorPackRGBA(0xFF, 0x7F, 0x40, 0x20);
+
+ gpu->clear(rect, myColor, tex->asRenderTarget());
+
+ gpu->readPixels(tex, 0, 0, 5, 5, config, (void*)buffer.get(), 0);
+
+ REPORTER_ASSERT(reporter, does_full_buffer_contain_correct_color(buffer.get(),
+ myColor,
+ config,
+ 5,
+ 5));
+
+ tex->unref();
+}
+
+void sub_clear_test(skiatest::Reporter* reporter, GrContext* context, GrPixelConfig config) {
+ const int width = 10;
+ const int height = 10;
+ const int subWidth = width/2;
+ const int subHeight = height/2;
+ GrVkGpu* gpu = static_cast<GrVkGpu*>(context->getGpu());
+ gpu->discard(NULL);
+ SkAutoTMalloc<GrColor> buffer(width * height);
+ SkAutoTMalloc<GrColor> subBuffer(subWidth * subHeight);
+
+ GrSurfaceDesc surfDesc;
+ surfDesc.fFlags = kRenderTarget_GrSurfaceFlag;
+ surfDesc.fOrigin = kTopLeft_GrSurfaceOrigin;
+ surfDesc.fWidth = width;
+ surfDesc.fHeight = height;
+ surfDesc.fConfig = config;
+ surfDesc.fSampleCnt = 0;
+ GrTexture* tex = gpu->createTexture(surfDesc, false, nullptr, 0);
+ SkASSERT(tex);
+ SkASSERT(tex->asRenderTarget());
+
+ SkIRect fullRect = SkIRect::MakeWH(width, height);
+ gpu->clear(fullRect, GrColor_TRANSPARENT_BLACK, tex->asRenderTarget());
+
+ gpu->readPixels(tex, 0, 0, width, height, config, (void*)buffer.get(), 0);
+
+ REPORTER_ASSERT(reporter, does_full_buffer_contain_correct_color(buffer.get(),
+ GrColor_TRANSPARENT_BLACK,
+ config,
+ width,
+ height));
+ SkIRect rect;
+ rect = SkIRect::MakeXYWH(0, 0, subWidth, subHeight);
+ gpu->clear(rect, GrColor_WHITE, tex->asRenderTarget());
+ rect = SkIRect::MakeXYWH(subWidth, 0, subWidth, subHeight);
+ gpu->clear(rect, GrColor_WHITE, tex->asRenderTarget());
+ rect = SkIRect::MakeXYWH(0, subHeight, subWidth, subHeight);
+ gpu->clear(rect, GrColor_WHITE, tex->asRenderTarget());
+
+ // Should fail since bottom right sub area has not been cleared to white
+ gpu->readPixels(tex, 0, 0, width, height, config, (void*)buffer.get(), 0);
+ REPORTER_ASSERT(reporter, !does_full_buffer_contain_correct_color(buffer.get(),
+ GrColor_WHITE,
+ config,
+ width,
+ height));
+
+ rect = SkIRect::MakeXYWH(subWidth, subHeight, subWidth, subHeight);
+ gpu->clear(rect, GrColor_WHITE, tex->asRenderTarget());
+
+ gpu->readPixels(tex, 0, 0, width, height, config, (void*)buffer.get(), 0);
+ REPORTER_ASSERT(reporter, does_full_buffer_contain_correct_color(buffer.get(),
+ GrColor_WHITE,
+ config,
+ width,
+ height));
+
+ // Try different colors and that each sub area has correct color
+ GrColor subColor1 = GrColorPackRGBA(0xFF, 0x00, 0x00, 0xFF);
+ GrColor subColor2 = GrColorPackRGBA(0x00, 0xFF, 0x00, 0xFF);
+ GrColor subColor3 = GrColorPackRGBA(0x00, 0x00, 0xFF, 0xFF);
+ GrColor subColor4 = GrColorPackRGBA(0xFF, 0xFF, 0x00, 0xFF);
+
+ rect = SkIRect::MakeXYWH(0, 0, subWidth, subHeight);
+ gpu->clear(rect, subColor1, tex->asRenderTarget());
+ rect = SkIRect::MakeXYWH(subWidth, 0, subWidth, subHeight);
+ gpu->clear(rect, subColor2, tex->asRenderTarget());
+ rect = SkIRect::MakeXYWH(0, subHeight, subWidth, subHeight);
+ gpu->clear(rect, subColor3, tex->asRenderTarget());
+ rect = SkIRect::MakeXYWH(subWidth, subHeight, subWidth, subHeight);
+ gpu->clear(rect, subColor4, tex->asRenderTarget());
+
+ gpu->readPixels(tex, 0, 0, subWidth, subHeight, config, (void*)subBuffer.get(), 0);
+ REPORTER_ASSERT(reporter, does_full_buffer_contain_correct_color(subBuffer.get(),
+ subColor1,
+ config,
+ subWidth,
+ subHeight));
+ gpu->readPixels(tex, subWidth, 0, subWidth, subHeight, config, (void*)subBuffer.get(), 0);
+ REPORTER_ASSERT(reporter, does_full_buffer_contain_correct_color(subBuffer.get(),
+ subColor2,
+ config,
+ subWidth,
+ subHeight));
+ gpu->readPixels(tex, 0, subHeight, subWidth, subHeight, config, (void*)subBuffer.get(), 0);
+ REPORTER_ASSERT(reporter, does_full_buffer_contain_correct_color(subBuffer.get(),
+ subColor3,
+ config,
+ subWidth,
+ subHeight));
+ gpu->readPixels(tex, subWidth, subHeight, subWidth, subHeight,
+ config, (void*)subBuffer.get(), 0);
+ REPORTER_ASSERT(reporter, does_full_buffer_contain_correct_color(subBuffer.get(),
+ subColor4,
+ config,
+ subWidth,
+ subHeight));
+
+ tex->unref();
+}
+
+DEF_GPUTEST(VkClearTests, reporter, factory) {
+ GrContextOptions opts;
+ opts.fSuppressPrints = true;
+ GrContextFactory debugFactory(opts);
+ for (int type = 0; type < GrContextFactory::kLastGLContextType; ++type) {
+ if (static_cast<GrContextFactory::GLContextType>(type) !=
+ GrContextFactory::kNative_GLContextType) {
+ continue;
+ }
+ GrContext* context = debugFactory.get(static_cast<GrContextFactory::GLContextType>(type));
+ if (context) {
+ basic_clear_test(reporter, context, kRGBA_8888_GrPixelConfig);
+ basic_clear_test(reporter, context, kBGRA_8888_GrPixelConfig);
+ sub_clear_test(reporter, context, kRGBA_8888_GrPixelConfig);
+ sub_clear_test(reporter, context, kBGRA_8888_GrPixelConfig);
+ }
+
+ }
+}
+
+#endif
--- /dev/null
+
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+// This is a GPU-backend-specific test. It relies on static initializers to work.
+
+#include "SkTypes.h"
+
+#if SK_SUPPORT_GPU && SK_ALLOW_STATIC_GLOBAL_INITIALIZERS && defined(SK_VULKAN)
+
+#include "GrContextFactory.h"
+#include "GrTest.h"
+#include "Test.h"
+#include "vk/GrVkGpu.h"
+
+
+void fill_pixel_data(int width, int height, GrColor* data) {
+
+ // build red-green gradient
+ for (int j = 0; j < height; ++j) {
+ for (int i = 0; i < width; ++i) {
+ unsigned int red = (unsigned int)(256.f*(i / (float)width));
+ unsigned int green = (unsigned int)(256.f*(j / (float)height));
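+ // red - (red>>8) clamps a value of 256 down to 255 without branching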
+ data[i + j*width] = GrColorPackRGBA(red - (red>>8), green - (green>>8), 0xff, 0xff);
+ }
+ }
+}
+
+bool does_full_buffer_contain_correct_color(GrColor* srcBuffer,
+ GrColor* dstBuffer,
+ GrPixelConfig config,
+ int width,
+ int height) {
+ GrColor* srcPtr = srcBuffer;
+ GrColor* dstPtr = dstBuffer;
+ for (int j = 0; j < height; ++j) {
+ for (int i = 0; i < width; ++i) {
+ if (srcPtr[i] != dstPtr[i]) {
+ return false;
+ }
+ }
+ srcPtr += width;
+ dstPtr += width;
+ }
+ return true;
+}
+
+void basic_texture_test(skiatest::Reporter* reporter, GrContext* context, GrPixelConfig config,
+ bool renderTarget, bool linearTiling) {
+ GrVkGpu* gpu = static_cast<GrVkGpu*>(context->getGpu());
+ gpu->discard(NULL);
+
+ const int kWidth = 16;
+ const int kHeight = 16;
+ SkAutoTMalloc<GrColor> srcBuffer(kWidth*kHeight);
+ SkAutoTMalloc<GrColor> dstBuffer(kWidth*kHeight);
+
+ fill_pixel_data(kWidth, kHeight, srcBuffer.get());
+
+ const GrVkCaps* caps = reinterpret_cast<const GrVkCaps*>(context->caps());
+
+ bool canCreate = true;
+ // the expectation is that the given config is texturable/renderable with optimal tiling
+ // but may not be with linear tiling
+ if (linearTiling) {
+ if (!caps->isConfigTexurableLinearly(config) ||
+ (renderTarget && !caps->isConfigRenderableLinearly(config, false))) {
+ canCreate = false;
+ }
+ }
+
+ GrSurfaceDesc surfDesc;
+ surfDesc.fFlags = renderTarget ? kRenderTarget_GrSurfaceFlag : kNone_GrSurfaceFlags;
+ if (linearTiling) {
+ surfDesc.fFlags |= kZeroCopy_GrSurfaceFlag;
+ }
+ surfDesc.fOrigin = kTopLeft_GrSurfaceOrigin;
+ surfDesc.fWidth = kWidth;
+ surfDesc.fHeight = kHeight;
+ surfDesc.fConfig = config;
+ surfDesc.fSampleCnt = 0;
+ GrTexture* tex0 = gpu->createTexture(surfDesc, false, srcBuffer, 0);
+ if (tex0) {
+ REPORTER_ASSERT(reporter, canCreate);
+ gpu->readPixels(tex0, 0, 0, kWidth, kHeight, config, dstBuffer, 0);
+ REPORTER_ASSERT(reporter, does_full_buffer_contain_correct_color(srcBuffer,
+ dstBuffer,
+ config,
+ kWidth,
+ kHeight));
+
+ tex0->writePixels(2, 10, 10, 2, config, srcBuffer);
+ memset(dstBuffer, 0, kWidth*kHeight*sizeof(GrColor));
+ gpu->readPixels(tex0, 2, 10, 10, 2, config, dstBuffer, 0);
+ REPORTER_ASSERT(reporter, does_full_buffer_contain_correct_color(srcBuffer,
+ dstBuffer,
+ config,
+ 10,
+ 2));
+
+ tex0->unref();
+ } else {
+ REPORTER_ASSERT(reporter, !canCreate);
+ }
+
+ surfDesc.fOrigin = kBottomLeft_GrSurfaceOrigin;
+ GrTexture* tex1 = gpu->createTexture(surfDesc, false, srcBuffer, 0);
+ if (tex1) {
+ REPORTER_ASSERT(reporter, canCreate);
+ gpu->readPixels(tex1, 0, 0, kWidth, kHeight, config, dstBuffer, 0);
+ REPORTER_ASSERT(reporter, does_full_buffer_contain_correct_color(srcBuffer,
+ dstBuffer,
+ config,
+ kWidth,
+ kHeight));
+
+ tex1->writePixels(5, 4, 4, 5, config, srcBuffer);
+ memset(dstBuffer, 0, kWidth*kHeight*sizeof(GrColor));
+ gpu->readPixels(tex1, 5, 4, 4, 5, config, dstBuffer, 0);
+ REPORTER_ASSERT(reporter, does_full_buffer_contain_correct_color(srcBuffer,
+ dstBuffer,
+ config,
+ 4,
+ 5));
+
+ tex1->unref();
+ } else {
+ REPORTER_ASSERT(reporter, !canCreate);
+ }
+}
+
+DEF_GPUTEST(VkUploadPixelsTests, reporter, factory) {
+ GrContextOptions opts;
+ opts.fSuppressPrints = true;
+ GrContextFactory debugFactory(opts);
+ for (int type = 0; type < GrContextFactory::kLastGLContextType; ++type) {
+ if (static_cast<GrContextFactory::GLContextType>(type) !=
+ GrContextFactory::kNative_GLContextType) {
+ continue;
+ }
+ GrContext* context = debugFactory.get(static_cast<GrContextFactory::GLContextType>(type));
+ if (context) {
+ basic_texture_test(reporter, context, kRGBA_8888_GrPixelConfig, false, false);
+ basic_texture_test(reporter, context, kRGBA_8888_GrPixelConfig, true, false);
+ basic_texture_test(reporter, context, kRGBA_8888_GrPixelConfig, false, true);
+ basic_texture_test(reporter, context, kRGBA_8888_GrPixelConfig, true, true);
+ basic_texture_test(reporter, context, kBGRA_8888_GrPixelConfig, false, false);
+ basic_texture_test(reporter, context, kBGRA_8888_GrPixelConfig, true, false);
+ basic_texture_test(reporter, context, kBGRA_8888_GrPixelConfig, false, true);
+ basic_texture_test(reporter, context, kBGRA_8888_GrPixelConfig, true, true);
+ }
+
+ }
+}
+
+#endif
--- /dev/null
+// Copyright 2015 The Shaderc Authors. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef SHADERC_H_
+#define SHADERC_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <stdbool.h>
+#include <stddef.h>
+#include <stdint.h>
+
+typedef enum {
+ // Forced shader kinds. These shader kinds force the compiler to compile the
+ // source code as the specified kind of shader.
+ shaderc_glsl_vertex_shader,
+ shaderc_glsl_fragment_shader,
+ shaderc_glsl_compute_shader,
+ shaderc_glsl_geometry_shader,
+ shaderc_glsl_tess_control_shader,
+ shaderc_glsl_tess_evaluation_shader,
+ // Deduce the shader kind from the #pragma annotation in the source code. The
+ // compiler will emit an error if the #pragma annotation is not found.
+ shaderc_glsl_infer_from_source,
+ // Default shader kinds. The compiler will fall back to compiling the source
+ // code as the specified kind of shader when the #pragma annotation is not
+ // found in the source code.
+ shaderc_glsl_default_vertex_shader,
+ shaderc_glsl_default_fragment_shader,
+ shaderc_glsl_default_compute_shader,
+ shaderc_glsl_default_geometry_shader,
+ shaderc_glsl_default_tess_control_shader,
+ shaderc_glsl_default_tess_evaluation_shader,
+} shaderc_shader_kind;
+
+typedef enum {
+ shaderc_target_env_vulkan, // create SPIR-V under Vulkan semantics
+ shaderc_target_env_opengl, // create SPIR-V under OpenGL semantics
+ shaderc_target_env_opengl_compat, // create SPIR-V under OpenGL semantics,
+ // including compatibility profile
+ // functions
+ shaderc_target_env_default = shaderc_target_env_vulkan
+} shaderc_target_env;
+
+typedef enum {
+ shaderc_profile_none, // Used if and only if GLSL version did not specify
+ // profiles.
+ shaderc_profile_core,
+ shaderc_profile_compatibility,
+ shaderc_profile_es,
+} shaderc_profile;
+
+// Used in the result module (shaderc_spv_module) to indicate the status of a
+// compilation.
+typedef enum {
+ shaderc_compilation_status_success = 0,
+ shaderc_compilation_status_invalid_stage, // error in stage deduction
+ shaderc_compilation_status_compilation_error,
+ shaderc_compilation_status_internal_error, // unexpected failure
+ shaderc_compilation_status_null_result_module,
+} shaderc_compilation_status;
+
+// Usage examples:
+//
+// Aggressively release compiler resources, but spend time in initialization
+// for each new use.
+// shaderc_compiler_t compiler = shaderc_compiler_initialize();
+// shader_spv_module_t module = shaderc_compile_into_spv(compiler,
+// "int main() {}", 13, shaderc_glsl_vertex_shader, "main");
+// // Do stuff with module compilation results.
+// shaderc_module_release(module);
+// shaderc_compiler_release(compiler);
+//
+// Keep the compiler object around for a long time, but pay for extra space
+// occupied.
+// shaderc_compiler_t compiler = shaderc_compiler_initialize();
+// // On the same, other or multiple simultaneous threads.
+// shader_spv_module_t module = shaderc_compile_into_spv(compiler,
+// "int main() {}", 13, shaderc_glsl_vertex_shader, "main");
+// // Do stuff with module compilation results.
+// shaderc_module_release(module);
+// // Once no more compilations are to happen.
+// shaderc_compiler_release(compiler);
+
+// An opaque handle to an object that manages all compiler state.
+typedef struct shaderc_compiler* shaderc_compiler_t;
+
+// Returns a shaderc_compiler_t that can be used to compile modules.
+// A return of NULL indicates that there was an error initializing the compiler.
+// Any function operating on shaderc_compiler_t must offer the basic
+// thread-safety guarantee.
+// [http://herbsutter.com/2014/01/13/gotw-95-solution-thread-safety-and-synchronization/]
+// That is: concurrent invocation of these functions on DIFFERENT objects needs
+// no synchronization; concurrent invocation of these functions on the SAME
+// object requires synchronization IF AND ONLY IF some of them take a non-const
+// argument.
+shaderc_compiler_t shaderc_compiler_initialize(void);
+
+// Releases the resources held by the shaderc_compiler_t.
+// After this call it is invalid to make any future calls to functions
+// involving this shaderc_compiler_t.
+void shaderc_compiler_release(shaderc_compiler_t);
+
+// An opaque handle to an object that manages options to a single compilation
+// result.
+typedef struct shaderc_compile_options* shaderc_compile_options_t;
+
+// Returns a default-initialized shaderc_compile_options_t that can be used
+// to modify the functionality of a compiled module.
+// A return of NULL indicates that there was an error initializing the options.
+// Any function operating on shaderc_compile_options_t must offer the
+// basic thread-safety guarantee.
+shaderc_compile_options_t shaderc_compile_options_initialize(void);
+
+// Returns a copy of the given shaderc_compile_options_t.
+// If NULL is passed as the parameter, the call is the same as calling
+// shaderc_compile_options_initialize.
+shaderc_compile_options_t shaderc_compile_options_clone(
+ const shaderc_compile_options_t options);
+
+// Releases the compilation options. It is invalid to use the given
+// shaderc_compile_options_t object in any future calls. It is safe to pass
+// NULL to this function, and doing so will have no effect.
+void shaderc_compile_options_release(shaderc_compile_options_t options);
+
+// Adds a predefined macro to the compilation options. This has the
+// same effect as passing -Dname=value to the command-line compiler.
+// If value is NULL, it has the effect same as passing -Dname to the
+// command-line compiler. If a macro definition with the same name has
+// previously been added, the value is replaced with the new value.
+// The null-terminated strings that the name and value parameters point to
+// must remain valid for the duration of the call, but can be modified or
+// deleted after this function has returned.
+void shaderc_compile_options_add_macro_definition(
+ shaderc_compile_options_t options, const char* name, const char* value);
+
+// Sets the compiler mode to generate debug information in the output.
+void shaderc_compile_options_set_generate_debug_info(
+ shaderc_compile_options_t options);
+
+// Sets the compiler mode to emit disassembly text instead of a binary. In
+// this mode, the byte array result in the shaderc_spv_module returned
+// from shaderc_compile_into_spv() will consist of SPIR-V assembly text.
+// Note the preprocessing only mode overrides this option, and this option
+// overrides the default mode generating a SPIR-V binary.
+void shaderc_compile_options_set_disassembly_mode(
+ shaderc_compile_options_t options);
+
+// Forces the GLSL language version and profile to a given pair. The version
+// number is the same as would appear in the #version annotation in the source.
+// The version and profile specified here override the #version annotation in
+// the source. Use profile 'shaderc_profile_none' for GLSL versions that do not
+// define profiles, e.g. versions below 150.
+void shaderc_compile_options_set_forced_version_profile(
+ shaderc_compile_options_t options, int version, shaderc_profile profile);
+
+// To support file inclusion, libshaderc invokes a callback into its client to
+// resolve the full path and content of the included file.
+// The client callback should follow the specified function signature below, and
+// it should be passed to libshaderc through the corresponding setter function.
+// When libshaderc is done including a file, it will call another client
+// callback to clean up the resources used for the inclusion. The client
+// should implement the clean-up method and pass it to libshaderc together with
+// the response method.
+
+// The struct that contains the information to be returned to libshaderc.
+// The client-side response method should return a pointer to this
+// struct. The underlying data is owned by the client code.
+struct shaderc_includer_response {
+ const char* path;
+ size_t path_length;
+ const char* content;
+ size_t content_length;
+};
+
+// The function signature of the client-side implemented response method. It
+// returns a pointer to shaderc_includer_response struct.
+typedef struct shaderc_includer_response* (*shaderc_includer_response_get_fn)(
+ void* user_data, const char* filename);
+
+// The function signature of the client-side implemented clean-up method.
+// Includer will call this callback function when the including process is done
+// with the fullpath and content data.
+typedef void (*shaderc_includer_response_release_fn)(
+ void* user_data, struct shaderc_includer_response* data);
+
+// Sets the callback functions for the includer. When the includer queries for
+// the full path and content of a file, the client's getter will be called to
+// respond; when the query is done, the client will be notified to clean up.
+// TODO: File inclusion needs to be context-aware.
+// e.g.
+// In file: /path/to/main_shader.vert:
+// #include "include/a"
+// In file: /path/to/include/a":
+// #include "b"
+// When compiling /path/to/main_shader.vert, the compiler should be able to
+// go to /path/to/include/b to find the file b.
+// This needs context info passed from the compiler to the client includer, and
+// may need interface changes.
+void shaderc_compile_options_set_includer_callbacks(
+ shaderc_compile_options_t options, shaderc_includer_response_get_fn getter,
+ shaderc_includer_response_release_fn releasor, void* user_data);
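+//
+// A minimal client-side sketch (hypothetical names; the response data must stay
+// valid until the release callback fires):
+//     struct shaderc_includer_response* my_get(void* user_data, const char* filename) {
+//         static struct shaderc_includer_response r = {
+//             "include/a.glsl", 14, "vec4 helper();", 14};
+//         return &r;
+//     }
+//     void my_release(void* user_data, struct shaderc_includer_response* data) {}
+//     ...
+//     shaderc_compile_options_set_includer_callbacks(options, my_get, my_release, NULL);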
+
+// Sets the compiler mode to do only preprocessing. The byte array result in the
+// module returned by the compilation is the text of the preprocessed shader.
+// This option overrides all other compilation modes, such as disassembly mode
+// and the default mode of compilation to SPIR-V binary.
+void shaderc_compile_options_set_preprocessing_only_mode(
+ shaderc_compile_options_t options);
+
+// Sets the compiler mode to suppress warnings, overriding warnings-as-errors
+// mode. When both suppress-warnings and warnings-as-errors modes are
+// turned on, warning messages will be inhibited, and will not be emitted
+// as error messages.
+void shaderc_compile_options_set_suppress_warnings(
+ shaderc_compile_options_t options);
+
+// Sets the target shader environment, affecting which warnings or errors will
+// be issued. The version distinguishes between different versions of the
+// target environment; "0" is the only supported version at this point.
+void shaderc_compile_options_set_target_env(shaderc_compile_options_t options,
+ shaderc_target_env target,
+ uint32_t version);
+
+// Sets the compiler mode to treat all warnings as errors. Note the
+// suppress-warnings mode overrides this option, i.e. if both
+// warning-as-errors and suppress-warnings modes are set, warnings will not
+// be emitted as error messages.
+void shaderc_compile_options_set_warnings_as_errors(
+ shaderc_compile_options_t options);
+
+// An opaque handle to the results of a call to shaderc_compile_into_spv().
+typedef struct shaderc_spv_module* shaderc_spv_module_t;
+
+// Takes a GLSL source string and the associated shader kind and input file
+// name, and compiles it according to the given additional_options. If the
+// shader kind is not a forced kind but shaderc_glsl_infer_from_source,
+// the compiler will try to deduce the shader kind from the source
+// string; a failure to deduce it will generate an error. Currently only
+// the #pragma annotation is supported. If the shader kind is set to one of the
+// default shader kinds, the compiler will fall back to the default shader
+// kind if it fails to deduce the shader kind from the source string.
+// The input_file_name is a null-terminated string. It is used as a tag to
+// identify the source string in cases like emitting error messages. It
+// doesn't have to be a 'file name'.
+// By default the source string will be compiled into SPIR-V binary
+// and a shaderc_spv_module will be returned to hold the results of the
+// compilation. When disassembly mode or preprocessing only mode is enabled
+// in the additional_options, the source string will be compiled into char
+// strings and held by the returned shaderc_spv_module. The entry_point_name
+// null-terminated string defines the name of the entry point to associate
+// with this GLSL source. If the additional_options parameter is not NULL,
+// then the compilation is modified by any options present. May be safely
+// called from multiple threads without explicit synchronization. If there
+// was failure in allocating the compiler object NULL will be returned.
+shaderc_spv_module_t shaderc_compile_into_spv(
+ const shaderc_compiler_t compiler, const char* source_text,
+ size_t source_text_size, shaderc_shader_kind shader_kind,
+ const char* input_file_name, const char* entry_point_name,
+ const shaderc_compile_options_t additional_options);
+
+// The following functions, operating on shaderc_spv_module_t objects, offer
+// only the basic thread-safety guarantee.
+
+// Releases the resources held by module. It is invalid to use module for any
+// further operations.
+void shaderc_module_release(shaderc_spv_module_t module);
+
+// Returns true if the result in module was a successful compilation.
+bool shaderc_module_get_success(const shaderc_spv_module_t module);
+
+// Returns the number of bytes in a SPIR-V module result string. When the module
+// is compiled with disassembly mode or preprocessing only mode, the result
+// string is a char string. Otherwise, the result string is a binary string.
+size_t shaderc_module_get_length(const shaderc_spv_module_t module);
+
+// Returns the number of warnings generated during the compilation.
+size_t shaderc_module_get_num_warnings(const shaderc_spv_module_t module);
+
+// Returns the number of errors generated during the compilation.
+size_t shaderc_module_get_num_errors(const shaderc_spv_module_t module);
+
+// Returns the compilation status, indicating whether the compilation succeeded,
+// or failed due to some reasons, like invalid shader stage or compilation
+// errors.
+shaderc_compilation_status shaderc_module_get_compilation_status(
+ const shaderc_spv_module_t);
+
+// Returns a pointer to the start of the SPIR-V bytes, either SPIR-V binary or
+// char string. When the source string is compiled into SPIR-V binary, this is
+// guaranteed to be castable to a uint32_t*. If the source string is compiled in
+// disassembly mode or preprocessing only mode, the pointer will point to the
+// result char string.
+const char* shaderc_module_get_bytes(const shaderc_spv_module_t module);
+
+// Returns a null-terminated string that contains any error messages generated
+// during the compilation.
+const char* shaderc_module_get_error_message(const shaderc_spv_module_t module);
+
+// Provides the version & revision of the SPIR-V which will be produced.
+void shaderc_get_spv_version(unsigned int* version, unsigned int* revision);
+
+// Parses the version and profile from a given null-terminated string
+// containing both version and profile, like: '450core'. Returns false if
+// the string cannot be parsed. Returns true when the parsing succeeds. The
+// parsed version and profile are returned through arguments.
+bool shaderc_parse_version_profile(const char* str, int* version,
+ shaderc_profile* profile);
+
+#ifdef __cplusplus
+}
+#endif // __cplusplus
+
+#endif // SHADERC_H_
--- /dev/null
+//
+// File: vk_platform.h
+//
+/*
+** Copyright (c) 2014-2015 The Khronos Group Inc.
+**
+** Permission is hereby granted, free of charge, to any person obtaining a
+** copy of this software and/or associated documentation files (the
+** "Materials"), to deal in the Materials without restriction, including
+** without limitation the rights to use, copy, modify, merge, publish,
+** distribute, sublicense, and/or sell copies of the Materials, and to
+** permit persons to whom the Materials are furnished to do so, subject to
+** the following conditions:
+**
+** The above copyright notice and this permission notice shall be included
+** in all copies or substantial portions of the Materials.
+**
+** THE MATERIALS ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+** EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+** MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+** IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+** CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+** TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+** MATERIALS OR THE USE OR OTHER DEALINGS IN THE MATERIALS.
+*/
+
+
+#ifndef __VK_PLATFORM_H__
+#define __VK_PLATFORM_H__
+
+#ifdef __cplusplus
+extern "C"
+{
+#endif // __cplusplus
+
+/*
+***************************************************************************************************
+* Platform-specific directives and type declarations
+***************************************************************************************************
+*/
+
+/* Platform-specific calling convention macros.
+ *
+ * Platforms should define these so that Vulkan clients call Vulkan commands
+ * with the same calling conventions that the Vulkan implementation expects.
+ *
+ * VKAPI_ATTR - Placed before the return type in function declarations.
+ * Useful for C++11 and GCC/Clang-style function attribute syntax.
+ * VKAPI_CALL - Placed after the return type in function declarations.
+ * Useful for MSVC-style calling convention syntax.
+ * VKAPI_PTR - Placed between the '(' and '*' in function pointer types.
+ *
+ * Function declaration: VKAPI_ATTR void VKAPI_CALL vkCommand(void);
+ * Function pointer type: typedef void (VKAPI_PTR *PFN_vkCommand)(void);
+ */
+#if defined(_WIN32)
+ // On Windows, Vulkan commands use the stdcall convention
+ #define VKAPI_ATTR
+ #define VKAPI_CALL __stdcall
+ #define VKAPI_PTR VKAPI_CALL
+#elif defined(__ANDROID__) && defined(__ARM_EABI__) && !defined(__ARM_ARCH_7A__)
+ // Android does not support Vulkan in native code using the "armeabi" ABI.
+ #error "Vulkan requires the 'armeabi-v7a' or 'armeabi-v7a-hard' ABI on 32-bit ARM CPUs"
+#elif defined(__ANDROID__) && defined(__ARM_ARCH_7A__)
+ // On Android/ARMv7a, Vulkan functions use the armeabi-v7a-hard calling
+ // convention, even if the application's native code is compiled with the
+ // armeabi-v7a calling convention.
+ #define VKAPI_ATTR __attribute__((pcs("aapcs-vfp")))
+ #define VKAPI_CALL
+ #define VKAPI_PTR VKAPI_ATTR
+#else
+ // On other platforms, use the default calling convention
+ #define VKAPI_ATTR
+ #define VKAPI_CALL
+ #define VKAPI_PTR
+#endif
+
+#include <stddef.h>
+
+#if !defined(VK_NO_STDINT_H)
+ #if defined(_MSC_VER) && (_MSC_VER < 1600)
+ typedef signed __int8 int8_t;
+ typedef unsigned __int8 uint8_t;
+ typedef signed __int16 int16_t;
+ typedef unsigned __int16 uint16_t;
+ typedef signed __int32 int32_t;
+ typedef unsigned __int32 uint32_t;
+ typedef signed __int64 int64_t;
+ typedef unsigned __int64 uint64_t;
+ #else
+ #include <stdint.h>
+ #endif
+#endif // !defined(VK_NO_STDINT_H)
+
+#ifdef __cplusplus
+} // extern "C"
+#endif // __cplusplus
+
+// Platform-specific headers required by platform window system extensions.
+// These are enabled prior to #including "vulkan.h". The same enable then
+// controls inclusion of the extension interfaces in vulkan.h.
+
+#ifdef VK_USE_PLATFORM_ANDROID_KHR
+#include <android/native_window.h>
+#endif
+
+#ifdef VK_USE_PLATFORM_MIR_KHR
+#include <mir_toolkit/client_types.h>
+#endif
+
+#ifdef VK_USE_PLATFORM_WAYLAND_KHR
+#include <wayland-client.h>
+#endif
+
+#ifdef VK_USE_PLATFORM_WIN32_KHR
+#include <windows.h>
+#endif
+
+#ifdef VK_USE_PLATFORM_XLIB_KHR
+#include <X11/Xlib.h>
+#endif
+
+#ifdef VK_USE_PLATFORM_XCB_KHR
+#include <xcb/xcb.h>
+#endif
+
+#endif // __VK_PLATFORM_H__
--- /dev/null
+#ifndef __vulkan_h_
+#define __vulkan_h_ 1
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/*
+** Copyright (c) 2015-2016 The Khronos Group Inc.
+**
+** Permission is hereby granted, free of charge, to any person obtaining a
+** copy of this software and/or associated documentation files (the
+** "Materials"), to deal in the Materials without restriction, including
+** without limitation the rights to use, copy, modify, merge, publish,
+** distribute, sublicense, and/or sell copies of the Materials, and to
+** permit persons to whom the Materials are furnished to do so, subject to
+** the following conditions:
+**
+** The above copyright notice and this permission notice shall be included
+** in all copies or substantial portions of the Materials.
+**
+** THE MATERIALS ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+** EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+** MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+** IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+** CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+** TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+** MATERIALS OR THE USE OR OTHER DEALINGS IN THE MATERIALS.
+*/
+
+/*
+** This header is generated from the Khronos Vulkan XML API Registry.
+**
+*/
+
+
+#define VK_VERSION_1_0 1
+#include "vk_platform.h"
+
+#define VK_MAKE_VERSION(major, minor, patch) \
+ (((major) << 22) | ((minor) << 12) | (patch))
+
+// Vulkan API version supported by this file
+#define VK_API_VERSION VK_MAKE_VERSION(1, 0, 3)
+
+#define VK_VERSION_MAJOR(version) ((uint32_t)(version) >> 22)
+#define VK_VERSION_MINOR(version) (((uint32_t)(version) >> 12) & 0x3ff)
+#define VK_VERSION_PATCH(version) ((uint32_t)(version) & 0xfff)
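+// For example, VK_MAKE_VERSION(1, 0, 3) packs to 0x400003, and
+// VK_VERSION_MAJOR/MINOR/PATCH recover 1, 0 and 3 from that value.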
+
+#define VK_NULL_HANDLE 0
+
+
+
+#define VK_DEFINE_HANDLE(object) typedef struct object##_T* object;
+
+
+#if defined(__LP64__) || defined(_WIN64) || defined(__x86_64__) || defined(_M_X64) || defined(__ia64) || defined (_M_IA64) || defined(__aarch64__) || defined(__powerpc64__)
+ #define VK_DEFINE_NON_DISPATCHABLE_HANDLE(object) typedef struct object##_T *object;
+#else
+ #define VK_DEFINE_NON_DISPATCHABLE_HANDLE(object) typedef uint64_t object;
+#endif
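+// Thus a non-dispatchable handle is a distinct pointer type (type-safe) on
+// 64-bit targets and a plain uint64_t elsewhere.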
+
+
+
+typedef uint32_t VkFlags;
+typedef uint32_t VkBool32;
+typedef uint64_t VkDeviceSize;
+typedef uint32_t VkSampleMask;
+
+VK_DEFINE_HANDLE(VkInstance)
+VK_DEFINE_HANDLE(VkPhysicalDevice)
+VK_DEFINE_HANDLE(VkDevice)
+VK_DEFINE_HANDLE(VkQueue)
+VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkSemaphore)
+VK_DEFINE_HANDLE(VkCommandBuffer)
+VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkFence)
+VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkDeviceMemory)
+VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkBuffer)
+VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkImage)
+VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkEvent)
+VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkQueryPool)
+VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkBufferView)
+VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkImageView)
+VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkShaderModule)
+VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkPipelineCache)
+VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkPipelineLayout)
+VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkRenderPass)
+VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkPipeline)
+VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkDescriptorSetLayout)
+VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkSampler)
+VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkDescriptorPool)
+VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkDescriptorSet)
+VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkFramebuffer)
+VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkCommandPool)
+
+#define VK_LOD_CLAMP_NONE 1000.0f
+#define VK_REMAINING_MIP_LEVELS (~0U)
+#define VK_REMAINING_ARRAY_LAYERS (~0U)
+#define VK_WHOLE_SIZE (~0ULL)
+#define VK_ATTACHMENT_UNUSED (~0U)
+#define VK_TRUE 1
+#define VK_FALSE 0
+#define VK_QUEUE_FAMILY_IGNORED (~0U)
+#define VK_SUBPASS_EXTERNAL (~0U)
+#define VK_MAX_PHYSICAL_DEVICE_NAME_SIZE 256
+#define VK_UUID_SIZE 16
+#define VK_MAX_MEMORY_TYPES 32
+#define VK_MAX_MEMORY_HEAPS 16
+#define VK_MAX_EXTENSION_NAME_SIZE 256
+#define VK_MAX_DESCRIPTION_SIZE 256
+
+
+typedef enum VkPipelineCacheHeaderVersion {
+ VK_PIPELINE_CACHE_HEADER_VERSION_ONE = 1,
+ VK_PIPELINE_CACHE_HEADER_VERSION_BEGIN_RANGE = VK_PIPELINE_CACHE_HEADER_VERSION_ONE,
+ VK_PIPELINE_CACHE_HEADER_VERSION_END_RANGE = VK_PIPELINE_CACHE_HEADER_VERSION_ONE,
+ VK_PIPELINE_CACHE_HEADER_VERSION_RANGE_SIZE = (VK_PIPELINE_CACHE_HEADER_VERSION_ONE - VK_PIPELINE_CACHE_HEADER_VERSION_ONE + 1),
+ VK_PIPELINE_CACHE_HEADER_VERSION_MAX_ENUM = 0x7FFFFFFF
+} VkPipelineCacheHeaderVersion;
+
+typedef enum VkResult {
+ VK_SUCCESS = 0,
+ VK_NOT_READY = 1,
+ VK_TIMEOUT = 2,
+ VK_EVENT_SET = 3,
+ VK_EVENT_RESET = 4,
+ VK_INCOMPLETE = 5,
+ VK_ERROR_OUT_OF_HOST_MEMORY = -1,
+ VK_ERROR_OUT_OF_DEVICE_MEMORY = -2,
+ VK_ERROR_INITIALIZATION_FAILED = -3,
+ VK_ERROR_DEVICE_LOST = -4,
+ VK_ERROR_MEMORY_MAP_FAILED = -5,
+ VK_ERROR_LAYER_NOT_PRESENT = -6,
+ VK_ERROR_EXTENSION_NOT_PRESENT = -7,
+ VK_ERROR_FEATURE_NOT_PRESENT = -8,
+ VK_ERROR_INCOMPATIBLE_DRIVER = -9,
+ VK_ERROR_TOO_MANY_OBJECTS = -10,
+ VK_ERROR_FORMAT_NOT_SUPPORTED = -11,
+ VK_ERROR_SURFACE_LOST_KHR = -1000000000,
+ VK_ERROR_NATIVE_WINDOW_IN_USE_KHR = -1000000001,
+ VK_SUBOPTIMAL_KHR = 1000001003,
+ VK_ERROR_OUT_OF_DATE_KHR = -1000001004,
+ VK_ERROR_INCOMPATIBLE_DISPLAY_KHR = -1000003001,
+ VK_ERROR_VALIDATION_FAILED_EXT = -1000011001,
+ VK_RESULT_BEGIN_RANGE = VK_ERROR_FORMAT_NOT_SUPPORTED,
+ VK_RESULT_END_RANGE = VK_INCOMPLETE,
+ VK_RESULT_RANGE_SIZE = (VK_INCOMPLETE - VK_ERROR_FORMAT_NOT_SUPPORTED + 1),
+ VK_RESULT_MAX_ENUM = 0x7FFFFFFF
+} VkResult;
+
+typedef enum VkStructureType {
+ VK_STRUCTURE_TYPE_APPLICATION_INFO = 0,
+ VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO = 1,
+ VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO = 2,
+ VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO = 3,
+ VK_STRUCTURE_TYPE_SUBMIT_INFO = 4,
+ VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO = 5,
+ VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE = 6,
+ VK_STRUCTURE_TYPE_BIND_SPARSE_INFO = 7,
+ VK_STRUCTURE_TYPE_FENCE_CREATE_INFO = 8,
+ VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO = 9,
+ VK_STRUCTURE_TYPE_EVENT_CREATE_INFO = 10,
+ VK_STRUCTURE_TYPE_QUERY_POOL_CREATE_INFO = 11,
+ VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO = 12,
+ VK_STRUCTURE_TYPE_BUFFER_VIEW_CREATE_INFO = 13,
+ VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO = 14,
+ VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO = 15,
+ VK_STRUCTURE_TYPE_SHADER_MODULE_CREATE_INFO = 16,
+ VK_STRUCTURE_TYPE_PIPELINE_CACHE_CREATE_INFO = 17,
+ VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO = 18,
+ VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO = 19,
+ VK_STRUCTURE_TYPE_PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO = 20,
+ VK_STRUCTURE_TYPE_PIPELINE_TESSELLATION_STATE_CREATE_INFO = 21,
+ VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_STATE_CREATE_INFO = 22,
+ VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_STATE_CREATE_INFO = 23,
+ VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO = 24,
+ VK_STRUCTURE_TYPE_PIPELINE_DEPTH_STENCIL_STATE_CREATE_INFO = 25,
+ VK_STRUCTURE_TYPE_PIPELINE_COLOR_BLEND_STATE_CREATE_INFO = 26,
+ VK_STRUCTURE_TYPE_PIPELINE_DYNAMIC_STATE_CREATE_INFO = 27,
+ VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO = 28,
+ VK_STRUCTURE_TYPE_COMPUTE_PIPELINE_CREATE_INFO = 29,
+ VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO = 30,
+ VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO = 31,
+ VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO = 32,
+ VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO = 33,
+ VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO = 34,
+ VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET = 35,
+ VK_STRUCTURE_TYPE_COPY_DESCRIPTOR_SET = 36,
+ VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO = 37,
+ VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO = 38,
+ VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO = 39,
+ VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO = 40,
+ VK_STRUCTURE_TYPE_COMMAND_BUFFER_INHERITANCE_INFO = 41,
+ VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO = 42,
+ VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO = 43,
+ VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER = 44,
+ VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER = 45,
+ VK_STRUCTURE_TYPE_MEMORY_BARRIER = 46,
+ VK_STRUCTURE_TYPE_LOADER_INSTANCE_CREATE_INFO = 47,
+ VK_STRUCTURE_TYPE_LOADER_DEVICE_CREATE_INFO = 48,
+ VK_STRUCTURE_TYPE_SWAPCHAIN_CREATE_INFO_KHR = 1000001000,
+ VK_STRUCTURE_TYPE_PRESENT_INFO_KHR = 1000001001,
+ VK_STRUCTURE_TYPE_DISPLAY_MODE_CREATE_INFO_KHR = 1000002000,
+ VK_STRUCTURE_TYPE_DISPLAY_SURFACE_CREATE_INFO_KHR = 1000002001,
+ VK_STRUCTURE_TYPE_DISPLAY_PRESENT_INFO_KHR = 1000003000,
+ VK_STRUCTURE_TYPE_XLIB_SURFACE_CREATE_INFO_KHR = 1000004000,
+ VK_STRUCTURE_TYPE_XCB_SURFACE_CREATE_INFO_KHR = 1000005000,
+ VK_STRUCTURE_TYPE_WAYLAND_SURFACE_CREATE_INFO_KHR = 1000006000,
+ VK_STRUCTURE_TYPE_MIR_SURFACE_CREATE_INFO_KHR = 1000007000,
+ VK_STRUCTURE_TYPE_ANDROID_SURFACE_CREATE_INFO_KHR = 1000008000,
+ VK_STRUCTURE_TYPE_WIN32_SURFACE_CREATE_INFO_KHR = 1000009000,
+ VK_STRUCTURE_TYPE_DEBUG_REPORT_CREATE_INFO_EXT = 1000011000,
+ VK_STRUCTURE_TYPE_BEGIN_RANGE = VK_STRUCTURE_TYPE_APPLICATION_INFO,
+ VK_STRUCTURE_TYPE_END_RANGE = VK_STRUCTURE_TYPE_LOADER_DEVICE_CREATE_INFO,
+ VK_STRUCTURE_TYPE_RANGE_SIZE = (VK_STRUCTURE_TYPE_LOADER_DEVICE_CREATE_INFO - VK_STRUCTURE_TYPE_APPLICATION_INFO + 1),
+ VK_STRUCTURE_TYPE_MAX_ENUM = 0x7FFFFFFF
+} VkStructureType;
+
+typedef enum VkSystemAllocationScope {
+ VK_SYSTEM_ALLOCATION_SCOPE_COMMAND = 0,
+ VK_SYSTEM_ALLOCATION_SCOPE_OBJECT = 1,
+ VK_SYSTEM_ALLOCATION_SCOPE_CACHE = 2,
+ VK_SYSTEM_ALLOCATION_SCOPE_DEVICE = 3,
+ VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE = 4,
+ VK_SYSTEM_ALLOCATION_SCOPE_BEGIN_RANGE = VK_SYSTEM_ALLOCATION_SCOPE_COMMAND,
+ VK_SYSTEM_ALLOCATION_SCOPE_END_RANGE = VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE,
+ VK_SYSTEM_ALLOCATION_SCOPE_RANGE_SIZE = (VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE - VK_SYSTEM_ALLOCATION_SCOPE_COMMAND + 1),
+ VK_SYSTEM_ALLOCATION_SCOPE_MAX_ENUM = 0x7FFFFFFF
+} VkSystemAllocationScope;
+
+typedef enum VkInternalAllocationType {
+ VK_INTERNAL_ALLOCATION_TYPE_EXECUTABLE = 0,
+ VK_INTERNAL_ALLOCATION_TYPE_BEGIN_RANGE = VK_INTERNAL_ALLOCATION_TYPE_EXECUTABLE,
+ VK_INTERNAL_ALLOCATION_TYPE_END_RANGE = VK_INTERNAL_ALLOCATION_TYPE_EXECUTABLE,
+ VK_INTERNAL_ALLOCATION_TYPE_RANGE_SIZE = (VK_INTERNAL_ALLOCATION_TYPE_EXECUTABLE - VK_INTERNAL_ALLOCATION_TYPE_EXECUTABLE + 1),
+ VK_INTERNAL_ALLOCATION_TYPE_MAX_ENUM = 0x7FFFFFFF
+} VkInternalAllocationType;
+
+typedef enum VkFormat {
+ VK_FORMAT_UNDEFINED = 0,
+ VK_FORMAT_R4G4_UNORM_PACK8 = 1,
+ VK_FORMAT_R4G4B4A4_UNORM_PACK16 = 2,
+ VK_FORMAT_B4G4R4A4_UNORM_PACK16 = 3,
+ VK_FORMAT_R5G6B5_UNORM_PACK16 = 4,
+ VK_FORMAT_B5G6R5_UNORM_PACK16 = 5,
+ VK_FORMAT_R5G5B5A1_UNORM_PACK16 = 6,
+ VK_FORMAT_B5G5R5A1_UNORM_PACK16 = 7,
+ VK_FORMAT_A1R5G5B5_UNORM_PACK16 = 8,
+ VK_FORMAT_R8_UNORM = 9,
+ VK_FORMAT_R8_SNORM = 10,
+ VK_FORMAT_R8_USCALED = 11,
+ VK_FORMAT_R8_SSCALED = 12,
+ VK_FORMAT_R8_UINT = 13,
+ VK_FORMAT_R8_SINT = 14,
+ VK_FORMAT_R8_SRGB = 15,
+ VK_FORMAT_R8G8_UNORM = 16,
+ VK_FORMAT_R8G8_SNORM = 17,
+ VK_FORMAT_R8G8_USCALED = 18,
+ VK_FORMAT_R8G8_SSCALED = 19,
+ VK_FORMAT_R8G8_UINT = 20,
+ VK_FORMAT_R8G8_SINT = 21,
+ VK_FORMAT_R8G8_SRGB = 22,
+ VK_FORMAT_R8G8B8_UNORM = 23,
+ VK_FORMAT_R8G8B8_SNORM = 24,
+ VK_FORMAT_R8G8B8_USCALED = 25,
+ VK_FORMAT_R8G8B8_SSCALED = 26,
+ VK_FORMAT_R8G8B8_UINT = 27,
+ VK_FORMAT_R8G8B8_SINT = 28,
+ VK_FORMAT_R8G8B8_SRGB = 29,
+ VK_FORMAT_B8G8R8_UNORM = 30,
+ VK_FORMAT_B8G8R8_SNORM = 31,
+ VK_FORMAT_B8G8R8_USCALED = 32,
+ VK_FORMAT_B8G8R8_SSCALED = 33,
+ VK_FORMAT_B8G8R8_UINT = 34,
+ VK_FORMAT_B8G8R8_SINT = 35,
+ VK_FORMAT_B8G8R8_SRGB = 36,
+ VK_FORMAT_R8G8B8A8_UNORM = 37,
+ VK_FORMAT_R8G8B8A8_SNORM = 38,
+ VK_FORMAT_R8G8B8A8_USCALED = 39,
+ VK_FORMAT_R8G8B8A8_SSCALED = 40,
+ VK_FORMAT_R8G8B8A8_UINT = 41,
+ VK_FORMAT_R8G8B8A8_SINT = 42,
+ VK_FORMAT_R8G8B8A8_SRGB = 43,
+ VK_FORMAT_B8G8R8A8_UNORM = 44,
+ VK_FORMAT_B8G8R8A8_SNORM = 45,
+ VK_FORMAT_B8G8R8A8_USCALED = 46,
+ VK_FORMAT_B8G8R8A8_SSCALED = 47,
+ VK_FORMAT_B8G8R8A8_UINT = 48,
+ VK_FORMAT_B8G8R8A8_SINT = 49,
+ VK_FORMAT_B8G8R8A8_SRGB = 50,
+ VK_FORMAT_A8B8G8R8_UNORM_PACK32 = 51,
+ VK_FORMAT_A8B8G8R8_SNORM_PACK32 = 52,
+ VK_FORMAT_A8B8G8R8_USCALED_PACK32 = 53,
+ VK_FORMAT_A8B8G8R8_SSCALED_PACK32 = 54,
+ VK_FORMAT_A8B8G8R8_UINT_PACK32 = 55,
+ VK_FORMAT_A8B8G8R8_SINT_PACK32 = 56,
+ VK_FORMAT_A8B8G8R8_SRGB_PACK32 = 57,
+ VK_FORMAT_A2R10G10B10_UNORM_PACK32 = 58,
+ VK_FORMAT_A2R10G10B10_SNORM_PACK32 = 59,
+ VK_FORMAT_A2R10G10B10_USCALED_PACK32 = 60,
+ VK_FORMAT_A2R10G10B10_SSCALED_PACK32 = 61,
+ VK_FORMAT_A2R10G10B10_UINT_PACK32 = 62,
+ VK_FORMAT_A2R10G10B10_SINT_PACK32 = 63,
+ VK_FORMAT_A2B10G10R10_UNORM_PACK32 = 64,
+ VK_FORMAT_A2B10G10R10_SNORM_PACK32 = 65,
+ VK_FORMAT_A2B10G10R10_USCALED_PACK32 = 66,
+ VK_FORMAT_A2B10G10R10_SSCALED_PACK32 = 67,
+ VK_FORMAT_A2B10G10R10_UINT_PACK32 = 68,
+ VK_FORMAT_A2B10G10R10_SINT_PACK32 = 69,
+ VK_FORMAT_R16_UNORM = 70,
+ VK_FORMAT_R16_SNORM = 71,
+ VK_FORMAT_R16_USCALED = 72,
+ VK_FORMAT_R16_SSCALED = 73,
+ VK_FORMAT_R16_UINT = 74,
+ VK_FORMAT_R16_SINT = 75,
+ VK_FORMAT_R16_SFLOAT = 76,
+ VK_FORMAT_R16G16_UNORM = 77,
+ VK_FORMAT_R16G16_SNORM = 78,
+ VK_FORMAT_R16G16_USCALED = 79,
+ VK_FORMAT_R16G16_SSCALED = 80,
+ VK_FORMAT_R16G16_UINT = 81,
+ VK_FORMAT_R16G16_SINT = 82,
+ VK_FORMAT_R16G16_SFLOAT = 83,
+ VK_FORMAT_R16G16B16_UNORM = 84,
+ VK_FORMAT_R16G16B16_SNORM = 85,
+ VK_FORMAT_R16G16B16_USCALED = 86,
+ VK_FORMAT_R16G16B16_SSCALED = 87,
+ VK_FORMAT_R16G16B16_UINT = 88,
+ VK_FORMAT_R16G16B16_SINT = 89,
+ VK_FORMAT_R16G16B16_SFLOAT = 90,
+ VK_FORMAT_R16G16B16A16_UNORM = 91,
+ VK_FORMAT_R16G16B16A16_SNORM = 92,
+ VK_FORMAT_R16G16B16A16_USCALED = 93,
+ VK_FORMAT_R16G16B16A16_SSCALED = 94,
+ VK_FORMAT_R16G16B16A16_UINT = 95,
+ VK_FORMAT_R16G16B16A16_SINT = 96,
+ VK_FORMAT_R16G16B16A16_SFLOAT = 97,
+ VK_FORMAT_R32_UINT = 98,
+ VK_FORMAT_R32_SINT = 99,
+ VK_FORMAT_R32_SFLOAT = 100,
+ VK_FORMAT_R32G32_UINT = 101,
+ VK_FORMAT_R32G32_SINT = 102,
+ VK_FORMAT_R32G32_SFLOAT = 103,
+ VK_FORMAT_R32G32B32_UINT = 104,
+ VK_FORMAT_R32G32B32_SINT = 105,
+ VK_FORMAT_R32G32B32_SFLOAT = 106,
+ VK_FORMAT_R32G32B32A32_UINT = 107,
+ VK_FORMAT_R32G32B32A32_SINT = 108,
+ VK_FORMAT_R32G32B32A32_SFLOAT = 109,
+ VK_FORMAT_R64_UINT = 110,
+ VK_FORMAT_R64_SINT = 111,
+ VK_FORMAT_R64_SFLOAT = 112,
+ VK_FORMAT_R64G64_UINT = 113,
+ VK_FORMAT_R64G64_SINT = 114,
+ VK_FORMAT_R64G64_SFLOAT = 115,
+ VK_FORMAT_R64G64B64_UINT = 116,
+ VK_FORMAT_R64G64B64_SINT = 117,
+ VK_FORMAT_R64G64B64_SFLOAT = 118,
+ VK_FORMAT_R64G64B64A64_UINT = 119,
+ VK_FORMAT_R64G64B64A64_SINT = 120,
+ VK_FORMAT_R64G64B64A64_SFLOAT = 121,
+ VK_FORMAT_B10G11R11_UFLOAT_PACK32 = 122,
+ VK_FORMAT_E5B9G9R9_UFLOAT_PACK32 = 123,
+ VK_FORMAT_D16_UNORM = 124,
+ VK_FORMAT_X8_D24_UNORM_PACK32 = 125,
+ VK_FORMAT_D32_SFLOAT = 126,
+ VK_FORMAT_S8_UINT = 127,
+ VK_FORMAT_D16_UNORM_S8_UINT = 128,
+ VK_FORMAT_D24_UNORM_S8_UINT = 129,
+ VK_FORMAT_D32_SFLOAT_S8_UINT = 130,
+ VK_FORMAT_BC1_RGB_UNORM_BLOCK = 131,
+ VK_FORMAT_BC1_RGB_SRGB_BLOCK = 132,
+ VK_FORMAT_BC1_RGBA_UNORM_BLOCK = 133,
+ VK_FORMAT_BC1_RGBA_SRGB_BLOCK = 134,
+ VK_FORMAT_BC2_UNORM_BLOCK = 135,
+ VK_FORMAT_BC2_SRGB_BLOCK = 136,
+ VK_FORMAT_BC3_UNORM_BLOCK = 137,
+ VK_FORMAT_BC3_SRGB_BLOCK = 138,
+ VK_FORMAT_BC4_UNORM_BLOCK = 139,
+ VK_FORMAT_BC4_SNORM_BLOCK = 140,
+ VK_FORMAT_BC5_UNORM_BLOCK = 141,
+ VK_FORMAT_BC5_SNORM_BLOCK = 142,
+ VK_FORMAT_BC6H_UFLOAT_BLOCK = 143,
+ VK_FORMAT_BC6H_SFLOAT_BLOCK = 144,
+ VK_FORMAT_BC7_UNORM_BLOCK = 145,
+ VK_FORMAT_BC7_SRGB_BLOCK = 146,
+ VK_FORMAT_ETC2_R8G8B8_UNORM_BLOCK = 147,
+ VK_FORMAT_ETC2_R8G8B8_SRGB_BLOCK = 148,
+ VK_FORMAT_ETC2_R8G8B8A1_UNORM_BLOCK = 149,
+ VK_FORMAT_ETC2_R8G8B8A1_SRGB_BLOCK = 150,
+ VK_FORMAT_ETC2_R8G8B8A8_UNORM_BLOCK = 151,
+ VK_FORMAT_ETC2_R8G8B8A8_SRGB_BLOCK = 152,
+ VK_FORMAT_EAC_R11_UNORM_BLOCK = 153,
+ VK_FORMAT_EAC_R11_SNORM_BLOCK = 154,
+ VK_FORMAT_EAC_R11G11_UNORM_BLOCK = 155,
+ VK_FORMAT_EAC_R11G11_SNORM_BLOCK = 156,
+ VK_FORMAT_ASTC_4x4_UNORM_BLOCK = 157,
+ VK_FORMAT_ASTC_4x4_SRGB_BLOCK = 158,
+ VK_FORMAT_ASTC_5x4_UNORM_BLOCK = 159,
+ VK_FORMAT_ASTC_5x4_SRGB_BLOCK = 160,
+ VK_FORMAT_ASTC_5x5_UNORM_BLOCK = 161,
+ VK_FORMAT_ASTC_5x5_SRGB_BLOCK = 162,
+ VK_FORMAT_ASTC_6x5_UNORM_BLOCK = 163,
+ VK_FORMAT_ASTC_6x5_SRGB_BLOCK = 164,
+ VK_FORMAT_ASTC_6x6_UNORM_BLOCK = 165,
+ VK_FORMAT_ASTC_6x6_SRGB_BLOCK = 166,
+ VK_FORMAT_ASTC_8x5_UNORM_BLOCK = 167,
+ VK_FORMAT_ASTC_8x5_SRGB_BLOCK = 168,
+ VK_FORMAT_ASTC_8x6_UNORM_BLOCK = 169,
+ VK_FORMAT_ASTC_8x6_SRGB_BLOCK = 170,
+ VK_FORMAT_ASTC_8x8_UNORM_BLOCK = 171,
+ VK_FORMAT_ASTC_8x8_SRGB_BLOCK = 172,
+ VK_FORMAT_ASTC_10x5_UNORM_BLOCK = 173,
+ VK_FORMAT_ASTC_10x5_SRGB_BLOCK = 174,
+ VK_FORMAT_ASTC_10x6_UNORM_BLOCK = 175,
+ VK_FORMAT_ASTC_10x6_SRGB_BLOCK = 176,
+ VK_FORMAT_ASTC_10x8_UNORM_BLOCK = 177,
+ VK_FORMAT_ASTC_10x8_SRGB_BLOCK = 178,
+ VK_FORMAT_ASTC_10x10_UNORM_BLOCK = 179,
+ VK_FORMAT_ASTC_10x10_SRGB_BLOCK = 180,
+ VK_FORMAT_ASTC_12x10_UNORM_BLOCK = 181,
+ VK_FORMAT_ASTC_12x10_SRGB_BLOCK = 182,
+ VK_FORMAT_ASTC_12x12_UNORM_BLOCK = 183,
+ VK_FORMAT_ASTC_12x12_SRGB_BLOCK = 184,
+ VK_FORMAT_BEGIN_RANGE = VK_FORMAT_UNDEFINED,
+ VK_FORMAT_END_RANGE = VK_FORMAT_ASTC_12x12_SRGB_BLOCK,
+ VK_FORMAT_RANGE_SIZE = (VK_FORMAT_ASTC_12x12_SRGB_BLOCK - VK_FORMAT_UNDEFINED + 1),
+ VK_FORMAT_MAX_ENUM = 0x7FFFFFFF
+} VkFormat;
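+
+/*
+ * The *_BEGIN_RANGE, *_END_RANGE and *_RANGE_SIZE enumerants above are
+ * generated bookkeeping values, not formats in their own right.  A minimal
+ * sketch of using them to scan every core format for linear-tiling sampling
+ * support; vkGetPhysicalDeviceFormatProperties and VkFormatProperties are
+ * declared further down in this header, and the helper name is illustrative:
+ *
+ *   static void list_sampled_formats(VkPhysicalDevice gpu) {
+ *       int f;
+ *       for (f = VK_FORMAT_BEGIN_RANGE; f <= VK_FORMAT_END_RANGE; ++f) {
+ *           VkFormatProperties props;
+ *           vkGetPhysicalDeviceFormatProperties(gpu, (VkFormat)f, &props);
+ *           if (props.linearTilingFeatures & VK_FORMAT_FEATURE_SAMPLED_IMAGE_BIT) {
+ *               // (VkFormat)f supports sampled images with linear tiling
+ *           }
+ *       }
+ *   }
+ */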
+
+typedef enum VkImageType {
+ VK_IMAGE_TYPE_1D = 0,
+ VK_IMAGE_TYPE_2D = 1,
+ VK_IMAGE_TYPE_3D = 2,
+ VK_IMAGE_TYPE_BEGIN_RANGE = VK_IMAGE_TYPE_1D,
+ VK_IMAGE_TYPE_END_RANGE = VK_IMAGE_TYPE_3D,
+ VK_IMAGE_TYPE_RANGE_SIZE = (VK_IMAGE_TYPE_3D - VK_IMAGE_TYPE_1D + 1),
+ VK_IMAGE_TYPE_MAX_ENUM = 0x7FFFFFFF
+} VkImageType;
+
+typedef enum VkImageTiling {
+ VK_IMAGE_TILING_OPTIMAL = 0,
+ VK_IMAGE_TILING_LINEAR = 1,
+ VK_IMAGE_TILING_BEGIN_RANGE = VK_IMAGE_TILING_OPTIMAL,
+ VK_IMAGE_TILING_END_RANGE = VK_IMAGE_TILING_LINEAR,
+ VK_IMAGE_TILING_RANGE_SIZE = (VK_IMAGE_TILING_LINEAR - VK_IMAGE_TILING_OPTIMAL + 1),
+ VK_IMAGE_TILING_MAX_ENUM = 0x7FFFFFFF
+} VkImageTiling;
+
+typedef enum VkPhysicalDeviceType {
+ VK_PHYSICAL_DEVICE_TYPE_OTHER = 0,
+ VK_PHYSICAL_DEVICE_TYPE_INTEGRATED_GPU = 1,
+ VK_PHYSICAL_DEVICE_TYPE_DISCRETE_GPU = 2,
+ VK_PHYSICAL_DEVICE_TYPE_VIRTUAL_GPU = 3,
+ VK_PHYSICAL_DEVICE_TYPE_CPU = 4,
+ VK_PHYSICAL_DEVICE_TYPE_BEGIN_RANGE = VK_PHYSICAL_DEVICE_TYPE_OTHER,
+ VK_PHYSICAL_DEVICE_TYPE_END_RANGE = VK_PHYSICAL_DEVICE_TYPE_CPU,
+ VK_PHYSICAL_DEVICE_TYPE_RANGE_SIZE = (VK_PHYSICAL_DEVICE_TYPE_CPU - VK_PHYSICAL_DEVICE_TYPE_OTHER + 1),
+ VK_PHYSICAL_DEVICE_TYPE_MAX_ENUM = 0x7FFFFFFF
+} VkPhysicalDeviceType;
+
+typedef enum VkQueryType {
+ VK_QUERY_TYPE_OCCLUSION = 0,
+ VK_QUERY_TYPE_PIPELINE_STATISTICS = 1,
+ VK_QUERY_TYPE_TIMESTAMP = 2,
+ VK_QUERY_TYPE_BEGIN_RANGE = VK_QUERY_TYPE_OCCLUSION,
+ VK_QUERY_TYPE_END_RANGE = VK_QUERY_TYPE_TIMESTAMP,
+ VK_QUERY_TYPE_RANGE_SIZE = (VK_QUERY_TYPE_TIMESTAMP - VK_QUERY_TYPE_OCCLUSION + 1),
+ VK_QUERY_TYPE_MAX_ENUM = 0x7FFFFFFF
+} VkQueryType;
+
+typedef enum VkSharingMode {
+ VK_SHARING_MODE_EXCLUSIVE = 0,
+ VK_SHARING_MODE_CONCURRENT = 1,
+ VK_SHARING_MODE_BEGIN_RANGE = VK_SHARING_MODE_EXCLUSIVE,
+ VK_SHARING_MODE_END_RANGE = VK_SHARING_MODE_CONCURRENT,
+ VK_SHARING_MODE_RANGE_SIZE = (VK_SHARING_MODE_CONCURRENT - VK_SHARING_MODE_EXCLUSIVE + 1),
+ VK_SHARING_MODE_MAX_ENUM = 0x7FFFFFFF
+} VkSharingMode;
+
+typedef enum VkImageLayout {
+ VK_IMAGE_LAYOUT_UNDEFINED = 0,
+ VK_IMAGE_LAYOUT_GENERAL = 1,
+ VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL = 2,
+ VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL = 3,
+ VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL = 4,
+ VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL = 5,
+ VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL = 6,
+ VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL = 7,
+ VK_IMAGE_LAYOUT_PREINITIALIZED = 8,
+ VK_IMAGE_LAYOUT_PRESENT_SRC_KHR = 1000001002,
+ VK_IMAGE_LAYOUT_BEGIN_RANGE = VK_IMAGE_LAYOUT_UNDEFINED,
+ VK_IMAGE_LAYOUT_END_RANGE = VK_IMAGE_LAYOUT_PREINITIALIZED,
+ VK_IMAGE_LAYOUT_RANGE_SIZE = (VK_IMAGE_LAYOUT_PREINITIALIZED - VK_IMAGE_LAYOUT_UNDEFINED + 1),
+ VK_IMAGE_LAYOUT_MAX_ENUM = 0x7FFFFFFF
+} VkImageLayout;
+
+typedef enum VkImageViewType {
+ VK_IMAGE_VIEW_TYPE_1D = 0,
+ VK_IMAGE_VIEW_TYPE_2D = 1,
+ VK_IMAGE_VIEW_TYPE_3D = 2,
+ VK_IMAGE_VIEW_TYPE_CUBE = 3,
+ VK_IMAGE_VIEW_TYPE_1D_ARRAY = 4,
+ VK_IMAGE_VIEW_TYPE_2D_ARRAY = 5,
+ VK_IMAGE_VIEW_TYPE_CUBE_ARRAY = 6,
+ VK_IMAGE_VIEW_TYPE_BEGIN_RANGE = VK_IMAGE_VIEW_TYPE_1D,
+ VK_IMAGE_VIEW_TYPE_END_RANGE = VK_IMAGE_VIEW_TYPE_CUBE_ARRAY,
+ VK_IMAGE_VIEW_TYPE_RANGE_SIZE = (VK_IMAGE_VIEW_TYPE_CUBE_ARRAY - VK_IMAGE_VIEW_TYPE_1D + 1),
+ VK_IMAGE_VIEW_TYPE_MAX_ENUM = 0x7FFFFFFF
+} VkImageViewType;
+
+typedef enum VkComponentSwizzle {
+ VK_COMPONENT_SWIZZLE_IDENTITY = 0,
+ VK_COMPONENT_SWIZZLE_ZERO = 1,
+ VK_COMPONENT_SWIZZLE_ONE = 2,
+ VK_COMPONENT_SWIZZLE_R = 3,
+ VK_COMPONENT_SWIZZLE_G = 4,
+ VK_COMPONENT_SWIZZLE_B = 5,
+ VK_COMPONENT_SWIZZLE_A = 6,
+ VK_COMPONENT_SWIZZLE_BEGIN_RANGE = VK_COMPONENT_SWIZZLE_IDENTITY,
+ VK_COMPONENT_SWIZZLE_END_RANGE = VK_COMPONENT_SWIZZLE_A,
+ VK_COMPONENT_SWIZZLE_RANGE_SIZE = (VK_COMPONENT_SWIZZLE_A - VK_COMPONENT_SWIZZLE_IDENTITY + 1),
+ VK_COMPONENT_SWIZZLE_MAX_ENUM = 0x7FFFFFFF
+} VkComponentSwizzle;
+
+typedef enum VkVertexInputRate {
+ VK_VERTEX_INPUT_RATE_VERTEX = 0,
+ VK_VERTEX_INPUT_RATE_INSTANCE = 1,
+ VK_VERTEX_INPUT_RATE_BEGIN_RANGE = VK_VERTEX_INPUT_RATE_VERTEX,
+ VK_VERTEX_INPUT_RATE_END_RANGE = VK_VERTEX_INPUT_RATE_INSTANCE,
+ VK_VERTEX_INPUT_RATE_RANGE_SIZE = (VK_VERTEX_INPUT_RATE_INSTANCE - VK_VERTEX_INPUT_RATE_VERTEX + 1),
+ VK_VERTEX_INPUT_RATE_MAX_ENUM = 0x7FFFFFFF
+} VkVertexInputRate;
+
+typedef enum VkPrimitiveTopology {
+ VK_PRIMITIVE_TOPOLOGY_POINT_LIST = 0,
+ VK_PRIMITIVE_TOPOLOGY_LINE_LIST = 1,
+ VK_PRIMITIVE_TOPOLOGY_LINE_STRIP = 2,
+ VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST = 3,
+ VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP = 4,
+ VK_PRIMITIVE_TOPOLOGY_TRIANGLE_FAN = 5,
+ VK_PRIMITIVE_TOPOLOGY_LINE_LIST_WITH_ADJACENCY = 6,
+ VK_PRIMITIVE_TOPOLOGY_LINE_STRIP_WITH_ADJACENCY = 7,
+ VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST_WITH_ADJACENCY = 8,
+ VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP_WITH_ADJACENCY = 9,
+ VK_PRIMITIVE_TOPOLOGY_PATCH_LIST = 10,
+ VK_PRIMITIVE_TOPOLOGY_BEGIN_RANGE = VK_PRIMITIVE_TOPOLOGY_POINT_LIST,
+ VK_PRIMITIVE_TOPOLOGY_END_RANGE = VK_PRIMITIVE_TOPOLOGY_PATCH_LIST,
+ VK_PRIMITIVE_TOPOLOGY_RANGE_SIZE = (VK_PRIMITIVE_TOPOLOGY_PATCH_LIST - VK_PRIMITIVE_TOPOLOGY_POINT_LIST + 1),
+ VK_PRIMITIVE_TOPOLOGY_MAX_ENUM = 0x7FFFFFFF
+} VkPrimitiveTopology;
+
+typedef enum VkPolygonMode {
+ VK_POLYGON_MODE_FILL = 0,
+ VK_POLYGON_MODE_LINE = 1,
+ VK_POLYGON_MODE_POINT = 2,
+ VK_POLYGON_MODE_BEGIN_RANGE = VK_POLYGON_MODE_FILL,
+ VK_POLYGON_MODE_END_RANGE = VK_POLYGON_MODE_POINT,
+ VK_POLYGON_MODE_RANGE_SIZE = (VK_POLYGON_MODE_POINT - VK_POLYGON_MODE_FILL + 1),
+ VK_POLYGON_MODE_MAX_ENUM = 0x7FFFFFFF
+} VkPolygonMode;
+
+typedef enum VkFrontFace {
+ VK_FRONT_FACE_COUNTER_CLOCKWISE = 0,
+ VK_FRONT_FACE_CLOCKWISE = 1,
+ VK_FRONT_FACE_BEGIN_RANGE = VK_FRONT_FACE_COUNTER_CLOCKWISE,
+ VK_FRONT_FACE_END_RANGE = VK_FRONT_FACE_CLOCKWISE,
+ VK_FRONT_FACE_RANGE_SIZE = (VK_FRONT_FACE_CLOCKWISE - VK_FRONT_FACE_COUNTER_CLOCKWISE + 1),
+ VK_FRONT_FACE_MAX_ENUM = 0x7FFFFFFF
+} VkFrontFace;
+
+typedef enum VkCompareOp {
+ VK_COMPARE_OP_NEVER = 0,
+ VK_COMPARE_OP_LESS = 1,
+ VK_COMPARE_OP_EQUAL = 2,
+ VK_COMPARE_OP_LESS_OR_EQUAL = 3,
+ VK_COMPARE_OP_GREATER = 4,
+ VK_COMPARE_OP_NOT_EQUAL = 5,
+ VK_COMPARE_OP_GREATER_OR_EQUAL = 6,
+ VK_COMPARE_OP_ALWAYS = 7,
+ VK_COMPARE_OP_BEGIN_RANGE = VK_COMPARE_OP_NEVER,
+ VK_COMPARE_OP_END_RANGE = VK_COMPARE_OP_ALWAYS,
+ VK_COMPARE_OP_RANGE_SIZE = (VK_COMPARE_OP_ALWAYS - VK_COMPARE_OP_NEVER + 1),
+ VK_COMPARE_OP_MAX_ENUM = 0x7FFFFFFF
+} VkCompareOp;
+
+typedef enum VkStencilOp {
+ VK_STENCIL_OP_KEEP = 0,
+ VK_STENCIL_OP_ZERO = 1,
+ VK_STENCIL_OP_REPLACE = 2,
+ VK_STENCIL_OP_INCREMENT_AND_CLAMP = 3,
+ VK_STENCIL_OP_DECREMENT_AND_CLAMP = 4,
+ VK_STENCIL_OP_INVERT = 5,
+ VK_STENCIL_OP_INCREMENT_AND_WRAP = 6,
+ VK_STENCIL_OP_DECREMENT_AND_WRAP = 7,
+ VK_STENCIL_OP_BEGIN_RANGE = VK_STENCIL_OP_KEEP,
+ VK_STENCIL_OP_END_RANGE = VK_STENCIL_OP_DECREMENT_AND_WRAP,
+ VK_STENCIL_OP_RANGE_SIZE = (VK_STENCIL_OP_DECREMENT_AND_WRAP - VK_STENCIL_OP_KEEP + 1),
+ VK_STENCIL_OP_MAX_ENUM = 0x7FFFFFFF
+} VkStencilOp;
+
+typedef enum VkLogicOp {
+ VK_LOGIC_OP_CLEAR = 0,
+ VK_LOGIC_OP_AND = 1,
+ VK_LOGIC_OP_AND_REVERSE = 2,
+ VK_LOGIC_OP_COPY = 3,
+ VK_LOGIC_OP_AND_INVERTED = 4,
+ VK_LOGIC_OP_NO_OP = 5,
+ VK_LOGIC_OP_XOR = 6,
+ VK_LOGIC_OP_OR = 7,
+ VK_LOGIC_OP_NOR = 8,
+ VK_LOGIC_OP_EQUIVALENT = 9,
+ VK_LOGIC_OP_INVERT = 10,
+ VK_LOGIC_OP_OR_REVERSE = 11,
+ VK_LOGIC_OP_COPY_INVERTED = 12,
+ VK_LOGIC_OP_OR_INVERTED = 13,
+ VK_LOGIC_OP_NAND = 14,
+ VK_LOGIC_OP_SET = 15,
+ VK_LOGIC_OP_BEGIN_RANGE = VK_LOGIC_OP_CLEAR,
+ VK_LOGIC_OP_END_RANGE = VK_LOGIC_OP_SET,
+ VK_LOGIC_OP_RANGE_SIZE = (VK_LOGIC_OP_SET - VK_LOGIC_OP_CLEAR + 1),
+ VK_LOGIC_OP_MAX_ENUM = 0x7FFFFFFF
+} VkLogicOp;
+
+typedef enum VkBlendFactor {
+ VK_BLEND_FACTOR_ZERO = 0,
+ VK_BLEND_FACTOR_ONE = 1,
+ VK_BLEND_FACTOR_SRC_COLOR = 2,
+ VK_BLEND_FACTOR_ONE_MINUS_SRC_COLOR = 3,
+ VK_BLEND_FACTOR_DST_COLOR = 4,
+ VK_BLEND_FACTOR_ONE_MINUS_DST_COLOR = 5,
+ VK_BLEND_FACTOR_SRC_ALPHA = 6,
+ VK_BLEND_FACTOR_ONE_MINUS_SRC_ALPHA = 7,
+ VK_BLEND_FACTOR_DST_ALPHA = 8,
+ VK_BLEND_FACTOR_ONE_MINUS_DST_ALPHA = 9,
+ VK_BLEND_FACTOR_CONSTANT_COLOR = 10,
+ VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_COLOR = 11,
+ VK_BLEND_FACTOR_CONSTANT_ALPHA = 12,
+ VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_ALPHA = 13,
+ VK_BLEND_FACTOR_SRC_ALPHA_SATURATE = 14,
+ VK_BLEND_FACTOR_SRC1_COLOR = 15,
+ VK_BLEND_FACTOR_ONE_MINUS_SRC1_COLOR = 16,
+ VK_BLEND_FACTOR_SRC1_ALPHA = 17,
+ VK_BLEND_FACTOR_ONE_MINUS_SRC1_ALPHA = 18,
+ VK_BLEND_FACTOR_BEGIN_RANGE = VK_BLEND_FACTOR_ZERO,
+ VK_BLEND_FACTOR_END_RANGE = VK_BLEND_FACTOR_ONE_MINUS_SRC1_ALPHA,
+ VK_BLEND_FACTOR_RANGE_SIZE = (VK_BLEND_FACTOR_ONE_MINUS_SRC1_ALPHA - VK_BLEND_FACTOR_ZERO + 1),
+ VK_BLEND_FACTOR_MAX_ENUM = 0x7FFFFFFF
+} VkBlendFactor;
+
+typedef enum VkBlendOp {
+ VK_BLEND_OP_ADD = 0,
+ VK_BLEND_OP_SUBTRACT = 1,
+ VK_BLEND_OP_REVERSE_SUBTRACT = 2,
+ VK_BLEND_OP_MIN = 3,
+ VK_BLEND_OP_MAX = 4,
+ VK_BLEND_OP_BEGIN_RANGE = VK_BLEND_OP_ADD,
+ VK_BLEND_OP_END_RANGE = VK_BLEND_OP_MAX,
+ VK_BLEND_OP_RANGE_SIZE = (VK_BLEND_OP_MAX - VK_BLEND_OP_ADD + 1),
+ VK_BLEND_OP_MAX_ENUM = 0x7FFFFFFF
+} VkBlendOp;
+
+typedef enum VkDynamicState {
+ VK_DYNAMIC_STATE_VIEWPORT = 0,
+ VK_DYNAMIC_STATE_SCISSOR = 1,
+ VK_DYNAMIC_STATE_LINE_WIDTH = 2,
+ VK_DYNAMIC_STATE_DEPTH_BIAS = 3,
+ VK_DYNAMIC_STATE_BLEND_CONSTANTS = 4,
+ VK_DYNAMIC_STATE_DEPTH_BOUNDS = 5,
+ VK_DYNAMIC_STATE_STENCIL_COMPARE_MASK = 6,
+ VK_DYNAMIC_STATE_STENCIL_WRITE_MASK = 7,
+ VK_DYNAMIC_STATE_STENCIL_REFERENCE = 8,
+ VK_DYNAMIC_STATE_BEGIN_RANGE = VK_DYNAMIC_STATE_VIEWPORT,
+ VK_DYNAMIC_STATE_END_RANGE = VK_DYNAMIC_STATE_STENCIL_REFERENCE,
+ VK_DYNAMIC_STATE_RANGE_SIZE = (VK_DYNAMIC_STATE_STENCIL_REFERENCE - VK_DYNAMIC_STATE_VIEWPORT + 1),
+ VK_DYNAMIC_STATE_MAX_ENUM = 0x7FFFFFFF
+} VkDynamicState;
+
+typedef enum VkFilter {
+ VK_FILTER_NEAREST = 0,
+ VK_FILTER_LINEAR = 1,
+ VK_FILTER_BEGIN_RANGE = VK_FILTER_NEAREST,
+ VK_FILTER_END_RANGE = VK_FILTER_LINEAR,
+ VK_FILTER_RANGE_SIZE = (VK_FILTER_LINEAR - VK_FILTER_NEAREST + 1),
+ VK_FILTER_MAX_ENUM = 0x7FFFFFFF
+} VkFilter;
+
+typedef enum VkSamplerMipmapMode {
+ VK_SAMPLER_MIPMAP_MODE_NEAREST = 0,
+ VK_SAMPLER_MIPMAP_MODE_LINEAR = 1,
+ VK_SAMPLER_MIPMAP_MODE_BEGIN_RANGE = VK_SAMPLER_MIPMAP_MODE_NEAREST,
+ VK_SAMPLER_MIPMAP_MODE_END_RANGE = VK_SAMPLER_MIPMAP_MODE_LINEAR,
+ VK_SAMPLER_MIPMAP_MODE_RANGE_SIZE = (VK_SAMPLER_MIPMAP_MODE_LINEAR - VK_SAMPLER_MIPMAP_MODE_NEAREST + 1),
+ VK_SAMPLER_MIPMAP_MODE_MAX_ENUM = 0x7FFFFFFF
+} VkSamplerMipmapMode;
+
+typedef enum VkSamplerAddressMode {
+ VK_SAMPLER_ADDRESS_MODE_REPEAT = 0,
+ VK_SAMPLER_ADDRESS_MODE_MIRRORED_REPEAT = 1,
+ VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE = 2,
+ VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_BORDER = 3,
+ VK_SAMPLER_ADDRESS_MODE_MIRROR_CLAMP_TO_EDGE = 4,
+ VK_SAMPLER_ADDRESS_MODE_BEGIN_RANGE = VK_SAMPLER_ADDRESS_MODE_REPEAT,
+ VK_SAMPLER_ADDRESS_MODE_END_RANGE = VK_SAMPLER_ADDRESS_MODE_MIRROR_CLAMP_TO_EDGE,
+ VK_SAMPLER_ADDRESS_MODE_RANGE_SIZE = (VK_SAMPLER_ADDRESS_MODE_MIRROR_CLAMP_TO_EDGE - VK_SAMPLER_ADDRESS_MODE_REPEAT + 1),
+ VK_SAMPLER_ADDRESS_MODE_MAX_ENUM = 0x7FFFFFFF
+} VkSamplerAddressMode;
+
+typedef enum VkBorderColor {
+ VK_BORDER_COLOR_FLOAT_TRANSPARENT_BLACK = 0,
+ VK_BORDER_COLOR_INT_TRANSPARENT_BLACK = 1,
+ VK_BORDER_COLOR_FLOAT_OPAQUE_BLACK = 2,
+ VK_BORDER_COLOR_INT_OPAQUE_BLACK = 3,
+ VK_BORDER_COLOR_FLOAT_OPAQUE_WHITE = 4,
+ VK_BORDER_COLOR_INT_OPAQUE_WHITE = 5,
+ VK_BORDER_COLOR_BEGIN_RANGE = VK_BORDER_COLOR_FLOAT_TRANSPARENT_BLACK,
+ VK_BORDER_COLOR_END_RANGE = VK_BORDER_COLOR_INT_OPAQUE_WHITE,
+ VK_BORDER_COLOR_RANGE_SIZE = (VK_BORDER_COLOR_INT_OPAQUE_WHITE - VK_BORDER_COLOR_FLOAT_TRANSPARENT_BLACK + 1),
+ VK_BORDER_COLOR_MAX_ENUM = 0x7FFFFFFF
+} VkBorderColor;
+
+typedef enum VkDescriptorType {
+ VK_DESCRIPTOR_TYPE_SAMPLER = 0,
+ VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER = 1,
+ VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE = 2,
+ VK_DESCRIPTOR_TYPE_STORAGE_IMAGE = 3,
+ VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER = 4,
+ VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER = 5,
+ VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER = 6,
+ VK_DESCRIPTOR_TYPE_STORAGE_BUFFER = 7,
+ VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC = 8,
+ VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC = 9,
+ VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT = 10,
+ VK_DESCRIPTOR_TYPE_BEGIN_RANGE = VK_DESCRIPTOR_TYPE_SAMPLER,
+ VK_DESCRIPTOR_TYPE_END_RANGE = VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT,
+ VK_DESCRIPTOR_TYPE_RANGE_SIZE = (VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT - VK_DESCRIPTOR_TYPE_SAMPLER + 1),
+ VK_DESCRIPTOR_TYPE_MAX_ENUM = 0x7FFFFFFF
+} VkDescriptorType;
+
+typedef enum VkAttachmentLoadOp {
+ VK_ATTACHMENT_LOAD_OP_LOAD = 0,
+ VK_ATTACHMENT_LOAD_OP_CLEAR = 1,
+ VK_ATTACHMENT_LOAD_OP_DONT_CARE = 2,
+ VK_ATTACHMENT_LOAD_OP_BEGIN_RANGE = VK_ATTACHMENT_LOAD_OP_LOAD,
+ VK_ATTACHMENT_LOAD_OP_END_RANGE = VK_ATTACHMENT_LOAD_OP_DONT_CARE,
+ VK_ATTACHMENT_LOAD_OP_RANGE_SIZE = (VK_ATTACHMENT_LOAD_OP_DONT_CARE - VK_ATTACHMENT_LOAD_OP_LOAD + 1),
+ VK_ATTACHMENT_LOAD_OP_MAX_ENUM = 0x7FFFFFFF
+} VkAttachmentLoadOp;
+
+typedef enum VkAttachmentStoreOp {
+ VK_ATTACHMENT_STORE_OP_STORE = 0,
+ VK_ATTACHMENT_STORE_OP_DONT_CARE = 1,
+ VK_ATTACHMENT_STORE_OP_BEGIN_RANGE = VK_ATTACHMENT_STORE_OP_STORE,
+ VK_ATTACHMENT_STORE_OP_END_RANGE = VK_ATTACHMENT_STORE_OP_DONT_CARE,
+ VK_ATTACHMENT_STORE_OP_RANGE_SIZE = (VK_ATTACHMENT_STORE_OP_DONT_CARE - VK_ATTACHMENT_STORE_OP_STORE + 1),
+ VK_ATTACHMENT_STORE_OP_MAX_ENUM = 0x7FFFFFFF
+} VkAttachmentStoreOp;
+
+typedef enum VkPipelineBindPoint {
+ VK_PIPELINE_BIND_POINT_GRAPHICS = 0,
+ VK_PIPELINE_BIND_POINT_COMPUTE = 1,
+ VK_PIPELINE_BIND_POINT_BEGIN_RANGE = VK_PIPELINE_BIND_POINT_GRAPHICS,
+ VK_PIPELINE_BIND_POINT_END_RANGE = VK_PIPELINE_BIND_POINT_COMPUTE,
+ VK_PIPELINE_BIND_POINT_RANGE_SIZE = (VK_PIPELINE_BIND_POINT_COMPUTE - VK_PIPELINE_BIND_POINT_GRAPHICS + 1),
+ VK_PIPELINE_BIND_POINT_MAX_ENUM = 0x7FFFFFFF
+} VkPipelineBindPoint;
+
+typedef enum VkCommandBufferLevel {
+ VK_COMMAND_BUFFER_LEVEL_PRIMARY = 0,
+ VK_COMMAND_BUFFER_LEVEL_SECONDARY = 1,
+ VK_COMMAND_BUFFER_LEVEL_BEGIN_RANGE = VK_COMMAND_BUFFER_LEVEL_PRIMARY,
+ VK_COMMAND_BUFFER_LEVEL_END_RANGE = VK_COMMAND_BUFFER_LEVEL_SECONDARY,
+ VK_COMMAND_BUFFER_LEVEL_RANGE_SIZE = (VK_COMMAND_BUFFER_LEVEL_SECONDARY - VK_COMMAND_BUFFER_LEVEL_PRIMARY + 1),
+ VK_COMMAND_BUFFER_LEVEL_MAX_ENUM = 0x7FFFFFFF
+} VkCommandBufferLevel;
+
+typedef enum VkIndexType {
+ VK_INDEX_TYPE_UINT16 = 0,
+ VK_INDEX_TYPE_UINT32 = 1,
+ VK_INDEX_TYPE_BEGIN_RANGE = VK_INDEX_TYPE_UINT16,
+ VK_INDEX_TYPE_END_RANGE = VK_INDEX_TYPE_UINT32,
+ VK_INDEX_TYPE_RANGE_SIZE = (VK_INDEX_TYPE_UINT32 - VK_INDEX_TYPE_UINT16 + 1),
+ VK_INDEX_TYPE_MAX_ENUM = 0x7FFFFFFF
+} VkIndexType;
+
+typedef enum VkSubpassContents {
+ VK_SUBPASS_CONTENTS_INLINE = 0,
+ VK_SUBPASS_CONTENTS_SECONDARY_COMMAND_BUFFERS = 1,
+ VK_SUBPASS_CONTENTS_BEGIN_RANGE = VK_SUBPASS_CONTENTS_INLINE,
+ VK_SUBPASS_CONTENTS_END_RANGE = VK_SUBPASS_CONTENTS_SECONDARY_COMMAND_BUFFERS,
+ VK_SUBPASS_CONTENTS_RANGE_SIZE = (VK_SUBPASS_CONTENTS_SECONDARY_COMMAND_BUFFERS - VK_SUBPASS_CONTENTS_INLINE + 1),
+ VK_SUBPASS_CONTENTS_MAX_ENUM = 0x7FFFFFFF
+} VkSubpassContents;
+
+typedef VkFlags VkInstanceCreateFlags;
+
+typedef enum VkFormatFeatureFlagBits {
+ VK_FORMAT_FEATURE_SAMPLED_IMAGE_BIT = 0x00000001,
+ VK_FORMAT_FEATURE_STORAGE_IMAGE_BIT = 0x00000002,
+ VK_FORMAT_FEATURE_STORAGE_IMAGE_ATOMIC_BIT = 0x00000004,
+ VK_FORMAT_FEATURE_UNIFORM_TEXEL_BUFFER_BIT = 0x00000008,
+ VK_FORMAT_FEATURE_STORAGE_TEXEL_BUFFER_BIT = 0x00000010,
+ VK_FORMAT_FEATURE_STORAGE_TEXEL_BUFFER_ATOMIC_BIT = 0x00000020,
+ VK_FORMAT_FEATURE_VERTEX_BUFFER_BIT = 0x00000040,
+ VK_FORMAT_FEATURE_COLOR_ATTACHMENT_BIT = 0x00000080,
+ VK_FORMAT_FEATURE_COLOR_ATTACHMENT_BLEND_BIT = 0x00000100,
+ VK_FORMAT_FEATURE_DEPTH_STENCIL_ATTACHMENT_BIT = 0x00000200,
+ VK_FORMAT_FEATURE_BLIT_SRC_BIT = 0x00000400,
+ VK_FORMAT_FEATURE_BLIT_DST_BIT = 0x00000800,
+ VK_FORMAT_FEATURE_SAMPLED_IMAGE_FILTER_LINEAR_BIT = 0x00001000,
+} VkFormatFeatureFlagBits;
+typedef VkFlags VkFormatFeatureFlags;
+
+typedef enum VkImageUsageFlagBits {
+ VK_IMAGE_USAGE_TRANSFER_SRC_BIT = 0x00000001,
+ VK_IMAGE_USAGE_TRANSFER_DST_BIT = 0x00000002,
+ VK_IMAGE_USAGE_SAMPLED_BIT = 0x00000004,
+ VK_IMAGE_USAGE_STORAGE_BIT = 0x00000008,
+ VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT = 0x00000010,
+ VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT = 0x00000020,
+ VK_IMAGE_USAGE_TRANSIENT_ATTACHMENT_BIT = 0x00000040,
+ VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT = 0x00000080,
+} VkImageUsageFlagBits;
+typedef VkFlags VkImageUsageFlags;
+
+typedef enum VkImageCreateFlagBits {
+ VK_IMAGE_CREATE_SPARSE_BINDING_BIT = 0x00000001,
+ VK_IMAGE_CREATE_SPARSE_RESIDENCY_BIT = 0x00000002,
+ VK_IMAGE_CREATE_SPARSE_ALIASED_BIT = 0x00000004,
+ VK_IMAGE_CREATE_MUTABLE_FORMAT_BIT = 0x00000008,
+ VK_IMAGE_CREATE_CUBE_COMPATIBLE_BIT = 0x00000010,
+} VkImageCreateFlagBits;
+typedef VkFlags VkImageCreateFlags;
+
+typedef enum VkSampleCountFlagBits {
+ VK_SAMPLE_COUNT_1_BIT = 0x00000001,
+ VK_SAMPLE_COUNT_2_BIT = 0x00000002,
+ VK_SAMPLE_COUNT_4_BIT = 0x00000004,
+ VK_SAMPLE_COUNT_8_BIT = 0x00000008,
+ VK_SAMPLE_COUNT_16_BIT = 0x00000010,
+ VK_SAMPLE_COUNT_32_BIT = 0x00000020,
+ VK_SAMPLE_COUNT_64_BIT = 0x00000040,
+} VkSampleCountFlagBits;
+typedef VkFlags VkSampleCountFlags;
+
+typedef enum VkQueueFlagBits {
+ VK_QUEUE_GRAPHICS_BIT = 0x00000001,
+ VK_QUEUE_COMPUTE_BIT = 0x00000002,
+ VK_QUEUE_TRANSFER_BIT = 0x00000004,
+ VK_QUEUE_SPARSE_BINDING_BIT = 0x00000008,
+} VkQueueFlagBits;
+typedef VkFlags VkQueueFlags;
+
+typedef enum VkMemoryPropertyFlagBits {
+ VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT = 0x00000001,
+ VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT = 0x00000002,
+ VK_MEMORY_PROPERTY_HOST_COHERENT_BIT = 0x00000004,
+ VK_MEMORY_PROPERTY_HOST_CACHED_BIT = 0x00000008,
+ VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT = 0x00000010,
+} VkMemoryPropertyFlagBits;
+typedef VkFlags VkMemoryPropertyFlags;
+
+typedef enum VkMemoryHeapFlagBits {
+ VK_MEMORY_HEAP_DEVICE_LOCAL_BIT = 0x00000001,
+} VkMemoryHeapFlagBits;
+typedef VkFlags VkMemoryHeapFlags;
+typedef VkFlags VkDeviceCreateFlags;
+typedef VkFlags VkDeviceQueueCreateFlags;
+
+typedef enum VkPipelineStageFlagBits {
+ VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT = 0x00000001,
+ VK_PIPELINE_STAGE_DRAW_INDIRECT_BIT = 0x00000002,
+ VK_PIPELINE_STAGE_VERTEX_INPUT_BIT = 0x00000004,
+ VK_PIPELINE_STAGE_VERTEX_SHADER_BIT = 0x00000008,
+ VK_PIPELINE_STAGE_TESSELLATION_CONTROL_SHADER_BIT = 0x00000010,
+ VK_PIPELINE_STAGE_TESSELLATION_EVALUATION_SHADER_BIT = 0x00000020,
+ VK_PIPELINE_STAGE_GEOMETRY_SHADER_BIT = 0x00000040,
+ VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT = 0x00000080,
+ VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT = 0x00000100,
+ VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT = 0x00000200,
+ VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT = 0x00000400,
+ VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT = 0x00000800,
+ VK_PIPELINE_STAGE_TRANSFER_BIT = 0x00001000,
+ VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT = 0x00002000,
+ VK_PIPELINE_STAGE_HOST_BIT = 0x00004000,
+ VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT = 0x00008000,
+ VK_PIPELINE_STAGE_ALL_COMMANDS_BIT = 0x00010000,
+} VkPipelineStageFlagBits;
+typedef VkFlags VkPipelineStageFlags;
+typedef VkFlags VkMemoryMapFlags;
+
+typedef enum VkImageAspectFlagBits {
+ VK_IMAGE_ASPECT_COLOR_BIT = 0x00000001,
+ VK_IMAGE_ASPECT_DEPTH_BIT = 0x00000002,
+ VK_IMAGE_ASPECT_STENCIL_BIT = 0x00000004,
+ VK_IMAGE_ASPECT_METADATA_BIT = 0x00000008,
+} VkImageAspectFlagBits;
+typedef VkFlags VkImageAspectFlags;
+
+typedef enum VkSparseImageFormatFlagBits {
+ VK_SPARSE_IMAGE_FORMAT_SINGLE_MIPTAIL_BIT = 0x00000001,
+ VK_SPARSE_IMAGE_FORMAT_ALIGNED_MIP_SIZE_BIT = 0x00000002,
+ VK_SPARSE_IMAGE_FORMAT_NONSTANDARD_BLOCK_SIZE_BIT = 0x00000004,
+} VkSparseImageFormatFlagBits;
+typedef VkFlags VkSparseImageFormatFlags;
+
+typedef enum VkSparseMemoryBindFlagBits {
+ VK_SPARSE_MEMORY_BIND_METADATA_BIT = 0x00000001,
+} VkSparseMemoryBindFlagBits;
+typedef VkFlags VkSparseMemoryBindFlags;
+
+typedef enum VkFenceCreateFlagBits {
+ VK_FENCE_CREATE_SIGNALED_BIT = 0x00000001,
+} VkFenceCreateFlagBits;
+typedef VkFlags VkFenceCreateFlags;
+typedef VkFlags VkSemaphoreCreateFlags;
+typedef VkFlags VkEventCreateFlags;
+typedef VkFlags VkQueryPoolCreateFlags;
+
+typedef enum VkQueryPipelineStatisticFlagBits {
+ VK_QUERY_PIPELINE_STATISTIC_INPUT_ASSEMBLY_VERTICES_BIT = 0x00000001,
+ VK_QUERY_PIPELINE_STATISTIC_INPUT_ASSEMBLY_PRIMITIVES_BIT = 0x00000002,
+ VK_QUERY_PIPELINE_STATISTIC_VERTEX_SHADER_INVOCATIONS_BIT = 0x00000004,
+ VK_QUERY_PIPELINE_STATISTIC_GEOMETRY_SHADER_INVOCATIONS_BIT = 0x00000008,
+ VK_QUERY_PIPELINE_STATISTIC_GEOMETRY_SHADER_PRIMITIVES_BIT = 0x00000010,
+ VK_QUERY_PIPELINE_STATISTIC_CLIPPING_INVOCATIONS_BIT = 0x00000020,
+ VK_QUERY_PIPELINE_STATISTIC_CLIPPING_PRIMITIVES_BIT = 0x00000040,
+ VK_QUERY_PIPELINE_STATISTIC_FRAGMENT_SHADER_INVOCATIONS_BIT = 0x00000080,
+ VK_QUERY_PIPELINE_STATISTIC_TESSELLATION_CONTROL_SHADER_PATCHES_BIT = 0x00000100,
+ VK_QUERY_PIPELINE_STATISTIC_TESSELLATION_EVALUATION_SHADER_INVOCATIONS_BIT = 0x00000200,
+ VK_QUERY_PIPELINE_STATISTIC_COMPUTE_SHADER_INVOCATIONS_BIT = 0x00000400,
+} VkQueryPipelineStatisticFlagBits;
+typedef VkFlags VkQueryPipelineStatisticFlags;
+
+typedef enum VkQueryResultFlagBits {
+ VK_QUERY_RESULT_64_BIT = 0x00000001,
+ VK_QUERY_RESULT_WAIT_BIT = 0x00000002,
+ VK_QUERY_RESULT_WITH_AVAILABILITY_BIT = 0x00000004,
+ VK_QUERY_RESULT_PARTIAL_BIT = 0x00000008,
+} VkQueryResultFlagBits;
+typedef VkFlags VkQueryResultFlags;
+
+typedef enum VkBufferCreateFlagBits {
+ VK_BUFFER_CREATE_SPARSE_BINDING_BIT = 0x00000001,
+ VK_BUFFER_CREATE_SPARSE_RESIDENCY_BIT = 0x00000002,
+ VK_BUFFER_CREATE_SPARSE_ALIASED_BIT = 0x00000004,
+} VkBufferCreateFlagBits;
+typedef VkFlags VkBufferCreateFlags;
+
+typedef enum VkBufferUsageFlagBits {
+ VK_BUFFER_USAGE_TRANSFER_SRC_BIT = 0x00000001,
+ VK_BUFFER_USAGE_TRANSFER_DST_BIT = 0x00000002,
+ VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT = 0x00000004,
+ VK_BUFFER_USAGE_STORAGE_TEXEL_BUFFER_BIT = 0x00000008,
+ VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT = 0x00000010,
+ VK_BUFFER_USAGE_STORAGE_BUFFER_BIT = 0x00000020,
+ VK_BUFFER_USAGE_INDEX_BUFFER_BIT = 0x00000040,
+ VK_BUFFER_USAGE_VERTEX_BUFFER_BIT = 0x00000080,
+ VK_BUFFER_USAGE_INDIRECT_BUFFER_BIT = 0x00000100,
+} VkBufferUsageFlagBits;
+typedef VkFlags VkBufferUsageFlags;
+typedef VkFlags VkBufferViewCreateFlags;
+typedef VkFlags VkImageViewCreateFlags;
+typedef VkFlags VkShaderModuleCreateFlags;
+typedef VkFlags VkPipelineCacheCreateFlags;
+
+typedef enum VkPipelineCreateFlagBits {
+ VK_PIPELINE_CREATE_DISABLE_OPTIMIZATION_BIT = 0x00000001,
+ VK_PIPELINE_CREATE_ALLOW_DERIVATIVES_BIT = 0x00000002,
+ VK_PIPELINE_CREATE_DERIVATIVE_BIT = 0x00000004,
+} VkPipelineCreateFlagBits;
+typedef VkFlags VkPipelineCreateFlags;
+typedef VkFlags VkPipelineShaderStageCreateFlags;
+
+typedef enum VkShaderStageFlagBits {
+ VK_SHADER_STAGE_VERTEX_BIT = 0x00000001,
+ VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT = 0x00000002,
+ VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT = 0x00000004,
+ VK_SHADER_STAGE_GEOMETRY_BIT = 0x00000008,
+ VK_SHADER_STAGE_FRAGMENT_BIT = 0x00000010,
+ VK_SHADER_STAGE_COMPUTE_BIT = 0x00000020,
+ VK_SHADER_STAGE_ALL_GRAPHICS = 0x1F,
+ VK_SHADER_STAGE_ALL = 0x7FFFFFFF,
+} VkShaderStageFlagBits;
+typedef VkFlags VkPipelineVertexInputStateCreateFlags;
+typedef VkFlags VkPipelineInputAssemblyStateCreateFlags;
+typedef VkFlags VkPipelineTessellationStateCreateFlags;
+typedef VkFlags VkPipelineViewportStateCreateFlags;
+typedef VkFlags VkPipelineRasterizationStateCreateFlags;
+
+typedef enum VkCullModeFlagBits {
+ VK_CULL_MODE_NONE = 0,
+ VK_CULL_MODE_FRONT_BIT = 0x00000001,
+ VK_CULL_MODE_BACK_BIT = 0x00000002,
+ VK_CULL_MODE_FRONT_AND_BACK = 0x3,
+} VkCullModeFlagBits;
+typedef VkFlags VkCullModeFlags;
+typedef VkFlags VkPipelineMultisampleStateCreateFlags;
+typedef VkFlags VkPipelineDepthStencilStateCreateFlags;
+typedef VkFlags VkPipelineColorBlendStateCreateFlags;
+
+typedef enum VkColorComponentFlagBits {
+ VK_COLOR_COMPONENT_R_BIT = 0x00000001,
+ VK_COLOR_COMPONENT_G_BIT = 0x00000002,
+ VK_COLOR_COMPONENT_B_BIT = 0x00000004,
+ VK_COLOR_COMPONENT_A_BIT = 0x00000008,
+} VkColorComponentFlagBits;
+typedef VkFlags VkColorComponentFlags;
+typedef VkFlags VkPipelineDynamicStateCreateFlags;
+typedef VkFlags VkPipelineLayoutCreateFlags;
+typedef VkFlags VkShaderStageFlags;
+typedef VkFlags VkSamplerCreateFlags;
+typedef VkFlags VkDescriptorSetLayoutCreateFlags;
+
+typedef enum VkDescriptorPoolCreateFlagBits {
+ VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT = 0x00000001,
+} VkDescriptorPoolCreateFlagBits;
+typedef VkFlags VkDescriptorPoolCreateFlags;
+typedef VkFlags VkDescriptorPoolResetFlags;
+typedef VkFlags VkFramebufferCreateFlags;
+typedef VkFlags VkRenderPassCreateFlags;
+
+typedef enum VkAttachmentDescriptionFlagBits {
+ VK_ATTACHMENT_DESCRIPTION_MAY_ALIAS_BIT = 0x00000001,
+} VkAttachmentDescriptionFlagBits;
+typedef VkFlags VkAttachmentDescriptionFlags;
+typedef VkFlags VkSubpassDescriptionFlags;
+
+typedef enum VkAccessFlagBits {
+ VK_ACCESS_INDIRECT_COMMAND_READ_BIT = 0x00000001,
+ VK_ACCESS_INDEX_READ_BIT = 0x00000002,
+ VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT = 0x00000004,
+ VK_ACCESS_UNIFORM_READ_BIT = 0x00000008,
+ VK_ACCESS_INPUT_ATTACHMENT_READ_BIT = 0x00000010,
+ VK_ACCESS_SHADER_READ_BIT = 0x00000020,
+ VK_ACCESS_SHADER_WRITE_BIT = 0x00000040,
+ VK_ACCESS_COLOR_ATTACHMENT_READ_BIT = 0x00000080,
+ VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT = 0x00000100,
+ VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT = 0x00000200,
+ VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT = 0x00000400,
+ VK_ACCESS_TRANSFER_READ_BIT = 0x00000800,
+ VK_ACCESS_TRANSFER_WRITE_BIT = 0x00001000,
+ VK_ACCESS_HOST_READ_BIT = 0x00002000,
+ VK_ACCESS_HOST_WRITE_BIT = 0x00004000,
+ VK_ACCESS_MEMORY_READ_BIT = 0x00008000,
+ VK_ACCESS_MEMORY_WRITE_BIT = 0x00010000,
+} VkAccessFlagBits;
+typedef VkFlags VkAccessFlags;
+
+typedef enum VkDependencyFlagBits {
+ VK_DEPENDENCY_BY_REGION_BIT = 0x00000001,
+} VkDependencyFlagBits;
+typedef VkFlags VkDependencyFlags;
+
+typedef enum VkCommandPoolCreateFlagBits {
+ VK_COMMAND_POOL_CREATE_TRANSIENT_BIT = 0x00000001,
+ VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT = 0x00000002,
+} VkCommandPoolCreateFlagBits;
+typedef VkFlags VkCommandPoolCreateFlags;
+
+typedef enum VkCommandPoolResetFlagBits {
+ VK_COMMAND_POOL_RESET_RELEASE_RESOURCES_BIT = 0x00000001,
+} VkCommandPoolResetFlagBits;
+typedef VkFlags VkCommandPoolResetFlags;
+
+typedef enum VkCommandBufferUsageFlagBits {
+ VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT = 0x00000001,
+ VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT = 0x00000002,
+ VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT = 0x00000004,
+} VkCommandBufferUsageFlagBits;
+typedef VkFlags VkCommandBufferUsageFlags;
+
+typedef enum VkQueryControlFlagBits {
+ VK_QUERY_CONTROL_PRECISE_BIT = 0x00000001,
+} VkQueryControlFlagBits;
+typedef VkFlags VkQueryControlFlags;
+
+typedef enum VkCommandBufferResetFlagBits {
+ VK_COMMAND_BUFFER_RESET_RELEASE_RESOURCES_BIT = 0x00000001,
+} VkCommandBufferResetFlagBits;
+typedef VkFlags VkCommandBufferResetFlags;
+
+typedef enum VkStencilFaceFlagBits {
+ VK_STENCIL_FACE_FRONT_BIT = 0x00000001,
+ VK_STENCIL_FACE_BACK_BIT = 0x00000002,
+ VK_STENCIL_FRONT_AND_BACK = 0x3,
+} VkStencilFaceFlagBits;
+typedef VkFlags VkStencilFaceFlags;
+
+typedef void* (VKAPI_PTR *PFN_vkAllocationFunction)(
+ void* pUserData,
+ size_t size,
+ size_t alignment,
+ VkSystemAllocationScope allocationScope);
+
+typedef void* (VKAPI_PTR *PFN_vkReallocationFunction)(
+ void* pUserData,
+ void* pOriginal,
+ size_t size,
+ size_t alignment,
+ VkSystemAllocationScope allocationScope);
+
+typedef void (VKAPI_PTR *PFN_vkFreeFunction)(
+ void* pUserData,
+ void* pMemory);
+
+typedef void (VKAPI_PTR *PFN_vkInternalAllocationNotification)(
+ void* pUserData,
+ size_t size,
+ VkInternalAllocationType allocationType,
+ VkSystemAllocationScope allocationScope);
+
+typedef void (VKAPI_PTR *PFN_vkInternalFreeNotification)(
+ void* pUserData,
+ size_t size,
+ VkInternalAllocationType allocationType,
+ VkSystemAllocationScope allocationScope);
+
+typedef void (VKAPI_PTR *PFN_vkVoidFunction)(void);
+
+typedef struct VkApplicationInfo {
+ VkStructureType sType;
+ const void* pNext;
+ const char* pApplicationName;
+ uint32_t applicationVersion;
+ const char* pEngineName;
+ uint32_t engineVersion;
+ uint32_t apiVersion;
+} VkApplicationInfo;
+
+typedef struct VkInstanceCreateInfo {
+ VkStructureType sType;
+ const void* pNext;
+ VkInstanceCreateFlags flags;
+ const VkApplicationInfo* pApplicationInfo;
+ uint32_t enabledLayerCount;
+ const char* const* ppEnabledLayerNames;
+ uint32_t enabledExtensionCount;
+ const char* const* ppEnabledExtensionNames;
+} VkInstanceCreateInfo;
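+
+/*
+ * A minimal instance-creation sketch.  vkCreateInstance is declared further
+ * down in this header; the helper name and the "demo" string are
+ * placeholders, and error handling is elided:
+ *
+ *   static VkInstance create_instance(void) {
+ *       VkApplicationInfo app = {0};
+ *       VkInstanceCreateInfo info = {0};
+ *       VkInstance instance = VK_NULL_HANDLE;
+ *       app.sType = VK_STRUCTURE_TYPE_APPLICATION_INFO;
+ *       app.pApplicationName = "demo";
+ *       app.apiVersion = VK_MAKE_VERSION(1, 0, 0);
+ *       info.sType = VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO;
+ *       info.pApplicationInfo = &app;
+ *       if (vkCreateInstance(&info, NULL, &instance) != VK_SUCCESS) {
+ *           instance = VK_NULL_HANDLE;
+ *       }
+ *       return instance;
+ *   }
+ */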
+
+typedef struct VkAllocationCallbacks {
+ void* pUserData;
+ PFN_vkAllocationFunction pfnAllocation;
+ PFN_vkReallocationFunction pfnReallocation;
+ PFN_vkFreeFunction pfnFree;
+ PFN_vkInternalAllocationNotification pfnInternalAllocation;
+ PFN_vkInternalFreeNotification pfnInternalFree;
+} VkAllocationCallbacks;
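+
+/*
+ * A hedged sketch of backing these callbacks with posix_memalign (assumes a
+ * POSIX libc and <stdlib.h>).  Note the spec requires pfnReallocation as
+ * well whenever callbacks are supplied; it is omitted here for brevity:
+ *
+ *   static void* VKAPI_PTR my_alloc(void* ud, size_t size, size_t align,
+ *                                   VkSystemAllocationScope scope) {
+ *       void* p = NULL;
+ *       if (align < sizeof(void*)) align = sizeof(void*);
+ *       (void)ud; (void)scope;
+ *       return posix_memalign(&p, align, size) == 0 ? p : NULL;
+ *   }
+ *
+ *   static void VKAPI_PTR my_free(void* ud, void* mem) {
+ *       (void)ud;
+ *       free(mem);
+ *   }
+ *
+ *   VkAllocationCallbacks cb = {0};
+ *   cb.pfnAllocation = my_alloc;
+ *   cb.pfnFree = my_free;
+ */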
+
+typedef struct VkPhysicalDeviceFeatures {
+ VkBool32 robustBufferAccess;
+ VkBool32 fullDrawIndexUint32;
+ VkBool32 imageCubeArray;
+ VkBool32 independentBlend;
+ VkBool32 geometryShader;
+ VkBool32 tessellationShader;
+ VkBool32 sampleRateShading;
+ VkBool32 dualSrcBlend;
+ VkBool32 logicOp;
+ VkBool32 multiDrawIndirect;
+ VkBool32 drawIndirectFirstInstance;
+ VkBool32 depthClamp;
+ VkBool32 depthBiasClamp;
+ VkBool32 fillModeNonSolid;
+ VkBool32 depthBounds;
+ VkBool32 wideLines;
+ VkBool32 largePoints;
+ VkBool32 alphaToOne;
+ VkBool32 multiViewport;
+ VkBool32 samplerAnisotropy;
+ VkBool32 textureCompressionETC2;
+ VkBool32 textureCompressionASTC_LDR;
+ VkBool32 textureCompressionBC;
+ VkBool32 occlusionQueryPrecise;
+ VkBool32 pipelineStatisticsQuery;
+ VkBool32 vertexPipelineStoresAndAtomics;
+ VkBool32 fragmentStoresAndAtomics;
+ VkBool32 shaderTessellationAndGeometryPointSize;
+ VkBool32 shaderImageGatherExtended;
+ VkBool32 shaderStorageImageExtendedFormats;
+ VkBool32 shaderStorageImageMultisample;
+ VkBool32 shaderStorageImageReadWithoutFormat;
+ VkBool32 shaderStorageImageWriteWithoutFormat;
+ VkBool32 shaderUniformBufferArrayDynamicIndexing;
+ VkBool32 shaderSampledImageArrayDynamicIndexing;
+ VkBool32 shaderStorageBufferArrayDynamicIndexing;
+ VkBool32 shaderStorageImageArrayDynamicIndexing;
+ VkBool32 shaderClipDistance;
+ VkBool32 shaderCullDistance;
+ VkBool32 shaderFloat64;
+ VkBool32 shaderInt64;
+ VkBool32 shaderInt16;
+ VkBool32 shaderResourceResidency;
+ VkBool32 shaderResourceMinLod;
+ VkBool32 sparseBinding;
+ VkBool32 sparseResidencyBuffer;
+ VkBool32 sparseResidencyImage2D;
+ VkBool32 sparseResidencyImage3D;
+ VkBool32 sparseResidency2Samples;
+ VkBool32 sparseResidency4Samples;
+ VkBool32 sparseResidency8Samples;
+ VkBool32 sparseResidency16Samples;
+ VkBool32 sparseResidencyAliased;
+ VkBool32 variableMultisampleRate;
+ VkBool32 inheritedQueries;
+} VkPhysicalDeviceFeatures;
+
+typedef struct VkFormatProperties {
+ VkFormatFeatureFlags linearTilingFeatures;
+ VkFormatFeatureFlags optimalTilingFeatures;
+ VkFormatFeatureFlags bufferFeatures;
+} VkFormatProperties;
+
+typedef struct VkExtent3D {
+ uint32_t width;
+ uint32_t height;
+ uint32_t depth;
+} VkExtent3D;
+
+typedef struct VkImageFormatProperties {
+ VkExtent3D maxExtent;
+ uint32_t maxMipLevels;
+ uint32_t maxArrayLayers;
+ VkSampleCountFlags sampleCounts;
+ VkDeviceSize maxResourceSize;
+} VkImageFormatProperties;
+
+typedef struct VkPhysicalDeviceLimits {
+ uint32_t maxImageDimension1D;
+ uint32_t maxImageDimension2D;
+ uint32_t maxImageDimension3D;
+ uint32_t maxImageDimensionCube;
+ uint32_t maxImageArrayLayers;
+ uint32_t maxTexelBufferElements;
+ uint32_t maxUniformBufferRange;
+ uint32_t maxStorageBufferRange;
+ uint32_t maxPushConstantsSize;
+ uint32_t maxMemoryAllocationCount;
+ uint32_t maxSamplerAllocationCount;
+ VkDeviceSize bufferImageGranularity;
+ VkDeviceSize sparseAddressSpaceSize;
+ uint32_t maxBoundDescriptorSets;
+ uint32_t maxPerStageDescriptorSamplers;
+ uint32_t maxPerStageDescriptorUniformBuffers;
+ uint32_t maxPerStageDescriptorStorageBuffers;
+ uint32_t maxPerStageDescriptorSampledImages;
+ uint32_t maxPerStageDescriptorStorageImages;
+ uint32_t maxPerStageDescriptorInputAttachments;
+ uint32_t maxPerStageResources;
+ uint32_t maxDescriptorSetSamplers;
+ uint32_t maxDescriptorSetUniformBuffers;
+ uint32_t maxDescriptorSetUniformBuffersDynamic;
+ uint32_t maxDescriptorSetStorageBuffers;
+ uint32_t maxDescriptorSetStorageBuffersDynamic;
+ uint32_t maxDescriptorSetSampledImages;
+ uint32_t maxDescriptorSetStorageImages;
+ uint32_t maxDescriptorSetInputAttachments;
+ uint32_t maxVertexInputAttributes;
+ uint32_t maxVertexInputBindings;
+ uint32_t maxVertexInputAttributeOffset;
+ uint32_t maxVertexInputBindingStride;
+ uint32_t maxVertexOutputComponents;
+ uint32_t maxTessellationGenerationLevel;
+ uint32_t maxTessellationPatchSize;
+ uint32_t maxTessellationControlPerVertexInputComponents;
+ uint32_t maxTessellationControlPerVertexOutputComponents;
+ uint32_t maxTessellationControlPerPatchOutputComponents;
+ uint32_t maxTessellationControlTotalOutputComponents;
+ uint32_t maxTessellationEvaluationInputComponents;
+ uint32_t maxTessellationEvaluationOutputComponents;
+ uint32_t maxGeometryShaderInvocations;
+ uint32_t maxGeometryInputComponents;
+ uint32_t maxGeometryOutputComponents;
+ uint32_t maxGeometryOutputVertices;
+ uint32_t maxGeometryTotalOutputComponents;
+ uint32_t maxFragmentInputComponents;
+ uint32_t maxFragmentOutputAttachments;
+ uint32_t maxFragmentDualSrcAttachments;
+ uint32_t maxFragmentCombinedOutputResources;
+ uint32_t maxComputeSharedMemorySize;
+ uint32_t maxComputeWorkGroupCount[3];
+ uint32_t maxComputeWorkGroupInvocations;
+ uint32_t maxComputeWorkGroupSize[3];
+ uint32_t subPixelPrecisionBits;
+ uint32_t subTexelPrecisionBits;
+ uint32_t mipmapPrecisionBits;
+ uint32_t maxDrawIndexedIndexValue;
+ uint32_t maxDrawIndirectCount;
+ float maxSamplerLodBias;
+ float maxSamplerAnisotropy;
+ uint32_t maxViewports;
+ uint32_t maxViewportDimensions[2];
+ float viewportBoundsRange[2];
+ uint32_t viewportSubPixelBits;
+ size_t minMemoryMapAlignment;
+ VkDeviceSize minTexelBufferOffsetAlignment;
+ VkDeviceSize minUniformBufferOffsetAlignment;
+ VkDeviceSize minStorageBufferOffsetAlignment;
+ int32_t minTexelOffset;
+ uint32_t maxTexelOffset;
+ int32_t minTexelGatherOffset;
+ uint32_t maxTexelGatherOffset;
+ float minInterpolationOffset;
+ float maxInterpolationOffset;
+ uint32_t subPixelInterpolationOffsetBits;
+ uint32_t maxFramebufferWidth;
+ uint32_t maxFramebufferHeight;
+ uint32_t maxFramebufferLayers;
+ VkSampleCountFlags framebufferColorSampleCounts;
+ VkSampleCountFlags framebufferDepthSampleCounts;
+ VkSampleCountFlags framebufferStencilSampleCounts;
+ VkSampleCountFlags framebufferNoAttachmentsSampleCounts;
+ uint32_t maxColorAttachments;
+ VkSampleCountFlags sampledImageColorSampleCounts;
+ VkSampleCountFlags sampledImageIntegerSampleCounts;
+ VkSampleCountFlags sampledImageDepthSampleCounts;
+ VkSampleCountFlags sampledImageStencilSampleCounts;
+ VkSampleCountFlags storageImageSampleCounts;
+ uint32_t maxSampleMaskWords;
+ VkBool32 timestampComputeAndGraphics;
+ float timestampPeriod;
+ uint32_t maxClipDistances;
+ uint32_t maxCullDistances;
+ uint32_t maxCombinedClipAndCullDistances;
+ uint32_t discreteQueuePriorities;
+ float pointSizeRange[2];
+ float lineWidthRange[2];
+ float pointSizeGranularity;
+ float lineWidthGranularity;
+ VkBool32 strictLines;
+ VkBool32 standardSampleLocations;
+ VkDeviceSize optimalBufferCopyOffsetAlignment;
+ VkDeviceSize optimalBufferCopyRowPitchAlignment;
+ VkDeviceSize nonCoherentAtomSize;
+} VkPhysicalDeviceLimits;
+
+typedef struct VkPhysicalDeviceSparseProperties {
+ VkBool32 residencyStandard2DBlockShape;
+ VkBool32 residencyStandard2DMultisampleBlockShape;
+ VkBool32 residencyStandard3DBlockShape;
+ VkBool32 residencyAlignedMipSize;
+ VkBool32 residencyNonResidentStrict;
+} VkPhysicalDeviceSparseProperties;
+
+typedef struct VkPhysicalDeviceProperties {
+ uint32_t apiVersion;
+ uint32_t driverVersion;
+ uint32_t vendorID;
+ uint32_t deviceID;
+ VkPhysicalDeviceType deviceType;
+ char deviceName[VK_MAX_PHYSICAL_DEVICE_NAME_SIZE];
+ uint8_t pipelineCacheUUID[VK_UUID_SIZE];
+ VkPhysicalDeviceLimits limits;
+ VkPhysicalDeviceSparseProperties sparseProperties;
+} VkPhysicalDeviceProperties;
+
+typedef struct VkQueueFamilyProperties {
+ VkQueueFlags queueFlags;
+ uint32_t queueCount;
+ uint32_t timestampValidBits;
+ VkExtent3D minImageTransferGranularity;
+} VkQueueFamilyProperties;
+
+typedef struct VkMemoryType {
+ VkMemoryPropertyFlags propertyFlags;
+ uint32_t heapIndex;
+} VkMemoryType;
+
+typedef struct VkMemoryHeap {
+ VkDeviceSize size;
+ VkMemoryHeapFlags flags;
+} VkMemoryHeap;
+
+typedef struct VkPhysicalDeviceMemoryProperties {
+ uint32_t memoryTypeCount;
+ VkMemoryType memoryTypes[VK_MAX_MEMORY_TYPES];
+ uint32_t memoryHeapCount;
+ VkMemoryHeap memoryHeaps[VK_MAX_MEMORY_HEAPS];
+} VkPhysicalDeviceMemoryProperties;
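+
+/*
+ * The canonical way to pick a memoryTypeIndex: mask the memoryTypeBits
+ * reported in VkMemoryRequirements (declared below) against the property
+ * flags the caller wants.  A minimal sketch; the helper name is
+ * illustrative and UINT32_MAX signals "no match":
+ *
+ *   static uint32_t find_memory_type(VkPhysicalDevice gpu, uint32_t typeBits,
+ *                                    VkMemoryPropertyFlags wanted) {
+ *       VkPhysicalDeviceMemoryProperties mem;
+ *       uint32_t i;
+ *       vkGetPhysicalDeviceMemoryProperties(gpu, &mem);
+ *       for (i = 0; i < mem.memoryTypeCount; ++i) {
+ *           if ((typeBits & (1u << i)) &&
+ *               (mem.memoryTypes[i].propertyFlags & wanted) == wanted) {
+ *               return i;
+ *           }
+ *       }
+ *       return UINT32_MAX;
+ *   }
+ */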
+
+typedef struct VkDeviceQueueCreateInfo {
+ VkStructureType sType;
+ const void* pNext;
+ VkDeviceQueueCreateFlags flags;
+ uint32_t queueFamilyIndex;
+ uint32_t queueCount;
+ const float* pQueuePriorities;
+} VkDeviceQueueCreateInfo;
+
+typedef struct VkDeviceCreateInfo {
+ VkStructureType sType;
+ const void* pNext;
+ VkDeviceCreateFlags flags;
+ uint32_t queueCreateInfoCount;
+ const VkDeviceQueueCreateInfo* pQueueCreateInfos;
+ uint32_t enabledLayerCount;
+ const char* const* ppEnabledLayerNames;
+ uint32_t enabledExtensionCount;
+ const char* const* ppEnabledExtensionNames;
+ const VkPhysicalDeviceFeatures* pEnabledFeatures;
+} VkDeviceCreateInfo;
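+
+/*
+ * A minimal device-creation sketch requesting a single queue from one
+ * family.  vkCreateDevice is declared further down in this header; the
+ * helper name is illustrative and error handling is elided:
+ *
+ *   static VkDevice create_device(VkPhysicalDevice gpu, uint32_t family) {
+ *       float priority = 1.0f;
+ *       VkDeviceQueueCreateInfo queue = {0};
+ *       VkDeviceCreateInfo info = {0};
+ *       VkDevice device = VK_NULL_HANDLE;
+ *       queue.sType = VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO;
+ *       queue.queueFamilyIndex = family;
+ *       queue.queueCount = 1;
+ *       queue.pQueuePriorities = &priority;
+ *       info.sType = VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO;
+ *       info.queueCreateInfoCount = 1;
+ *       info.pQueueCreateInfos = &queue;
+ *       vkCreateDevice(gpu, &info, NULL, &device);
+ *       return device;
+ *   }
+ */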
+
+typedef struct VkExtensionProperties {
+ char extensionName[VK_MAX_EXTENSION_NAME_SIZE];
+ uint32_t specVersion;
+} VkExtensionProperties;
+
+typedef struct VkLayerProperties {
+ char layerName[VK_MAX_EXTENSION_NAME_SIZE];
+ uint32_t specVersion;
+ uint32_t implementationVersion;
+ char description[VK_MAX_DESCRIPTION_SIZE];
+} VkLayerProperties;
+
+typedef struct VkSubmitInfo {
+ VkStructureType sType;
+ const void* pNext;
+ uint32_t waitSemaphoreCount;
+ const VkSemaphore* pWaitSemaphores;
+ const VkPipelineStageFlags* pWaitDstStageMask;
+ uint32_t commandBufferCount;
+ const VkCommandBuffer* pCommandBuffers;
+ uint32_t signalSemaphoreCount;
+ const VkSemaphore* pSignalSemaphores;
+} VkSubmitInfo;
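+
+/*
+ * A minimal submission sketch: one command buffer that waits on one
+ * semaphore at the color-attachment-output stage and signals another when
+ * done.  vkQueueSubmit is declared further down in this header:
+ *
+ *   static void submit_one(VkQueue queue, VkCommandBuffer cmd,
+ *                          VkSemaphore wait, VkSemaphore signal,
+ *                          VkFence fence) {
+ *       VkPipelineStageFlags stage =
+ *           VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
+ *       VkSubmitInfo info = {0};
+ *       info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
+ *       info.waitSemaphoreCount = 1;
+ *       info.pWaitSemaphores = &wait;
+ *       info.pWaitDstStageMask = &stage;
+ *       info.commandBufferCount = 1;
+ *       info.pCommandBuffers = &cmd;
+ *       info.signalSemaphoreCount = 1;
+ *       info.pSignalSemaphores = &signal;
+ *       vkQueueSubmit(queue, 1, &info, fence);
+ *   }
+ */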
+
+typedef struct VkMemoryAllocateInfo {
+ VkStructureType sType;
+ const void* pNext;
+ VkDeviceSize allocationSize;
+ uint32_t memoryTypeIndex;
+} VkMemoryAllocateInfo;
+
+typedef struct VkMappedMemoryRange {
+ VkStructureType sType;
+ const void* pNext;
+ VkDeviceMemory memory;
+ VkDeviceSize offset;
+ VkDeviceSize size;
+} VkMappedMemoryRange;
+
+typedef struct VkMemoryRequirements {
+ VkDeviceSize size;
+ VkDeviceSize alignment;
+ uint32_t memoryTypeBits;
+} VkMemoryRequirements;
+
+typedef struct VkSparseImageFormatProperties {
+ VkImageAspectFlags aspectMask;
+ VkExtent3D imageGranularity;
+ VkSparseImageFormatFlags flags;
+} VkSparseImageFormatProperties;
+
+typedef struct VkSparseImageMemoryRequirements {
+ VkSparseImageFormatProperties formatProperties;
+ uint32_t imageMipTailFirstLod;
+ VkDeviceSize imageMipTailSize;
+ VkDeviceSize imageMipTailOffset;
+ VkDeviceSize imageMipTailStride;
+} VkSparseImageMemoryRequirements;
+
+typedef struct VkSparseMemoryBind {
+ VkDeviceSize resourceOffset;
+ VkDeviceSize size;
+ VkDeviceMemory memory;
+ VkDeviceSize memoryOffset;
+ VkSparseMemoryBindFlags flags;
+} VkSparseMemoryBind;
+
+typedef struct VkSparseBufferMemoryBindInfo {
+ VkBuffer buffer;
+ uint32_t bindCount;
+ const VkSparseMemoryBind* pBinds;
+} VkSparseBufferMemoryBindInfo;
+
+typedef struct VkSparseImageOpaqueMemoryBindInfo {
+ VkImage image;
+ uint32_t bindCount;
+ const VkSparseMemoryBind* pBinds;
+} VkSparseImageOpaqueMemoryBindInfo;
+
+typedef struct VkImageSubresource {
+ VkImageAspectFlags aspectMask;
+ uint32_t mipLevel;
+ uint32_t arrayLayer;
+} VkImageSubresource;
+
+typedef struct VkOffset3D {
+ int32_t x;
+ int32_t y;
+ int32_t z;
+} VkOffset3D;
+
+typedef struct VkSparseImageMemoryBind {
+ VkImageSubresource subresource;
+ VkOffset3D offset;
+ VkExtent3D extent;
+ VkDeviceMemory memory;
+ VkDeviceSize memoryOffset;
+ VkSparseMemoryBindFlags flags;
+} VkSparseImageMemoryBind;
+
+typedef struct VkSparseImageMemoryBindInfo {
+ VkImage image;
+ uint32_t bindCount;
+ const VkSparseImageMemoryBind* pBinds;
+} VkSparseImageMemoryBindInfo;
+
+typedef struct VkBindSparseInfo {
+ VkStructureType sType;
+ const void* pNext;
+ uint32_t waitSemaphoreCount;
+ const VkSemaphore* pWaitSemaphores;
+ uint32_t bufferBindCount;
+ const VkSparseBufferMemoryBindInfo* pBufferBinds;
+ uint32_t imageOpaqueBindCount;
+ const VkSparseImageOpaqueMemoryBindInfo* pImageOpaqueBinds;
+ uint32_t imageBindCount;
+ const VkSparseImageMemoryBindInfo* pImageBinds;
+ uint32_t signalSemaphoreCount;
+ const VkSemaphore* pSignalSemaphores;
+} VkBindSparseInfo;
+
+typedef struct VkFenceCreateInfo {
+ VkStructureType sType;
+ const void* pNext;
+ VkFenceCreateFlags flags;
+} VkFenceCreateInfo;
+
+typedef struct VkSemaphoreCreateInfo {
+ VkStructureType sType;
+ const void* pNext;
+ VkSemaphoreCreateFlags flags;
+} VkSemaphoreCreateInfo;
+
+typedef struct VkEventCreateInfo {
+ VkStructureType sType;
+ const void* pNext;
+ VkEventCreateFlags flags;
+} VkEventCreateInfo;
+
+typedef struct VkQueryPoolCreateInfo {
+ VkStructureType sType;
+ const void* pNext;
+ VkQueryPoolCreateFlags flags;
+ VkQueryType queryType;
+ uint32_t queryCount;
+ VkQueryPipelineStatisticFlags pipelineStatistics;
+} VkQueryPoolCreateInfo;
+
+typedef struct VkBufferCreateInfo {
+ VkStructureType sType;
+ const void* pNext;
+ VkBufferCreateFlags flags;
+ VkDeviceSize size;
+ VkBufferUsageFlags usage;
+ VkSharingMode sharingMode;
+ uint32_t queueFamilyIndexCount;
+ const uint32_t* pQueueFamilyIndices;
+} VkBufferCreateInfo;
+
+typedef struct VkBufferViewCreateInfo {
+ VkStructureType sType;
+ const void* pNext;
+ VkBufferViewCreateFlags flags;
+ VkBuffer buffer;
+ VkFormat format;
+ VkDeviceSize offset;
+ VkDeviceSize range;
+} VkBufferViewCreateInfo;
+
+typedef struct VkImageCreateInfo {
+ VkStructureType sType;
+ const void* pNext;
+ VkImageCreateFlags flags;
+ VkImageType imageType;
+ VkFormat format;
+ VkExtent3D extent;
+ uint32_t mipLevels;
+ uint32_t arrayLayers;
+ VkSampleCountFlagBits samples;
+ VkImageTiling tiling;
+ VkImageUsageFlags usage;
+ VkSharingMode sharingMode;
+ uint32_t queueFamilyIndexCount;
+ const uint32_t* pQueueFamilyIndices;
+ VkImageLayout initialLayout;
+} VkImageCreateInfo;
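+
+/*
+ * A minimal sketch creating a 2D RGBA texture that can be sampled and
+ * filled via transfer.  vkCreateImage is declared further down in this
+ * header; the helper name is illustrative and error handling is elided:
+ *
+ *   static VkImage create_texture_2d(VkDevice device, uint32_t w, uint32_t h) {
+ *       VkImageCreateInfo info = {0};
+ *       VkImage image = VK_NULL_HANDLE;
+ *       info.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO;
+ *       info.imageType = VK_IMAGE_TYPE_2D;
+ *       info.format = VK_FORMAT_R8G8B8A8_UNORM;
+ *       info.extent.width = w;
+ *       info.extent.height = h;
+ *       info.extent.depth = 1;
+ *       info.mipLevels = 1;
+ *       info.arrayLayers = 1;
+ *       info.samples = VK_SAMPLE_COUNT_1_BIT;
+ *       info.tiling = VK_IMAGE_TILING_OPTIMAL;
+ *       info.usage = VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT;
+ *       info.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
+ *       info.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
+ *       vkCreateImage(device, &info, NULL, &image);
+ *       return image;
+ *   }
+ */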
+
+typedef struct VkSubresourceLayout {
+ VkDeviceSize offset;
+ VkDeviceSize size;
+ VkDeviceSize rowPitch;
+ VkDeviceSize arrayPitch;
+ VkDeviceSize depthPitch;
+} VkSubresourceLayout;
+
+typedef struct VkComponentMapping {
+ VkComponentSwizzle r;
+ VkComponentSwizzle g;
+ VkComponentSwizzle b;
+ VkComponentSwizzle a;
+} VkComponentMapping;
+
+typedef struct VkImageSubresourceRange {
+ VkImageAspectFlags aspectMask;
+ uint32_t baseMipLevel;
+ uint32_t levelCount;
+ uint32_t baseArrayLayer;
+ uint32_t layerCount;
+} VkImageSubresourceRange;
+
+typedef struct VkImageViewCreateInfo {
+ VkStructureType sType;
+ const void* pNext;
+ VkImageViewCreateFlags flags;
+ VkImage image;
+ VkImageViewType viewType;
+ VkFormat format;
+ VkComponentMapping components;
+ VkImageSubresourceRange subresourceRange;
+} VkImageViewCreateInfo;
+
+typedef struct VkShaderModuleCreateInfo {
+ VkStructureType sType;
+ const void* pNext;
+ VkShaderModuleCreateFlags flags;
+ size_t codeSize;
+ const uint32_t* pCode;
+} VkShaderModuleCreateInfo;
+
+typedef struct VkPipelineCacheCreateInfo {
+ VkStructureType sType;
+ const void* pNext;
+ VkPipelineCacheCreateFlags flags;
+ size_t initialDataSize;
+ const void* pInitialData;
+} VkPipelineCacheCreateInfo;
+
+typedef struct VkSpecializationMapEntry {
+ uint32_t constantID;
+ uint32_t offset;
+ size_t size;
+} VkSpecializationMapEntry;
+
+typedef struct VkSpecializationInfo {
+ uint32_t mapEntryCount;
+ const VkSpecializationMapEntry* pMapEntries;
+ size_t dataSize;
+ const void* pData;
+} VkSpecializationInfo;
+
+typedef struct VkPipelineShaderStageCreateInfo {
+ VkStructureType sType;
+ const void* pNext;
+ VkPipelineShaderStageCreateFlags flags;
+ VkShaderStageFlagBits stage;
+ VkShaderModule module;
+ const char* pName;
+ const VkSpecializationInfo* pSpecializationInfo;
+} VkPipelineShaderStageCreateInfo;
+
+typedef struct VkVertexInputBindingDescription {
+ uint32_t binding;
+ uint32_t stride;
+ VkVertexInputRate inputRate;
+} VkVertexInputBindingDescription;
+
+typedef struct VkVertexInputAttributeDescription {
+ uint32_t location;
+ uint32_t binding;
+ VkFormat format;
+ uint32_t offset;
+} VkVertexInputAttributeDescription;
+
+typedef struct VkPipelineVertexInputStateCreateInfo {
+ VkStructureType sType;
+ const void* pNext;
+ VkPipelineVertexInputStateCreateFlags flags;
+ uint32_t vertexBindingDescriptionCount;
+ const VkVertexInputBindingDescription* pVertexBindingDescriptions;
+ uint32_t vertexAttributeDescriptionCount;
+ const VkVertexInputAttributeDescription* pVertexAttributeDescriptions;
+} VkPipelineVertexInputStateCreateInfo;
+
+typedef struct VkPipelineInputAssemblyStateCreateInfo {
+ VkStructureType sType;
+ const void* pNext;
+ VkPipelineInputAssemblyStateCreateFlags flags;
+ VkPrimitiveTopology topology;
+ VkBool32 primitiveRestartEnable;
+} VkPipelineInputAssemblyStateCreateInfo;
+
+typedef struct VkPipelineTessellationStateCreateInfo {
+ VkStructureType sType;
+ const void* pNext;
+ VkPipelineTessellationStateCreateFlags flags;
+ uint32_t patchControlPoints;
+} VkPipelineTessellationStateCreateInfo;
+
+typedef struct VkViewport {
+ float x;
+ float y;
+ float width;
+ float height;
+ float minDepth;
+ float maxDepth;
+} VkViewport;
+
+typedef struct VkOffset2D {
+ int32_t x;
+ int32_t y;
+} VkOffset2D;
+
+typedef struct VkExtent2D {
+ uint32_t width;
+ uint32_t height;
+} VkExtent2D;
+
+typedef struct VkRect2D {
+ VkOffset2D offset;
+ VkExtent2D extent;
+} VkRect2D;
+
+typedef struct VkPipelineViewportStateCreateInfo {
+ VkStructureType sType;
+ const void* pNext;
+ VkPipelineViewportStateCreateFlags flags;
+ uint32_t viewportCount;
+ const VkViewport* pViewports;
+ uint32_t scissorCount;
+ const VkRect2D* pScissors;
+} VkPipelineViewportStateCreateInfo;
+
+typedef struct VkPipelineRasterizationStateCreateInfo {
+ VkStructureType sType;
+ const void* pNext;
+ VkPipelineRasterizationStateCreateFlags flags;
+ VkBool32 depthClampEnable;
+ VkBool32 rasterizerDiscardEnable;
+ VkPolygonMode polygonMode;
+ VkCullModeFlags cullMode;
+ VkFrontFace frontFace;
+ VkBool32 depthBiasEnable;
+ float depthBiasConstantFactor;
+ float depthBiasClamp;
+ float depthBiasSlopeFactor;
+ float lineWidth;
+} VkPipelineRasterizationStateCreateInfo;
+
+typedef struct VkPipelineMultisampleStateCreateInfo {
+ VkStructureType sType;
+ const void* pNext;
+ VkPipelineMultisampleStateCreateFlags flags;
+ VkSampleCountFlagBits rasterizationSamples;
+ VkBool32 sampleShadingEnable;
+ float minSampleShading;
+ const VkSampleMask* pSampleMask;
+ VkBool32 alphaToCoverageEnable;
+ VkBool32 alphaToOneEnable;
+} VkPipelineMultisampleStateCreateInfo;
+
+typedef struct VkStencilOpState {
+ VkStencilOp failOp;
+ VkStencilOp passOp;
+ VkStencilOp depthFailOp;
+ VkCompareOp compareOp;
+ uint32_t compareMask;
+ uint32_t writeMask;
+ uint32_t reference;
+} VkStencilOpState;
+
+typedef struct VkPipelineDepthStencilStateCreateInfo {
+ VkStructureType sType;
+ const void* pNext;
+ VkPipelineDepthStencilStateCreateFlags flags;
+ VkBool32 depthTestEnable;
+ VkBool32 depthWriteEnable;
+ VkCompareOp depthCompareOp;
+ VkBool32 depthBoundsTestEnable;
+ VkBool32 stencilTestEnable;
+ VkStencilOpState front;
+ VkStencilOpState back;
+ float minDepthBounds;
+ float maxDepthBounds;
+} VkPipelineDepthStencilStateCreateInfo;
+
+typedef struct VkPipelineColorBlendAttachmentState {
+ VkBool32 blendEnable;
+ VkBlendFactor srcColorBlendFactor;
+ VkBlendFactor dstColorBlendFactor;
+ VkBlendOp colorBlendOp;
+ VkBlendFactor srcAlphaBlendFactor;
+ VkBlendFactor dstAlphaBlendFactor;
+ VkBlendOp alphaBlendOp;
+ VkColorComponentFlags colorWriteMask;
+} VkPipelineColorBlendAttachmentState;
+
+typedef struct VkPipelineColorBlendStateCreateInfo {
+ VkStructureType sType;
+ const void* pNext;
+ VkPipelineColorBlendStateCreateFlags flags;
+ VkBool32 logicOpEnable;
+ VkLogicOp logicOp;
+ uint32_t attachmentCount;
+ const VkPipelineColorBlendAttachmentState* pAttachments;
+ float blendConstants[4];
+} VkPipelineColorBlendStateCreateInfo;
+
+typedef struct VkPipelineDynamicStateCreateInfo {
+ VkStructureType sType;
+ const void* pNext;
+ VkPipelineDynamicStateCreateFlags flags;
+ uint32_t dynamicStateCount;
+ const VkDynamicState* pDynamicStates;
+} VkPipelineDynamicStateCreateInfo;
+
+typedef struct VkGraphicsPipelineCreateInfo {
+ VkStructureType sType;
+ const void* pNext;
+ VkPipelineCreateFlags flags;
+ uint32_t stageCount;
+ const VkPipelineShaderStageCreateInfo* pStages;
+ const VkPipelineVertexInputStateCreateInfo* pVertexInputState;
+ const VkPipelineInputAssemblyStateCreateInfo* pInputAssemblyState;
+ const VkPipelineTessellationStateCreateInfo* pTessellationState;
+ const VkPipelineViewportStateCreateInfo* pViewportState;
+ const VkPipelineRasterizationStateCreateInfo* pRasterizationState;
+ const VkPipelineMultisampleStateCreateInfo* pMultisampleState;
+ const VkPipelineDepthStencilStateCreateInfo* pDepthStencilState;
+ const VkPipelineColorBlendStateCreateInfo* pColorBlendState;
+ const VkPipelineDynamicStateCreateInfo* pDynamicState;
+ VkPipelineLayout layout;
+ VkRenderPass renderPass;
+ uint32_t subpass;
+ VkPipeline basePipelineHandle;
+ int32_t basePipelineIndex;
+} VkGraphicsPipelineCreateInfo;
+
+typedef struct VkComputePipelineCreateInfo {
+ VkStructureType sType;
+ const void* pNext;
+ VkPipelineCreateFlags flags;
+ VkPipelineShaderStageCreateInfo stage;
+ VkPipelineLayout layout;
+ VkPipeline basePipelineHandle;
+ int32_t basePipelineIndex;
+} VkComputePipelineCreateInfo;
+
+typedef struct VkPushConstantRange {
+ VkShaderStageFlags stageFlags;
+ uint32_t offset;
+ uint32_t size;
+} VkPushConstantRange;
+
+typedef struct VkPipelineLayoutCreateInfo {
+ VkStructureType sType;
+ const void* pNext;
+ VkPipelineLayoutCreateFlags flags;
+ uint32_t setLayoutCount;
+ const VkDescriptorSetLayout* pSetLayouts;
+ uint32_t pushConstantRangeCount;
+ const VkPushConstantRange* pPushConstantRanges;
+} VkPipelineLayoutCreateInfo;
+
+typedef struct VkSamplerCreateInfo {
+ VkStructureType sType;
+ const void* pNext;
+ VkSamplerCreateFlags flags;
+ VkFilter magFilter;
+ VkFilter minFilter;
+ VkSamplerMipmapMode mipmapMode;
+ VkSamplerAddressMode addressModeU;
+ VkSamplerAddressMode addressModeV;
+ VkSamplerAddressMode addressModeW;
+ float mipLodBias;
+ VkBool32 anisotropyEnable;
+ float maxAnisotropy;
+ VkBool32 compareEnable;
+ VkCompareOp compareOp;
+ float minLod;
+ float maxLod;
+ VkBorderColor borderColor;
+ VkBool32 unnormalizedCoordinates;
+} VkSamplerCreateInfo;
+
+typedef struct VkDescriptorSetLayoutBinding {
+ uint32_t binding;
+ VkDescriptorType descriptorType;
+ uint32_t descriptorCount;
+ VkShaderStageFlags stageFlags;
+ const VkSampler* pImmutableSamplers;
+} VkDescriptorSetLayoutBinding;
+
+typedef struct VkDescriptorSetLayoutCreateInfo {
+ VkStructureType sType;
+ const void* pNext;
+ VkDescriptorSetLayoutCreateFlags flags;
+ uint32_t bindingCount;
+ const VkDescriptorSetLayoutBinding* pBindings;
+} VkDescriptorSetLayoutCreateInfo;
+
+typedef struct VkDescriptorPoolSize {
+ VkDescriptorType type;
+ uint32_t descriptorCount;
+} VkDescriptorPoolSize;
+
+typedef struct VkDescriptorPoolCreateInfo {
+ VkStructureType sType;
+ const void* pNext;
+ VkDescriptorPoolCreateFlags flags;
+ uint32_t maxSets;
+ uint32_t poolSizeCount;
+ const VkDescriptorPoolSize* pPoolSizes;
+} VkDescriptorPoolCreateInfo;
+
+typedef struct VkDescriptorSetAllocateInfo {
+ VkStructureType sType;
+ const void* pNext;
+ VkDescriptorPool descriptorPool;
+ uint32_t descriptorSetCount;
+ const VkDescriptorSetLayout* pSetLayouts;
+} VkDescriptorSetAllocateInfo;
+
+typedef struct VkDescriptorImageInfo {
+ VkSampler sampler;
+ VkImageView imageView;
+ VkImageLayout imageLayout;
+} VkDescriptorImageInfo;
+
+typedef struct VkDescriptorBufferInfo {
+ VkBuffer buffer;
+ VkDeviceSize offset;
+ VkDeviceSize range;
+} VkDescriptorBufferInfo;
+
+typedef struct VkWriteDescriptorSet {
+ VkStructureType sType;
+ const void* pNext;
+ VkDescriptorSet dstSet;
+ uint32_t dstBinding;
+ uint32_t dstArrayElement;
+ uint32_t descriptorCount;
+ VkDescriptorType descriptorType;
+ const VkDescriptorImageInfo* pImageInfo;
+ const VkDescriptorBufferInfo* pBufferInfo;
+ const VkBufferView* pTexelBufferView;
+} VkWriteDescriptorSet;
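+
+/*
+ * A minimal sketch pointing binding 0 of a descriptor set at a uniform
+ * buffer.  vkUpdateDescriptorSets is declared further down in this header;
+ * the helper name is illustrative:
+ *
+ *   static void bind_uniform_buffer(VkDevice device, VkDescriptorSet set,
+ *                                   VkBuffer buffer, VkDeviceSize size) {
+ *       VkDescriptorBufferInfo info = {0};
+ *       VkWriteDescriptorSet write = {0};
+ *       info.buffer = buffer;
+ *       info.range = size;
+ *       write.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
+ *       write.dstSet = set;
+ *       write.dstBinding = 0;
+ *       write.descriptorCount = 1;
+ *       write.descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER;
+ *       write.pBufferInfo = &info;
+ *       vkUpdateDescriptorSets(device, 1, &write, 0, NULL);
+ *   }
+ */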
+
+typedef struct VkCopyDescriptorSet {
+ VkStructureType sType;
+ const void* pNext;
+ VkDescriptorSet srcSet;
+ uint32_t srcBinding;
+ uint32_t srcArrayElement;
+ VkDescriptorSet dstSet;
+ uint32_t dstBinding;
+ uint32_t dstArrayElement;
+ uint32_t descriptorCount;
+} VkCopyDescriptorSet;
+
+typedef struct VkFramebufferCreateInfo {
+ VkStructureType sType;
+ const void* pNext;
+ VkFramebufferCreateFlags flags;
+ VkRenderPass renderPass;
+ uint32_t attachmentCount;
+ const VkImageView* pAttachments;
+ uint32_t width;
+ uint32_t height;
+ uint32_t layers;
+} VkFramebufferCreateInfo;
+
+typedef struct VkAttachmentDescription {
+ VkAttachmentDescriptionFlags flags;
+ VkFormat format;
+ VkSampleCountFlagBits samples;
+ VkAttachmentLoadOp loadOp;
+ VkAttachmentStoreOp storeOp;
+ VkAttachmentLoadOp stencilLoadOp;
+ VkAttachmentStoreOp stencilStoreOp;
+ VkImageLayout initialLayout;
+ VkImageLayout finalLayout;
+} VkAttachmentDescription;
+
+typedef struct VkAttachmentReference {
+ uint32_t attachment;
+ VkImageLayout layout;
+} VkAttachmentReference;
+
+typedef struct VkSubpassDescription {
+ VkSubpassDescriptionFlags flags;
+ VkPipelineBindPoint pipelineBindPoint;
+ uint32_t inputAttachmentCount;
+ const VkAttachmentReference* pInputAttachments;
+ uint32_t colorAttachmentCount;
+ const VkAttachmentReference* pColorAttachments;
+ const VkAttachmentReference* pResolveAttachments;
+ const VkAttachmentReference* pDepthStencilAttachment;
+ uint32_t preserveAttachmentCount;
+ const uint32_t* pPreserveAttachments;
+} VkSubpassDescription;
+
+typedef struct VkSubpassDependency {
+ uint32_t srcSubpass;
+ uint32_t dstSubpass;
+ VkPipelineStageFlags srcStageMask;
+ VkPipelineStageFlags dstStageMask;
+ VkAccessFlags srcAccessMask;
+ VkAccessFlags dstAccessMask;
+ VkDependencyFlags dependencyFlags;
+} VkSubpassDependency;
+
+typedef struct VkRenderPassCreateInfo {
+ VkStructureType sType;
+ const void* pNext;
+ VkRenderPassCreateFlags flags;
+ uint32_t attachmentCount;
+ const VkAttachmentDescription* pAttachments;
+ uint32_t subpassCount;
+ const VkSubpassDescription* pSubpasses;
+ uint32_t dependencyCount;
+ const VkSubpassDependency* pDependencies;
+} VkRenderPassCreateInfo;
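+
+/*
+ * A minimal sketch of a single-subpass, single-color-attachment render
+ * pass.  vkCreateRenderPass is declared further down in this header; the
+ * PRESENT_SRC_KHR final layout assumes the attachment is presented
+ * afterwards, and error handling is elided:
+ *
+ *   static VkRenderPass create_color_pass(VkDevice device, VkFormat format) {
+ *       VkAttachmentDescription att = {0};
+ *       VkAttachmentReference ref = {0};
+ *       VkSubpassDescription sub = {0};
+ *       VkRenderPassCreateInfo info = {0};
+ *       VkRenderPass pass = VK_NULL_HANDLE;
+ *       att.format = format;
+ *       att.samples = VK_SAMPLE_COUNT_1_BIT;
+ *       att.loadOp = VK_ATTACHMENT_LOAD_OP_CLEAR;
+ *       att.storeOp = VK_ATTACHMENT_STORE_OP_STORE;
+ *       att.stencilLoadOp = VK_ATTACHMENT_LOAD_OP_DONT_CARE;
+ *       att.stencilStoreOp = VK_ATTACHMENT_STORE_OP_DONT_CARE;
+ *       att.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
+ *       att.finalLayout = VK_IMAGE_LAYOUT_PRESENT_SRC_KHR;
+ *       ref.attachment = 0;
+ *       ref.layout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;
+ *       sub.pipelineBindPoint = VK_PIPELINE_BIND_POINT_GRAPHICS;
+ *       sub.colorAttachmentCount = 1;
+ *       sub.pColorAttachments = &ref;
+ *       info.sType = VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO;
+ *       info.attachmentCount = 1;
+ *       info.pAttachments = &att;
+ *       info.subpassCount = 1;
+ *       info.pSubpasses = &sub;
+ *       vkCreateRenderPass(device, &info, NULL, &pass);
+ *       return pass;
+ *   }
+ */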
+
+typedef struct VkCommandPoolCreateInfo {
+ VkStructureType sType;
+ const void* pNext;
+ VkCommandPoolCreateFlags flags;
+ uint32_t queueFamilyIndex;
+} VkCommandPoolCreateInfo;
+
+typedef struct VkCommandBufferAllocateInfo {
+ VkStructureType sType;
+ const void* pNext;
+ VkCommandPool commandPool;
+ VkCommandBufferLevel level;
+ uint32_t commandBufferCount;
+} VkCommandBufferAllocateInfo;
+
+typedef struct VkCommandBufferInheritanceInfo {
+ VkStructureType sType;
+ const void* pNext;
+ VkRenderPass renderPass;
+ uint32_t subpass;
+ VkFramebuffer framebuffer;
+ VkBool32 occlusionQueryEnable;
+ VkQueryControlFlags queryFlags;
+ VkQueryPipelineStatisticFlags pipelineStatistics;
+} VkCommandBufferInheritanceInfo;
+
+typedef struct VkCommandBufferBeginInfo {
+ VkStructureType sType;
+ const void* pNext;
+ VkCommandBufferUsageFlags flags;
+ const VkCommandBufferInheritanceInfo* pInheritanceInfo;
+} VkCommandBufferBeginInfo;
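+
+/*
+ * Usage sketch (illustrative only): allocating one primary command buffer
+ * from an existing pool and beginning one-time recording. `device` and
+ * `pool` are assumed handles; VkResult values are unchecked for brevity.
+ *
+ *     VkCommandBufferAllocateInfo alloc = { 0 };
+ *     alloc.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO;
+ *     alloc.commandPool = pool;
+ *     alloc.level = VK_COMMAND_BUFFER_LEVEL_PRIMARY;
+ *     alloc.commandBufferCount = 1;
+ *     VkCommandBuffer cmd;
+ *     vkAllocateCommandBuffers(device, &alloc, &cmd);
+ *
+ *     VkCommandBufferBeginInfo begin = { 0 };
+ *     begin.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO;
+ *     begin.flags = VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT;
+ *     vkBeginCommandBuffer(cmd, &begin);
+ *     ... record commands ...
+ *     vkEndCommandBuffer(cmd);
+ */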
+
+typedef struct VkBufferCopy {
+ VkDeviceSize srcOffset;
+ VkDeviceSize dstOffset;
+ VkDeviceSize size;
+} VkBufferCopy;
+
+typedef struct VkImageSubresourceLayers {
+ VkImageAspectFlags aspectMask;
+ uint32_t mipLevel;
+ uint32_t baseArrayLayer;
+ uint32_t layerCount;
+} VkImageSubresourceLayers;
+
+typedef struct VkImageCopy {
+ VkImageSubresourceLayers srcSubresource;
+ VkOffset3D srcOffset;
+ VkImageSubresourceLayers dstSubresource;
+ VkOffset3D dstOffset;
+ VkExtent3D extent;
+} VkImageCopy;
+
+typedef struct VkImageBlit {
+ VkImageSubresourceLayers srcSubresource;
+ VkOffset3D srcOffsets[2];
+ VkImageSubresourceLayers dstSubresource;
+ VkOffset3D dstOffsets[2];
+} VkImageBlit;
+
+typedef struct VkBufferImageCopy {
+ VkDeviceSize bufferOffset;
+ uint32_t bufferRowLength;
+ uint32_t bufferImageHeight;
+ VkImageSubresourceLayers imageSubresource;
+ VkOffset3D imageOffset;
+ VkExtent3D imageExtent;
+} VkBufferImageCopy;
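+
+/*
+ * Usage sketch (illustrative only): copying a tightly packed staging
+ * buffer into mip level 0 of a 2D image with vkCmdCopyBufferToImage
+ * (declared below). `cmd`, `staging`, `image`, `w`, and `h` are assumed;
+ * the image must already be in TRANSFER_DST_OPTIMAL layout.
+ *
+ *     VkBufferImageCopy region = { 0 };
+ *     region.bufferOffset = 0;
+ *     region.bufferRowLength = 0;       (0 means tightly packed)
+ *     region.bufferImageHeight = 0;
+ *     region.imageSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
+ *     region.imageSubresource.mipLevel = 0;
+ *     region.imageSubresource.baseArrayLayer = 0;
+ *     region.imageSubresource.layerCount = 1;
+ *     region.imageExtent.width = w;
+ *     region.imageExtent.height = h;
+ *     region.imageExtent.depth = 1;
+ *     vkCmdCopyBufferToImage(cmd, staging, image,
+ *                            VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1, &region);
+ */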
+
+typedef union VkClearColorValue {
+ float float32[4];
+ int32_t int32[4];
+ uint32_t uint32[4];
+} VkClearColorValue;
+
+typedef struct VkClearDepthStencilValue {
+ float depth;
+ uint32_t stencil;
+} VkClearDepthStencilValue;
+
+typedef union VkClearValue {
+ VkClearColorValue color;
+ VkClearDepthStencilValue depthStencil;
+} VkClearValue;
+
+typedef struct VkClearAttachment {
+ VkImageAspectFlags aspectMask;
+ uint32_t colorAttachment;
+ VkClearValue clearValue;
+} VkClearAttachment;
+
+typedef struct VkClearRect {
+ VkRect2D rect;
+ uint32_t baseArrayLayer;
+ uint32_t layerCount;
+} VkClearRect;
+
+typedef struct VkImageResolve {
+ VkImageSubresourceLayers srcSubresource;
+ VkOffset3D srcOffset;
+ VkImageSubresourceLayers dstSubresource;
+ VkOffset3D dstOffset;
+ VkExtent3D extent;
+} VkImageResolve;
+
+typedef struct VkMemoryBarrier {
+ VkStructureType sType;
+ const void* pNext;
+ VkAccessFlags srcAccessMask;
+ VkAccessFlags dstAccessMask;
+} VkMemoryBarrier;
+
+typedef struct VkBufferMemoryBarrier {
+ VkStructureType sType;
+ const void* pNext;
+ VkAccessFlags srcAccessMask;
+ VkAccessFlags dstAccessMask;
+ uint32_t srcQueueFamilyIndex;
+ uint32_t dstQueueFamilyIndex;
+ VkBuffer buffer;
+ VkDeviceSize offset;
+ VkDeviceSize size;
+} VkBufferMemoryBarrier;
+
+typedef struct VkImageMemoryBarrier {
+ VkStructureType sType;
+ const void* pNext;
+ VkAccessFlags srcAccessMask;
+ VkAccessFlags dstAccessMask;
+ VkImageLayout oldLayout;
+ VkImageLayout newLayout;
+ uint32_t srcQueueFamilyIndex;
+ uint32_t dstQueueFamilyIndex;
+ VkImage image;
+ VkImageSubresourceRange subresourceRange;
+} VkImageMemoryBarrier;
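+
+/*
+ * Usage sketch (illustrative only): transitioning an image from UNDEFINED
+ * to TRANSFER_DST_OPTIMAL with vkCmdPipelineBarrier (declared below), a
+ * common step before filling the image. `cmd` and `image` are assumed.
+ *
+ *     VkImageMemoryBarrier barrier = { 0 };
+ *     barrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER;
+ *     barrier.srcAccessMask = 0;
+ *     barrier.dstAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT;
+ *     barrier.oldLayout = VK_IMAGE_LAYOUT_UNDEFINED;
+ *     barrier.newLayout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL;
+ *     barrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
+ *     barrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
+ *     barrier.image = image;
+ *     barrier.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
+ *     barrier.subresourceRange.baseMipLevel = 0;
+ *     barrier.subresourceRange.levelCount = 1;
+ *     barrier.subresourceRange.baseArrayLayer = 0;
+ *     barrier.subresourceRange.layerCount = 1;
+ *     vkCmdPipelineBarrier(cmd,
+ *                          VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT,
+ *                          VK_PIPELINE_STAGE_TRANSFER_BIT,
+ *                          0, 0, NULL, 0, NULL, 1, &barrier);
+ */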
+
+typedef struct VkRenderPassBeginInfo {
+ VkStructureType sType;
+ const void* pNext;
+ VkRenderPass renderPass;
+ VkFramebuffer framebuffer;
+ VkRect2D renderArea;
+ uint32_t clearValueCount;
+ const VkClearValue* pClearValues;
+} VkRenderPassBeginInfo;
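+
+/*
+ * Usage sketch (illustrative only): beginning and ending a render pass on
+ * a recording command buffer, clearing the single color attachment to
+ * opaque black. `cmd`, `renderPass`, `framebuffer`, `w`, and `h` are
+ * assumed to exist.
+ *
+ *     VkClearValue clear;
+ *     clear.color.float32[0] = 0.0f;
+ *     clear.color.float32[1] = 0.0f;
+ *     clear.color.float32[2] = 0.0f;
+ *     clear.color.float32[3] = 1.0f;
+ *
+ *     VkRenderPassBeginInfo begin = { 0 };
+ *     begin.sType = VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO;
+ *     begin.renderPass = renderPass;
+ *     begin.framebuffer = framebuffer;
+ *     begin.renderArea.extent.width = w;
+ *     begin.renderArea.extent.height = h;
+ *     begin.clearValueCount = 1;
+ *     begin.pClearValues = &clear;
+ *     vkCmdBeginRenderPass(cmd, &begin, VK_SUBPASS_CONTENTS_INLINE);
+ *     ... draw ...
+ *     vkCmdEndRenderPass(cmd);
+ */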
+
+typedef struct VkDispatchIndirectCommand {
+ uint32_t x;
+ uint32_t y;
+ uint32_t z;
+} VkDispatchIndirectCommand;
+
+typedef struct VkDrawIndexedIndirectCommand {
+ uint32_t indexCount;
+ uint32_t instanceCount;
+ uint32_t firstIndex;
+ int32_t vertexOffset;
+ uint32_t firstInstance;
+} VkDrawIndexedIndirectCommand;
+
+typedef struct VkDrawIndirectCommand {
+ uint32_t vertexCount;
+ uint32_t instanceCount;
+ uint32_t firstVertex;
+ uint32_t firstInstance;
+} VkDrawIndirectCommand;
+
+
+typedef VkResult (VKAPI_PTR *PFN_vkCreateInstance)(const VkInstanceCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkInstance* pInstance);
+typedef void (VKAPI_PTR *PFN_vkDestroyInstance)(VkInstance instance, const VkAllocationCallbacks* pAllocator);
+typedef VkResult (VKAPI_PTR *PFN_vkEnumeratePhysicalDevices)(VkInstance instance, uint32_t* pPhysicalDeviceCount, VkPhysicalDevice* pPhysicalDevices);
+typedef void (VKAPI_PTR *PFN_vkGetPhysicalDeviceFeatures)(VkPhysicalDevice physicalDevice, VkPhysicalDeviceFeatures* pFeatures);
+typedef void (VKAPI_PTR *PFN_vkGetPhysicalDeviceFormatProperties)(VkPhysicalDevice physicalDevice, VkFormat format, VkFormatProperties* pFormatProperties);
+typedef VkResult (VKAPI_PTR *PFN_vkGetPhysicalDeviceImageFormatProperties)(VkPhysicalDevice physicalDevice, VkFormat format, VkImageType type, VkImageTiling tiling, VkImageUsageFlags usage, VkImageCreateFlags flags, VkImageFormatProperties* pImageFormatProperties);
+typedef void (VKAPI_PTR *PFN_vkGetPhysicalDeviceProperties)(VkPhysicalDevice physicalDevice, VkPhysicalDeviceProperties* pProperties);
+typedef void (VKAPI_PTR *PFN_vkGetPhysicalDeviceQueueFamilyProperties)(VkPhysicalDevice physicalDevice, uint32_t* pQueueFamilyPropertyCount, VkQueueFamilyProperties* pQueueFamilyProperties);
+typedef void (VKAPI_PTR *PFN_vkGetPhysicalDeviceMemoryProperties)(VkPhysicalDevice physicalDevice, VkPhysicalDeviceMemoryProperties* pMemoryProperties);
+typedef PFN_vkVoidFunction (VKAPI_PTR *PFN_vkGetInstanceProcAddr)(VkInstance instance, const char* pName);
+typedef PFN_vkVoidFunction (VKAPI_PTR *PFN_vkGetDeviceProcAddr)(VkDevice device, const char* pName);
+typedef VkResult (VKAPI_PTR *PFN_vkCreateDevice)(VkPhysicalDevice physicalDevice, const VkDeviceCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkDevice* pDevice);
+typedef void (VKAPI_PTR *PFN_vkDestroyDevice)(VkDevice device, const VkAllocationCallbacks* pAllocator);
+typedef VkResult (VKAPI_PTR *PFN_vkEnumerateInstanceExtensionProperties)(const char* pLayerName, uint32_t* pPropertyCount, VkExtensionProperties* pProperties);
+typedef VkResult (VKAPI_PTR *PFN_vkEnumerateDeviceExtensionProperties)(VkPhysicalDevice physicalDevice, const char* pLayerName, uint32_t* pPropertyCount, VkExtensionProperties* pProperties);
+typedef VkResult (VKAPI_PTR *PFN_vkEnumerateInstanceLayerProperties)(uint32_t* pPropertyCount, VkLayerProperties* pProperties);
+typedef VkResult (VKAPI_PTR *PFN_vkEnumerateDeviceLayerProperties)(VkPhysicalDevice physicalDevice, uint32_t* pPropertyCount, VkLayerProperties* pProperties);
+typedef void (VKAPI_PTR *PFN_vkGetDeviceQueue)(VkDevice device, uint32_t queueFamilyIndex, uint32_t queueIndex, VkQueue* pQueue);
+typedef VkResult (VKAPI_PTR *PFN_vkQueueSubmit)(VkQueue queue, uint32_t submitCount, const VkSubmitInfo* pSubmits, VkFence fence);
+typedef VkResult (VKAPI_PTR *PFN_vkQueueWaitIdle)(VkQueue queue);
+typedef VkResult (VKAPI_PTR *PFN_vkDeviceWaitIdle)(VkDevice device);
+typedef VkResult (VKAPI_PTR *PFN_vkAllocateMemory)(VkDevice device, const VkMemoryAllocateInfo* pAllocateInfo, const VkAllocationCallbacks* pAllocator, VkDeviceMemory* pMemory);
+typedef void (VKAPI_PTR *PFN_vkFreeMemory)(VkDevice device, VkDeviceMemory memory, const VkAllocationCallbacks* pAllocator);
+typedef VkResult (VKAPI_PTR *PFN_vkMapMemory)(VkDevice device, VkDeviceMemory memory, VkDeviceSize offset, VkDeviceSize size, VkMemoryMapFlags flags, void** ppData);
+typedef void (VKAPI_PTR *PFN_vkUnmapMemory)(VkDevice device, VkDeviceMemory memory);
+typedef VkResult (VKAPI_PTR *PFN_vkFlushMappedMemoryRanges)(VkDevice device, uint32_t memoryRangeCount, const VkMappedMemoryRange* pMemoryRanges);
+typedef VkResult (VKAPI_PTR *PFN_vkInvalidateMappedMemoryRanges)(VkDevice device, uint32_t memoryRangeCount, const VkMappedMemoryRange* pMemoryRanges);
+typedef void (VKAPI_PTR *PFN_vkGetDeviceMemoryCommitment)(VkDevice device, VkDeviceMemory memory, VkDeviceSize* pCommittedMemoryInBytes);
+typedef VkResult (VKAPI_PTR *PFN_vkBindBufferMemory)(VkDevice device, VkBuffer buffer, VkDeviceMemory memory, VkDeviceSize memoryOffset);
+typedef VkResult (VKAPI_PTR *PFN_vkBindImageMemory)(VkDevice device, VkImage image, VkDeviceMemory memory, VkDeviceSize memoryOffset);
+typedef void (VKAPI_PTR *PFN_vkGetBufferMemoryRequirements)(VkDevice device, VkBuffer buffer, VkMemoryRequirements* pMemoryRequirements);
+typedef void (VKAPI_PTR *PFN_vkGetImageMemoryRequirements)(VkDevice device, VkImage image, VkMemoryRequirements* pMemoryRequirements);
+typedef void (VKAPI_PTR *PFN_vkGetImageSparseMemoryRequirements)(VkDevice device, VkImage image, uint32_t* pSparseMemoryRequirementCount, VkSparseImageMemoryRequirements* pSparseMemoryRequirements);
+typedef void (VKAPI_PTR *PFN_vkGetPhysicalDeviceSparseImageFormatProperties)(VkPhysicalDevice physicalDevice, VkFormat format, VkImageType type, VkSampleCountFlagBits samples, VkImageUsageFlags usage, VkImageTiling tiling, uint32_t* pPropertyCount, VkSparseImageFormatProperties* pProperties);
+typedef VkResult (VKAPI_PTR *PFN_vkQueueBindSparse)(VkQueue queue, uint32_t bindInfoCount, const VkBindSparseInfo* pBindInfo, VkFence fence);
+typedef VkResult (VKAPI_PTR *PFN_vkCreateFence)(VkDevice device, const VkFenceCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkFence* pFence);
+typedef void (VKAPI_PTR *PFN_vkDestroyFence)(VkDevice device, VkFence fence, const VkAllocationCallbacks* pAllocator);
+typedef VkResult (VKAPI_PTR *PFN_vkResetFences)(VkDevice device, uint32_t fenceCount, const VkFence* pFences);
+typedef VkResult (VKAPI_PTR *PFN_vkGetFenceStatus)(VkDevice device, VkFence fence);
+typedef VkResult (VKAPI_PTR *PFN_vkWaitForFences)(VkDevice device, uint32_t fenceCount, const VkFence* pFences, VkBool32 waitAll, uint64_t timeout);
+typedef VkResult (VKAPI_PTR *PFN_vkCreateSemaphore)(VkDevice device, const VkSemaphoreCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkSemaphore* pSemaphore);
+typedef void (VKAPI_PTR *PFN_vkDestroySemaphore)(VkDevice device, VkSemaphore semaphore, const VkAllocationCallbacks* pAllocator);
+typedef VkResult (VKAPI_PTR *PFN_vkCreateEvent)(VkDevice device, const VkEventCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkEvent* pEvent);
+typedef void (VKAPI_PTR *PFN_vkDestroyEvent)(VkDevice device, VkEvent event, const VkAllocationCallbacks* pAllocator);
+typedef VkResult (VKAPI_PTR *PFN_vkGetEventStatus)(VkDevice device, VkEvent event);
+typedef VkResult (VKAPI_PTR *PFN_vkSetEvent)(VkDevice device, VkEvent event);
+typedef VkResult (VKAPI_PTR *PFN_vkResetEvent)(VkDevice device, VkEvent event);
+typedef VkResult (VKAPI_PTR *PFN_vkCreateQueryPool)(VkDevice device, const VkQueryPoolCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkQueryPool* pQueryPool);
+typedef void (VKAPI_PTR *PFN_vkDestroyQueryPool)(VkDevice device, VkQueryPool queryPool, const VkAllocationCallbacks* pAllocator);
+typedef VkResult (VKAPI_PTR *PFN_vkGetQueryPoolResults)(VkDevice device, VkQueryPool queryPool, uint32_t firstQuery, uint32_t queryCount, size_t dataSize, void* pData, VkDeviceSize stride, VkQueryResultFlags flags);
+typedef VkResult (VKAPI_PTR *PFN_vkCreateBuffer)(VkDevice device, const VkBufferCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkBuffer* pBuffer);
+typedef void (VKAPI_PTR *PFN_vkDestroyBuffer)(VkDevice device, VkBuffer buffer, const VkAllocationCallbacks* pAllocator);
+typedef VkResult (VKAPI_PTR *PFN_vkCreateBufferView)(VkDevice device, const VkBufferViewCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkBufferView* pView);
+typedef void (VKAPI_PTR *PFN_vkDestroyBufferView)(VkDevice device, VkBufferView bufferView, const VkAllocationCallbacks* pAllocator);
+typedef VkResult (VKAPI_PTR *PFN_vkCreateImage)(VkDevice device, const VkImageCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkImage* pImage);
+typedef void (VKAPI_PTR *PFN_vkDestroyImage)(VkDevice device, VkImage image, const VkAllocationCallbacks* pAllocator);
+typedef void (VKAPI_PTR *PFN_vkGetImageSubresourceLayout)(VkDevice device, VkImage image, const VkImageSubresource* pSubresource, VkSubresourceLayout* pLayout);
+typedef VkResult (VKAPI_PTR *PFN_vkCreateImageView)(VkDevice device, const VkImageViewCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkImageView* pView);
+typedef void (VKAPI_PTR *PFN_vkDestroyImageView)(VkDevice device, VkImageView imageView, const VkAllocationCallbacks* pAllocator);
+typedef VkResult (VKAPI_PTR *PFN_vkCreateShaderModule)(VkDevice device, const VkShaderModuleCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkShaderModule* pShaderModule);
+typedef void (VKAPI_PTR *PFN_vkDestroyShaderModule)(VkDevice device, VkShaderModule shaderModule, const VkAllocationCallbacks* pAllocator);
+typedef VkResult (VKAPI_PTR *PFN_vkCreatePipelineCache)(VkDevice device, const VkPipelineCacheCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkPipelineCache* pPipelineCache);
+typedef void (VKAPI_PTR *PFN_vkDestroyPipelineCache)(VkDevice device, VkPipelineCache pipelineCache, const VkAllocationCallbacks* pAllocator);
+typedef VkResult (VKAPI_PTR *PFN_vkGetPipelineCacheData)(VkDevice device, VkPipelineCache pipelineCache, size_t* pDataSize, void* pData);
+typedef VkResult (VKAPI_PTR *PFN_vkMergePipelineCaches)(VkDevice device, VkPipelineCache dstCache, uint32_t srcCacheCount, const VkPipelineCache* pSrcCaches);
+typedef VkResult (VKAPI_PTR *PFN_vkCreateGraphicsPipelines)(VkDevice device, VkPipelineCache pipelineCache, uint32_t createInfoCount, const VkGraphicsPipelineCreateInfo* pCreateInfos, const VkAllocationCallbacks* pAllocator, VkPipeline* pPipelines);
+typedef VkResult (VKAPI_PTR *PFN_vkCreateComputePipelines)(VkDevice device, VkPipelineCache pipelineCache, uint32_t createInfoCount, const VkComputePipelineCreateInfo* pCreateInfos, const VkAllocationCallbacks* pAllocator, VkPipeline* pPipelines);
+typedef void (VKAPI_PTR *PFN_vkDestroyPipeline)(VkDevice device, VkPipeline pipeline, const VkAllocationCallbacks* pAllocator);
+typedef VkResult (VKAPI_PTR *PFN_vkCreatePipelineLayout)(VkDevice device, const VkPipelineLayoutCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkPipelineLayout* pPipelineLayout);
+typedef void (VKAPI_PTR *PFN_vkDestroyPipelineLayout)(VkDevice device, VkPipelineLayout pipelineLayout, const VkAllocationCallbacks* pAllocator);
+typedef VkResult (VKAPI_PTR *PFN_vkCreateSampler)(VkDevice device, const VkSamplerCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkSampler* pSampler);
+typedef void (VKAPI_PTR *PFN_vkDestroySampler)(VkDevice device, VkSampler sampler, const VkAllocationCallbacks* pAllocator);
+typedef VkResult (VKAPI_PTR *PFN_vkCreateDescriptorSetLayout)(VkDevice device, const VkDescriptorSetLayoutCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkDescriptorSetLayout* pSetLayout);
+typedef void (VKAPI_PTR *PFN_vkDestroyDescriptorSetLayout)(VkDevice device, VkDescriptorSetLayout descriptorSetLayout, const VkAllocationCallbacks* pAllocator);
+typedef VkResult (VKAPI_PTR *PFN_vkCreateDescriptorPool)(VkDevice device, const VkDescriptorPoolCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkDescriptorPool* pDescriptorPool);
+typedef void (VKAPI_PTR *PFN_vkDestroyDescriptorPool)(VkDevice device, VkDescriptorPool descriptorPool, const VkAllocationCallbacks* pAllocator);
+typedef VkResult (VKAPI_PTR *PFN_vkResetDescriptorPool)(VkDevice device, VkDescriptorPool descriptorPool, VkDescriptorPoolResetFlags flags);
+typedef VkResult (VKAPI_PTR *PFN_vkAllocateDescriptorSets)(VkDevice device, const VkDescriptorSetAllocateInfo* pAllocateInfo, VkDescriptorSet* pDescriptorSets);
+typedef VkResult (VKAPI_PTR *PFN_vkFreeDescriptorSets)(VkDevice device, VkDescriptorPool descriptorPool, uint32_t descriptorSetCount, const VkDescriptorSet* pDescriptorSets);
+typedef void (VKAPI_PTR *PFN_vkUpdateDescriptorSets)(VkDevice device, uint32_t descriptorWriteCount, const VkWriteDescriptorSet* pDescriptorWrites, uint32_t descriptorCopyCount, const VkCopyDescriptorSet* pDescriptorCopies);
+typedef VkResult (VKAPI_PTR *PFN_vkCreateFramebuffer)(VkDevice device, const VkFramebufferCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkFramebuffer* pFramebuffer);
+typedef void (VKAPI_PTR *PFN_vkDestroyFramebuffer)(VkDevice device, VkFramebuffer framebuffer, const VkAllocationCallbacks* pAllocator);
+typedef VkResult (VKAPI_PTR *PFN_vkCreateRenderPass)(VkDevice device, const VkRenderPassCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkRenderPass* pRenderPass);
+typedef void (VKAPI_PTR *PFN_vkDestroyRenderPass)(VkDevice device, VkRenderPass renderPass, const VkAllocationCallbacks* pAllocator);
+typedef void (VKAPI_PTR *PFN_vkGetRenderAreaGranularity)(VkDevice device, VkRenderPass renderPass, VkExtent2D* pGranularity);
+typedef VkResult (VKAPI_PTR *PFN_vkCreateCommandPool)(VkDevice device, const VkCommandPoolCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkCommandPool* pCommandPool);
+typedef void (VKAPI_PTR *PFN_vkDestroyCommandPool)(VkDevice device, VkCommandPool commandPool, const VkAllocationCallbacks* pAllocator);
+typedef VkResult (VKAPI_PTR *PFN_vkResetCommandPool)(VkDevice device, VkCommandPool commandPool, VkCommandPoolResetFlags flags);
+typedef VkResult (VKAPI_PTR *PFN_vkAllocateCommandBuffers)(VkDevice device, const VkCommandBufferAllocateInfo* pAllocateInfo, VkCommandBuffer* pCommandBuffers);
+typedef void (VKAPI_PTR *PFN_vkFreeCommandBuffers)(VkDevice device, VkCommandPool commandPool, uint32_t commandBufferCount, const VkCommandBuffer* pCommandBuffers);
+typedef VkResult (VKAPI_PTR *PFN_vkBeginCommandBuffer)(VkCommandBuffer commandBuffer, const VkCommandBufferBeginInfo* pBeginInfo);
+typedef VkResult (VKAPI_PTR *PFN_vkEndCommandBuffer)(VkCommandBuffer commandBuffer);
+typedef VkResult (VKAPI_PTR *PFN_vkResetCommandBuffer)(VkCommandBuffer commandBuffer, VkCommandBufferResetFlags flags);
+typedef void (VKAPI_PTR *PFN_vkCmdBindPipeline)(VkCommandBuffer commandBuffer, VkPipelineBindPoint pipelineBindPoint, VkPipeline pipeline);
+typedef void (VKAPI_PTR *PFN_vkCmdSetViewport)(VkCommandBuffer commandBuffer, uint32_t firstViewport, uint32_t viewportCount, const VkViewport* pViewports);
+typedef void (VKAPI_PTR *PFN_vkCmdSetScissor)(VkCommandBuffer commandBuffer, uint32_t firstScissor, uint32_t scissorCount, const VkRect2D* pScissors);
+typedef void (VKAPI_PTR *PFN_vkCmdSetLineWidth)(VkCommandBuffer commandBuffer, float lineWidth);
+typedef void (VKAPI_PTR *PFN_vkCmdSetDepthBias)(VkCommandBuffer commandBuffer, float depthBiasConstantFactor, float depthBiasClamp, float depthBiasSlopeFactor);
+typedef void (VKAPI_PTR *PFN_vkCmdSetBlendConstants)(VkCommandBuffer commandBuffer, const float blendConstants[4]);
+typedef void (VKAPI_PTR *PFN_vkCmdSetDepthBounds)(VkCommandBuffer commandBuffer, float minDepthBounds, float maxDepthBounds);
+typedef void (VKAPI_PTR *PFN_vkCmdSetStencilCompareMask)(VkCommandBuffer commandBuffer, VkStencilFaceFlags faceMask, uint32_t compareMask);
+typedef void (VKAPI_PTR *PFN_vkCmdSetStencilWriteMask)(VkCommandBuffer commandBuffer, VkStencilFaceFlags faceMask, uint32_t writeMask);
+typedef void (VKAPI_PTR *PFN_vkCmdSetStencilReference)(VkCommandBuffer commandBuffer, VkStencilFaceFlags faceMask, uint32_t reference);
+typedef void (VKAPI_PTR *PFN_vkCmdBindDescriptorSets)(VkCommandBuffer commandBuffer, VkPipelineBindPoint pipelineBindPoint, VkPipelineLayout layout, uint32_t firstSet, uint32_t descriptorSetCount, const VkDescriptorSet* pDescriptorSets, uint32_t dynamicOffsetCount, const uint32_t* pDynamicOffsets);
+typedef void (VKAPI_PTR *PFN_vkCmdBindIndexBuffer)(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, VkIndexType indexType);
+typedef void (VKAPI_PTR *PFN_vkCmdBindVertexBuffers)(VkCommandBuffer commandBuffer, uint32_t firstBinding, uint32_t bindingCount, const VkBuffer* pBuffers, const VkDeviceSize* pOffsets);
+typedef void (VKAPI_PTR *PFN_vkCmdDraw)(VkCommandBuffer commandBuffer, uint32_t vertexCount, uint32_t instanceCount, uint32_t firstVertex, uint32_t firstInstance);
+typedef void (VKAPI_PTR *PFN_vkCmdDrawIndexed)(VkCommandBuffer commandBuffer, uint32_t indexCount, uint32_t instanceCount, uint32_t firstIndex, int32_t vertexOffset, uint32_t firstInstance);
+typedef void (VKAPI_PTR *PFN_vkCmdDrawIndirect)(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, uint32_t drawCount, uint32_t stride);
+typedef void (VKAPI_PTR *PFN_vkCmdDrawIndexedIndirect)(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, uint32_t drawCount, uint32_t stride);
+typedef void (VKAPI_PTR *PFN_vkCmdDispatch)(VkCommandBuffer commandBuffer, uint32_t x, uint32_t y, uint32_t z);
+typedef void (VKAPI_PTR *PFN_vkCmdDispatchIndirect)(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset);
+typedef void (VKAPI_PTR *PFN_vkCmdCopyBuffer)(VkCommandBuffer commandBuffer, VkBuffer srcBuffer, VkBuffer dstBuffer, uint32_t regionCount, const VkBufferCopy* pRegions);
+typedef void (VKAPI_PTR *PFN_vkCmdCopyImage)(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout, VkImage dstImage, VkImageLayout dstImageLayout, uint32_t regionCount, const VkImageCopy* pRegions);
+typedef void (VKAPI_PTR *PFN_vkCmdBlitImage)(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout, VkImage dstImage, VkImageLayout dstImageLayout, uint32_t regionCount, const VkImageBlit* pRegions, VkFilter filter);
+typedef void (VKAPI_PTR *PFN_vkCmdCopyBufferToImage)(VkCommandBuffer commandBuffer, VkBuffer srcBuffer, VkImage dstImage, VkImageLayout dstImageLayout, uint32_t regionCount, const VkBufferImageCopy* pRegions);
+typedef void (VKAPI_PTR *PFN_vkCmdCopyImageToBuffer)(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout, VkBuffer dstBuffer, uint32_t regionCount, const VkBufferImageCopy* pRegions);
+typedef void (VKAPI_PTR *PFN_vkCmdUpdateBuffer)(VkCommandBuffer commandBuffer, VkBuffer dstBuffer, VkDeviceSize dstOffset, VkDeviceSize dataSize, const uint32_t* pData);
+typedef void (VKAPI_PTR *PFN_vkCmdFillBuffer)(VkCommandBuffer commandBuffer, VkBuffer dstBuffer, VkDeviceSize dstOffset, VkDeviceSize size, uint32_t data);
+typedef void (VKAPI_PTR *PFN_vkCmdClearColorImage)(VkCommandBuffer commandBuffer, VkImage image, VkImageLayout imageLayout, const VkClearColorValue* pColor, uint32_t rangeCount, const VkImageSubresourceRange* pRanges);
+typedef void (VKAPI_PTR *PFN_vkCmdClearDepthStencilImage)(VkCommandBuffer commandBuffer, VkImage image, VkImageLayout imageLayout, const VkClearDepthStencilValue* pDepthStencil, uint32_t rangeCount, const VkImageSubresourceRange* pRanges);
+typedef void (VKAPI_PTR *PFN_vkCmdClearAttachments)(VkCommandBuffer commandBuffer, uint32_t attachmentCount, const VkClearAttachment* pAttachments, uint32_t rectCount, const VkClearRect* pRects);
+typedef void (VKAPI_PTR *PFN_vkCmdResolveImage)(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout, VkImage dstImage, VkImageLayout dstImageLayout, uint32_t regionCount, const VkImageResolve* pRegions);
+typedef void (VKAPI_PTR *PFN_vkCmdSetEvent)(VkCommandBuffer commandBuffer, VkEvent event, VkPipelineStageFlags stageMask);
+typedef void (VKAPI_PTR *PFN_vkCmdResetEvent)(VkCommandBuffer commandBuffer, VkEvent event, VkPipelineStageFlags stageMask);
+typedef void (VKAPI_PTR *PFN_vkCmdWaitEvents)(VkCommandBuffer commandBuffer, uint32_t eventCount, const VkEvent* pEvents, VkPipelineStageFlags srcStageMask, VkPipelineStageFlags dstStageMask, uint32_t memoryBarrierCount, const VkMemoryBarrier* pMemoryBarriers, uint32_t bufferMemoryBarrierCount, const VkBufferMemoryBarrier* pBufferMemoryBarriers, uint32_t imageMemoryBarrierCount, const VkImageMemoryBarrier* pImageMemoryBarriers);
+typedef void (VKAPI_PTR *PFN_vkCmdPipelineBarrier)(VkCommandBuffer commandBuffer, VkPipelineStageFlags srcStageMask, VkPipelineStageFlags dstStageMask, VkDependencyFlags dependencyFlags, uint32_t memoryBarrierCount, const VkMemoryBarrier* pMemoryBarriers, uint32_t bufferMemoryBarrierCount, const VkBufferMemoryBarrier* pBufferMemoryBarriers, uint32_t imageMemoryBarrierCount, const VkImageMemoryBarrier* pImageMemoryBarriers);
+typedef void (VKAPI_PTR *PFN_vkCmdBeginQuery)(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t query, VkQueryControlFlags flags);
+typedef void (VKAPI_PTR *PFN_vkCmdEndQuery)(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t query);
+typedef void (VKAPI_PTR *PFN_vkCmdResetQueryPool)(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t firstQuery, uint32_t queryCount);
+typedef void (VKAPI_PTR *PFN_vkCmdWriteTimestamp)(VkCommandBuffer commandBuffer, VkPipelineStageFlagBits pipelineStage, VkQueryPool queryPool, uint32_t query);
+typedef void (VKAPI_PTR *PFN_vkCmdCopyQueryPoolResults)(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t firstQuery, uint32_t queryCount, VkBuffer dstBuffer, VkDeviceSize dstOffset, VkDeviceSize stride, VkQueryResultFlags flags);
+typedef void (VKAPI_PTR *PFN_vkCmdPushConstants)(VkCommandBuffer commandBuffer, VkPipelineLayout layout, VkShaderStageFlags stageFlags, uint32_t offset, uint32_t size, const void* pValues);
+typedef void (VKAPI_PTR *PFN_vkCmdBeginRenderPass)(VkCommandBuffer commandBuffer, const VkRenderPassBeginInfo* pRenderPassBegin, VkSubpassContents contents);
+typedef void (VKAPI_PTR *PFN_vkCmdNextSubpass)(VkCommandBuffer commandBuffer, VkSubpassContents contents);
+typedef void (VKAPI_PTR *PFN_vkCmdEndRenderPass)(VkCommandBuffer commandBuffer);
+typedef void (VKAPI_PTR *PFN_vkCmdExecuteCommands)(VkCommandBuffer commandBuffer, uint32_t commandBufferCount, const VkCommandBuffer* pCommandBuffers);
+
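+/*
+ * Note on the PFN_* typedefs above: when the static prototypes below are
+ * disabled (VK_NO_PROTOTYPES), entry points are resolved at runtime.
+ * Illustrative sketch, assuming `instance` and `device` already exist and
+ * vkGetInstanceProcAddr itself was obtained from the loader library
+ * (e.g. via dlsym):
+ *
+ *     PFN_vkGetDeviceProcAddr pGetDeviceProcAddr =
+ *         (PFN_vkGetDeviceProcAddr)vkGetInstanceProcAddr(instance,
+ *                                                        "vkGetDeviceProcAddr");
+ *     PFN_vkQueueSubmit pQueueSubmit =
+ *         (PFN_vkQueueSubmit)pGetDeviceProcAddr(device, "vkQueueSubmit");
+ */
+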
+#ifndef VK_NO_PROTOTYPES
+VKAPI_ATTR VkResult VKAPI_CALL vkCreateInstance(
+ const VkInstanceCreateInfo* pCreateInfo,
+ const VkAllocationCallbacks* pAllocator,
+ VkInstance* pInstance);
+
+VKAPI_ATTR void VKAPI_CALL vkDestroyInstance(
+ VkInstance instance,
+ const VkAllocationCallbacks* pAllocator);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkEnumeratePhysicalDevices(
+ VkInstance instance,
+ uint32_t* pPhysicalDeviceCount,
+ VkPhysicalDevice* pPhysicalDevices);
+
+VKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceFeatures(
+ VkPhysicalDevice physicalDevice,
+ VkPhysicalDeviceFeatures* pFeatures);
+
+VKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceFormatProperties(
+ VkPhysicalDevice physicalDevice,
+ VkFormat format,
+ VkFormatProperties* pFormatProperties);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkGetPhysicalDeviceImageFormatProperties(
+ VkPhysicalDevice physicalDevice,
+ VkFormat format,
+ VkImageType type,
+ VkImageTiling tiling,
+ VkImageUsageFlags usage,
+ VkImageCreateFlags flags,
+ VkImageFormatProperties* pImageFormatProperties);
+
+VKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceProperties(
+ VkPhysicalDevice physicalDevice,
+ VkPhysicalDeviceProperties* pProperties);
+
+VKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceQueueFamilyProperties(
+ VkPhysicalDevice physicalDevice,
+ uint32_t* pQueueFamilyPropertyCount,
+ VkQueueFamilyProperties* pQueueFamilyProperties);
+
+VKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceMemoryProperties(
+ VkPhysicalDevice physicalDevice,
+ VkPhysicalDeviceMemoryProperties* pMemoryProperties);
+
+VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL vkGetInstanceProcAddr(
+ VkInstance instance,
+ const char* pName);
+
+VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL vkGetDeviceProcAddr(
+ VkDevice device,
+ const char* pName);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkCreateDevice(
+ VkPhysicalDevice physicalDevice,
+ const VkDeviceCreateInfo* pCreateInfo,
+ const VkAllocationCallbacks* pAllocator,
+ VkDevice* pDevice);
+
+VKAPI_ATTR void VKAPI_CALL vkDestroyDevice(
+ VkDevice device,
+ const VkAllocationCallbacks* pAllocator);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkEnumerateInstanceExtensionProperties(
+ const char* pLayerName,
+ uint32_t* pPropertyCount,
+ VkExtensionProperties* pProperties);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkEnumerateDeviceExtensionProperties(
+ VkPhysicalDevice physicalDevice,
+ const char* pLayerName,
+ uint32_t* pPropertyCount,
+ VkExtensionProperties* pProperties);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkEnumerateInstanceLayerProperties(
+ uint32_t* pPropertyCount,
+ VkLayerProperties* pProperties);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkEnumerateDeviceLayerProperties(
+ VkPhysicalDevice physicalDevice,
+ uint32_t* pPropertyCount,
+ VkLayerProperties* pProperties);
+
+VKAPI_ATTR void VKAPI_CALL vkGetDeviceQueue(
+ VkDevice device,
+ uint32_t queueFamilyIndex,
+ uint32_t queueIndex,
+ VkQueue* pQueue);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkQueueSubmit(
+ VkQueue queue,
+ uint32_t submitCount,
+ const VkSubmitInfo* pSubmits,
+ VkFence fence);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkQueueWaitIdle(
+ VkQueue queue);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkDeviceWaitIdle(
+ VkDevice device);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkAllocateMemory(
+ VkDevice device,
+ const VkMemoryAllocateInfo* pAllocateInfo,
+ const VkAllocationCallbacks* pAllocator,
+ VkDeviceMemory* pMemory);
+
+VKAPI_ATTR void VKAPI_CALL vkFreeMemory(
+ VkDevice device,
+ VkDeviceMemory memory,
+ const VkAllocationCallbacks* pAllocator);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkMapMemory(
+ VkDevice device,
+ VkDeviceMemory memory,
+ VkDeviceSize offset,
+ VkDeviceSize size,
+ VkMemoryMapFlags flags,
+ void** ppData);
+
+VKAPI_ATTR void VKAPI_CALL vkUnmapMemory(
+ VkDevice device,
+ VkDeviceMemory memory);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkFlushMappedMemoryRanges(
+ VkDevice device,
+ uint32_t memoryRangeCount,
+ const VkMappedMemoryRange* pMemoryRanges);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkInvalidateMappedMemoryRanges(
+ VkDevice device,
+ uint32_t memoryRangeCount,
+ const VkMappedMemoryRange* pMemoryRanges);
+
+VKAPI_ATTR void VKAPI_CALL vkGetDeviceMemoryCommitment(
+ VkDevice device,
+ VkDeviceMemory memory,
+ VkDeviceSize* pCommittedMemoryInBytes);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkBindBufferMemory(
+ VkDevice device,
+ VkBuffer buffer,
+ VkDeviceMemory memory,
+ VkDeviceSize memoryOffset);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkBindImageMemory(
+ VkDevice device,
+ VkImage image,
+ VkDeviceMemory memory,
+ VkDeviceSize memoryOffset);
+
+VKAPI_ATTR void VKAPI_CALL vkGetBufferMemoryRequirements(
+ VkDevice device,
+ VkBuffer buffer,
+ VkMemoryRequirements* pMemoryRequirements);
+
+VKAPI_ATTR void VKAPI_CALL vkGetImageMemoryRequirements(
+ VkDevice device,
+ VkImage image,
+ VkMemoryRequirements* pMemoryRequirements);
+
+VKAPI_ATTR void VKAPI_CALL vkGetImageSparseMemoryRequirements(
+ VkDevice device,
+ VkImage image,
+ uint32_t* pSparseMemoryRequirementCount,
+ VkSparseImageMemoryRequirements* pSparseMemoryRequirements);
+
+VKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceSparseImageFormatProperties(
+ VkPhysicalDevice physicalDevice,
+ VkFormat format,
+ VkImageType type,
+ VkSampleCountFlagBits samples,
+ VkImageUsageFlags usage,
+ VkImageTiling tiling,
+ uint32_t* pPropertyCount,
+ VkSparseImageFormatProperties* pProperties);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkQueueBindSparse(
+ VkQueue queue,
+ uint32_t bindInfoCount,
+ const VkBindSparseInfo* pBindInfo,
+ VkFence fence);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkCreateFence(
+ VkDevice device,
+ const VkFenceCreateInfo* pCreateInfo,
+ const VkAllocationCallbacks* pAllocator,
+ VkFence* pFence);
+
+VKAPI_ATTR void VKAPI_CALL vkDestroyFence(
+ VkDevice device,
+ VkFence fence,
+ const VkAllocationCallbacks* pAllocator);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkResetFences(
+ VkDevice device,
+ uint32_t fenceCount,
+ const VkFence* pFences);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkGetFenceStatus(
+ VkDevice device,
+ VkFence fence);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkWaitForFences(
+ VkDevice device,
+ uint32_t fenceCount,
+ const VkFence* pFences,
+ VkBool32 waitAll,
+ uint64_t timeout);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkCreateSemaphore(
+ VkDevice device,
+ const VkSemaphoreCreateInfo* pCreateInfo,
+ const VkAllocationCallbacks* pAllocator,
+ VkSemaphore* pSemaphore);
+
+VKAPI_ATTR void VKAPI_CALL vkDestroySemaphore(
+ VkDevice device,
+ VkSemaphore semaphore,
+ const VkAllocationCallbacks* pAllocator);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkCreateEvent(
+ VkDevice device,
+ const VkEventCreateInfo* pCreateInfo,
+ const VkAllocationCallbacks* pAllocator,
+ VkEvent* pEvent);
+
+VKAPI_ATTR void VKAPI_CALL vkDestroyEvent(
+ VkDevice device,
+ VkEvent event,
+ const VkAllocationCallbacks* pAllocator);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkGetEventStatus(
+ VkDevice device,
+ VkEvent event);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkSetEvent(
+ VkDevice device,
+ VkEvent event);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkResetEvent(
+ VkDevice device,
+ VkEvent event);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkCreateQueryPool(
+ VkDevice device,
+ const VkQueryPoolCreateInfo* pCreateInfo,
+ const VkAllocationCallbacks* pAllocator,
+ VkQueryPool* pQueryPool);
+
+VKAPI_ATTR void VKAPI_CALL vkDestroyQueryPool(
+ VkDevice device,
+ VkQueryPool queryPool,
+ const VkAllocationCallbacks* pAllocator);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkGetQueryPoolResults(
+ VkDevice device,
+ VkQueryPool queryPool,
+ uint32_t firstQuery,
+ uint32_t queryCount,
+ size_t dataSize,
+ void* pData,
+ VkDeviceSize stride,
+ VkQueryResultFlags flags);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkCreateBuffer(
+ VkDevice device,
+ const VkBufferCreateInfo* pCreateInfo,
+ const VkAllocationCallbacks* pAllocator,
+ VkBuffer* pBuffer);
+
+VKAPI_ATTR void VKAPI_CALL vkDestroyBuffer(
+ VkDevice device,
+ VkBuffer buffer,
+ const VkAllocationCallbacks* pAllocator);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkCreateBufferView(
+ VkDevice device,
+ const VkBufferViewCreateInfo* pCreateInfo,
+ const VkAllocationCallbacks* pAllocator,
+ VkBufferView* pView);
+
+VKAPI_ATTR void VKAPI_CALL vkDestroyBufferView(
+ VkDevice device,
+ VkBufferView bufferView,
+ const VkAllocationCallbacks* pAllocator);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkCreateImage(
+ VkDevice device,
+ const VkImageCreateInfo* pCreateInfo,
+ const VkAllocationCallbacks* pAllocator,
+ VkImage* pImage);
+
+VKAPI_ATTR void VKAPI_CALL vkDestroyImage(
+ VkDevice device,
+ VkImage image,
+ const VkAllocationCallbacks* pAllocator);
+
+VKAPI_ATTR void VKAPI_CALL vkGetImageSubresourceLayout(
+ VkDevice device,
+ VkImage image,
+ const VkImageSubresource* pSubresource,
+ VkSubresourceLayout* pLayout);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkCreateImageView(
+ VkDevice device,
+ const VkImageViewCreateInfo* pCreateInfo,
+ const VkAllocationCallbacks* pAllocator,
+ VkImageView* pView);
+
+VKAPI_ATTR void VKAPI_CALL vkDestroyImageView(
+ VkDevice device,
+ VkImageView imageView,
+ const VkAllocationCallbacks* pAllocator);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkCreateShaderModule(
+ VkDevice device,
+ const VkShaderModuleCreateInfo* pCreateInfo,
+ const VkAllocationCallbacks* pAllocator,
+ VkShaderModule* pShaderModule);
+
+VKAPI_ATTR void VKAPI_CALL vkDestroyShaderModule(
+ VkDevice device,
+ VkShaderModule shaderModule,
+ const VkAllocationCallbacks* pAllocator);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkCreatePipelineCache(
+ VkDevice device,
+ const VkPipelineCacheCreateInfo* pCreateInfo,
+ const VkAllocationCallbacks* pAllocator,
+ VkPipelineCache* pPipelineCache);
+
+VKAPI_ATTR void VKAPI_CALL vkDestroyPipelineCache(
+ VkDevice device,
+ VkPipelineCache pipelineCache,
+ const VkAllocationCallbacks* pAllocator);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkGetPipelineCacheData(
+ VkDevice device,
+ VkPipelineCache pipelineCache,
+ size_t* pDataSize,
+ void* pData);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkMergePipelineCaches(
+ VkDevice device,
+ VkPipelineCache dstCache,
+ uint32_t srcCacheCount,
+ const VkPipelineCache* pSrcCaches);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkCreateGraphicsPipelines(
+ VkDevice device,
+ VkPipelineCache pipelineCache,
+ uint32_t createInfoCount,
+ const VkGraphicsPipelineCreateInfo* pCreateInfos,
+ const VkAllocationCallbacks* pAllocator,
+ VkPipeline* pPipelines);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkCreateComputePipelines(
+ VkDevice device,
+ VkPipelineCache pipelineCache,
+ uint32_t createInfoCount,
+ const VkComputePipelineCreateInfo* pCreateInfos,
+ const VkAllocationCallbacks* pAllocator,
+ VkPipeline* pPipelines);
+
+VKAPI_ATTR void VKAPI_CALL vkDestroyPipeline(
+ VkDevice device,
+ VkPipeline pipeline,
+ const VkAllocationCallbacks* pAllocator);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkCreatePipelineLayout(
+ VkDevice device,
+ const VkPipelineLayoutCreateInfo* pCreateInfo,
+ const VkAllocationCallbacks* pAllocator,
+ VkPipelineLayout* pPipelineLayout);
+
+VKAPI_ATTR void VKAPI_CALL vkDestroyPipelineLayout(
+ VkDevice device,
+ VkPipelineLayout pipelineLayout,
+ const VkAllocationCallbacks* pAllocator);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkCreateSampler(
+ VkDevice device,
+ const VkSamplerCreateInfo* pCreateInfo,
+ const VkAllocationCallbacks* pAllocator,
+ VkSampler* pSampler);
+
+VKAPI_ATTR void VKAPI_CALL vkDestroySampler(
+ VkDevice device,
+ VkSampler sampler,
+ const VkAllocationCallbacks* pAllocator);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkCreateDescriptorSetLayout(
+ VkDevice device,
+ const VkDescriptorSetLayoutCreateInfo* pCreateInfo,
+ const VkAllocationCallbacks* pAllocator,
+ VkDescriptorSetLayout* pSetLayout);
+
+VKAPI_ATTR void VKAPI_CALL vkDestroyDescriptorSetLayout(
+ VkDevice device,
+ VkDescriptorSetLayout descriptorSetLayout,
+ const VkAllocationCallbacks* pAllocator);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkCreateDescriptorPool(
+ VkDevice device,
+ const VkDescriptorPoolCreateInfo* pCreateInfo,
+ const VkAllocationCallbacks* pAllocator,
+ VkDescriptorPool* pDescriptorPool);
+
+VKAPI_ATTR void VKAPI_CALL vkDestroyDescriptorPool(
+ VkDevice device,
+ VkDescriptorPool descriptorPool,
+ const VkAllocationCallbacks* pAllocator);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkResetDescriptorPool(
+ VkDevice device,
+ VkDescriptorPool descriptorPool,
+ VkDescriptorPoolResetFlags flags);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkAllocateDescriptorSets(
+ VkDevice device,
+ const VkDescriptorSetAllocateInfo* pAllocateInfo,
+ VkDescriptorSet* pDescriptorSets);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkFreeDescriptorSets(
+ VkDevice device,
+ VkDescriptorPool descriptorPool,
+ uint32_t descriptorSetCount,
+ const VkDescriptorSet* pDescriptorSets);
+
+VKAPI_ATTR void VKAPI_CALL vkUpdateDescriptorSets(
+ VkDevice device,
+ uint32_t descriptorWriteCount,
+ const VkWriteDescriptorSet* pDescriptorWrites,
+ uint32_t descriptorCopyCount,
+ const VkCopyDescriptorSet* pDescriptorCopies);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkCreateFramebuffer(
+ VkDevice device,
+ const VkFramebufferCreateInfo* pCreateInfo,
+ const VkAllocationCallbacks* pAllocator,
+ VkFramebuffer* pFramebuffer);
+
+VKAPI_ATTR void VKAPI_CALL vkDestroyFramebuffer(
+ VkDevice device,
+ VkFramebuffer framebuffer,
+ const VkAllocationCallbacks* pAllocator);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkCreateRenderPass(
+ VkDevice device,
+ const VkRenderPassCreateInfo* pCreateInfo,
+ const VkAllocationCallbacks* pAllocator,
+ VkRenderPass* pRenderPass);
+
+VKAPI_ATTR void VKAPI_CALL vkDestroyRenderPass(
+ VkDevice device,
+ VkRenderPass renderPass,
+ const VkAllocationCallbacks* pAllocator);
+
+VKAPI_ATTR void VKAPI_CALL vkGetRenderAreaGranularity(
+ VkDevice device,
+ VkRenderPass renderPass,
+ VkExtent2D* pGranularity);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkCreateCommandPool(
+ VkDevice device,
+ const VkCommandPoolCreateInfo* pCreateInfo,
+ const VkAllocationCallbacks* pAllocator,
+ VkCommandPool* pCommandPool);
+
+VKAPI_ATTR void VKAPI_CALL vkDestroyCommandPool(
+ VkDevice device,
+ VkCommandPool commandPool,
+ const VkAllocationCallbacks* pAllocator);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkResetCommandPool(
+ VkDevice device,
+ VkCommandPool commandPool,
+ VkCommandPoolResetFlags flags);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkAllocateCommandBuffers(
+ VkDevice device,
+ const VkCommandBufferAllocateInfo* pAllocateInfo,
+ VkCommandBuffer* pCommandBuffers);
+
+VKAPI_ATTR void VKAPI_CALL vkFreeCommandBuffers(
+ VkDevice device,
+ VkCommandPool commandPool,
+ uint32_t commandBufferCount,
+ const VkCommandBuffer* pCommandBuffers);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkBeginCommandBuffer(
+ VkCommandBuffer commandBuffer,
+ const VkCommandBufferBeginInfo* pBeginInfo);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkEndCommandBuffer(
+ VkCommandBuffer commandBuffer);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkResetCommandBuffer(
+ VkCommandBuffer commandBuffer,
+ VkCommandBufferResetFlags flags);
+
+VKAPI_ATTR void VKAPI_CALL vkCmdBindPipeline(
+ VkCommandBuffer commandBuffer,
+ VkPipelineBindPoint pipelineBindPoint,
+ VkPipeline pipeline);
+
+VKAPI_ATTR void VKAPI_CALL vkCmdSetViewport(
+ VkCommandBuffer commandBuffer,
+ uint32_t firstViewport,
+ uint32_t viewportCount,
+ const VkViewport* pViewports);
+
+VKAPI_ATTR void VKAPI_CALL vkCmdSetScissor(
+ VkCommandBuffer commandBuffer,
+ uint32_t firstScissor,
+ uint32_t scissorCount,
+ const VkRect2D* pScissors);
+
+VKAPI_ATTR void VKAPI_CALL vkCmdSetLineWidth(
+ VkCommandBuffer commandBuffer,
+ float lineWidth);
+
+VKAPI_ATTR void VKAPI_CALL vkCmdSetDepthBias(
+ VkCommandBuffer commandBuffer,
+ float depthBiasConstantFactor,
+ float depthBiasClamp,
+ float depthBiasSlopeFactor);
+
+VKAPI_ATTR void VKAPI_CALL vkCmdSetBlendConstants(
+ VkCommandBuffer commandBuffer,
+ const float blendConstants[4]);
+
+VKAPI_ATTR void VKAPI_CALL vkCmdSetDepthBounds(
+ VkCommandBuffer commandBuffer,
+ float minDepthBounds,
+ float maxDepthBounds);
+
+VKAPI_ATTR void VKAPI_CALL vkCmdSetStencilCompareMask(
+ VkCommandBuffer commandBuffer,
+ VkStencilFaceFlags faceMask,
+ uint32_t compareMask);
+
+VKAPI_ATTR void VKAPI_CALL vkCmdSetStencilWriteMask(
+ VkCommandBuffer commandBuffer,
+ VkStencilFaceFlags faceMask,
+ uint32_t writeMask);
+
+VKAPI_ATTR void VKAPI_CALL vkCmdSetStencilReference(
+ VkCommandBuffer commandBuffer,
+ VkStencilFaceFlags faceMask,
+ uint32_t reference);
+
+VKAPI_ATTR void VKAPI_CALL vkCmdBindDescriptorSets(
+ VkCommandBuffer commandBuffer,
+ VkPipelineBindPoint pipelineBindPoint,
+ VkPipelineLayout layout,
+ uint32_t firstSet,
+ uint32_t descriptorSetCount,
+ const VkDescriptorSet* pDescriptorSets,
+ uint32_t dynamicOffsetCount,
+ const uint32_t* pDynamicOffsets);
+
+VKAPI_ATTR void VKAPI_CALL vkCmdBindIndexBuffer(
+ VkCommandBuffer commandBuffer,
+ VkBuffer buffer,
+ VkDeviceSize offset,
+ VkIndexType indexType);
+
+VKAPI_ATTR void VKAPI_CALL vkCmdBindVertexBuffers(
+ VkCommandBuffer commandBuffer,
+ uint32_t firstBinding,
+ uint32_t bindingCount,
+ const VkBuffer* pBuffers,
+ const VkDeviceSize* pOffsets);
+
+VKAPI_ATTR void VKAPI_CALL vkCmdDraw(
+ VkCommandBuffer commandBuffer,
+ uint32_t vertexCount,
+ uint32_t instanceCount,
+ uint32_t firstVertex,
+ uint32_t firstInstance);
+
+VKAPI_ATTR void VKAPI_CALL vkCmdDrawIndexed(
+ VkCommandBuffer commandBuffer,
+ uint32_t indexCount,
+ uint32_t instanceCount,
+ uint32_t firstIndex,
+ int32_t vertexOffset,
+ uint32_t firstInstance);
+
+VKAPI_ATTR void VKAPI_CALL vkCmdDrawIndirect(
+ VkCommandBuffer commandBuffer,
+ VkBuffer buffer,
+ VkDeviceSize offset,
+ uint32_t drawCount,
+ uint32_t stride);
+
+VKAPI_ATTR void VKAPI_CALL vkCmdDrawIndexedIndirect(
+ VkCommandBuffer commandBuffer,
+ VkBuffer buffer,
+ VkDeviceSize offset,
+ uint32_t drawCount,
+ uint32_t stride);
+
+VKAPI_ATTR void VKAPI_CALL vkCmdDispatch(
+ VkCommandBuffer commandBuffer,
+ uint32_t x,
+ uint32_t y,
+ uint32_t z);
+
+VKAPI_ATTR void VKAPI_CALL vkCmdDispatchIndirect(
+ VkCommandBuffer commandBuffer,
+ VkBuffer buffer,
+ VkDeviceSize offset);
+
+VKAPI_ATTR void VKAPI_CALL vkCmdCopyBuffer(
+ VkCommandBuffer commandBuffer,
+ VkBuffer srcBuffer,
+ VkBuffer dstBuffer,
+ uint32_t regionCount,
+ const VkBufferCopy* pRegions);
+
+VKAPI_ATTR void VKAPI_CALL vkCmdCopyImage(
+ VkCommandBuffer commandBuffer,
+ VkImage srcImage,
+ VkImageLayout srcImageLayout,
+ VkImage dstImage,
+ VkImageLayout dstImageLayout,
+ uint32_t regionCount,
+ const VkImageCopy* pRegions);
+
+VKAPI_ATTR void VKAPI_CALL vkCmdBlitImage(
+ VkCommandBuffer commandBuffer,
+ VkImage srcImage,
+ VkImageLayout srcImageLayout,
+ VkImage dstImage,
+ VkImageLayout dstImageLayout,
+ uint32_t regionCount,
+ const VkImageBlit* pRegions,
+ VkFilter filter);
+
+VKAPI_ATTR void VKAPI_CALL vkCmdCopyBufferToImage(
+ VkCommandBuffer commandBuffer,
+ VkBuffer srcBuffer,
+ VkImage dstImage,
+ VkImageLayout dstImageLayout,
+ uint32_t regionCount,
+ const VkBufferImageCopy* pRegions);
+
+VKAPI_ATTR void VKAPI_CALL vkCmdCopyImageToBuffer(
+ VkCommandBuffer commandBuffer,
+ VkImage srcImage,
+ VkImageLayout srcImageLayout,
+ VkBuffer dstBuffer,
+ uint32_t regionCount,
+ const VkBufferImageCopy* pRegions);
+
+VKAPI_ATTR void VKAPI_CALL vkCmdUpdateBuffer(
+ VkCommandBuffer commandBuffer,
+ VkBuffer dstBuffer,
+ VkDeviceSize dstOffset,
+ VkDeviceSize dataSize,
+ const uint32_t* pData);
+
+VKAPI_ATTR void VKAPI_CALL vkCmdFillBuffer(
+ VkCommandBuffer commandBuffer,
+ VkBuffer dstBuffer,
+ VkDeviceSize dstOffset,
+ VkDeviceSize size,
+ uint32_t data);
+
+VKAPI_ATTR void VKAPI_CALL vkCmdClearColorImage(
+ VkCommandBuffer commandBuffer,
+ VkImage image,
+ VkImageLayout imageLayout,
+ const VkClearColorValue* pColor,
+ uint32_t rangeCount,
+ const VkImageSubresourceRange* pRanges);
+
+VKAPI_ATTR void VKAPI_CALL vkCmdClearDepthStencilImage(
+ VkCommandBuffer commandBuffer,
+ VkImage image,
+ VkImageLayout imageLayout,
+ const VkClearDepthStencilValue* pDepthStencil,
+ uint32_t rangeCount,
+ const VkImageSubresourceRange* pRanges);
+
+VKAPI_ATTR void VKAPI_CALL vkCmdClearAttachments(
+ VkCommandBuffer commandBuffer,
+ uint32_t attachmentCount,
+ const VkClearAttachment* pAttachments,
+ uint32_t rectCount,
+ const VkClearRect* pRects);
+
+VKAPI_ATTR void VKAPI_CALL vkCmdResolveImage(
+ VkCommandBuffer commandBuffer,
+ VkImage srcImage,
+ VkImageLayout srcImageLayout,
+ VkImage dstImage,
+ VkImageLayout dstImageLayout,
+ uint32_t regionCount,
+ const VkImageResolve* pRegions);
+
+VKAPI_ATTR void VKAPI_CALL vkCmdSetEvent(
+ VkCommandBuffer commandBuffer,
+ VkEvent event,
+ VkPipelineStageFlags stageMask);
+
+VKAPI_ATTR void VKAPI_CALL vkCmdResetEvent(
+ VkCommandBuffer commandBuffer,
+ VkEvent event,
+ VkPipelineStageFlags stageMask);
+
+VKAPI_ATTR void VKAPI_CALL vkCmdWaitEvents(
+ VkCommandBuffer commandBuffer,
+ uint32_t eventCount,
+ const VkEvent* pEvents,
+ VkPipelineStageFlags srcStageMask,
+ VkPipelineStageFlags dstStageMask,
+ uint32_t memoryBarrierCount,
+ const VkMemoryBarrier* pMemoryBarriers,
+ uint32_t bufferMemoryBarrierCount,
+ const VkBufferMemoryBarrier* pBufferMemoryBarriers,
+ uint32_t imageMemoryBarrierCount,
+ const VkImageMemoryBarrier* pImageMemoryBarriers);
+
+VKAPI_ATTR void VKAPI_CALL vkCmdPipelineBarrier(
+ VkCommandBuffer commandBuffer,
+ VkPipelineStageFlags srcStageMask,
+ VkPipelineStageFlags dstStageMask,
+ VkDependencyFlags dependencyFlags,
+ uint32_t memoryBarrierCount,
+ const VkMemoryBarrier* pMemoryBarriers,
+ uint32_t bufferMemoryBarrierCount,
+ const VkBufferMemoryBarrier* pBufferMemoryBarriers,
+ uint32_t imageMemoryBarrierCount,
+ const VkImageMemoryBarrier* pImageMemoryBarriers);
+
+VKAPI_ATTR void VKAPI_CALL vkCmdBeginQuery(
+ VkCommandBuffer commandBuffer,
+ VkQueryPool queryPool,
+ uint32_t query,
+ VkQueryControlFlags flags);
+
+VKAPI_ATTR void VKAPI_CALL vkCmdEndQuery(
+ VkCommandBuffer commandBuffer,
+ VkQueryPool queryPool,
+ uint32_t query);
+
+VKAPI_ATTR void VKAPI_CALL vkCmdResetQueryPool(
+ VkCommandBuffer commandBuffer,
+ VkQueryPool queryPool,
+ uint32_t firstQuery,
+ uint32_t queryCount);
+
+VKAPI_ATTR void VKAPI_CALL vkCmdWriteTimestamp(
+ VkCommandBuffer commandBuffer,
+ VkPipelineStageFlagBits pipelineStage,
+ VkQueryPool queryPool,
+ uint32_t query);
+
+VKAPI_ATTR void VKAPI_CALL vkCmdCopyQueryPoolResults(
+ VkCommandBuffer commandBuffer,
+ VkQueryPool queryPool,
+ uint32_t firstQuery,
+ uint32_t queryCount,
+ VkBuffer dstBuffer,
+ VkDeviceSize dstOffset,
+ VkDeviceSize stride,
+ VkQueryResultFlags flags);
+
+VKAPI_ATTR void VKAPI_CALL vkCmdPushConstants(
+ VkCommandBuffer commandBuffer,
+ VkPipelineLayout layout,
+ VkShaderStageFlags stageFlags,
+ uint32_t offset,
+ uint32_t size,
+ const void* pValues);
+
+VKAPI_ATTR void VKAPI_CALL vkCmdBeginRenderPass(
+ VkCommandBuffer commandBuffer,
+ const VkRenderPassBeginInfo* pRenderPassBegin,
+ VkSubpassContents contents);
+
+VKAPI_ATTR void VKAPI_CALL vkCmdNextSubpass(
+ VkCommandBuffer commandBuffer,
+ VkSubpassContents contents);
+
+VKAPI_ATTR void VKAPI_CALL vkCmdEndRenderPass(
+ VkCommandBuffer commandBuffer);
+
+VKAPI_ATTR void VKAPI_CALL vkCmdExecuteCommands(
+ VkCommandBuffer commandBuffer,
+ uint32_t commandBufferCount,
+ const VkCommandBuffer* pCommandBuffers);
+#endif
+
+#define VK_KHR_surface 1
+VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkSurfaceKHR)
+
+#define VK_KHR_SURFACE_SPEC_VERSION 25
+#define VK_KHR_SURFACE_EXTENSION_NAME "VK_KHR_surface"
+
+
+typedef enum VkColorSpaceKHR {
+ VK_COLORSPACE_SRGB_NONLINEAR_KHR = 0,
+ VK_COLORSPACE_BEGIN_RANGE = VK_COLORSPACE_SRGB_NONLINEAR_KHR,
+ VK_COLORSPACE_END_RANGE = VK_COLORSPACE_SRGB_NONLINEAR_KHR,
+ VK_COLORSPACE_RANGE_SIZE = (VK_COLORSPACE_SRGB_NONLINEAR_KHR - VK_COLORSPACE_SRGB_NONLINEAR_KHR + 1),
+ VK_COLORSPACE_MAX_ENUM = 0x7FFFFFFF
+} VkColorSpaceKHR;
+
+typedef enum VkPresentModeKHR {
+ VK_PRESENT_MODE_IMMEDIATE_KHR = 0,
+ VK_PRESENT_MODE_MAILBOX_KHR = 1,
+ VK_PRESENT_MODE_FIFO_KHR = 2,
+ VK_PRESENT_MODE_FIFO_RELAXED_KHR = 3,
+ VK_PRESENT_MODE_BEGIN_RANGE = VK_PRESENT_MODE_IMMEDIATE_KHR,
+ VK_PRESENT_MODE_END_RANGE = VK_PRESENT_MODE_FIFO_RELAXED_KHR,
+ VK_PRESENT_MODE_RANGE_SIZE = (VK_PRESENT_MODE_FIFO_RELAXED_KHR - VK_PRESENT_MODE_IMMEDIATE_KHR + 1),
+ VK_PRESENT_MODE_MAX_ENUM = 0x7FFFFFFF
+} VkPresentModeKHR;
+
+
+typedef enum VkSurfaceTransformFlagBitsKHR {
+ VK_SURFACE_TRANSFORM_IDENTITY_BIT_KHR = 0x00000001,
+ VK_SURFACE_TRANSFORM_ROTATE_90_BIT_KHR = 0x00000002,
+ VK_SURFACE_TRANSFORM_ROTATE_180_BIT_KHR = 0x00000004,
+ VK_SURFACE_TRANSFORM_ROTATE_270_BIT_KHR = 0x00000008,
+ VK_SURFACE_TRANSFORM_HORIZONTAL_MIRROR_BIT_KHR = 0x00000010,
+ VK_SURFACE_TRANSFORM_HORIZONTAL_MIRROR_ROTATE_90_BIT_KHR = 0x00000020,
+ VK_SURFACE_TRANSFORM_HORIZONTAL_MIRROR_ROTATE_180_BIT_KHR = 0x00000040,
+ VK_SURFACE_TRANSFORM_HORIZONTAL_MIRROR_ROTATE_270_BIT_KHR = 0x00000080,
+ VK_SURFACE_TRANSFORM_INHERIT_BIT_KHR = 0x00000100,
+} VkSurfaceTransformFlagBitsKHR;
+typedef VkFlags VkSurfaceTransformFlagsKHR;
+
+typedef enum VkCompositeAlphaFlagBitsKHR {
+ VK_COMPOSITE_ALPHA_OPAQUE_BIT_KHR = 0x00000001,
+ VK_COMPOSITE_ALPHA_PRE_MULTIPLIED_BIT_KHR = 0x00000002,
+ VK_COMPOSITE_ALPHA_POST_MULTIPLIED_BIT_KHR = 0x00000004,
+ VK_COMPOSITE_ALPHA_INHERIT_BIT_KHR = 0x00000008,
+} VkCompositeAlphaFlagBitsKHR;
+typedef VkFlags VkCompositeAlphaFlagsKHR;
+
+typedef struct VkSurfaceCapabilitiesKHR {
+ uint32_t minImageCount;
+ uint32_t maxImageCount;
+ VkExtent2D currentExtent;
+ VkExtent2D minImageExtent;
+ VkExtent2D maxImageExtent;
+ uint32_t maxImageArrayLayers;
+ VkSurfaceTransformFlagsKHR supportedTransforms;
+ VkSurfaceTransformFlagBitsKHR currentTransform;
+ VkCompositeAlphaFlagsKHR supportedCompositeAlpha;
+ VkImageUsageFlags supportedUsageFlags;
+} VkSurfaceCapabilitiesKHR;
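+
+/*
+ * Note (illustrative): a swapchain image count is typically chosen by
+ * clamping a desired value against these capabilities; maxImageCount == 0
+ * means the implementation imposes no upper limit. For example, with
+ * `caps` an already-queried VkSurfaceCapabilitiesKHR:
+ *
+ *     uint32_t count = caps.minImageCount + 1;
+ *     if (caps.maxImageCount > 0 && count > caps.maxImageCount) {
+ *         count = caps.maxImageCount;
+ *     }
+ */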
+
+typedef struct VkSurfaceFormatKHR {
+ VkFormat format;
+ VkColorSpaceKHR colorSpace;
+} VkSurfaceFormatKHR;
+
+
+typedef void (VKAPI_PTR *PFN_vkDestroySurfaceKHR)(VkInstance instance, VkSurfaceKHR surface, const VkAllocationCallbacks* pAllocator);
+typedef VkResult (VKAPI_PTR *PFN_vkGetPhysicalDeviceSurfaceSupportKHR)(VkPhysicalDevice physicalDevice, uint32_t queueFamilyIndex, VkSurfaceKHR surface, VkBool32* pSupported);
+typedef VkResult (VKAPI_PTR *PFN_vkGetPhysicalDeviceSurfaceCapabilitiesKHR)(VkPhysicalDevice physicalDevice, VkSurfaceKHR surface, VkSurfaceCapabilitiesKHR* pSurfaceCapabilities);
+typedef VkResult (VKAPI_PTR *PFN_vkGetPhysicalDeviceSurfaceFormatsKHR)(VkPhysicalDevice physicalDevice, VkSurfaceKHR surface, uint32_t* pSurfaceFormatCount, VkSurfaceFormatKHR* pSurfaceFormats);
+typedef VkResult (VKAPI_PTR *PFN_vkGetPhysicalDeviceSurfacePresentModesKHR)(VkPhysicalDevice physicalDevice, VkSurfaceKHR surface, uint32_t* pPresentModeCount, VkPresentModeKHR* pPresentModes);
+
+#ifndef VK_NO_PROTOTYPES
+VKAPI_ATTR void VKAPI_CALL vkDestroySurfaceKHR(
+ VkInstance instance,
+ VkSurfaceKHR surface,
+ const VkAllocationCallbacks* pAllocator);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkGetPhysicalDeviceSurfaceSupportKHR(
+ VkPhysicalDevice physicalDevice,
+ uint32_t queueFamilyIndex,
+ VkSurfaceKHR surface,
+ VkBool32* pSupported);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkGetPhysicalDeviceSurfaceCapabilitiesKHR(
+ VkPhysicalDevice physicalDevice,
+ VkSurfaceKHR surface,
+ VkSurfaceCapabilitiesKHR* pSurfaceCapabilities);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkGetPhysicalDeviceSurfaceFormatsKHR(
+ VkPhysicalDevice physicalDevice,
+ VkSurfaceKHR surface,
+ uint32_t* pSurfaceFormatCount,
+ VkSurfaceFormatKHR* pSurfaceFormats);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkGetPhysicalDeviceSurfacePresentModesKHR(
+ VkPhysicalDevice physicalDevice,
+ VkSurfaceKHR surface,
+ uint32_t* pPresentModeCount,
+ VkPresentModeKHR* pPresentModes);
+#endif
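+
+/*
+ * Usage sketch (illustrative only): checking whether queue family 0 of a
+ * physical device can present to a surface. `physicalDevice` and
+ * `surface` are assumed handles.
+ *
+ *     VkBool32 supported = VK_FALSE;
+ *     vkGetPhysicalDeviceSurfaceSupportKHR(physicalDevice, 0, surface,
+ *                                          &supported);
+ */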
+
+#define VK_KHR_swapchain 1
+VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkSwapchainKHR)
+
+#define VK_KHR_SWAPCHAIN_SPEC_VERSION 67
+#define VK_KHR_SWAPCHAIN_EXTENSION_NAME "VK_KHR_swapchain"
+
+typedef VkFlags VkSwapchainCreateFlagsKHR;
+
+typedef struct VkSwapchainCreateInfoKHR {
+ VkStructureType sType;
+ const void* pNext;
+ VkSwapchainCreateFlagsKHR flags;
+ VkSurfaceKHR surface;
+ uint32_t minImageCount;
+ VkFormat imageFormat;
+ VkColorSpaceKHR imageColorSpace;
+ VkExtent2D imageExtent;
+ uint32_t imageArrayLayers;
+ VkImageUsageFlags imageUsage;
+ VkSharingMode imageSharingMode;
+ uint32_t queueFamilyIndexCount;
+ const uint32_t* pQueueFamilyIndices;
+ VkSurfaceTransformFlagBitsKHR preTransform;
+ VkCompositeAlphaFlagBitsKHR compositeAlpha;
+ VkPresentModeKHR presentMode;
+ VkBool32 clipped;
+ VkSwapchainKHR oldSwapchain;
+} VkSwapchainCreateInfoKHR;
+
+typedef struct VkPresentInfoKHR {
+ VkStructureType sType;
+ const void* pNext;
+ uint32_t waitSemaphoreCount;
+ const VkSemaphore* pWaitSemaphores;
+ uint32_t swapchainCount;
+ const VkSwapchainKHR* pSwapchains;
+ const uint32_t* pImageIndices;
+ VkResult* pResults;
+} VkPresentInfoKHR;
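+
+/*
+ * Usage sketch (illustrative only): one frame of the acquire/present
+ * cycle using the entry points declared below. `device`, `queue`,
+ * `swapchain`, and `acquireSemaphore` are assumed; rendering, and the
+ * wait semaphores that rendering would signal, are elided.
+ *
+ *     uint32_t imageIndex;
+ *     vkAcquireNextImageKHR(device, swapchain, UINT64_MAX,
+ *                           acquireSemaphore, VK_NULL_HANDLE, &imageIndex);
+ *     ... render into swapchain image `imageIndex` ...
+ *     VkPresentInfoKHR present = { 0 };
+ *     present.sType = VK_STRUCTURE_TYPE_PRESENT_INFO_KHR;
+ *     present.swapchainCount = 1;
+ *     present.pSwapchains = &swapchain;
+ *     present.pImageIndices = &imageIndex;
+ *     vkQueuePresentKHR(queue, &present);
+ */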
+
+
+typedef VkResult (VKAPI_PTR *PFN_vkCreateSwapchainKHR)(VkDevice device, const VkSwapchainCreateInfoKHR* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkSwapchainKHR* pSwapchain);
+typedef void (VKAPI_PTR *PFN_vkDestroySwapchainKHR)(VkDevice device, VkSwapchainKHR swapchain, const VkAllocationCallbacks* pAllocator);
+typedef VkResult (VKAPI_PTR *PFN_vkGetSwapchainImagesKHR)(VkDevice device, VkSwapchainKHR swapchain, uint32_t* pSwapchainImageCount, VkImage* pSwapchainImages);
+typedef VkResult (VKAPI_PTR *PFN_vkAcquireNextImageKHR)(VkDevice device, VkSwapchainKHR swapchain, uint64_t timeout, VkSemaphore semaphore, VkFence fence, uint32_t* pImageIndex);
+typedef VkResult (VKAPI_PTR *PFN_vkQueuePresentKHR)(VkQueue queue, const VkPresentInfoKHR* pPresentInfo);
+
+#ifndef VK_NO_PROTOTYPES
+VKAPI_ATTR VkResult VKAPI_CALL vkCreateSwapchainKHR(
+ VkDevice device,
+ const VkSwapchainCreateInfoKHR* pCreateInfo,
+ const VkAllocationCallbacks* pAllocator,
+ VkSwapchainKHR* pSwapchain);
+
+VKAPI_ATTR void VKAPI_CALL vkDestroySwapchainKHR(
+ VkDevice device,
+ VkSwapchainKHR swapchain,
+ const VkAllocationCallbacks* pAllocator);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkGetSwapchainImagesKHR(
+ VkDevice device,
+ VkSwapchainKHR swapchain,
+ uint32_t* pSwapchainImageCount,
+ VkImage* pSwapchainImages);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkAcquireNextImageKHR(
+ VkDevice device,
+ VkSwapchainKHR swapchain,
+ uint64_t timeout,
+ VkSemaphore semaphore,
+ VkFence fence,
+ uint32_t* pImageIndex);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkQueuePresentKHR(
+ VkQueue queue,
+ const VkPresentInfoKHR* pPresentInfo);
+#endif
+
+#define VK_KHR_display 1
+VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkDisplayKHR)
+VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkDisplayModeKHR)
+
+#define VK_KHR_DISPLAY_SPEC_VERSION 21
+#define VK_KHR_DISPLAY_EXTENSION_NAME "VK_KHR_display"
+
+
+typedef enum VkDisplayPlaneAlphaFlagBitsKHR {
+ VK_DISPLAY_PLANE_ALPHA_OPAQUE_BIT_KHR = 0x00000001,
+ VK_DISPLAY_PLANE_ALPHA_GLOBAL_BIT_KHR = 0x00000002,
+ VK_DISPLAY_PLANE_ALPHA_PER_PIXEL_BIT_KHR = 0x00000004,
+ VK_DISPLAY_PLANE_ALPHA_PER_PIXEL_PREMULTIPLIED_BIT_KHR = 0x00000008,
+} VkDisplayPlaneAlphaFlagBitsKHR;
+typedef VkFlags VkDisplayModeCreateFlagsKHR;
+typedef VkFlags VkDisplayPlaneAlphaFlagsKHR;
+typedef VkFlags VkDisplaySurfaceCreateFlagsKHR;
+
+typedef struct VkDisplayPropertiesKHR {
+ VkDisplayKHR display;
+ const char* displayName;
+ VkExtent2D physicalDimensions;
+ VkExtent2D physicalResolution;
+ VkSurfaceTransformFlagsKHR supportedTransforms;
+ VkBool32 planeReorderPossible;
+ VkBool32 persistentContent;
+} VkDisplayPropertiesKHR;
+
+typedef struct VkDisplayModeParametersKHR {
+ VkExtent2D visibleRegion;
+ uint32_t refreshRate;
+} VkDisplayModeParametersKHR;
+
+typedef struct VkDisplayModePropertiesKHR {
+ VkDisplayModeKHR displayMode;
+ VkDisplayModeParametersKHR parameters;
+} VkDisplayModePropertiesKHR;
+
+typedef struct VkDisplayModeCreateInfoKHR {
+ VkStructureType sType;
+ const void* pNext;
+ VkDisplayModeCreateFlagsKHR flags;
+ VkDisplayModeParametersKHR parameters;
+} VkDisplayModeCreateInfoKHR;
+
+typedef struct VkDisplayPlaneCapabilitiesKHR {
+ VkDisplayPlaneAlphaFlagsKHR supportedAlpha;
+ VkOffset2D minSrcPosition;
+ VkOffset2D maxSrcPosition;
+ VkExtent2D minSrcExtent;
+ VkExtent2D maxSrcExtent;
+ VkOffset2D minDstPosition;
+ VkOffset2D maxDstPosition;
+ VkExtent2D minDstExtent;
+ VkExtent2D maxDstExtent;
+} VkDisplayPlaneCapabilitiesKHR;
+
+typedef struct VkDisplayPlanePropertiesKHR {
+ VkDisplayKHR currentDisplay;
+ uint32_t currentStackIndex;
+} VkDisplayPlanePropertiesKHR;
+
+typedef struct VkDisplaySurfaceCreateInfoKHR {
+ VkStructureType sType;
+ const void* pNext;
+ VkDisplaySurfaceCreateFlagsKHR flags;
+ VkDisplayModeKHR displayMode;
+ uint32_t planeIndex;
+ uint32_t planeStackIndex;
+ VkSurfaceTransformFlagBitsKHR transform;
+ float globalAlpha;
+ VkDisplayPlaneAlphaFlagBitsKHR alphaMode;
+ VkExtent2D imageExtent;
+} VkDisplaySurfaceCreateInfoKHR;
+
+
+typedef VkResult (VKAPI_PTR *PFN_vkGetPhysicalDeviceDisplayPropertiesKHR)(VkPhysicalDevice physicalDevice, uint32_t* pPropertyCount, VkDisplayPropertiesKHR* pProperties);
+typedef VkResult (VKAPI_PTR *PFN_vkGetPhysicalDeviceDisplayPlanePropertiesKHR)(VkPhysicalDevice physicalDevice, uint32_t* pPropertyCount, VkDisplayPlanePropertiesKHR* pProperties);
+typedef VkResult (VKAPI_PTR *PFN_vkGetDisplayPlaneSupportedDisplaysKHR)(VkPhysicalDevice physicalDevice, uint32_t planeIndex, uint32_t* pDisplayCount, VkDisplayKHR* pDisplays);
+typedef VkResult (VKAPI_PTR *PFN_vkGetDisplayModePropertiesKHR)(VkPhysicalDevice physicalDevice, VkDisplayKHR display, uint32_t* pPropertyCount, VkDisplayModePropertiesKHR* pProperties);
+typedef VkResult (VKAPI_PTR *PFN_vkCreateDisplayModeKHR)(VkPhysicalDevice physicalDevice, VkDisplayKHR display, const VkDisplayModeCreateInfoKHR* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkDisplayModeKHR* pMode);
+typedef VkResult (VKAPI_PTR *PFN_vkGetDisplayPlaneCapabilitiesKHR)(VkPhysicalDevice physicalDevice, VkDisplayModeKHR mode, uint32_t planeIndex, VkDisplayPlaneCapabilitiesKHR* pCapabilities);
+typedef VkResult (VKAPI_PTR *PFN_vkCreateDisplayPlaneSurfaceKHR)(VkInstance instance, const VkDisplaySurfaceCreateInfoKHR* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkSurfaceKHR* pSurface);
+
+#ifndef VK_NO_PROTOTYPES
+VKAPI_ATTR VkResult VKAPI_CALL vkGetPhysicalDeviceDisplayPropertiesKHR(
+ VkPhysicalDevice physicalDevice,
+ uint32_t* pPropertyCount,
+ VkDisplayPropertiesKHR* pProperties);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkGetPhysicalDeviceDisplayPlanePropertiesKHR(
+ VkPhysicalDevice physicalDevice,
+ uint32_t* pPropertyCount,
+ VkDisplayPlanePropertiesKHR* pProperties);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkGetDisplayPlaneSupportedDisplaysKHR(
+ VkPhysicalDevice physicalDevice,
+ uint32_t planeIndex,
+ uint32_t* pDisplayCount,
+ VkDisplayKHR* pDisplays);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkGetDisplayModePropertiesKHR(
+ VkPhysicalDevice physicalDevice,
+ VkDisplayKHR display,
+ uint32_t* pPropertyCount,
+ VkDisplayModePropertiesKHR* pProperties);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkCreateDisplayModeKHR(
+ VkPhysicalDevice physicalDevice,
+ VkDisplayKHR display,
+ const VkDisplayModeCreateInfoKHR* pCreateInfo,
+ const VkAllocationCallbacks* pAllocator,
+ VkDisplayModeKHR* pMode);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkGetDisplayPlaneCapabilitiesKHR(
+ VkPhysicalDevice physicalDevice,
+ VkDisplayModeKHR mode,
+ uint32_t planeIndex,
+ VkDisplayPlaneCapabilitiesKHR* pCapabilities);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkCreateDisplayPlaneSurfaceKHR(
+ VkInstance instance,
+ const VkDisplaySurfaceCreateInfoKHR* pCreateInfo,
+ const VkAllocationCallbacks* pAllocator,
+ VkSurfaceKHR* pSurface);
+#endif
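+
+/*
+ * Illustrative (non-normative) use of the display queries above, following
+ * the usual two-call Vulkan enumeration idiom (query the count, then fill):
+ *
+ *     uint32_t count = 0;
+ *     vkGetPhysicalDeviceDisplayPropertiesKHR(physicalDevice, &count, NULL);
+ *     VkDisplayPropertiesKHR* props = (VkDisplayPropertiesKHR*)
+ *         malloc(count * sizeof(VkDisplayPropertiesKHR));
+ *     vkGetPhysicalDeviceDisplayPropertiesKHR(physicalDevice, &count, props);
+ */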
+
+#define VK_KHR_display_swapchain 1
+#define VK_KHR_DISPLAY_SWAPCHAIN_SPEC_VERSION 9
+#define VK_KHR_DISPLAY_SWAPCHAIN_EXTENSION_NAME "VK_KHR_display_swapchain"
+
+typedef struct VkDisplayPresentInfoKHR {
+ VkStructureType sType;
+ const void* pNext;
+ VkRect2D srcRect;
+ VkRect2D dstRect;
+ VkBool32 persistent;
+} VkDisplayPresentInfoKHR;
+
+
+typedef VkResult (VKAPI_PTR *PFN_vkCreateSharedSwapchainsKHR)(VkDevice device, uint32_t swapchainCount, const VkSwapchainCreateInfoKHR* pCreateInfos, const VkAllocationCallbacks* pAllocator, VkSwapchainKHR* pSwapchains);
+
+#ifndef VK_NO_PROTOTYPES
+VKAPI_ATTR VkResult VKAPI_CALL vkCreateSharedSwapchainsKHR(
+ VkDevice device,
+ uint32_t swapchainCount,
+ const VkSwapchainCreateInfoKHR* pCreateInfos,
+ const VkAllocationCallbacks* pAllocator,
+ VkSwapchainKHR* pSwapchains);
+#endif
+
+#ifdef VK_USE_PLATFORM_XLIB_KHR
+#define VK_KHR_xlib_surface 1
+#include <X11/Xlib.h>
+
+#define VK_KHR_XLIB_SURFACE_SPEC_VERSION 6
+#define VK_KHR_XLIB_SURFACE_EXTENSION_NAME "VK_KHR_xlib_surface"
+
+typedef VkFlags VkXlibSurfaceCreateFlagsKHR;
+
+typedef struct VkXlibSurfaceCreateInfoKHR {
+ VkStructureType sType;
+ const void* pNext;
+ VkXlibSurfaceCreateFlagsKHR flags;
+ Display* dpy;
+ Window window;
+} VkXlibSurfaceCreateInfoKHR;
+
+
+typedef VkResult (VKAPI_PTR *PFN_vkCreateXlibSurfaceKHR)(VkInstance instance, const VkXlibSurfaceCreateInfoKHR* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkSurfaceKHR* pSurface);
+typedef VkBool32 (VKAPI_PTR *PFN_vkGetPhysicalDeviceXlibPresentationSupportKHR)(VkPhysicalDevice physicalDevice, uint32_t queueFamilyIndex, Display* dpy, VisualID visualID);
+
+#ifndef VK_NO_PROTOTYPES
+VKAPI_ATTR VkResult VKAPI_CALL vkCreateXlibSurfaceKHR(
+ VkInstance instance,
+ const VkXlibSurfaceCreateInfoKHR* pCreateInfo,
+ const VkAllocationCallbacks* pAllocator,
+ VkSurfaceKHR* pSurface);
+
+VKAPI_ATTR VkBool32 VKAPI_CALL vkGetPhysicalDeviceXlibPresentationSupportKHR(
+ VkPhysicalDevice physicalDevice,
+ uint32_t queueFamilyIndex,
+ Display* dpy,
+ VisualID visualID);
+#endif
+#endif /* VK_USE_PLATFORM_XLIB_KHR */
+
+#ifdef VK_USE_PLATFORM_XCB_KHR
+#define VK_KHR_xcb_surface 1
+#include <xcb/xcb.h>
+
+#define VK_KHR_XCB_SURFACE_SPEC_VERSION 6
+#define VK_KHR_XCB_SURFACE_EXTENSION_NAME "VK_KHR_xcb_surface"
+
+typedef VkFlags VkXcbSurfaceCreateFlagsKHR;
+
+typedef struct VkXcbSurfaceCreateInfoKHR {
+ VkStructureType sType;
+ const void* pNext;
+ VkXcbSurfaceCreateFlagsKHR flags;
+ xcb_connection_t* connection;
+ xcb_window_t window;
+} VkXcbSurfaceCreateInfoKHR;
+
+
+typedef VkResult (VKAPI_PTR *PFN_vkCreateXcbSurfaceKHR)(VkInstance instance, const VkXcbSurfaceCreateInfoKHR* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkSurfaceKHR* pSurface);
+typedef VkBool32 (VKAPI_PTR *PFN_vkGetPhysicalDeviceXcbPresentationSupportKHR)(VkPhysicalDevice physicalDevice, uint32_t queueFamilyIndex, xcb_connection_t* connection, xcb_visualid_t visual_id);
+
+#ifndef VK_NO_PROTOTYPES
+VKAPI_ATTR VkResult VKAPI_CALL vkCreateXcbSurfaceKHR(
+ VkInstance instance,
+ const VkXcbSurfaceCreateInfoKHR* pCreateInfo,
+ const VkAllocationCallbacks* pAllocator,
+ VkSurfaceKHR* pSurface);
+
+VKAPI_ATTR VkBool32 VKAPI_CALL vkGetPhysicalDeviceXcbPresentationSupportKHR(
+ VkPhysicalDevice physicalDevice,
+ uint32_t queueFamilyIndex,
+ xcb_connection_t* connection,
+ xcb_visualid_t visual_id);
+#endif
+#endif /* VK_USE_PLATFORM_XCB_KHR */
+
+#ifdef VK_USE_PLATFORM_WAYLAND_KHR
+#define VK_KHR_wayland_surface 1
+#include <wayland-client.h>
+
+#define VK_KHR_WAYLAND_SURFACE_SPEC_VERSION 5
+#define VK_KHR_WAYLAND_SURFACE_EXTENSION_NAME "VK_KHR_wayland_surface"
+
+typedef VkFlags VkWaylandSurfaceCreateFlagsKHR;
+
+typedef struct VkWaylandSurfaceCreateInfoKHR {
+ VkStructureType sType;
+ const void* pNext;
+ VkWaylandSurfaceCreateFlagsKHR flags;
+ struct wl_display* display;
+ struct wl_surface* surface;
+} VkWaylandSurfaceCreateInfoKHR;
+
+
+typedef VkResult (VKAPI_PTR *PFN_vkCreateWaylandSurfaceKHR)(VkInstance instance, const VkWaylandSurfaceCreateInfoKHR* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkSurfaceKHR* pSurface);
+typedef VkBool32 (VKAPI_PTR *PFN_vkGetPhysicalDeviceWaylandPresentationSupportKHR)(VkPhysicalDevice physicalDevice, uint32_t queueFamilyIndex, struct wl_display* display);
+
+#ifndef VK_NO_PROTOTYPES
+VKAPI_ATTR VkResult VKAPI_CALL vkCreateWaylandSurfaceKHR(
+ VkInstance instance,
+ const VkWaylandSurfaceCreateInfoKHR* pCreateInfo,
+ const VkAllocationCallbacks* pAllocator,
+ VkSurfaceKHR* pSurface);
+
+VKAPI_ATTR VkBool32 VKAPI_CALL vkGetPhysicalDeviceWaylandPresentationSupportKHR(
+ VkPhysicalDevice physicalDevice,
+ uint32_t queueFamilyIndex,
+ struct wl_display* display);
+#endif
+#endif /* VK_USE_PLATFORM_WAYLAND_KHR */
+
+#ifdef VK_USE_PLATFORM_MIR_KHR
+#define VK_KHR_mir_surface 1
+#include <mir_toolkit/client_types.h>
+
+#define VK_KHR_MIR_SURFACE_SPEC_VERSION 4
+#define VK_KHR_MIR_SURFACE_EXTENSION_NAME "VK_KHR_mir_surface"
+
+typedef VkFlags VkMirSurfaceCreateFlagsKHR;
+
+typedef struct VkMirSurfaceCreateInfoKHR {
+ VkStructureType sType;
+ const void* pNext;
+ VkMirSurfaceCreateFlagsKHR flags;
+ MirConnection* connection;
+ MirSurface* mirSurface;
+} VkMirSurfaceCreateInfoKHR;
+
+
+typedef VkResult (VKAPI_PTR *PFN_vkCreateMirSurfaceKHR)(VkInstance instance, const VkMirSurfaceCreateInfoKHR* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkSurfaceKHR* pSurface);
+typedef VkBool32 (VKAPI_PTR *PFN_vkGetPhysicalDeviceMirPresentationSupportKHR)(VkPhysicalDevice physicalDevice, uint32_t queueFamilyIndex, MirConnection* connection);
+
+#ifndef VK_NO_PROTOTYPES
+VKAPI_ATTR VkResult VKAPI_CALL vkCreateMirSurfaceKHR(
+ VkInstance instance,
+ const VkMirSurfaceCreateInfoKHR* pCreateInfo,
+ const VkAllocationCallbacks* pAllocator,
+ VkSurfaceKHR* pSurface);
+
+VKAPI_ATTR VkBool32 VKAPI_CALL vkGetPhysicalDeviceMirPresentationSupportKHR(
+ VkPhysicalDevice physicalDevice,
+ uint32_t queueFamilyIndex,
+ MirConnection* connection);
+#endif
+#endif /* VK_USE_PLATFORM_MIR_KHR */
+
+#ifdef VK_USE_PLATFORM_ANDROID_KHR
+#define VK_KHR_android_surface 1
+#include <android/native_window.h>
+
+#define VK_KHR_ANDROID_SURFACE_SPEC_VERSION 6
+#define VK_KHR_ANDROID_SURFACE_EXTENSION_NAME "VK_KHR_android_surface"
+
+typedef VkFlags VkAndroidSurfaceCreateFlagsKHR;
+
+typedef struct VkAndroidSurfaceCreateInfoKHR {
+ VkStructureType sType;
+ const void* pNext;
+ VkAndroidSurfaceCreateFlagsKHR flags;
+ ANativeWindow* window;
+} VkAndroidSurfaceCreateInfoKHR;
+
+
+typedef VkResult (VKAPI_PTR *PFN_vkCreateAndroidSurfaceKHR)(VkInstance instance, const VkAndroidSurfaceCreateInfoKHR* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkSurfaceKHR* pSurface);
+
+#ifndef VK_NO_PROTOTYPES
+VKAPI_ATTR VkResult VKAPI_CALL vkCreateAndroidSurfaceKHR(
+ VkInstance instance,
+ const VkAndroidSurfaceCreateInfoKHR* pCreateInfo,
+ const VkAllocationCallbacks* pAllocator,
+ VkSurfaceKHR* pSurface);
+#endif
+#endif /* VK_USE_PLATFORM_ANDROID_KHR */
+
+#ifdef VK_USE_PLATFORM_WIN32_KHR
+#define VK_KHR_win32_surface 1
+#include <windows.h>
+
+#define VK_KHR_WIN32_SURFACE_SPEC_VERSION 5
+#define VK_KHR_WIN32_SURFACE_EXTENSION_NAME "VK_KHR_win32_surface"
+
+typedef VkFlags VkWin32SurfaceCreateFlagsKHR;
+
+typedef struct VkWin32SurfaceCreateInfoKHR {
+ VkStructureType sType;
+ const void* pNext;
+ VkWin32SurfaceCreateFlagsKHR flags;
+ HINSTANCE hinstance;
+ HWND hwnd;
+} VkWin32SurfaceCreateInfoKHR;
+
+
+typedef VkResult (VKAPI_PTR *PFN_vkCreateWin32SurfaceKHR)(VkInstance instance, const VkWin32SurfaceCreateInfoKHR* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkSurfaceKHR* pSurface);
+typedef VkBool32 (VKAPI_PTR *PFN_vkGetPhysicalDeviceWin32PresentationSupportKHR)(VkPhysicalDevice physicalDevice, uint32_t queueFamilyIndex);
+
+#ifndef VK_NO_PROTOTYPES
+VKAPI_ATTR VkResult VKAPI_CALL vkCreateWin32SurfaceKHR(
+ VkInstance instance,
+ const VkWin32SurfaceCreateInfoKHR* pCreateInfo,
+ const VkAllocationCallbacks* pAllocator,
+ VkSurfaceKHR* pSurface);
+
+VKAPI_ATTR VkBool32 VKAPI_CALL vkGetPhysicalDeviceWin32PresentationSupportKHR(
+ VkPhysicalDevice physicalDevice,
+ uint32_t queueFamilyIndex);
+#endif
+#endif /* VK_USE_PLATFORM_WIN32_KHR */
+
+#define VK_EXT_debug_report 1
+VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkDebugReportCallbackEXT)
+
+#define VK_EXT_DEBUG_REPORT_SPEC_VERSION 1
+#define VK_EXT_DEBUG_REPORT_EXTENSION_NAME "VK_EXT_debug_report"
+
+
+typedef enum VkDebugReportObjectTypeEXT {
+ VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT = 0,
+ VK_DEBUG_REPORT_OBJECT_TYPE_INSTANCE_EXT = 1,
+ VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT = 2,
+ VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT = 3,
+ VK_DEBUG_REPORT_OBJECT_TYPE_QUEUE_EXT = 4,
+ VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT = 5,
+ VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT = 6,
+ VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT = 7,
+ VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT = 8,
+ VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT = 9,
+ VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT = 10,
+ VK_DEBUG_REPORT_OBJECT_TYPE_EVENT_EXT = 11,
+ VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT = 12,
+ VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_VIEW_EXT = 13,
+ VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_VIEW_EXT = 14,
+ VK_DEBUG_REPORT_OBJECT_TYPE_SHADER_MODULE_EXT = 15,
+ VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_CACHE_EXT = 16,
+ VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_LAYOUT_EXT = 17,
+ VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT = 18,
+ VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT = 19,
+ VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_LAYOUT_EXT = 20,
+ VK_DEBUG_REPORT_OBJECT_TYPE_SAMPLER_EXT = 21,
+ VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_POOL_EXT = 22,
+ VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT = 23,
+ VK_DEBUG_REPORT_OBJECT_TYPE_FRAMEBUFFER_EXT = 24,
+ VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_POOL_EXT = 25,
+ VK_DEBUG_REPORT_OBJECT_TYPE_SURFACE_KHR_EXT = 26,
+ VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT = 27,
+ VK_DEBUG_REPORT_OBJECT_TYPE_DEBUG_REPORT_EXT = 28,
+} VkDebugReportObjectTypeEXT;
+
+typedef enum VkDebugReportErrorEXT {
+ VK_DEBUG_REPORT_ERROR_NONE_EXT = 0,
+ VK_DEBUG_REPORT_ERROR_CALLBACK_REF_EXT = 1,
+} VkDebugReportErrorEXT;
+
+
+typedef enum VkDebugReportFlagBitsEXT {
+ VK_DEBUG_REPORT_INFORMATION_BIT_EXT = 0x00000001,
+ VK_DEBUG_REPORT_WARNING_BIT_EXT = 0x00000002,
+ VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT = 0x00000004,
+ VK_DEBUG_REPORT_ERROR_BIT_EXT = 0x00000008,
+ VK_DEBUG_REPORT_DEBUG_BIT_EXT = 0x00000010,
+} VkDebugReportFlagBitsEXT;
+typedef VkFlags VkDebugReportFlagsEXT;
+
+typedef VkBool32 (VKAPI_PTR *PFN_vkDebugReportCallbackEXT)(
+ VkDebugReportFlagsEXT flags,
+ VkDebugReportObjectTypeEXT objectType,
+ uint64_t object,
+ size_t location,
+ int32_t messageCode,
+ const char* pLayerPrefix,
+ const char* pMessage,
+ void* pUserData);
+
+
+typedef struct VkDebugReportCallbackCreateInfoEXT {
+ VkStructureType sType;
+ const void* pNext;
+ VkDebugReportFlagsEXT flags;
+ PFN_vkDebugReportCallbackEXT pfnCallback;
+ void* pUserData;
+} VkDebugReportCallbackCreateInfoEXT;
+
+
+typedef VkResult (VKAPI_PTR *PFN_vkCreateDebugReportCallbackEXT)(VkInstance instance, const VkDebugReportCallbackCreateInfoEXT* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkDebugReportCallbackEXT* pCallback);
+typedef void (VKAPI_PTR *PFN_vkDestroyDebugReportCallbackEXT)(VkInstance instance, VkDebugReportCallbackEXT callback, const VkAllocationCallbacks* pAllocator);
+typedef void (VKAPI_PTR *PFN_vkDebugReportMessageEXT)(VkInstance instance, VkDebugReportFlagsEXT flags, VkDebugReportObjectTypeEXT objectType, uint64_t object, size_t location, int32_t messageCode, const char* pLayerPrefix, const char* pMessage);
+
+#ifndef VK_NO_PROTOTYPES
+VKAPI_ATTR VkResult VKAPI_CALL vkCreateDebugReportCallbackEXT(
+ VkInstance instance,
+ const VkDebugReportCallbackCreateInfoEXT* pCreateInfo,
+ const VkAllocationCallbacks* pAllocator,
+ VkDebugReportCallbackEXT* pCallback);
+
+VKAPI_ATTR void VKAPI_CALL vkDestroyDebugReportCallbackEXT(
+ VkInstance instance,
+ VkDebugReportCallbackEXT callback,
+ const VkAllocationCallbacks* pAllocator);
+
+VKAPI_ATTR void VKAPI_CALL vkDebugReportMessageEXT(
+ VkInstance instance,
+ VkDebugReportFlagsEXT flags,
+ VkDebugReportObjectTypeEXT objectType,
+ uint64_t object,
+ size_t location,
+ int32_t messageCode,
+ const char* pLayerPrefix,
+ const char* pMessage);
+#endif
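+
+/*
+ * Illustrative (non-normative) registration of a debug report callback;
+ * myCallback is an application-provided PFN_vkDebugReportCallbackEXT, and
+ * these extension entry points typically have to be resolved through
+ * vkGetInstanceProcAddr rather than called directly:
+ *
+ *     VkDebugReportCallbackCreateInfoEXT info;
+ *     memset(&info, 0, sizeof(info));
+ *     // older headers name this VK_STRUCTURE_TYPE_DEBUG_REPORT_CREATE_INFO_EXT
+ *     info.sType = VK_STRUCTURE_TYPE_DEBUG_REPORT_CALLBACK_CREATE_INFO_EXT;
+ *     info.flags = VK_DEBUG_REPORT_ERROR_BIT_EXT |
+ *                  VK_DEBUG_REPORT_WARNING_BIT_EXT;
+ *     info.pfnCallback = myCallback;
+ *     VkDebugReportCallbackEXT callback;
+ *     vkCreateDebugReportCallbackEXT(instance, &info, NULL, &callback);
+ */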
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
--- /dev/null
+#!/bin/bash
+# Copyright 2015 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
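+# Scrapes the VKAPI_ATTR function prototypes out of the vendored vulkan.h and
+# writes three blocks of GrVkInterface boilerplate (validate() checks,
+# interface member declarations, and GET_PROC calls) into $outFile for
+# copy/paste into the files named in each block's banner.
+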
+headerLoc=../third_party/vulkan/vulkan.h
+outFile=tempVkInterface
+
+if [ ! -e "$outFile" ] ; then
+  touch "$outFile"
+fi
+
+chmod 700 "$outFile"
+
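+# Block 1: turn each prototype "VKAPI_ATTR <ret> VKAPI_CALL vkFoo(...)" into
+# "NULL == fFunctions.fFoo ||", then patch the first entry into an
+# "if (NULL == ..." opener, indent the continuation lines (output lines 5 on,
+# after the three banner lines), and close the last entry with ") {".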
+echo '// *******************************************' > "$outFile"
+echo '// Place these lines into GrVkInterface.cpp::validate' >> "$outFile"
+echo '// *******************************************' >> "$outFile"
+sed -n 's/^VKAPI_ATTR \(VkResult\|void\) VKAPI_CALL vk\([a-zA-Z]*\).*/NULL == fFunctions.f\2 ||/p' "$headerLoc" >> "$outFile"
+sed -i '1,/NULL/ s/^NULL/if (NULL/' "$outFile"
+sed -i '5,$ s/^/ /' "$outFile"
+sed -i '$ s/ ||/) {/' "$outFile"
+
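+# Block 2: one "VkPtr<PFN_vkFoo> fFoo;" member declaration per entry point.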
+echo '' >> "$outFile"
+echo '// *******************************************' >> "$outFile"
+echo '// Place these lines into GrVkInterface.h' >> "$outFile"
+echo '// *******************************************' >> "$outFile"
+sed -n 's/^VKAPI_ATTR \(VkResult\|void\) VKAPI_CALL vk\([a-zA-Z]*\).*/VkPtr<PFN_vk\2> f\2;/p' "$headerLoc" >> "$outFile"
+
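+# Block 3: one "GET_PROC(Foo);" call per entry point.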
+echo '' >> "$outFile"
+echo '// *******************************************' >> "$outFile"
+echo '// Place these lines into GrVkInterface.cpp::GrVKCreateInterface' >> "$outFile"
+echo '// *******************************************' >> "$outFile"
+sed -n 's/^VKAPI_ATTR \(VkResult\|void\) VKAPI_CALL vk\([a-zA-Z]*\).*/GET_PROC(\2);/p' "$headerLoc" >> "$outFile"
+