VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkBufferView)
VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkImageView)
VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkShaderModule)
-VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkShader)
VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkPipelineCache)
VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkPipelineLayout)
VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkRenderPass)
VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO = 2,
VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO = 3,
VK_STRUCTURE_TYPE_SHADER_MODULE_CREATE_INFO = 4,
- VK_STRUCTURE_TYPE_SHADER_CREATE_INFO = 5,
VK_STRUCTURE_TYPE_COMPUTE_PIPELINE_CREATE_INFO = 6,
VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO = 7,
VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO = 8,
typedef VkFlags VkImageViewCreateFlags;
typedef VkFlags VkShaderModuleCreateFlags;
typedef VkFlags VkPipelineCacheCreateFlags;
-typedef VkFlags VkShaderCreateFlags;
typedef enum VkPipelineCreateFlagBits {
VK_PIPELINE_CREATE_DISABLE_OPTIMIZATION_BIT = 0x00000001,
const uint32_t* pCode;
} VkShaderModuleCreateInfo;
-typedef struct {
- VkStructureType sType;
- const void* pNext;
- VkShaderModule module;
- const char* pName;
- VkShaderCreateFlags flags;
- VkShaderStage stage;
-} VkShaderCreateInfo;
-
typedef struct VkPipelineCacheCreateInfo {
VkStructureType sType;
const void* pNext;
const void* pNext;
VkPipelineShaderStageCreateFlags flags;
VkShaderStage stage;
- VkShader shader;
+ VkShaderModule module;
+ const char* pName;
const VkSpecializationInfo* pSpecializationInfo;
} VkPipelineShaderStageCreateInfo;
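For reference, here is roughly how a stage is described after this change, with the intermediate VkShader object gone and the shader module plus entry-point name given directly in the stage info. This is a minimal sketch, not code from the patch; vertShaderModule is a hypothetical handle assumed to come from an earlier vkCreateShaderModule call, and the enum spelling follows the pre-1.0 VkShaderStage values used in this header.

    /* Sketch only: fill a vertex stage straight from a shader module.
     * vertShaderModule is an assumed VkShaderModule from vkCreateShaderModule;
     * there is no longer a separate VkShader to create or destroy. */
    VkPipelineShaderStageCreateInfo stage_info = {
        .sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO,
        .pNext = NULL,
        .flags = 0,
        .stage = VK_SHADER_STAGE_VERTEX,
        .module = vertShaderModule,
        .pName = "main",
        .pSpecializationInfo = NULL,
    };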
typedef void (VKAPI_PTR *PFN_vkDestroyImageView)(VkDevice device, VkImageView imageView, const VkAllocationCallbacks* pAllocator);
typedef VkResult (VKAPI_PTR *PFN_vkCreateShaderModule)(VkDevice device, const VkShaderModuleCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkShaderModule* pShaderModule);
typedef void (VKAPI_PTR *PFN_vkDestroyShaderModule)(VkDevice device, VkShaderModule shaderModule, const VkAllocationCallbacks* pAllocator);
-typedef VkResult (VKAPI_PTR *PFN_vkCreateShader)(VkDevice device, const VkShaderCreateInfo* pCreateInfo, VkShader* pShader);
-typedef void (VKAPI_PTR *PFN_vkDestroyShader)(VkDevice device, VkShader shader);
typedef VkResult (VKAPI_PTR *PFN_vkCreatePipelineCache)(VkDevice device, const VkPipelineCacheCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkPipelineCache* pPipelineCache);
typedef void (VKAPI_PTR *PFN_vkDestroyPipelineCache)(VkDevice device, VkPipelineCache pipelineCache, const VkAllocationCallbacks* pAllocator);
typedef VkResult (VKAPI_PTR *PFN_vkGetPipelineCacheData)(VkDevice device, VkPipelineCache pipelineCache, size_t* pDataSize, void* pData);
VkShaderModule shaderModule,
const VkAllocationCallbacks* pAllocator);
-VKAPI_ATTR VkResult VKAPI_CALL vkCreateShader(
- VkDevice device,
- const VkShaderCreateInfo* pCreateInfo,
- VkShader* pShader);
-
-VKAPI_ATTR void VKAPI_CALL vkDestroyShader(
- VkDevice device,
- VkShader shader);
-
VKAPI_ATTR VkResult VKAPI_CALL vkCreatePipelineCache(
VkDevice device,
const VkPipelineCacheCreateInfo* pCreateInfo,
* to provide GLSL source for the vertex shader so that the compiler
* does not dead-code our inputs.
*/
- struct anv_shader_module vsm = {
+ struct anv_shader_module vs = {
.nir = build_nir_vertex_shader(false),
};
- struct anv_shader_module fsm_2d = {
+ struct anv_shader_module fs_2d = {
.nir = build_nir_copy_fragment_shader(GLSL_SAMPLER_DIM_2D),
};
- struct anv_shader_module fsm_3d = {
+ struct anv_shader_module fs_3d = {
.nir = build_nir_copy_fragment_shader(GLSL_SAMPLER_DIM_3D),
};
- VkShader vs;
- anv_CreateShader(anv_device_to_handle(device),
- &(VkShaderCreateInfo) {
- .sType = VK_STRUCTURE_TYPE_SHADER_CREATE_INFO,
- .module = anv_shader_module_to_handle(&vsm),
- .pName = "main",
- }, &vs);
-
- VkShader fs_2d;
- anv_CreateShader(anv_device_to_handle(device),
- &(VkShaderCreateInfo) {
- .sType = VK_STRUCTURE_TYPE_SHADER_CREATE_INFO,
- .module = anv_shader_module_to_handle(&fsm_2d),
- .pName = "main",
- }, &fs_2d);
-
- VkShader fs_3d;
- anv_CreateShader(anv_device_to_handle(device),
- &(VkShaderCreateInfo) {
- .sType = VK_STRUCTURE_TYPE_SHADER_CREATE_INFO,
- .module = anv_shader_module_to_handle(&fsm_3d),
- .pName = "main",
- }, &fs_3d);
-
VkPipelineVertexInputStateCreateInfo vi_create_info = {
.sType = VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO,
.vertexBindingDescriptionCount = 2,
{
.sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO,
.stage = VK_SHADER_STAGE_VERTEX,
- .shader = vs,
+ .module = anv_shader_module_to_handle(&vs),
+ .pName = "main",
.pSpecializationInfo = NULL
}, {
.sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO,
.stage = VK_SHADER_STAGE_FRAGMENT,
- .shader = VK_NULL_HANDLE, /* TEMPLATE VALUE! FILL ME IN! */
+ .module = VK_NULL_HANDLE, /* TEMPLATE VALUE! FILL ME IN! */
+ .pName = "main",
.pSpecializationInfo = NULL
},
};
.use_rectlist = true
};
- pipeline_shader_stages[1].shader = fs_2d;
+ pipeline_shader_stages[1].module = anv_shader_module_to_handle(&fs_2d);
anv_graphics_pipeline_create(anv_device_to_handle(device),
&vk_pipeline_info, &anv_pipeline_info,
NULL, &device->meta_state.blit.pipeline_2d_src);
- pipeline_shader_stages[1].shader = fs_3d;
+ pipeline_shader_stages[1].module = anv_shader_module_to_handle(&fs_3d);
anv_graphics_pipeline_create(anv_device_to_handle(device),
&vk_pipeline_info, &anv_pipeline_info,
NULL, &device->meta_state.blit.pipeline_3d_src);
- anv_DestroyShader(anv_device_to_handle(device), vs);
- anv_DestroyShader(anv_device_to_handle(device), fs_2d);
- anv_DestroyShader(anv_device_to_handle(device), fs_3d);
- ralloc_free(vsm.nir);
- ralloc_free(fsm_2d.nir);
- ralloc_free(fsm_3d.nir);
+ ralloc_free(vs.nir);
+ ralloc_free(fs_2d.nir);
+ ralloc_free(fs_3d.nir);
}
static void
anv_free2(&device->alloc, pAllocator, module);
}
-VkResult anv_CreateShader(
- VkDevice _device,
- const VkShaderCreateInfo* pCreateInfo,
- VkShader* pShader)
-{
- ANV_FROM_HANDLE(anv_device, device, _device);
- ANV_FROM_HANDLE(anv_shader_module, module, pCreateInfo->module);
- struct anv_shader *shader;
-
- assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_SHADER_CREATE_INFO);
- assert(pCreateInfo->flags == 0);
-
- const char *name = pCreateInfo->pName ? pCreateInfo->pName : "main";
- size_t name_len = strlen(name);
-
- shader = anv_alloc(&device->alloc, sizeof(*shader) + name_len + 1, 8,
- VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
- if (shader == NULL)
- return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
-
- shader->module = module,
- memcpy(shader->entrypoint, name, name_len + 1);
-
- *pShader = anv_shader_to_handle(shader);
-
- return VK_SUCCESS;
-}
-
-void anv_DestroyShader(
- VkDevice _device,
- VkShader _shader)
-{
- ANV_FROM_HANDLE(anv_device, device, _device);
- ANV_FROM_HANDLE(anv_shader, shader, _shader);
-
- anv_free(&device->alloc, shader);
-}
-
#define SPIR_V_MAGIC_NUMBER 0x07230203
static const gl_shader_stage vk_shader_stage_to_mesa_stage[] = {
*/
static nir_shader *
anv_shader_compile_to_nir(struct anv_device *device,
- struct anv_shader *shader, VkShaderStage vk_stage)
+ struct anv_shader_module *module,
+ const char *entrypoint_name, VkShaderStage vk_stage)
{
- if (strcmp(shader->entrypoint, "main") != 0) {
+ if (strcmp(entrypoint_name, "main") != 0) {
anv_finishme("Multiple shaders per module not really supported");
}
compiler->glsl_compiler_options[stage].NirOptions;
nir_shader *nir;
- if (shader->module->nir) {
+ if (module->nir) {
/* Some things such as our meta clear/blit code will give us a NIR
* shader directly. In that case, we just ignore the SPIR-V entirely
* and just use the NIR shader */
- nir = shader->module->nir;
+ nir = module->nir;
nir->options = nir_options;
} else {
- uint32_t *spirv = (uint32_t *) shader->module->data;
+ uint32_t *spirv = (uint32_t *) module->data;
assert(spirv[0] == SPIR_V_MAGIC_NUMBER);
- assert(shader->module->size % 4 == 0);
+ assert(module->size % 4 == 0);
- nir = spirv_to_nir(spirv, shader->module->size / 4, stage, nir_options);
+ nir = spirv_to_nir(spirv, module->size / 4, stage, nir_options);
}
nir_validate_shader(nir);
*/
nir_function_impl *entrypoint = NULL;
nir_foreach_overload(nir, overload) {
- if (strcmp(shader->entrypoint, overload->function->name) == 0 &&
+ if (strcmp(entrypoint_name, overload->function->name) == 0 &&
overload->impl) {
assert(entrypoint == NULL);
entrypoint = overload->impl;
static nir_shader *
anv_pipeline_compile(struct anv_pipeline *pipeline,
- struct anv_shader *shader,
+ struct anv_shader_module *module,
+ const char *entrypoint,
VkShaderStage stage,
struct brw_stage_prog_data *prog_data)
{
const struct brw_compiler *compiler =
pipeline->device->instance->physicalDevice.compiler;
- nir_shader *nir = anv_shader_compile_to_nir(pipeline->device, shader, stage);
+ nir_shader *nir = anv_shader_compile_to_nir(pipeline->device,
+ module, entrypoint, stage);
if (nir == NULL)
return NULL;
static VkResult
anv_pipeline_compile_vs(struct anv_pipeline *pipeline,
const VkGraphicsPipelineCreateInfo *info,
- struct anv_shader *shader)
+ struct anv_shader_module *module,
+ const char *entrypoint)
{
const struct brw_compiler *compiler =
pipeline->device->instance->physicalDevice.compiler;
memset(prog_data, 0, sizeof(*prog_data));
- nir_shader *nir = anv_pipeline_compile(pipeline, shader,
+ nir_shader *nir = anv_pipeline_compile(pipeline, module, entrypoint,
VK_SHADER_STAGE_VERTEX,
&prog_data->base.base);
if (nir == NULL)
void *mem_ctx = ralloc_context(NULL);
- if (shader->module->nir == NULL)
+ if (module->nir == NULL)
ralloc_steal(mem_ctx, nir);
prog_data->inputs_read = nir->info.inputs_read;
static VkResult
anv_pipeline_compile_gs(struct anv_pipeline *pipeline,
const VkGraphicsPipelineCreateInfo *info,
- struct anv_shader *shader)
+ struct anv_shader_module *module,
+ const char *entrypoint)
{
const struct brw_compiler *compiler =
pipeline->device->instance->physicalDevice.compiler;
memset(prog_data, 0, sizeof(*prog_data));
- nir_shader *nir = anv_pipeline_compile(pipeline, shader,
+ nir_shader *nir = anv_pipeline_compile(pipeline, module, entrypoint,
VK_SHADER_STAGE_GEOMETRY,
&prog_data->base.base);
if (nir == NULL)
void *mem_ctx = ralloc_context(NULL);
- if (shader->module->nir == NULL)
+ if (module->nir == NULL)
ralloc_steal(mem_ctx, nir);
brw_compute_vue_map(&pipeline->device->info,
static VkResult
anv_pipeline_compile_fs(struct anv_pipeline *pipeline,
const VkGraphicsPipelineCreateInfo *info,
- struct anv_shader *shader)
+ struct anv_shader_module *module,
+ const char *entrypoint)
{
const struct brw_compiler *compiler =
pipeline->device->instance->physicalDevice.compiler;
prog_data->binding_table.render_target_start = 0;
- nir_shader *nir = anv_pipeline_compile(pipeline, shader,
+ nir_shader *nir = anv_pipeline_compile(pipeline, module, entrypoint,
VK_SHADER_STAGE_FRAGMENT,
&prog_data->base);
if (nir == NULL)
void *mem_ctx = ralloc_context(NULL);
- if (shader->module->nir == NULL)
+ if (module->nir == NULL)
ralloc_steal(mem_ctx, nir);
unsigned code_size;
VkResult
anv_pipeline_compile_cs(struct anv_pipeline *pipeline,
const VkComputePipelineCreateInfo *info,
- struct anv_shader *shader)
+ struct anv_shader_module *module,
+ const char *entrypoint)
{
const struct brw_compiler *compiler =
pipeline->device->instance->physicalDevice.compiler;
memset(prog_data, 0, sizeof(*prog_data));
- nir_shader *nir = anv_pipeline_compile(pipeline, shader,
+ nir_shader *nir = anv_pipeline_compile(pipeline, module, entrypoint,
VK_SHADER_STAGE_COMPUTE,
&prog_data->base);
if (nir == NULL)
void *mem_ctx = ralloc_context(NULL);
- if (shader->module->nir == NULL)
+ if (module->nir == NULL)
ralloc_steal(mem_ctx, nir);
unsigned code_size;
pipeline->total_scratch = 0;
for (uint32_t i = 0; i < pCreateInfo->stageCount; i++) {
- ANV_FROM_HANDLE(anv_shader, shader, pCreateInfo->pStages[i].shader);
+ ANV_FROM_HANDLE(anv_shader_module, module,
+ pCreateInfo->pStages[i].module);
+ const char *entrypoint = pCreateInfo->pStages[i].pName;
switch (pCreateInfo->pStages[i].stage) {
case VK_SHADER_STAGE_VERTEX:
- anv_pipeline_compile_vs(pipeline, pCreateInfo, shader);
+ anv_pipeline_compile_vs(pipeline, pCreateInfo, module, entrypoint);
break;
case VK_SHADER_STAGE_GEOMETRY:
- anv_pipeline_compile_gs(pipeline, pCreateInfo, shader);
+ anv_pipeline_compile_gs(pipeline, pCreateInfo, module, entrypoint);
break;
case VK_SHADER_STAGE_FRAGMENT:
- anv_pipeline_compile_fs(pipeline, pCreateInfo, shader);
+ anv_pipeline_compile_fs(pipeline, pCreateInfo, module, entrypoint);
break;
default:
anv_finishme("Unsupported shader stage");