If a VkPipelineCreateFlags2CreateInfoKHR structure is present in the pNext chain, its flags are used instead of the legacy flags field.
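For example (illustrative application-side usage, not part of this patch),
chaining the new structure makes the driver ignore the legacy ::flags field:

    VkPipelineCreateFlags2CreateInfoKHR flags2 = {
       .sType = VK_STRUCTURE_TYPE_PIPELINE_CREATE_FLAGS_2_CREATE_INFO_KHR,
       .flags = VK_PIPELINE_CREATE_2_DISABLE_OPTIMIZATION_BIT_KHR,
    };

    VkGraphicsPipelineCreateInfo pipeline_info = {
       .sType = VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO,
       .pNext = &flags2,
       .flags = 0, /* ignored: the chained flags2.flags is used instead */
       /* ... */
    };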
Signed-off-by: Samuel Pitoiset <samuel.pitoiset@gmail.com>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/24392>
if (!pipeline)
continue;
- if (pCreateInfos[i].flags & VK_PIPELINE_CREATE_LIBRARY_BIT_KHR)
+ const VkPipelineCreateFlagBits2KHR create_flags = radv_get_pipeline_create_flags(&pCreateInfos[i]);
+ if (create_flags & VK_PIPELINE_CREATE_2_LIBRARY_BIT_KHR)
continue;
result = radv_sqtt_reloc_graphics_shaders(device, radv_pipeline_to_graphics(pipeline));
if (!pipeline)
continue;
- if (pCreateInfos[i].flags & VK_PIPELINE_CREATE_LIBRARY_BIT_KHR)
+ const VkPipelineCreateFlagBits2KHR create_flags = radv_get_pipeline_create_flags(&pCreateInfos[i]);
+ if (create_flags & VK_PIPELINE_CREATE_2_LIBRARY_BIT_KHR)
continue;
result = radv_register_pipeline(device, pipeline);
}
bool
-radv_pipeline_capture_shaders(const struct radv_device *device, VkPipelineCreateFlags flags)
+radv_pipeline_capture_shaders(const struct radv_device *device, VkPipelineCreateFlags2KHR flags)
{
- return (flags & VK_PIPELINE_CREATE_CAPTURE_INTERNAL_REPRESENTATIONS_BIT_KHR) ||
+ return (flags & VK_PIPELINE_CREATE_2_CAPTURE_INTERNAL_REPRESENTATIONS_BIT_KHR) ||
(device->instance->debug_flags & RADV_DEBUG_DUMP_SHADERS) || device->keep_shader_info;
}
bool
-radv_pipeline_capture_shader_stats(const struct radv_device *device, VkPipelineCreateFlags flags)
+radv_pipeline_capture_shader_stats(const struct radv_device *device, VkPipelineCreateFlags2KHR flags)
{
- return (flags & VK_PIPELINE_CREATE_CAPTURE_STATISTICS_BIT_KHR) ||
+ return (flags & VK_PIPELINE_CREATE_2_CAPTURE_STATISTICS_BIT_KHR) ||
(device->instance->debug_flags & RADV_DEBUG_DUMP_SHADER_STATS) || device->keep_shader_info;
}
struct radv_pipeline_key
radv_generate_pipeline_key(const struct radv_device *device, const VkPipelineShaderStageCreateInfo *stages,
- const unsigned num_stages, VkPipelineCreateFlags flags, const void *pNext)
+ const unsigned num_stages, VkPipelineCreateFlags2KHR flags, const void *pNext)
{
struct radv_pipeline_key key;
memset(&key, 0, sizeof(key));
- if (flags & VK_PIPELINE_CREATE_DISABLE_OPTIMIZATION_BIT)
+ if (flags & VK_PIPELINE_CREATE_2_DISABLE_OPTIMIZATION_BIT_KHR)
key.optimisations_disabled = 1;
key.disable_aniso_single_level =
}
}
- const uint32_t pipeline_flags =
- pCreateInfo->flags &
- (VK_PIPELINE_CREATE_RAY_TRACING_SKIP_TRIANGLES_BIT_KHR | VK_PIPELINE_CREATE_RAY_TRACING_SKIP_AABBS_BIT_KHR |
- VK_PIPELINE_CREATE_RAY_TRACING_NO_NULL_ANY_HIT_SHADERS_BIT_KHR |
- VK_PIPELINE_CREATE_RAY_TRACING_NO_NULL_CLOSEST_HIT_SHADERS_BIT_KHR |
- VK_PIPELINE_CREATE_RAY_TRACING_NO_NULL_MISS_SHADERS_BIT_KHR |
- VK_PIPELINE_CREATE_RAY_TRACING_NO_NULL_INTERSECTION_SHADERS_BIT_KHR | VK_PIPELINE_CREATE_LIBRARY_BIT_KHR);
+ const uint64_t pipeline_flags =
+ radv_get_pipeline_create_flags(pCreateInfo) &
+ (VK_PIPELINE_CREATE_2_RAY_TRACING_SKIP_TRIANGLES_BIT_KHR | VK_PIPELINE_CREATE_2_RAY_TRACING_SKIP_AABBS_BIT_KHR |
+ VK_PIPELINE_CREATE_2_RAY_TRACING_NO_NULL_ANY_HIT_SHADERS_BIT_KHR |
+ VK_PIPELINE_CREATE_2_RAY_TRACING_NO_NULL_CLOSEST_HIT_SHADERS_BIT_KHR |
+ VK_PIPELINE_CREATE_2_RAY_TRACING_NO_NULL_MISS_SHADERS_BIT_KHR |
+ VK_PIPELINE_CREATE_2_RAY_TRACING_NO_NULL_INTERSECTION_SHADERS_BIT_KHR | VK_PIPELINE_CREATE_2_LIBRARY_BIT_KHR);
_mesa_sha1_update(&ctx, &pipeline_flags, 4);
_mesa_sha1_update(&ctx, &flags, 4);
struct radv_pipeline_cache_object *pipeline_obj = container_of(object, struct radv_pipeline_cache_object, base);
- bool is_library = pipeline->base.base.create_flags & VK_PIPELINE_CREATE_LIBRARY_BIT_KHR;
+ bool is_library = pipeline->base.base.create_flags & VK_PIPELINE_CREATE_2_LIBRARY_BIT_KHR;
bool complete = true;
unsigned idx = 0;
goto done;
}
- if (pipeline->base.create_flags & VK_PIPELINE_CREATE_FAIL_ON_PIPELINE_COMPILE_REQUIRED_BIT)
+ if (pipeline->base.create_flags & VK_PIPELINE_CREATE_2_FAIL_ON_PIPELINE_COMPILE_REQUIRED_BIT_KHR)
return VK_PIPELINE_COMPILE_REQUIRED;
int64_t stage_start = os_time_get_nano();
}
radv_pipeline_init(device, &pipeline->base, RADV_PIPELINE_COMPUTE);
- pipeline->base.create_flags = pCreateInfo->flags;
+ pipeline->base.create_flags = radv_get_pipeline_create_flags(pCreateInfo);
pipeline->base.is_internal = _cache == device->meta_state.cache;
const VkPipelineCreationFeedbackCreateInfo *creation_feedback =
result = r;
pPipelines[i] = VK_NULL_HANDLE;
- if (pCreateInfos[i].flags & VK_PIPELINE_CREATE_EARLY_RETURN_ON_FAILURE_BIT)
+ const VkPipelineCreateFlagBits2KHR create_flags = radv_get_pipeline_create_flags(&pCreateInfos[i]);
+ if (create_flags & VK_PIPELINE_CREATE_2_EARLY_RETURN_ON_FAILURE_BIT_KHR)
break;
}
}
return !!subpass->fragment_shading_rate_attachment;
}
- return (pipeline->base.create_flags & VK_PIPELINE_CREATE_RENDERING_FRAGMENT_SHADING_RATE_ATTACHMENT_BIT_KHR) != 0;
+ return (pipeline->base.create_flags & VK_PIPELINE_CREATE_2_RENDERING_FRAGMENT_SHADING_RATE_ATTACHMENT_BIT_KHR) != 0;
}
static void
*
* "However, in the specific case that a final link is being
* performed between stages and
* `VK_PIPELINE_CREATE_LINK_TIME_OPTIMIZATION_BIT_EXT` is specified,
* the application can override the pipeline layout with one that is
* compatible with that union but does not have the
* `VK_PIPELINE_LAYOUT_CREATE_INDEPENDENT_SETS_BIT_EXT` flag set,
*
* In that case discard whatever was imported before.
*/
- if (pipeline->base.create_flags & VK_PIPELINE_CREATE_LINK_TIME_OPTIMIZATION_BIT_EXT &&
+ if (pipeline->base.create_flags & VK_PIPELINE_CREATE_2_LINK_TIME_OPTIMIZATION_BIT_EXT &&
!pipeline_layout->independent_sets) {
radv_pipeline_layout_finish(device, layout);
radv_pipeline_layout_init(device, layout, false /* independent_sets */);
}
}
- return (pipeline->base.create_flags & VK_PIPELINE_CREATE_DEPTH_STENCIL_ATTACHMENT_FEEDBACK_LOOP_BIT_EXT) != 0;
+ return (pipeline->base.create_flags & VK_PIPELINE_CREATE_2_DEPTH_STENCIL_ATTACHMENT_FEEDBACK_LOOP_BIT_EXT) != 0;
}
static void
{
const VkPipelineLibraryCreateInfoKHR *libs_info =
vk_find_struct_const(pCreateInfo->pNext, PIPELINE_LIBRARY_CREATE_INFO_KHR);
- const bool link_optimize = (pipeline->base.create_flags & VK_PIPELINE_CREATE_LINK_TIME_OPTIMIZATION_BIT_EXT) != 0;
+ const bool link_optimize = (pipeline->base.create_flags & VK_PIPELINE_CREATE_2_LINK_TIME_OPTIMIZATION_BIT_EXT) != 0;
/* Nothing to load if no libs are imported. */
if (!libs_info)
bool skip_shaders_cache = false;
VkResult result = VK_SUCCESS;
const bool retain_shaders =
- !!(pipeline->base.create_flags & VK_PIPELINE_CREATE_RETAIN_LINK_TIME_OPTIMIZATION_INFO_BIT_EXT);
+ !!(pipeline->base.create_flags & VK_PIPELINE_CREATE_2_RETAIN_LINK_TIME_OPTIMIZATION_INFO_BIT_EXT);
struct radv_retained_shaders *retained_shaders = NULL;
int64_t pipeline_start = os_time_get_nano();
*/
if (fast_linking_enabled || keep_executable_info) {
skip_shaders_cache = true;
- } else if ((pipeline->base.create_flags & VK_PIPELINE_CREATE_LIBRARY_BIT_KHR) && retain_shaders) {
+ } else if ((pipeline->base.create_flags & VK_PIPELINE_CREATE_2_LIBRARY_BIT_KHR) && retain_shaders) {
for (uint32_t i = 0; i < MESA_VULKAN_SHADER_STAGES; i++) {
if (stages[i].entrypoint && !stages[i].spirv.size) {
skip_shaders_cache = true;
goto done;
}
- if (pipeline->base.create_flags & VK_PIPELINE_CREATE_FAIL_ON_PIPELINE_COMPILE_REQUIRED_BIT)
+ if (pipeline->base.create_flags & VK_PIPELINE_CREATE_2_FAIL_ON_PIPELINE_COMPILE_REQUIRED_BIT_KHR)
return VK_PIPELINE_COMPILE_REQUIRED;
if (retain_shaders) {
if (!libs_info)
return false;
- return !(pipeline->base.create_flags & VK_PIPELINE_CREATE_LINK_TIME_OPTIMIZATION_BIT_EXT);
+ return !(pipeline->base.create_flags & VK_PIPELINE_CREATE_2_LINK_TIME_OPTIMIZATION_BIT_EXT);
}
bool
/* If we have libraries, import them first. */
if (libs_info) {
- const bool link_optimize = (pipeline->base.create_flags & VK_PIPELINE_CREATE_LINK_TIME_OPTIMIZATION_BIT_EXT) != 0;
+ const bool link_optimize =
+ (pipeline->base.create_flags & VK_PIPELINE_CREATE_2_LINK_TIME_OPTIMIZATION_BIT_EXT) != 0;
for (uint32_t i = 0; i < libs_info->libraryCount; i++) {
RADV_FROM_HANDLE(radv_pipeline, pipeline_lib, libs_info->pLibraries[i]);
assert(pipeline_lib->type == RADV_PIPELINE_GRAPHICS_LIB);
/* If we have link time optimization, all libraries must be created with
- * VK_PIPELINE_CREATE_RETAIN_LINK_TIME_OPTIMIZATION_INFO_BIT_EXT.
+ * VK_PIPELINE_CREATE_2_RETAIN_LINK_TIME_OPTIMIZATION_INFO_BIT_EXT.
*/
assert(!link_optimize || gfx_pipeline_lib->base.retain_shaders);
return vk_error(device, VK_ERROR_OUT_OF_HOST_MEMORY);
radv_pipeline_init(device, &pipeline->base, RADV_PIPELINE_GRAPHICS);
- pipeline->base.create_flags = pCreateInfo->flags;
+ pipeline->base.create_flags = radv_get_pipeline_create_flags(pCreateInfo);
pipeline->base.is_internal = _cache == device->meta_state.cache;
result = radv_graphics_pipeline_init(pipeline, device, cache, pCreateInfo, extra);
pipeline->base.last_vgt_api_stage = MESA_SHADER_NONE;
pipeline->base.retain_shaders =
- (pipeline->base.base.create_flags & VK_PIPELINE_CREATE_RETAIN_LINK_TIME_OPTIMIZATION_INFO_BIT_EXT) != 0;
+ (pipeline->base.base.create_flags & VK_PIPELINE_CREATE_2_RETAIN_LINK_TIME_OPTIMIZATION_INFO_BIT_EXT) != 0;
pipeline->lib_flags = needed_lib_flags;
radv_pipeline_layout_init(device, pipeline_layout, false);
/* If we have libraries, import them first. */
if (libs_info) {
const bool link_optimize =
- (pipeline->base.base.create_flags & VK_PIPELINE_CREATE_LINK_TIME_OPTIMIZATION_BIT_EXT) != 0;
+ (pipeline->base.base.create_flags & VK_PIPELINE_CREATE_2_LINK_TIME_OPTIMIZATION_BIT_EXT) != 0;
for (uint32_t i = 0; i < libs_info->libraryCount; i++) {
RADV_FROM_HANDLE(radv_pipeline, pipeline_lib, libs_info->pLibraries[i]);
return vk_error(device, VK_ERROR_OUT_OF_HOST_MEMORY);
radv_pipeline_init(device, &pipeline->base.base, RADV_PIPELINE_GRAPHICS_LIB);
- pipeline->base.base.create_flags = pCreateInfo->flags;
+ pipeline->base.base.create_flags = radv_get_pipeline_create_flags(pCreateInfo);
pipeline->mem_ctx = ralloc_context(NULL);
unsigned i = 0;
for (; i < count; i++) {
+ const VkPipelineCreateFlagBits2KHR create_flags = radv_get_pipeline_create_flags(&pCreateInfos[i]);
VkResult r;
- if (pCreateInfos[i].flags & VK_PIPELINE_CREATE_LIBRARY_BIT_KHR) {
+ if (create_flags & VK_PIPELINE_CREATE_2_LIBRARY_BIT_KHR) {
r = radv_graphics_lib_pipeline_create(_device, pipelineCache, &pCreateInfos[i], pAllocator, &pPipelines[i]);
} else {
r = radv_graphics_pipeline_create(_device, pipelineCache, &pCreateInfos[i], NULL, pAllocator, &pPipelines[i]);
result = r;
pPipelines[i] = VK_NULL_HANDLE;
- if (pCreateInfos[i].flags & VK_PIPELINE_CREATE_EARLY_RETURN_ON_FAILURE_BIT)
+ if (create_flags & VK_PIPELINE_CREATE_2_EARLY_RETURN_ON_FAILURE_BIT_KHR)
break;
}
}
struct radv_ray_tracing_group *groups)
{
bool capture_replay =
- pipeline->base.base.create_flags & VK_PIPELINE_CREATE_RAY_TRACING_SHADER_GROUP_HANDLE_CAPTURE_REPLAY_BIT_KHR;
+ pipeline->base.base.create_flags & VK_PIPELINE_CREATE_2_RAY_TRACING_SHADER_GROUP_HANDLE_CAPTURE_REPLAY_BIT_KHR;
for (unsigned i = 0; i < pCreateInfo->groupCount; ++i) {
const VkRayTracingShaderGroupCreateInfoKHR *group_info = &pCreateInfo->pGroups[i];
switch (group_info->type) {
bool dump_shader = radv_can_dump_shader(device, shaders[0], false);
bool replayable =
- pipeline->base.base.create_flags & VK_PIPELINE_CREATE_RAY_TRACING_SHADER_GROUP_HANDLE_CAPTURE_REPLAY_BIT_KHR;
+ pipeline->base.base.create_flags & VK_PIPELINE_CREATE_2_RAY_TRACING_SHADER_GROUP_HANDLE_CAPTURE_REPLAY_BIT_KHR;
/* Compile NIR shader to AMD assembly. */
binary = radv_shader_nir_to_asm(device, stage, shaders, num_shaders, pipeline_key, keep_executable_info,
const struct radv_pipeline_key *key, struct radv_ray_tracing_pipeline *pipeline,
struct radv_serialized_shader_arena_block *capture_replay_handles)
{
- if (pipeline->base.base.create_flags & VK_PIPELINE_CREATE_FAIL_ON_PIPELINE_COMPILE_REQUIRED_BIT)
+ if (pipeline->base.base.create_flags & VK_PIPELINE_CREATE_2_FAIL_ON_PIPELINE_COMPILE_REQUIRED_BIT_KHR)
return VK_PIPELINE_COMPILE_REQUIRED;
VkResult result = VK_SUCCESS;
}
}
- if (pipeline->base.base.create_flags & VK_PIPELINE_CREATE_LIBRARY_BIT_KHR)
+ if (pipeline->base.base.create_flags & VK_PIPELINE_CREATE_2_LIBRARY_BIT_KHR)
return VK_SUCCESS;
/* create traversal shader */
return VK_ERROR_OUT_OF_HOST_MEMORY;
radv_pipeline_init(device, &pipeline->base.base, RADV_PIPELINE_RAY_TRACING);
- pipeline->base.base.create_flags = pCreateInfo->flags;
+ pipeline->base.base.create_flags = radv_get_pipeline_create_flags(pCreateInfo);
pipeline->stage_count = local_create_info.stageCount;
pipeline->group_count = local_create_info.groupCount;
pipeline->stages = stages;
goto fail;
}
- if (!(pipeline->base.base.create_flags & VK_PIPELINE_CREATE_LIBRARY_BIT_KHR)) {
+ if (!(pipeline->base.base.create_flags & VK_PIPELINE_CREATE_2_LIBRARY_BIT_KHR)) {
compute_rt_stack_size(pCreateInfo, pipeline);
compile_rt_prolog(device, pipeline);
result = r;
pPipelines[i] = VK_NULL_HANDLE;
- if (pCreateInfos[i].flags & VK_PIPELINE_CREATE_EARLY_RETURN_ON_FAILURE_BIT)
+ const VkPipelineCreateFlagBits2KHR create_flags = radv_get_pipeline_create_flags(&pCreateInfos[i]);
+ if (create_flags & VK_PIPELINE_CREATE_2_EARLY_RETURN_ON_FAILURE_BIT_KHR)
break;
}
}
void radv_compute_dispatch(struct radv_cmd_buffer *cmd_buffer, const struct radv_dispatch_info *info);
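+/* Return the 64-bit pipeline create flags: if a VkPipelineCreateFlags2CreateInfoKHR
+ * structure is chained in pNext, its flags take precedence, otherwise the legacy
+ * 32-bit flags field of the create info is returned.
+ */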
+static VkPipelineCreateFlagBits2KHR
+radv_get_pipeline_create_flags(const void *pCreateInfo)
+{
+ const VkBaseInStructure *base = pCreateInfo;
+ const VkPipelineCreateFlags2CreateInfoKHR *flags2 =
+ vk_find_struct_const(base->pNext, PIPELINE_CREATE_FLAGS_2_CREATE_INFO_KHR);
+
+ if (flags2)
+ return flags2->flags;
+
+ switch (base->sType) {
+ case VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO: {
+ const VkGraphicsPipelineCreateInfo *create_info = (VkGraphicsPipelineCreateInfo *)pCreateInfo;
+ return create_info->flags;
+ }
+ case VK_STRUCTURE_TYPE_COMPUTE_PIPELINE_CREATE_INFO: {
+ const VkComputePipelineCreateInfo *create_info = (VkComputePipelineCreateInfo *)pCreateInfo;
+ return create_info->flags;
+ }
+ case VK_STRUCTURE_TYPE_RAY_TRACING_PIPELINE_CREATE_INFO_KHR: {
+ const VkRayTracingPipelineCreateInfoKHR *create_info = (VkRayTracingPipelineCreateInfoKHR *)pCreateInfo;
+ return create_info->flags;
+ }
+ default:
+ unreachable("invalid pCreateInfo pipeline struct");
+ }
+
+ return 0;
+}
+
struct radv_image;
struct radv_image_view;
struct vk_object_base base;
enum radv_pipeline_type type;
- VkPipelineCreateFlags create_flags;
+ VkPipelineCreateFlags2KHR create_flags;
struct vk_pipeline_cache_object *cache_object;
struct radv_pipeline_key radv_generate_pipeline_key(const struct radv_device *device,
const VkPipelineShaderStageCreateInfo *stages,
- const unsigned num_stages, VkPipelineCreateFlags flags,
+ const unsigned num_stages, VkPipelineCreateFlags2KHR flags,
const void *pNext);
void radv_pipeline_init(struct radv_device *device, struct radv_pipeline *pipeline, enum radv_pipeline_type type);
const VkComputePipelineCreateInfo *pCreateInfo,
const VkAllocationCallbacks *pAllocator, VkPipeline *pPipeline);
-bool radv_pipeline_capture_shaders(const struct radv_device *device, VkPipelineCreateFlags flags);
-bool radv_pipeline_capture_shader_stats(const struct radv_device *device, VkPipelineCreateFlags flags);
+bool radv_pipeline_capture_shaders(const struct radv_device *device, VkPipelineCreateFlags2KHR flags);
+bool radv_pipeline_capture_shader_stats(const struct radv_device *device, VkPipelineCreateFlags2KHR flags);
VkPipelineShaderStageCreateInfo *radv_copy_shader_stage_create_info(struct radv_device *device, uint32_t stageCount,
const VkPipelineShaderStageCreateInfo *pStages,
* Global variables for an RT pipeline
*/
struct rt_variables {
- const VkPipelineCreateFlags flags;
+ const VkPipelineCreateFlags2KHR flags;
/* idx of the next shader to run in the next iteration of the main loop.
* During traversal, idx is used to store the SBT index and will contain
};
static struct rt_variables
-create_rt_variables(nir_shader *shader, const VkPipelineCreateFlags flags)
+create_rt_variables(nir_shader *shader, const VkPipelineCreateFlags2KHR flags)
{
struct rt_variables vars = {
.flags = flags,
nir_ssa_def *should_return = nir_test_mask(&b_shader, nir_load_var(&b_shader, vars->cull_mask_and_flags),
SpvRayFlagsSkipClosestHitShaderKHRMask);
- if (!(vars->flags & VK_PIPELINE_CREATE_RAY_TRACING_NO_NULL_CLOSEST_HIT_SHADERS_BIT_KHR)) {
+ if (!(vars->flags & VK_PIPELINE_CREATE_2_RAY_TRACING_NO_NULL_CLOSEST_HIT_SHADERS_BIT_KHR)) {
should_return = nir_ior(&b_shader, should_return,
nir_ieq_imm(&b_shader, nir_load_var(&b_shader, vars->shader_va), 0));
}
nir_ssa_def *miss_index = nir_load_var(&b_shader, vars->miss_index);
load_sbt_entry(&b_shader, vars, miss_index, SBT_MISS, SBT_RECURSIVE_PTR);
- if (!(vars->flags & VK_PIPELINE_CREATE_RAY_TRACING_NO_NULL_MISS_SHADERS_BIT_KHR)) {
+ if (!(vars->flags & VK_PIPELINE_CREATE_2_RAY_TRACING_NO_NULL_MISS_SHADERS_BIT_KHR)) {
/* In case of a NULL miss shader, do nothing and just return. */
nir_push_if(&b_shader, nir_ieq_imm(&b_shader, nir_load_var(&b_shader, vars->shader_va), 0));
insert_rt_return(&b_shader, vars);
{
nir_ssa_def *sbt_idx = nir_load_var(b, vars->idx);
- if (!(vars->flags & VK_PIPELINE_CREATE_RAY_TRACING_NO_NULL_ANY_HIT_SHADERS_BIT_KHR))
+ if (!(vars->flags & VK_PIPELINE_CREATE_2_RAY_TRACING_NO_NULL_ANY_HIT_SHADERS_BIT_KHR))
nir_push_if(b, nir_ine_imm(b, sbt_idx, 0));
for (unsigned i = 0; i < data->pipeline->group_count; ++i) {
ralloc_free(nir_stage);
}
- if (!(vars->flags & VK_PIPELINE_CREATE_RAY_TRACING_NO_NULL_ANY_HIT_SHADERS_BIT_KHR))
+ if (!(vars->flags & VK_PIPELINE_CREATE_2_RAY_TRACING_NO_NULL_ANY_HIT_SHADERS_BIT_KHR))
nir_pop_if(b, NULL);
}
nir_store_var(b, data->vars->ahit_accept, nir_imm_false(b), 0x1);
nir_store_var(b, data->vars->ahit_terminate, nir_imm_false(b), 0x1);
- if (!(data->vars->flags & VK_PIPELINE_CREATE_RAY_TRACING_NO_NULL_INTERSECTION_SHADERS_BIT_KHR))
+ if (!(data->vars->flags & VK_PIPELINE_CREATE_2_RAY_TRACING_NO_NULL_INTERSECTION_SHADERS_BIT_KHR))
nir_push_if(b, nir_ine_imm(b, nir_load_var(b, inner_vars.idx), 0));
for (unsigned i = 0; i < data->pipeline->group_count; ++i) {
ralloc_free(nir_stage);
}
- if (!(data->vars->flags & VK_PIPELINE_CREATE_RAY_TRACING_NO_NULL_INTERSECTION_SHADERS_BIT_KHR))
+ if (!(data->vars->flags & VK_PIPELINE_CREATE_2_RAY_TRACING_NO_NULL_INTERSECTION_SHADERS_BIT_KHR))
nir_pop_if(b, NULL);
nir_push_if(b, nir_load_var(b, data->vars->ahit_accept));
radv_build_traversal_shader(struct radv_device *device, struct radv_ray_tracing_pipeline *pipeline,
const VkRayTracingPipelineCreateInfoKHR *pCreateInfo, const struct radv_pipeline_key *key)
{
+ const VkPipelineCreateFlagBits2KHR create_flags = radv_get_pipeline_create_flags(pCreateInfo);
+
/* Create the traversal shader as an intersection shader to prevent validation failures due to
* invalid variable modes.*/
nir_builder b = radv_meta_init_shader(device, MESA_SHADER_INTERSECTION, "rt_traversal");
b.shader->info.workgroup_size[0] = 8;
b.shader->info.workgroup_size[1] = device->physical_device->rt_wave_size == 64 ? 8 : 4;
b.shader->info.shared_size = device->physical_device->rt_wave_size * MAX_STACK_ENTRY_COUNT * sizeof(uint32_t);
- struct rt_variables vars = create_rt_variables(b.shader, pCreateInfo->flags);
+ struct rt_variables vars = create_rt_variables(b.shader, create_flags);
/* Register storage for hit attributes */
nir_variable *hit_attribs[RADV_MAX_HIT_ATTRIB_SIZE / sizeof(uint32_t)];
.stack_base = 0,
.stack_store_cb = store_stack_entry,
.stack_load_cb = load_stack_entry,
- .aabb_cb = (pipeline->base.base.create_flags & VK_PIPELINE_CREATE_RAY_TRACING_SKIP_AABBS_BIT_KHR)
+ .aabb_cb = (pipeline->base.base.create_flags & VK_PIPELINE_CREATE_2_RAY_TRACING_SKIP_AABBS_BIT_KHR)
? NULL
: handle_candidate_aabb,
- .triangle_cb = (pipeline->base.base.create_flags & VK_PIPELINE_CREATE_RAY_TRACING_SKIP_TRIANGLES_BIT_KHR)
+ .triangle_cb = (pipeline->base.base.create_flags & VK_PIPELINE_CREATE_2_RAY_TRACING_SKIP_TRIANGLES_BIT_KHR)
? NULL
: handle_candidate_triangle,
.data = &data,
{
nir_function_impl *impl = nir_shader_get_entrypoint(shader);
- struct rt_variables vars = create_rt_variables(shader, pCreateInfo->flags);
+ const VkPipelineCreateFlagBits2KHR create_flags = radv_get_pipeline_create_flags(pCreateInfo);
+
+ struct rt_variables vars = create_rt_variables(shader, create_flags);
lower_rt_instructions(shader, &vars, 0);
if (stack_size) {