struct radv_pipeline_cache_object *pipeline_obj = container_of(object, struct radv_pipeline_cache_object, base);
- bool is_library = pCreateInfo->flags & VK_PIPELINE_CREATE_LIBRARY_BIT_KHR;
+ bool is_library = pipeline->base.base.create_flags & VK_PIPELINE_CREATE_LIBRARY_BIT_KHR;
bool complete = true;
unsigned idx = 0;
}
static struct radv_pipeline_key
-radv_generate_compute_pipeline_key(const struct radv_device *device, const VkComputePipelineCreateInfo *pCreateInfo)
+radv_generate_compute_pipeline_key(const struct radv_device *device, const struct radv_compute_pipeline *pipeline,
+ const VkComputePipelineCreateInfo *pCreateInfo)
{
- return radv_generate_pipeline_key(device, &pCreateInfo->stage, 1, pCreateInfo->flags, pCreateInfo->pNext);
+ return radv_generate_pipeline_key(device, &pCreateInfo->stage, 1, pipeline->base.create_flags, pCreateInfo->pNext);
}
VkResult
radv_compute_pipeline_compile(struct radv_compute_pipeline *pipeline, struct radv_pipeline_layout *pipeline_layout,
struct radv_device *device, struct vk_pipeline_cache *cache,
const struct radv_pipeline_key *pipeline_key,
- const VkPipelineShaderStageCreateInfo *pStage, const VkPipelineCreateFlags flags,
+ const VkPipelineShaderStageCreateInfo *pStage,
const VkPipelineCreationFeedbackCreateInfo *creation_feedback)
{
struct radv_shader_binary *cs_binary = NULL;
unsigned char hash[20];
- bool keep_executable_info = radv_pipeline_capture_shaders(device, flags);
- bool keep_statistic_info = radv_pipeline_capture_shader_stats(device, flags);
+ bool keep_executable_info = radv_pipeline_capture_shaders(device, pipeline->base.create_flags);
+ bool keep_statistic_info = radv_pipeline_capture_shader_stats(device, pipeline->base.create_flags);
struct radv_shader_stage cs_stage = {0};
VkPipelineCreationFeedback pipeline_feedback = {
.flags = VK_PIPELINE_CREATION_FEEDBACK_VALID_BIT,
goto done;
}
- if (flags & VK_PIPELINE_CREATE_FAIL_ON_PIPELINE_COMPILE_REQUIRED_BIT)
+ if (pipeline->base.create_flags & VK_PIPELINE_CREATE_FAIL_ON_PIPELINE_COMPILE_REQUIRED_BIT)
return VK_PIPELINE_COMPILE_REQUIRED;
int64_t stage_start = os_time_get_nano();
}
radv_pipeline_init(device, &pipeline->base, RADV_PIPELINE_COMPUTE);
+ pipeline->base.create_flags = pCreateInfo->flags;
pipeline->base.is_internal = _cache == device->meta_state.cache;
const VkPipelineCreationFeedbackCreateInfo *creation_feedback =
vk_find_struct_const(pCreateInfo->pNext, PIPELINE_CREATION_FEEDBACK_CREATE_INFO);
- struct radv_pipeline_key key = radv_generate_compute_pipeline_key(device, pCreateInfo);
+ struct radv_pipeline_key key = radv_generate_compute_pipeline_key(device, pipeline, pCreateInfo);
result = radv_compute_pipeline_compile(pipeline, pipeline_layout, device, cache, &key, &pCreateInfo->stage,
- pCreateInfo->flags, creation_feedback);
+ creation_feedback);
if (result != VK_SUCCESS) {
radv_pipeline_destroy(device, &pipeline->base, pAllocator);
return result;
}
static bool
-radv_pipeline_uses_vrs_attachment(const VkGraphicsPipelineCreateInfo *pCreateInfo,
+radv_pipeline_uses_vrs_attachment(const struct radv_graphics_pipeline *pipeline,
const struct vk_graphics_pipeline_state *state)
{
VK_FROM_HANDLE(vk_render_pass, render_pass, state->rp->render_pass);
return !!subpass->fragment_shading_rate_attachment;
}
- return (pCreateInfo->flags & VK_PIPELINE_CREATE_RENDERING_FRAGMENT_SHADING_RATE_ATTACHMENT_BIT_KHR) != 0;
+ return (pipeline->base.create_flags & VK_PIPELINE_CREATE_RENDERING_FRAGMENT_SHADING_RATE_ATTACHMENT_BIT_KHR) != 0;
}
static void
*
* In that case discard whatever was imported before.
*/
- if (pCreateInfo->flags & VK_PIPELINE_CREATE_LINK_TIME_OPTIMIZATION_BIT_EXT &&
+ if (pipeline->base.create_flags & VK_PIPELINE_CREATE_LINK_TIME_OPTIMIZATION_BIT_EXT &&
!pipeline_layout->independent_sets) {
radv_pipeline_layout_finish(device, layout);
radv_pipeline_layout_init(device, layout, false /* independent_sets */);
}
static bool
-radv_pipeline_uses_ds_feedback_loop(const VkGraphicsPipelineCreateInfo *pCreateInfo,
+radv_pipeline_uses_ds_feedback_loop(const struct radv_graphics_pipeline *pipeline,
const struct vk_graphics_pipeline_state *state)
{
VK_FROM_HANDLE(vk_render_pass, render_pass, state->rp->render_pass);
}
}
- return (pCreateInfo->flags & VK_PIPELINE_CREATE_DEPTH_STENCIL_ATTACHMENT_FEEDBACK_LOOP_BIT_EXT) != 0;
+ return (pipeline->base.create_flags & VK_PIPELINE_CREATE_DEPTH_STENCIL_ATTACHMENT_FEEDBACK_LOOP_BIT_EXT) != 0;
}
static void
}
if (states & RADV_DYNAMIC_ATTACHMENT_FEEDBACK_LOOP_ENABLE) {
- bool uses_ds_feedback_loop = radv_pipeline_uses_ds_feedback_loop(pCreateInfo, state);
+ bool uses_ds_feedback_loop = radv_pipeline_uses_ds_feedback_loop(pipeline, state);
dynamic->feedback_loop_aspects =
uses_ds_feedback_loop ? (VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT) : VK_IMAGE_ASPECT_NONE;
{
const struct radv_physical_device *pdevice = device->physical_device;
struct radv_pipeline_key key = radv_generate_pipeline_key(device, pCreateInfo->pStages, pCreateInfo->stageCount,
- pCreateInfo->flags, pCreateInfo->pNext);
+ pipeline->base.create_flags, pCreateInfo->pNext);
key.lib_flags = lib_flags;
key.has_multiview_view_index = state->rp ? !!state->rp->view_mask : 0;
{
const VkPipelineLibraryCreateInfoKHR *libs_info =
vk_find_struct_const(pCreateInfo->pNext, PIPELINE_LIBRARY_CREATE_INFO_KHR);
- const bool link_optimize = (pCreateInfo->flags & VK_PIPELINE_CREATE_LINK_TIME_OPTIMIZATION_BIT_EXT) != 0;
+ const bool link_optimize = (pipeline->base.create_flags & VK_PIPELINE_CREATE_LINK_TIME_OPTIMIZATION_BIT_EXT) != 0;
/* Nothing to load if no libs are imported. */
if (!libs_info)
struct radv_shader_binary *gs_copy_binary = NULL;
struct radv_shader_part_binary *ps_epilog_binary = NULL;
unsigned char hash[20];
- bool keep_executable_info = radv_pipeline_capture_shaders(device, pCreateInfo->flags);
- bool keep_statistic_info = radv_pipeline_capture_shader_stats(device, pCreateInfo->flags);
+ bool keep_executable_info = radv_pipeline_capture_shaders(device, pipeline->base.create_flags);
+ bool keep_statistic_info = radv_pipeline_capture_shader_stats(device, pipeline->base.create_flags);
struct radv_shader_stage stages[MESA_VULKAN_SHADER_STAGES];
const VkPipelineCreationFeedbackCreateInfo *creation_feedback =
vk_find_struct_const(pCreateInfo->pNext, PIPELINE_CREATION_FEEDBACK_CREATE_INFO);
};
bool skip_shaders_cache = false;
VkResult result = VK_SUCCESS;
- const bool retain_shaders = !!(pCreateInfo->flags & VK_PIPELINE_CREATE_RETAIN_LINK_TIME_OPTIMIZATION_INFO_BIT_EXT);
+ const bool retain_shaders =
+ !!(pipeline->base.create_flags & VK_PIPELINE_CREATE_RETAIN_LINK_TIME_OPTIMIZATION_INFO_BIT_EXT);
struct radv_retained_shaders *retained_shaders = NULL;
int64_t pipeline_start = os_time_get_nano();
*/
if (fast_linking_enabled || keep_executable_info) {
skip_shaders_cache = true;
- } else if ((pCreateInfo->flags & VK_PIPELINE_CREATE_LIBRARY_BIT_KHR) && retain_shaders) {
+ } else if ((pipeline->base.create_flags & VK_PIPELINE_CREATE_LIBRARY_BIT_KHR) && retain_shaders) {
for (uint32_t i = 0; i < MESA_VULKAN_SHADER_STAGES; i++) {
if (stages[i].entrypoint && !stages[i].spirv.size) {
skip_shaders_cache = true;
goto done;
}
- if (pCreateInfo->flags & VK_PIPELINE_CREATE_FAIL_ON_PIPELINE_COMPILE_REQUIRED_BIT)
+ if (pipeline->base.create_flags & VK_PIPELINE_CREATE_FAIL_ON_PIPELINE_COMPILE_REQUIRED_BIT)
return VK_PIPELINE_COMPILE_REQUIRED;
if (retain_shaders) {
}
static bool
-radv_is_fast_linking_enabled(const VkGraphicsPipelineCreateInfo *pCreateInfo)
+radv_is_fast_linking_enabled(const struct radv_graphics_pipeline *pipeline,
+ const VkGraphicsPipelineCreateInfo *pCreateInfo)
{
const VkPipelineLibraryCreateInfoKHR *libs_info =
vk_find_struct_const(pCreateInfo->pNext, PIPELINE_LIBRARY_CREATE_INFO_KHR);
if (!libs_info)
return false;
- return !(pCreateInfo->flags & VK_PIPELINE_CREATE_LINK_TIME_OPTIMIZATION_BIT_EXT);
+ return !(pipeline->base.create_flags & VK_PIPELINE_CREATE_LINK_TIME_OPTIMIZATION_BIT_EXT);
}
bool
const struct radv_graphics_pipeline_create_info *extra)
{
VkGraphicsPipelineLibraryFlagBitsEXT needed_lib_flags = ALL_GRAPHICS_LIB_FLAGS;
- bool fast_linking_enabled = radv_is_fast_linking_enabled(pCreateInfo);
+ bool fast_linking_enabled = radv_is_fast_linking_enabled(pipeline, pCreateInfo);
struct radv_pipeline_layout pipeline_layout;
struct vk_graphics_pipeline_state state = {0};
VkResult result = VK_SUCCESS;
/* If we have libraries, import them first. */
if (libs_info) {
- const bool link_optimize = (pCreateInfo->flags & VK_PIPELINE_CREATE_LINK_TIME_OPTIMIZATION_BIT_EXT) != 0;
+ const bool link_optimize = (pipeline->base.create_flags & VK_PIPELINE_CREATE_LINK_TIME_OPTIMIZATION_BIT_EXT) != 0;
for (uint32_t i = 0; i < libs_info->libraryCount; i++) {
RADV_FROM_HANDLE(radv_pipeline, pipeline_lib, libs_info->pLibraries[i]);
pipeline->force_vrs_per_vertex = pipeline->base.shaders[pipeline->last_vgt_api_stage]->info.force_vrs_per_vertex;
pipeline->rast_prim = vgt_gs_out_prim_type;
pipeline->uses_out_of_order_rast = state.rs->rasterization_order_amd == VK_RASTERIZATION_ORDER_RELAXED_AMD;
- pipeline->uses_vrs_attachment = radv_pipeline_uses_vrs_attachment(pCreateInfo, &state);
+ pipeline->uses_vrs_attachment = radv_pipeline_uses_vrs_attachment(pipeline, &state);
pipeline->base.push_constant_size = pipeline_layout.push_constant_size;
pipeline->base.dynamic_offset_count = pipeline_layout.dynamic_offset_count;
return vk_error(device, VK_ERROR_OUT_OF_HOST_MEMORY);
radv_pipeline_init(device, &pipeline->base, RADV_PIPELINE_GRAPHICS);
+ pipeline->base.create_flags = pCreateInfo->flags;
pipeline->base.is_internal = _cache == device->meta_state.cache;
result = radv_graphics_pipeline_init(pipeline, device, cache, pCreateInfo, extra);
VkGraphicsPipelineLibraryFlagBitsEXT needed_lib_flags = lib_info ? lib_info->flags : 0;
const VkPipelineLibraryCreateInfoKHR *libs_info =
vk_find_struct_const(pCreateInfo->pNext, PIPELINE_LIBRARY_CREATE_INFO_KHR);
- bool fast_linking_enabled = radv_is_fast_linking_enabled(pCreateInfo);
+ bool fast_linking_enabled = radv_is_fast_linking_enabled(&pipeline->base, pCreateInfo);
struct vk_graphics_pipeline_state *state = &pipeline->graphics_state;
struct radv_pipeline_layout *pipeline_layout = &pipeline->layout;
pipeline->base.last_vgt_api_stage = MESA_SHADER_NONE;
pipeline->base.retain_shaders =
- (pCreateInfo->flags & VK_PIPELINE_CREATE_RETAIN_LINK_TIME_OPTIMIZATION_INFO_BIT_EXT) != 0;
+ (pipeline->base.base.create_flags & VK_PIPELINE_CREATE_RETAIN_LINK_TIME_OPTIMIZATION_INFO_BIT_EXT) != 0;
pipeline->lib_flags = needed_lib_flags;
radv_pipeline_layout_init(device, pipeline_layout, false);
/* If we have libraries, import them first. */
if (libs_info) {
- const bool link_optimize = (pCreateInfo->flags & VK_PIPELINE_CREATE_LINK_TIME_OPTIMIZATION_BIT_EXT) != 0;
+ const bool link_optimize =
+ (pipeline->base.base.create_flags & VK_PIPELINE_CREATE_LINK_TIME_OPTIMIZATION_BIT_EXT) != 0;
for (uint32_t i = 0; i < libs_info->libraryCount; i++) {
RADV_FROM_HANDLE(radv_pipeline, pipeline_lib, libs_info->pLibraries[i]);
return vk_error(device, VK_ERROR_OUT_OF_HOST_MEMORY);
radv_pipeline_init(device, &pipeline->base.base, RADV_PIPELINE_GRAPHICS_LIB);
+ pipeline->base.base.create_flags = pCreateInfo->flags;
pipeline->mem_ctx = ralloc_context(NULL);
}
static struct radv_pipeline_key
-radv_generate_rt_pipeline_key(const struct radv_device *device, const VkRayTracingPipelineCreateInfoKHR *pCreateInfo)
+radv_generate_rt_pipeline_key(const struct radv_device *device, const struct radv_ray_tracing_pipeline *pipeline,
+ const VkRayTracingPipelineCreateInfoKHR *pCreateInfo)
{
struct radv_pipeline_key key = radv_generate_pipeline_key(device, pCreateInfo->pStages, pCreateInfo->stageCount,
- pCreateInfo->flags, pCreateInfo->pNext);
+ pipeline->base.base.create_flags, pCreateInfo->pNext);
if (pCreateInfo->pLibraryInfo) {
for (unsigned i = 0; i < pCreateInfo->pLibraryInfo->libraryCount; ++i) {
- RADV_FROM_HANDLE(radv_pipeline, pipeline, pCreateInfo->pLibraryInfo->pLibraries[i]);
- struct radv_ray_tracing_pipeline *library_pipeline = radv_pipeline_to_ray_tracing(pipeline);
+ RADV_FROM_HANDLE(radv_pipeline, pipeline_lib, pCreateInfo->pLibraryInfo->pLibraries[i]);
+ struct radv_ray_tracing_pipeline *library_pipeline = radv_pipeline_to_ray_tracing(pipeline_lib);
/* apply shader robustness from merged shaders */
if (library_pipeline->traversal_storage_robustness2)
key.stage_info[MESA_SHADER_INTERSECTION].storage_robustness2 = true;
}
static VkResult
-radv_create_group_handles(struct radv_device *device, const VkRayTracingPipelineCreateInfoKHR *pCreateInfo,
- struct radv_ray_tracing_stage *stages, struct radv_ray_tracing_group *groups)
+radv_create_group_handles(struct radv_device *device, const struct radv_ray_tracing_pipeline *pipeline,
+ const VkRayTracingPipelineCreateInfoKHR *pCreateInfo, struct radv_ray_tracing_stage *stages,
+ struct radv_ray_tracing_group *groups)
{
- bool capture_replay = pCreateInfo->flags & VK_PIPELINE_CREATE_RAY_TRACING_SHADER_GROUP_HANDLE_CAPTURE_REPLAY_BIT_KHR;
+ bool capture_replay =
+ pipeline->base.base.create_flags & VK_PIPELINE_CREATE_RAY_TRACING_SHADER_GROUP_HANDLE_CAPTURE_REPLAY_BIT_KHR;
for (unsigned i = 0; i < pCreateInfo->groupCount; ++i) {
const VkRayTracingShaderGroupCreateInfoKHR *group_info = &pCreateInfo->pGroups[i];
switch (group_info->type) {
}
static VkResult
-radv_rt_fill_group_info(struct radv_device *device, const VkRayTracingPipelineCreateInfoKHR *pCreateInfo,
- struct radv_ray_tracing_stage *stages,
+radv_rt_fill_group_info(struct radv_device *device, const struct radv_ray_tracing_pipeline *pipeline,
+ const VkRayTracingPipelineCreateInfoKHR *pCreateInfo, struct radv_ray_tracing_stage *stages,
struct radv_serialized_shader_arena_block *capture_replay_blocks,
struct radv_ray_tracing_group *groups)
{
- VkResult result = radv_create_group_handles(device, pCreateInfo, stages, groups);
+ VkResult result = radv_create_group_handles(device, pipeline, pCreateInfo, stages, groups);
uint32_t idx;
for (idx = 0; idx < pCreateInfo->groupCount; idx++) {
if (pCreateInfo->pLibraryInfo) {
unsigned stage_count = pCreateInfo->stageCount;
for (unsigned i = 0; i < pCreateInfo->pLibraryInfo->libraryCount; ++i) {
- RADV_FROM_HANDLE(radv_pipeline, pipeline, pCreateInfo->pLibraryInfo->pLibraries[i]);
- struct radv_ray_tracing_pipeline *library_pipeline = radv_pipeline_to_ray_tracing(pipeline);
+ RADV_FROM_HANDLE(radv_pipeline, pipeline_lib, pCreateInfo->pLibraryInfo->pLibraries[i]);
+ struct radv_ray_tracing_pipeline *library_pipeline = radv_pipeline_to_ray_tracing(pipeline_lib);
for (unsigned j = 0; j < library_pipeline->group_count; ++j) {
struct radv_ray_tracing_group *dst = &groups[idx + j];
static VkResult
radv_rt_nir_to_asm(struct radv_device *device, struct vk_pipeline_cache *cache,
const VkRayTracingPipelineCreateInfoKHR *pCreateInfo, const struct radv_pipeline_key *pipeline_key,
- struct radv_shader_stage *stage, uint32_t *stack_size,
- struct radv_serialized_shader_arena_block *replay_block, struct radv_shader **out_shader)
+ const struct radv_ray_tracing_pipeline *pipeline, struct radv_shader_stage *stage,
+ uint32_t *stack_size, struct radv_serialized_shader_arena_block *replay_block,
+ struct radv_shader **out_shader)
{
struct radv_shader_binary *binary;
RADV_FROM_HANDLE(radv_pipeline_layout, pipeline_layout, pCreateInfo->layout);
- bool keep_executable_info = radv_pipeline_capture_shaders(device, pCreateInfo->flags);
- bool keep_statistic_info = radv_pipeline_capture_shader_stats(device, pCreateInfo->flags);
+ bool keep_executable_info = radv_pipeline_capture_shaders(device, pipeline->base.base.create_flags);
+ bool keep_statistic_info = radv_pipeline_capture_shader_stats(device, pipeline->base.base.create_flags);
/* Gather shader info. */
nir_shader_gather_info(stage->nir, nir_shader_get_entrypoint(stage->nir));
}
bool dump_shader = radv_can_dump_shader(device, shaders[0], false);
- bool replayable = pCreateInfo->flags & VK_PIPELINE_CREATE_RAY_TRACING_SHADER_GROUP_HANDLE_CAPTURE_REPLAY_BIT_KHR;
+ bool replayable =
+ pipeline->base.base.create_flags & VK_PIPELINE_CREATE_RAY_TRACING_SHADER_GROUP_HANDLE_CAPTURE_REPLAY_BIT_KHR;
/* Compile NIR shader to AMD assembly. */
binary = radv_shader_nir_to_asm(device, stage, shaders, num_shaders, pipeline_key, keep_executable_info,
const struct radv_pipeline_key *key, struct radv_ray_tracing_pipeline *pipeline,
struct radv_serialized_shader_arena_block *capture_replay_handles)
{
- if (pCreateInfo->flags & VK_PIPELINE_CREATE_FAIL_ON_PIPELINE_COMPILE_REQUIRED_BIT)
+ if (pipeline->base.base.create_flags & VK_PIPELINE_CREATE_FAIL_ON_PIPELINE_COMPILE_REQUIRED_BIT)
return VK_PIPELINE_COMPILE_REQUIRED;
VkResult result = VK_SUCCESS;
capture_replay_handles[idx].arena_va ? &capture_replay_handles[idx] : NULL;
struct radv_shader *shader;
- result = radv_rt_nir_to_asm(device, cache, pCreateInfo, key, &stage, &stack_size, replay_block, &shader);
+ result =
+ radv_rt_nir_to_asm(device, cache, pCreateInfo, key, pipeline, &stage, &stack_size, replay_block, &shader);
stages[idx].stack_size = stack_size;
stages[idx].shader = shader ? &shader->base : NULL;
} else {
}
}
- if (pCreateInfo->flags & VK_PIPELINE_CREATE_LIBRARY_BIT_KHR)
+ if (pipeline->base.base.create_flags & VK_PIPELINE_CREATE_LIBRARY_BIT_KHR)
return VK_SUCCESS;
/* create traversal shader */
.nir = traversal_module.nir,
};
vk_pipeline_hash_shader_stage(&pStage, NULL, traversal_stage.shader_sha1);
- result = radv_rt_nir_to_asm(device, cache, pCreateInfo, key, &traversal_stage, NULL, NULL,
+ result = radv_rt_nir_to_asm(device, cache, pCreateInfo, key, pipeline, &traversal_stage, NULL, NULL,
&pipeline->base.base.shaders[MESA_SHADER_INTERSECTION]);
return result;
VK_FROM_HANDLE(vk_pipeline_cache, cache, _cache);
RADV_FROM_HANDLE(radv_pipeline_layout, pipeline_layout, pCreateInfo->layout);
VkResult result;
- bool keep_statistic_info = radv_pipeline_capture_shader_stats(device, pCreateInfo->flags);
- bool keep_executable_info = radv_pipeline_capture_shaders(device, pCreateInfo->flags);
const VkPipelineCreationFeedbackCreateInfo *creation_feedback =
vk_find_struct_const(pCreateInfo->pNext, PIPELINE_CREATION_FEEDBACK_CREATE_INFO);
if (creation_feedback)
return VK_ERROR_OUT_OF_HOST_MEMORY;
radv_pipeline_init(device, &pipeline->base.base, RADV_PIPELINE_RAY_TRACING);
+ pipeline->base.base.create_flags = pCreateInfo->flags;
pipeline->stage_count = local_create_info.stageCount;
pipeline->group_count = local_create_info.groupCount;
pipeline->stages = stages;
pipeline->groups = groups;
- struct radv_pipeline_key key = radv_generate_rt_pipeline_key(device, pCreateInfo);
+ struct radv_pipeline_key key = radv_generate_rt_pipeline_key(device, pipeline, pCreateInfo);
/* cache robustness state for making merged shaders */
if (key.stage_info[MESA_SHADER_INTERSECTION].storage_robustness2)
pipeline->traversal_uniform_robustness2 = true;
radv_rt_fill_stage_info(device, pCreateInfo, stages, &key);
- result = radv_rt_fill_group_info(device, pCreateInfo, stages, capture_replay_blocks, pipeline->groups);
+ result = radv_rt_fill_group_info(device, pipeline, pCreateInfo, stages, capture_replay_blocks, pipeline->groups);
if (result != VK_SUCCESS)
goto fail;
+ bool keep_statistic_info = radv_pipeline_capture_shader_stats(device, pipeline->base.base.create_flags);
+ bool keep_executable_info = radv_pipeline_capture_shaders(device, pipeline->base.base.create_flags);
+
radv_hash_rt_shaders(pipeline->sha1, pCreateInfo, &key, pipeline->groups,
radv_get_hash_flags(device, keep_statistic_info));
pipeline->base.base.pipeline_hash = *(uint64_t *)pipeline->sha1;
goto fail;
}
- if (!(pCreateInfo->flags & VK_PIPELINE_CREATE_LIBRARY_BIT_KHR)) {
+ if (!(pipeline->base.base.create_flags & VK_PIPELINE_CREATE_LIBRARY_BIT_KHR)) {
compute_rt_stack_size(pCreateInfo, pipeline);
compile_rt_prolog(device, pipeline);
struct vk_object_base base;
enum radv_pipeline_type type;
+ VkPipelineCreateFlags create_flags;
+
struct vk_pipeline_cache_object *cache_object;
bool is_internal;
.stack_base = 0,
.stack_store_cb = store_stack_entry,
.stack_load_cb = load_stack_entry,
- .aabb_cb =
- (pCreateInfo->flags & VK_PIPELINE_CREATE_RAY_TRACING_SKIP_AABBS_BIT_KHR) ? NULL : handle_candidate_aabb,
- .triangle_cb = (pCreateInfo->flags & VK_PIPELINE_CREATE_RAY_TRACING_SKIP_TRIANGLES_BIT_KHR)
+ .aabb_cb = (pipeline->base.base.create_flags & VK_PIPELINE_CREATE_RAY_TRACING_SKIP_AABBS_BIT_KHR)
+ ? NULL
+ : handle_candidate_aabb,
+ .triangle_cb = (pipeline->base.base.create_flags & VK_PIPELINE_CREATE_RAY_TRACING_SKIP_TRIANGLES_BIT_KHR)
? NULL
: handle_candidate_triangle,
.data = &data,
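
The whole diff applies one pattern: the Vulkan create flags are captured once on the common radv_pipeline base right after radv_pipeline_init(), and every helper that previously took pCreateInfo->flags (or re-read them from the create info) now takes the pipeline object and reads its stored create_flags. A minimal sketch of that pattern, assuming hypothetical names (example_pipeline, example_pipeline_init, example_pipeline_compile) rather than the actual RADV helpers:

/* Illustrative sketch only -- not RADV code; it mirrors the
 * "store flags once at init, read them from the pipeline later" pattern. */
#include <stdbool.h>
#include <vulkan/vulkan.h>

struct example_pipeline {
   VkPipelineCreateFlags create_flags; /* captured once at creation time */
};

static void
example_pipeline_init(struct example_pipeline *pipeline, const VkComputePipelineCreateInfo *pCreateInfo)
{
   /* Store the create flags so helpers no longer need the create info. */
   pipeline->create_flags = pCreateInfo->flags;
}

static VkResult
example_pipeline_compile(const struct example_pipeline *pipeline)
{
   /* Read the flags from the pipeline instead of threading them as a parameter. */
   if (pipeline->create_flags & VK_PIPELINE_CREATE_FAIL_ON_PIPELINE_COMPILE_REQUIRED_BIT)
      return VK_PIPELINE_COMPILE_REQUIRED;
   return VK_SUCCESS;
}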