struct anv_device *device = blorp->driver_ctx;
struct anv_shader_bin *bin =
- anv_device_search_for_kernel(device, device->blorp_cache,
+ anv_device_search_for_kernel(device, device->internal_cache,
key, key_size, NULL);
if (!bin)
return false;
};
struct anv_shader_bin *bin =
- anv_device_upload_kernel(device, device->blorp_cache, stage,
+ anv_device_upload_kernel(device, device->internal_cache, stage,
key, key_size, kernel, kernel_size,
prog_data, prog_data_size,
NULL, 0, NULL, &bind_map);
return true;
}
-bool
+void
anv_device_init_blorp(struct anv_device *device)
{
- /* BLORP needs its own pipeline cache because, unlike the rest of ANV, it
- * won't work at all without the cache. It depends on it for shaders to
- * remain resident while it runs. Therefore, we need a special cache just
- * for BLORP that's forced to always be enabled.
- */
- struct vk_pipeline_cache_create_info pcc_info = {
- .force_enable = true,
- };
- device->blorp_cache =
- vk_pipeline_cache_create(&device->vk, &pcc_info, NULL);
- if (device->blorp_cache == NULL)
- return false;
-
-
const struct blorp_config config = {
.use_mesh_shading = device->physical->vk.supported_extensions.NV_mesh_shader,
};
default:
unreachable("Unknown hardware generation");
}
- return true;
}
void
anv_device_finish_blorp(struct anv_device *device)
{
-   vk_pipeline_cache_destroy(device->blorp_cache, NULL);
+   /* The dedicated blorp_cache is gone: BLORP shaders now live in the
+    * shared device->internal_cache, which is torn down with the device
+    * (see the destroy path), so only the blorp context itself is
+    * finished here.
+    */
blorp_finish(&device->blorp);
}
goto fail_trivial_batch_bo_and_scratch_pool;
}
- result = anv_device_init_rt_shaders(device);
- if (result != VK_SUCCESS)
+ /* Internal shaders need their own pipeline cache because, unlike the rest
+ * of ANV, they won't work at all without the cache. They depend on it for
+ * their shaders to remain resident while they run. Therefore, we need a
+ * special cache just for BLORP/RT that's forced to always be enabled.
+ */
+ pcc_info.force_enable = true;
+ device->internal_cache =
+ vk_pipeline_cache_create(&device->vk, &pcc_info, NULL);
+ if (device->internal_cache == NULL) {
+ result = vk_error(device, VK_ERROR_OUT_OF_HOST_MEMORY);
goto fail_default_pipeline_cache;
+ }
- if (!anv_device_init_blorp(device)) {
+ result = anv_device_init_rt_shaders(device);
+ if (result != VK_SUCCESS) {
result = vk_error(device, VK_ERROR_OUT_OF_HOST_MEMORY);
- goto fail_rt_shaders;
+ goto fail_internal_cache;
}
+ anv_device_init_blorp(device);
+
anv_device_init_border_colors(device);
anv_device_perf_init(device);
return VK_SUCCESS;
- fail_rt_shaders:
- anv_device_finish_rt_shaders(device);
+ fail_internal_cache:
+ vk_pipeline_cache_destroy(device->internal_cache, NULL);
fail_default_pipeline_cache:
vk_pipeline_cache_destroy(device->default_pipeline_cache, NULL);
fail_trivial_batch_bo_and_scratch_pool:
anv_device_finish_rt_shaders(device);
+ vk_pipeline_cache_destroy(device->internal_cache, NULL);
vk_pipeline_cache_destroy(device->default_pipeline_cache, NULL);
#ifdef HAVE_VALGRIND
},
};
device->rt_trampoline =
- anv_device_search_for_kernel(device, device->default_pipeline_cache,
+ anv_device_search_for_kernel(device, device->internal_cache,
&trampoline_key, sizeof(trampoline_key),
&cache_hit);
if (device->rt_trampoline == NULL) {
brw_compile_cs(device->physical->compiler, tmp_ctx, ¶ms);
device->rt_trampoline =
- anv_device_upload_kernel(device, device->default_pipeline_cache,
+ anv_device_upload_kernel(device, device->internal_cache,
MESA_SHADER_COMPUTE,
&trampoline_key, sizeof(trampoline_key),
tramp_data,
return vk_error(device, VK_ERROR_OUT_OF_HOST_MEMORY);
}
+ /* The cache already has a reference and it's not going anywhere so there
+ * is no need to hold a second reference.
+ */
+ anv_shader_bin_unref(device, device->rt_trampoline);
+
struct brw_rt_trivial_return {
char name[16];
struct brw_bs_prog_key key;
.name = "rt-trivial-ret",
};
device->rt_trivial_return =
- anv_device_search_for_kernel(device, device->default_pipeline_cache,
+ anv_device_search_for_kernel(device, device->internal_cache,
&return_key, sizeof(return_key),
&cache_hit);
if (device->rt_trivial_return == NULL) {
brw_compile_bs(device->physical->compiler, tmp_ctx, ¶ms);
device->rt_trivial_return =
- anv_device_upload_kernel(device, device->default_pipeline_cache,
+ anv_device_upload_kernel(device, device->internal_cache,
MESA_SHADER_CALLABLE,
&return_key, sizeof(return_key),
return_data, return_prog_data.base.program_size,
ralloc_free(tmp_ctx);
- if (device->rt_trivial_return == NULL) {
- anv_shader_bin_unref(device, device->rt_trampoline);
+ if (device->rt_trivial_return == NULL)
return vk_error(device, VK_ERROR_OUT_OF_HOST_MEMORY);
- }
}
+ /* The cache already has a reference and it's not going anywhere so there
+ * is no need to hold a second reference.
+ */
+ anv_shader_bin_unref(device, device->rt_trivial_return);
+
return VK_SUCCESS;
}
{
if (!device->vk.enabled_extensions.KHR_ray_tracing_pipeline)
return;
-
- anv_shader_bin_unref(device, device->rt_trampoline);
}
VkResult
struct anv_state null_surface_state;
struct vk_pipeline_cache * default_pipeline_cache;
- struct vk_pipeline_cache * blorp_cache;
+ struct vk_pipeline_cache * internal_cache;
struct blorp_context blorp;
struct anv_state border_colors;
return isl_mocs(&device->isl_dev, usage, bo && bo->is_external);
}
-bool anv_device_init_blorp(struct anv_device *device);
+void anv_device_init_blorp(struct anv_device *device);
void anv_device_finish_blorp(struct anv_device *device);
enum anv_bo_alloc_flags {