device->global_bo_map = global;
tu_init_clear_blit_shaders(device);
+ result = tu_init_empty_shaders(device);
+ if (result != VK_SUCCESS) {
+ vk_startup_errorf(device->instance, result, "empty shaders");
+ goto fail_empty_shaders;
+ }
+
global->predicate = 0;
global->vtx_stats_query_not_running = 1;
global->dbg_one = (uint32_t)-1;
fail_pipeline_cache:
tu_destroy_dynamic_rendering(device);
fail_dynamic_rendering:
+ tu_destroy_empty_shaders(device);
+fail_empty_shaders:
tu_destroy_clear_blit_shaders(device);
fail_global_bo_map:
tu_bo_finish(device, device->global_bo);
tu_destroy_clear_blit_shaders(device);
+ tu_destroy_empty_shaders(device);
+
tu_destroy_dynamic_rendering(device);
ir3_compiler_destroy(device->compiler);
struct ir3_shader *global_shaders[GLOBAL_SH_COUNT];
uint64_t global_shader_va[GLOBAL_SH_COUNT];
+ struct tu_shader *empty_tcs, *empty_tes, *empty_gs, *empty_fs;
+
uint32_t vsc_draw_strm_pitch;
uint32_t vsc_prim_strm_pitch;
BITSET_DECLARE(custom_border_color, TU_BORDER_COLOR_COUNT);
const VkPipelineCreationFeedbackCreateInfo *creation_feedback =
vk_find_struct_const(builder->create_info->pNext, PIPELINE_CREATION_FEEDBACK_CREATE_INFO);
- bool must_compile =
- builder->state & VK_GRAPHICS_PIPELINE_LIBRARY_FRAGMENT_SHADER_BIT_EXT;
+ bool must_compile = false;
for (uint32_t i = 0; i < builder->create_info->stageCount; i++) {
if (!(builder->active_stages & builder->create_info->pStages[i].stage))
continue;
for (gl_shader_stage stage = MESA_SHADER_VERTEX; stage < ARRAY_SIZE(nir);
stage = (gl_shader_stage) (stage + 1)) {
- if (stage_infos[stage] || nir[stage] ||
- (stage == MESA_SHADER_FRAGMENT &&
- (builder->state & VK_GRAPHICS_PIPELINE_LIBRARY_FRAGMENT_SHADER_BIT_EXT))) {
+ if (stage_infos[stage] || nir[stage]) {
bool shader_application_cache_hit;
shader_sha1[20] = (unsigned char) stage;
shaders[stage] =
stage_feedbacks[stage].duration += os_time_get_nano() - stage_start;
}
- if (!nir[MESA_SHADER_FRAGMENT] &&
- (builder->state & VK_GRAPHICS_PIPELINE_LIBRARY_FRAGMENT_SHADER_BIT_EXT)) {
- const nir_shader_compiler_options *nir_options =
- ir3_get_compiler_options(builder->device->compiler);
- nir_builder fs_b = nir_builder_init_simple_shader(MESA_SHADER_FRAGMENT,
- nir_options,
- "noop_fs");
- nir[MESA_SHADER_FRAGMENT] = fs_b.shader;
- }
-
if (executable_info) {
for (gl_shader_stage stage = MESA_SHADER_VERTEX;
stage < ARRAY_SIZE(nir);
*/
if (builder->state &
VK_GRAPHICS_PIPELINE_LIBRARY_PRE_RASTERIZATION_SHADERS_BIT_EXT) {
- for (gl_shader_stage stage = MESA_SHADER_TESS_CTRL; stage <= MESA_SHADER_GEOMETRY;
- stage = (gl_shader_stage) (stage + 1)) {
- if (!shaders[stage]) {
- result = tu_empty_shader_create(builder->device, &shaders[stage],
- stage);
- if (result != VK_SUCCESS)
- goto fail;
- }
+ if (!shaders[MESA_SHADER_TESS_CTRL]) {
+ shaders[MESA_SHADER_TESS_CTRL] = builder->device->empty_tcs;
+ vk_pipeline_cache_object_ref(&shaders[MESA_SHADER_TESS_CTRL]->base);
+ }
+ if (!shaders[MESA_SHADER_TESS_EVAL]) {
+ shaders[MESA_SHADER_TESS_EVAL] = builder->device->empty_tes;
+ vk_pipeline_cache_object_ref(&shaders[MESA_SHADER_TESS_EVAL]->base);
+ }
+ if (!shaders[MESA_SHADER_GEOMETRY]) {
+ shaders[MESA_SHADER_GEOMETRY] = builder->device->empty_gs;
+ vk_pipeline_cache_object_ref(&shaders[MESA_SHADER_GEOMETRY]->base);
+ }
+ }
+
+ if (builder->state &
+ VK_GRAPHICS_PIPELINE_LIBRARY_FRAGMENT_SHADER_BIT_EXT) {
+ if (!shaders[MESA_SHADER_FRAGMENT]) {
+ shaders[MESA_SHADER_FRAGMENT] = builder->device->empty_fs;
+ vk_pipeline_cache_object_ref(&shaders[MESA_SHADER_FRAGMENT]->base);
}
}
return VK_SUCCESS;
}
-VkResult
+static VkResult
tu_empty_shader_create(struct tu_device *dev,
struct tu_shader **shader_out,
gl_shader_stage stage)
return VK_SUCCESS;
}
+/* Create the device-wide no-op fragment shader used when a pipeline
+ * library with VK_GRAPHICS_PIPELINE_LIBRARY_FRAGMENT_SHADER_BIT_EXT
+ * supplies no fragment stage.  Unlike the other empty stages this must
+ * go through NIR so a real (empty) FS variant exists.  On success
+ * *shader holds one pipeline-cache-object reference; release it with
+ * vk_pipeline_cache_object_unref() (done by tu_destroy_empty_shaders).
+ */
+static VkResult
+tu_empty_fs_create(struct tu_device *dev, struct tu_shader **shader)
+{
+   struct ir3_shader_key key = {};
+   const struct ir3_shader_options options = {};
+   struct ir3_stream_output_info so_info = {};
+   const nir_shader_compiler_options *nir_options =
+      ir3_get_compiler_options(dev->compiler);
+   nir_builder fs_b;
+
+   fs_b = nir_builder_init_simple_shader(MESA_SHADER_FRAGMENT, nir_options,
+                                         "noop_fs");
+
+   *shader = tu_shader_init(dev, NULL, 0);
+   if (!*shader) {
+      /* ir3_shader_from_nir() was never reached, so the ralloc'd NIR
+       * shader is still owned by us and would otherwise leak.
+       */
+      ralloc_free(fs_b.shader);
+      return VK_ERROR_OUT_OF_HOST_MEMORY;
+   }
+
+   struct ir3_shader *ir3_shader =
+      ir3_shader_from_nir(dev->compiler, fs_b.shader, &options, &so_info);
+   (*shader)->variant = ir3_shader_create_variant(ir3_shader, &key, false);
+   /* tu_upload_shader() dereferences the variant; fail cleanly instead
+    * of crashing if variant creation ran out of memory.
+    */
+   if (!(*shader)->variant)
+      return VK_ERROR_OUT_OF_HOST_MEMORY;
+
+   return tu_upload_shader(dev, *shader);
+}
+
+/* Create the shared per-device "empty" shaders (pass-through TCS/TES/GS
+ * plus a no-op FS) that graphics-pipeline libraries reference instead of
+ * compiling a fresh empty stage for every pipeline.  On failure, every
+ * shader created so far is released again and the error is returned.
+ * NOTE(review): the unref guards in the out: path assume dev->empty_*
+ * are zero-initialized when the device struct is allocated — confirm the
+ * device is vk_zalloc'd in tu_CreateDevice.
+ */
+VkResult
+tu_init_empty_shaders(struct tu_device *dev)
+{
+   VkResult result;
+
+   result = tu_empty_shader_create(dev, &dev->empty_tcs, MESA_SHADER_TESS_CTRL);
+   if (result != VK_SUCCESS)
+      goto out;
+
+   result = tu_empty_shader_create(dev, &dev->empty_tes, MESA_SHADER_TESS_EVAL);
+   if (result != VK_SUCCESS)
+      goto out;
+
+   result = tu_empty_shader_create(dev, &dev->empty_gs, MESA_SHADER_GEOMETRY);
+   if (result != VK_SUCCESS)
+      goto out;
+
+   result = tu_empty_fs_create(dev, &dev->empty_fs);
+   if (result != VK_SUCCESS)
+      goto out;
+
+   return VK_SUCCESS;
+
+out:
+   /* Partial-failure cleanup: drop the reference on whichever stages
+    * were successfully created before the failing one.
+    */
+   if (dev->empty_tcs)
+      vk_pipeline_cache_object_unref(&dev->vk, &dev->empty_tcs->base);
+   if (dev->empty_tes)
+      vk_pipeline_cache_object_unref(&dev->vk, &dev->empty_tes->base);
+   if (dev->empty_gs)
+      vk_pipeline_cache_object_unref(&dev->vk, &dev->empty_gs->base);
+   if (dev->empty_fs)
+      vk_pipeline_cache_object_unref(&dev->vk, &dev->empty_fs->base);
+   return result;
+}
+
+/* Release the device's shared empty shaders created by
+ * tu_init_empty_shaders().  Only valid to call after a successful init:
+ * all four pointers are unconditionally dereferenced here, unlike the
+ * NULL-guarded cleanup in tu_init_empty_shaders()'s failure path.
+ */
+void
+tu_destroy_empty_shaders(struct tu_device *dev)
+{
+   vk_pipeline_cache_object_unref(&dev->vk, &dev->empty_tcs->base);
+   vk_pipeline_cache_object_unref(&dev->vk, &dev->empty_tes->base);
+   vk_pipeline_cache_object_unref(&dev->vk, &dev->empty_gs->base);
+   vk_pipeline_cache_object_unref(&dev->vk, &dev->empty_fs->base);
+}
+
void
tu_shader_destroy(struct tu_device *dev,
struct tu_shader *shader)
bool executable_info);
VkResult
-tu_empty_shader_create(struct tu_device *device,
- struct tu_shader **shader_out,
- gl_shader_stage stage);
+tu_init_empty_shaders(struct tu_device *device);
+
+void
+tu_destroy_empty_shaders(struct tu_device *device);
void
tu_shader_destroy(struct tu_device *dev,