}
}
-/* Note that the following poopulate methods doesn't do a detailed fill-up of
- * the v3d_fs_key. Here we just fill-up cmd_buffer specific info. All info
- * coming from the pipeline create info was alredy filled up when the pipeline
- * was created
- */
-static void
-cmd_buffer_populate_v3d_key(struct v3d_key *key,
- struct v3dv_cmd_buffer *cmd_buffer,
- VkPipelineBindPoint pipeline_binding)
-{
- if (cmd_buffer->state.pipeline->combined_index_map != NULL) {
- struct v3dv_descriptor_map *texture_map = &cmd_buffer->state.pipeline->texture_map;
- struct v3dv_descriptor_map *sampler_map = &cmd_buffer->state.pipeline->sampler_map;
- struct v3dv_descriptor_state *descriptor_state =
- &cmd_buffer->state.descriptor_state[pipeline_binding];
-
- /* At pipeline creation time it was pre-generated an all-16 bit and an
- * all-32 bit variant, so let's do the same here to avoid as much as
- * possible a new compilation.
- */
- uint32_t v3d_key_return_size = 16;
- hash_table_foreach(cmd_buffer->state.pipeline->combined_index_map, entry) {
- uint32_t combined_idx = (uint32_t)(uintptr_t) (entry->data);
- uint32_t combined_idx_key =
- cmd_buffer->state.pipeline->combined_index_to_key_map[combined_idx];
- uint32_t texture_idx;
- uint32_t sampler_idx;
-
- v3dv_pipeline_combined_index_key_unpack(combined_idx_key,
- &texture_idx, &sampler_idx);
-
- VkFormat vk_format;
- const struct v3dv_format *format;
-
- format =
- v3dv_descriptor_map_get_texture_format(descriptor_state,
- texture_map,
- cmd_buffer->state.pipeline->layout,
- texture_idx,
- &vk_format);
-
- const struct v3dv_sampler *sampler = NULL;
- if (sampler_idx != V3DV_NO_SAMPLER_IDX) {
- sampler =
- v3dv_descriptor_map_get_sampler(descriptor_state,
- sampler_map,
- cmd_buffer->state.pipeline->layout,
- sampler_idx);
- assert(sampler);
- }
-
- key->tex[combined_idx].return_size =
- v3dv_get_tex_return_size(format,
- sampler ? sampler->compare_enable : false);
-
- if (key->tex[combined_idx].return_size == 32) {
- v3d_key_return_size = 32;
- }
- }
- v3d_key_update_return_size(cmd_buffer->state.pipeline, key,
- v3d_key_return_size);
- }
-}
-
-static void
-update_fs_variant(struct v3dv_cmd_buffer *cmd_buffer)
-{
- struct v3dv_shader_variant *variant;
- struct v3dv_pipeline_stage *p_stage = cmd_buffer->state.pipeline->fs;
- struct v3d_fs_key local_key;
-
- /* We start with a copy of the original pipeline key */
- memcpy(&local_key, &p_stage->key.fs, sizeof(struct v3d_fs_key));
-
- cmd_buffer_populate_v3d_key(&local_key.base, cmd_buffer,
- VK_PIPELINE_BIND_POINT_GRAPHICS);
-
- VkResult vk_result;
- variant = v3dv_get_shader_variant(p_stage, NULL, &local_key.base,
- sizeof(struct v3d_fs_key),
- &cmd_buffer->device->alloc,
- &vk_result);
- /* At this point we are not creating a vulkan object to return to the
- * API user, so we can't really return back a OOM error
- */
- assert(variant);
- assert(vk_result == VK_SUCCESS);
-
- if (p_stage->current_variant != variant) {
- v3dv_shader_variant_unref(cmd_buffer->device, p_stage->current_variant);
- }
- p_stage->current_variant = variant;
-}
-
-static void
-update_vs_variant(struct v3dv_cmd_buffer *cmd_buffer)
-{
- struct v3dv_shader_variant *variant;
- struct v3dv_pipeline_stage *p_stage;
- struct v3d_vs_key local_key;
- VkResult vk_result;
-
- /* We start with a copy of the original pipeline key */
- p_stage = cmd_buffer->state.pipeline->vs;
- memcpy(&local_key, &p_stage->key.vs, sizeof(struct v3d_vs_key));
-
- cmd_buffer_populate_v3d_key(&local_key.base, cmd_buffer,
- VK_PIPELINE_BIND_POINT_GRAPHICS);
-
- variant = v3dv_get_shader_variant(p_stage, NULL, &local_key.base,
- sizeof(struct v3d_vs_key),
- &cmd_buffer->device->alloc,
- &vk_result);
- /* At this point we are not creating a vulkan object to return to the
- * API user, so we can't really return back a OOM error
- */
- assert(variant);
- assert(vk_result == VK_SUCCESS);
-
- if (p_stage->current_variant != variant) {
- v3dv_shader_variant_unref(cmd_buffer->device, p_stage->current_variant);
- }
- p_stage->current_variant = variant;
-
- /* Now the vs_bin */
- p_stage = cmd_buffer->state.pipeline->vs_bin;
- memcpy(&local_key, &p_stage->key.vs, sizeof(struct v3d_vs_key));
-
- cmd_buffer_populate_v3d_key(&local_key.base, cmd_buffer,
- VK_PIPELINE_BIND_POINT_GRAPHICS);
- variant = v3dv_get_shader_variant(p_stage, NULL, &local_key.base,
- sizeof(struct v3d_vs_key),
- &cmd_buffer->device->alloc,
- &vk_result);
-
- /* At this point we are not creating a vulkan object to return to the
- * API user, so we can't really return back a OOM error
- */
- assert(variant);
- assert(vk_result == VK_SUCCESS);
-
- if (p_stage->current_variant != variant) {
- v3dv_shader_variant_unref(cmd_buffer->device, p_stage->current_variant);
- }
- p_stage->current_variant = variant;
-}
-
-static void
-update_cs_variant(struct v3dv_cmd_buffer *cmd_buffer)
-{
- struct v3dv_shader_variant *variant;
- struct v3dv_pipeline_stage *p_stage = cmd_buffer->state.pipeline->cs;
- struct v3d_key local_key;
-
- /* We start with a copy of the original pipeline key */
- memcpy(&local_key, &p_stage->key.base, sizeof(struct v3d_key));
-
- cmd_buffer_populate_v3d_key(&local_key, cmd_buffer,
- VK_PIPELINE_BIND_POINT_COMPUTE);
-
- VkResult result;
- variant = v3dv_get_shader_variant(p_stage, NULL, &local_key,
- sizeof(struct v3d_key),
- &cmd_buffer->device->alloc,
- &result);
- /* At this point we are not creating a vulkan object to return to the
- * API user, so we can't really return back a OOM error
- */
- assert(variant);
- assert(result == VK_SUCCESS);
-
- if (p_stage->current_variant != variant) {
- v3dv_shader_variant_unref(cmd_buffer->device, p_stage->current_variant);
- }
- p_stage->current_variant = variant;
-}
-
-/*
- * Some updates on the cmd buffer requires also updates on the shader being
- * compiled at the pipeline. The poster boy here are textures, as the compiler
- * needs to do certain things depending on the texture format. So here we
- * re-create the v3d_keys and update the variant. Note that internally the
- * pipeline has a variant cache (hash table) to avoid unneeded compilations
- *
- */
-static void
-update_pipeline_variants(struct v3dv_cmd_buffer *cmd_buffer)
-{
- assert(cmd_buffer->state.pipeline);
-
- if (v3dv_pipeline_get_binding_point(cmd_buffer->state.pipeline) ==
- VK_PIPELINE_BIND_POINT_GRAPHICS) {
- update_fs_variant(cmd_buffer);
- update_vs_variant(cmd_buffer);
- } else {
- update_cs_variant(cmd_buffer);
- }
-}
-
static void
bind_graphics_pipeline(struct v3dv_cmd_buffer *cmd_buffer,
struct v3dv_pipeline *pipeline)
job = cmd_buffer_pre_draw_split_job(cmd_buffer);
job->draw_count++;
- /* We may need to compile shader variants based on bound textures */
- uint32_t *dirty = &cmd_buffer->state.dirty;
- if (*dirty & (V3DV_CMD_DIRTY_PIPELINE |
- V3DV_CMD_DIRTY_DESCRIPTOR_SETS)) {
- update_pipeline_variants(cmd_buffer);
- }
-
/* GL shader state binds shaders, uniform and vertex attribute state. The
* compiler injects uniforms to handle some descriptor types (such as
* textures), so we need to regen that when descriptor state changes.
* We also need to emit new shader state if we have a dirty viewport since
 * that will require that we upload new uniform state for QUNIFORM_VIEWPORT_*.
*/
+ uint32_t *dirty = &cmd_buffer->state.dirty;
if (*dirty & (V3DV_CMD_DIRTY_PIPELINE |
V3DV_CMD_DIRTY_VERTEX_BUFFER |
V3DV_CMD_DIRTY_DESCRIPTOR_SETS |
assert(cmd_buffer->state.pipeline);
assert(cmd_buffer->state.pipeline->active_stages == VK_SHADER_STAGE_COMPUTE_BIT);
- /* We may need to compile shader variants based on bound textures */
uint32_t *dirty = &cmd_buffer->state.dirty;
- if (*dirty & (V3DV_CMD_DIRTY_PIPELINE |
- V3DV_CMD_DIRTY_COMPUTE_DESCRIPTOR_SETS)) {
- update_pipeline_variants(cmd_buffer);
- }
-
*dirty &= ~(V3DV_CMD_DIRTY_PIPELINE |
V3DV_CMD_DIRTY_COMPUTE_DESCRIPTOR_SETS);
}
return variant;
}
-/* This methods updates the return size for a given key. It assumes that it
- * was already properly populated. So for example values for key->num_tex_used
- * should be correct at this point
- *
- * Note that even the @return_size to set is 32bit, it could be overriden to
- * 16bit, like for shadow textures, that we know in advance that they are
- * always 16bit.
- */
-void
-v3d_key_update_return_size(struct v3dv_pipeline *pipeline,
- struct v3d_key *key,
- uint32_t return_size)
-{
- assert(return_size == 32 || return_size == 16);
- struct v3dv_descriptor_map *texture_map = &pipeline->texture_map;
-
- for (uint32_t tex_idx = 0; tex_idx < key->num_tex_used; tex_idx++) {
- key->tex[tex_idx].return_size =
- texture_map->is_shadow[tex_idx] ? 16 : return_size;
-
- key->tex[tex_idx].return_channels =
- key->tex[tex_idx].return_size == 16 ? 2 : 4;
- }
-}
-
/*
 * To avoid needing too many shader re-compilations after pipeline creation
* time, we pre-generate several options, so they are available on the default
- * cache. The poster boy here is return size for texture acceses, as the real
- * value needed would depend on the texture format used.
+ * cache.
+ *
+ * NOTE: although some time ago we pre-generated two variants, right now we
+ * only create one variant. We keep this method because it is quite likely
+ * that we will need to rely on multiple variants again as we keep working
+ * on possible optimizations that can't be decided at pipeline creation time.
*/
static struct v3dv_shader_variant*
pregenerate_shader_variants(struct v3dv_pipeline_stage *p_stage,
const VkAllocationCallbacks *pAllocator,
VkResult *out_vk_result)
{
- /* We assume that we receive the default 16 return size*/
- struct v3dv_shader_variant *variant_16 =
+ struct v3dv_shader_variant *default_variant =
v3dv_get_shader_variant(p_stage, cache, key, key_size,
pAllocator, out_vk_result);
if (*out_vk_result != VK_SUCCESS)
- return variant_16;
+ return default_variant;
if (!p_stage->pipeline->device->instance->default_pipeline_cache_enabled) {
/* If pipeline cache is disabled it doesn't make sense to pre-generate,
* as we are relying on the default pipeline cache to save the different
* pre-compiled variants
*/
- return variant_16;
+ return default_variant;
}
- v3d_key_update_return_size(p_stage->pipeline, key, 32);
-
- struct v3dv_shader_variant *variant_32 =
- v3dv_get_shader_variant(p_stage, cache, key, key_size,
- pAllocator, out_vk_result);
-
- /* get_shader_variant returns a new ref, so as we are going to use
- * variant_16, we need to unref this.
- */
- v3dv_shader_variant_unref(p_stage->pipeline->device, variant_32);
+ /* FIXME: placeholder. Here we would pre-generate other variants if needed */
- return variant_16;
+ return default_variant;
}
/* FIXME: C&P from st, common place? */