return groups | fully_dynamic_state_groups(state->dynamic);
}
+void
+vk_graphics_pipeline_get_state(const struct vk_graphics_pipeline_state *state,
+ BITSET_WORD *set_state_out)
+{
+   /* Collect the set of state groups which have any state attached */
+ enum mesa_vk_graphics_state_groups groups = 0;
+
+#define FILL_HAS(STATE, type, s) \
+ if (state->s != NULL) groups |= STATE
+
+ FOREACH_STATE_GROUP(FILL_HAS)
+
+#undef FILL_HAS
+
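+   /* Expand those groups into individual states, then drop anything dynamic:
+    * what remains is set statically by the pipeline.
+    */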
+ BITSET_DECLARE(set_state, MESA_VK_DYNAMIC_GRAPHICS_STATE_ENUM_MAX);
+ get_dynamic_state_groups(set_state, groups);
+ BITSET_ANDNOT(set_state, set_state, state->dynamic);
+ memcpy(set_state_out, set_state, sizeof(set_state));
+}
+
static void
vk_graphics_pipeline_state_validate(const struct vk_graphics_pipeline_state *state)
{
#undef MERGE
}
+static bool
+is_group_all_dynamic(const struct vk_graphics_pipeline_state *state,
+ enum mesa_vk_graphics_state_groups group)
+{
+ /* Render pass is a bit special, because it contains always-static state
+ * (e.g. the view mask). It's never all dynamic.
+ */
+ if (group == MESA_VK_GRAPHICS_STATE_RENDER_PASS_BIT)
+ return false;
+
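+   /* A group is all-dynamic if every dynamic state it contains is marked
+    * dynamic in the pipeline.
+    */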
+ BITSET_DECLARE(group_state, MESA_VK_DYNAMIC_GRAPHICS_STATE_ENUM_MAX);
+ BITSET_DECLARE(dynamic_state, MESA_VK_DYNAMIC_GRAPHICS_STATE_ENUM_MAX);
+ get_dynamic_state_groups(group_state, group);
+ BITSET_AND(dynamic_state, group_state, state->dynamic);
+ return BITSET_EQUAL(dynamic_state, group_state);
+}
+
+VkResult
+vk_graphics_pipeline_state_copy(const struct vk_device *device,
+ struct vk_graphics_pipeline_state *state,
+ const struct vk_graphics_pipeline_state *old_state,
+ const VkAllocationCallbacks *alloc,
+ VkSystemAllocationScope scope,
+ void **alloc_ptr_out)
+{
+ vk_graphics_pipeline_state_validate(old_state);
+
+ VK_MULTIALLOC(ma);
+
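+   /* First pass: figure out how much to allocate. A group needs no storage
+    * when it is unset in old_state or when all of its state is dynamic.
+    */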
+#define ENSURE_STATE_IF_NEEDED(STATE, type, s) \
+ struct type *new_##s = NULL; \
+ if (old_state->s && !is_group_all_dynamic(state, STATE)) { \
+ vk_multialloc_add(&ma, &new_##s, struct type, 1); \
+ }
+
+ FOREACH_STATE_GROUP(ENSURE_STATE_IF_NEEDED)
+
+#undef ENSURE_STATE_IF_NEEDED
+
+   /* Sample locations are a bit special: they hang off of the multisample
+    * state as a pointer rather than being a state group of their own, so
+    * they get their own allocation.
+    */
+   struct vk_sample_locations_state *new_sample_locations = NULL;
+   if (old_state->ms && old_state->ms->sample_locations &&
+       !BITSET_TEST(old_state->dynamic, MESA_VK_DYNAMIC_MS_SAMPLE_LOCATIONS)) {
+      vk_multialloc_add(&ma, &new_sample_locations,
+                        struct vk_sample_locations_state, 1);
+   }
+
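+   /* Everything lands in a single allocation, and we skip the allocation
+    * entirely when there is nothing static to copy.
+    */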
+ if (ma.size > 0) {
+ *alloc_ptr_out = vk_multialloc_alloc2(&ma, &device->alloc, alloc, scope);
+ if (*alloc_ptr_out == NULL)
+ return vk_error(device, VK_ERROR_OUT_OF_HOST_MEMORY);
+ }
+
+ if (new_sample_locations) {
+ *new_sample_locations = *old_state->ms->sample_locations;
+ }
+
+#define COPY_STATE_IF_NEEDED(STATE, type, s) \
+   if (new_##s) { \
+      *new_##s = *old_state->s; \
+   } \
+   state->s = new_##s;
+
+   FOREACH_STATE_GROUP(COPY_STATE_IF_NEEDED)
+
+   /* This has to come after the group copy above: COPY_STATE_IF_NEEDED
+    * copies old_state->ms wholesale, including its sample_locations pointer,
+    * which would otherwise overwrite our pointer to the new copy.
+    */
+   if (new_ms) {
+      new_ms->sample_locations = new_sample_locations;
+   }
+
+ state->shader_stages = old_state->shader_stages;
+ BITSET_COPY(state->dynamic, old_state->dynamic);
+
+#undef COPY_STATE_IF_NEEDED
+
+ vk_graphics_pipeline_state_validate(state);
+ return VK_SUCCESS;
+}
+
const struct vk_dynamic_graphics_state vk_default_dynamic_graphics_state = {
.rs = {
.line = {
VkSystemAllocationScope scope,
void **alloc_ptr_out);
+/** Populate a vk_graphics_pipeline_state from another one.
+ *
+ * This allocates space for graphics pipeline state and copies it from another
+ * pipeline state. It ignores state in `old_state` which is not set and does
+ * not allocate memory if the entire group is unused. The intended use-case is
+ * for drivers that may be able to precompile some state ahead of time, to
+ * avoid allocating memory for it in pipeline libraries. The workflow looks
+ * something like this:
+ *
+ * struct vk_graphics_pipeline_all_state all;
+ * struct vk_graphics_pipeline_state state;
+ * vk_graphics_pipeline_state_fill(dev, &state, ..., &all, NULL, 0, NULL);
+ *
+ * ...
+ *
+ * BITSET_DECLARE(set_state, MESA_VK_DYNAMIC_GRAPHICS_STATE_ENUM_MAX);
+ *    vk_graphics_pipeline_get_state(&state, set_state);
+ *
+ * ...
+ *
+ * if (BITSET_TEST(set_state, MESA_VK_DYNAMIC_FOO)) {
+ * emit_foo(&state.foo, ...);
+ * BITSET_SET(state.dynamic, MESA_VK_DYNAMIC_FOO);
+ * }
+ *
+ * ...
+ *
+ * if (pipeline->is_library) {
+ * library = pipeline_to_library(pipeline);
+ * vk_graphics_pipeline_state_copy(dev, &library->state, &state, ...);
+ * }
+ *
+ * In this case we will avoid allocating memory for `library->state.foo`.
+ *
+ * @param[in] device The Vulkan device
+ * @param[out] state The graphics pipeline state to populate
+ * @param[in] old_state The graphics pipeline state to copy from
+ * @param[in] alloc Allocation callbacks for dynamically allocating
+ * new state memory.
+ * @param[in] scope Allocation scope for dynamically allocating new
+ * state memory.
+ * @param[out] alloc_ptr_out Will be populated with a pointer to any newly
+ * allocated state. The driver is responsible for
+ * freeing this pointer.
+ */
+VkResult
+vk_graphics_pipeline_state_copy(const struct vk_device *device,
+ struct vk_graphics_pipeline_state *state,
+ const struct vk_graphics_pipeline_state *old_state,
+ const VkAllocationCallbacks *alloc,
+ VkSystemAllocationScope scope,
+ void **alloc_ptr_out);
+
/** Merge one vk_graphics_pipeline_state into another
*
* Both the destination and source states are assumed to be valid (i.e., all
vk_graphics_pipeline_state_merge(struct vk_graphics_pipeline_state *dst,
const struct vk_graphics_pipeline_state *src);
+/** Get the states which will be set for a given vk_graphics_pipeline_state
+ *
+ * Returns the set of states which the pipeline provides statically, i.e.
+ * everything which is set in the pipeline but not marked dynamic. These are
+ * the states a driver needs to emit when the pipeline is bound.
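+ *
+ * A minimal usage sketch, reusing the hypothetical `emit_foo()` helper from
+ * the vk_graphics_pipeline_state_copy() example above:
+ *
+ *    BITSET_DECLARE(set_state, MESA_VK_DYNAMIC_GRAPHICS_STATE_ENUM_MAX);
+ *    vk_graphics_pipeline_get_state(&state, set_state);
+ *    if (BITSET_TEST(set_state, MESA_VK_DYNAMIC_FOO))
+ *       emit_foo(&state.foo, ...);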
+ */
+void
+vk_graphics_pipeline_get_state(const struct vk_graphics_pipeline_state *state,
+ BITSET_WORD *set_state_out);
+
extern const struct vk_dynamic_graphics_state vk_default_dynamic_graphics_state;
/** Initialize a vk_dynamic_graphics_state with defaults