}
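+/* Determine the number of views used by the pass: OR together the view
+ * masks of all subpasses and record the highest view index plus one.
+ */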
static void
+tu_render_pass_calc_views(struct tu_render_pass *pass)
+{
+   uint32_t view_mask = 0;
+   for (unsigned i = 0; i < pass->subpass_count; i++)
+      view_mask |= pass->subpasses[i].multiview_mask;
+   pass->num_views = util_last_bit(view_mask);
+}
+
+static void
tu_render_pass_calc_hash(struct tu_render_pass *pass)
{
#define HASH(hash, data) XXH64(&(data), sizeof(data), hash)
tu_render_pass_cond_config(pass);
tu_render_pass_gmem_config(pass, device->physical_device);
tu_render_pass_bandwidth_config(pass);
+ tu_render_pass_calc_views(pass);
tu_render_pass_calc_hash(pass);
for (unsigned i = 0; i < pCreateInfo->dependencyCount; ++i) {
tu_render_pass_cond_config(pass);
tu_render_pass_gmem_config(pass, device->physical_device);
tu_render_pass_bandwidth_config(pass);
+ tu_render_pass_calc_views(pass);
tu_render_pass_calc_hash(pass);
}
} else {
subpass->depth_stencil_attachment.attachment = VK_ATTACHMENT_UNUSED;
}
+
+ tu_render_pass_calc_views(pass);
}
VKAPI_ATTR void VKAPI_CALL
* used.
*/
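+   /* The max of fb->layers and the precomputed view count gives the GMEM
+    * layer count whether the pass is layered or multiview (the two are not
+    * expected to be combined).
+    */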
-   uint32_t layers = fb->layers;
-   if (pass->subpasses[0].multiview_mask) {
-      uint32_t view_mask = 0;
-      for (unsigned i = 0; i < pass->subpass_count; i++)
-         view_mask |= pass->subpasses[i].multiview_mask;
-      layers = util_logbase2(view_mask) + 1;
-   }
+   uint32_t layers = MAX2(fb->layers, pass->num_views);
/* If there is more than one layer, we need to make sure that the layer
* stride is expressible as an offset in RB_BLIT_BASE_GMEM which ignores