cmd_buffer->state.emitted_graphics_pipeline->db_render_control != pipeline->db_render_control ||
cmd_buffer->state.emitted_graphics_pipeline->rast_prim != pipeline->rast_prim)
cmd_buffer->state.dirty |= RADV_CMD_DIRTY_DYNAMIC_RASTERIZATION_SAMPLES;
+
+ if (cmd_buffer->state.emitted_graphics_pipeline->uses_inner_coverage != pipeline->uses_inner_coverage)
+ cmd_buffer->state.dirty |= RADV_CMD_DIRTY_DYNAMIC_CONSERVATIVE_RAST_MODE;
}
radeon_emit_array(cmd_buffer->cs, pipeline->base.cs.buf, pipeline->base.cs.cdw);
static void
radv_emit_conservative_rast_mode(struct radv_cmd_buffer *cmd_buffer)
{
+ const struct radv_graphics_pipeline *pipeline = cmd_buffer->state.graphics_pipeline;
const struct radv_physical_device *pdevice = cmd_buffer->device->physical_device;
const struct radv_dynamic_state *d = &cmd_buffer->state.dynamic;
pa_sc_conservative_rast = S_028C4C_PREZ_AA_MASK_ENABLE(1) | S_028C4C_POSTZ_AA_MASK_ENABLE(1) |
S_028C4C_CENTROID_SAMPLE_OVERRIDE(1);
- if (d->vk.rs.conservative_mode == VK_CONSERVATIVE_RASTERIZATION_MODE_OVERESTIMATE_EXT) {
+ /* Inner coverage requires underestimate conservative rasterization. */
+ if (d->vk.rs.conservative_mode == VK_CONSERVATIVE_RASTERIZATION_MODE_OVERESTIMATE_EXT &&
+ !pipeline->uses_inner_coverage) {
pa_sc_conservative_rast |= S_028C4C_OVER_RAST_ENABLE(1) |
S_028C4C_UNDER_RAST_SAMPLE_SELECT(1) |
S_028C4C_PBB_UNCERTAINTY_REGION_ENABLE(1);
} else {
- assert(d->vk.rs.conservative_mode ==
- VK_CONSERVATIVE_RASTERIZATION_MODE_UNDERESTIMATE_EXT);
pa_sc_conservative_rast |=
S_028C4C_OVER_RAST_SAMPLE_SELECT(1) | S_028C4C_UNDER_RAST_ENABLE(1);
}
static void
radv_emit_msaa_state(struct radv_cmd_buffer *cmd_buffer)
{
+ const struct radv_graphics_pipeline *pipeline = cmd_buffer->state.graphics_pipeline;
const struct radv_physical_device *pdevice = cmd_buffer->device->physical_device;
unsigned rasterization_samples = radv_get_rasterization_samples(cmd_buffer);
const struct radv_rendering_state *render = &cmd_buffer->state.render;
S_028BE0_COVERED_CENTROID_IS_CENTER(pdevice->rad_info.gfx_level >= GFX10_3);
}
+ pa_sc_aa_config |= S_028BE0_COVERAGE_TO_SHADER_SELECT(pipeline->uses_inner_coverage);
+
radeon_set_context_reg(cmd_buffer->cs, R_028804_DB_EQAA, db_eqaa);
radeon_set_context_reg(cmd_buffer->cs, R_028BE0_PA_SC_AA_CONFIG, pa_sc_aa_config);
radeon_set_context_reg(cmd_buffer->cs, R_028A48_PA_SC_MODE_CNTL_0,
pipeline->force_vrs_per_vertex =
pipeline->base.shaders[pipeline->last_vgt_api_stage]->info.force_vrs_per_vertex;
pipeline->uses_user_sample_locations = state.ms && state.ms->sample_locations_enable;
+ pipeline->uses_inner_coverage =
+ pipeline->base.shaders[MESA_SHADER_FRAGMENT]->info.ps.reads_fully_covered;
pipeline->rast_prim = vgt_gs_out_prim_type;
pipeline->last_vgt_api_stage_locs = pipeline->base.shaders[pipeline->last_vgt_api_stage]->info.user_sgprs_locs.shader_data;
bool use_per_attribute_vb_descs;
bool can_use_simple_input;
bool uses_user_sample_locations;
+
+   /* Whether the pipeline uses inner coverage, ie. the fragment shader reads the FullyCoveredEXT
+    * built-in, which is true when the fragment area is fully covered by the generating primitive.
+    */
+ bool uses_inner_coverage;
+
bool need_null_export_workaround;
/* Whether the pipeline forces per-vertex VRS (GFX10.3+). */
bool force_vrs_per_vertex;