*/
bool fallback_scheduler;
+ /* Disable TMU pipelining. This may increase the chances of successfully
+ * compiling shaders with high register pressure that need to emit TMU
+ * spills.
+ */
+ bool disable_tmu_pipelining;
+
/* State for whether we're executing on each channel currently. 0 if
* yes, otherwise a block number + 1 that the channel jumped to.
*/
void *debug_output_data),
void *debug_output_data,
int program_id, int variant_id,
+ bool disable_tmu_pipelining,
bool fallback_scheduler)
{
struct v3d_compile *c = rzalloc(NULL, struct v3d_compile);
c->debug_output_data = debug_output_data;
c->compilation_result = V3D_COMPILATION_SUCCEEDED;
c->fallback_scheduler = fallback_scheduler;
+ c->disable_tmu_pipelining = disable_tmu_pipelining;
s = nir_shader_clone(c, s);
c->s = s;
{
struct v3d_compile *c;
- for (int i = 0; true; i++) {
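+ /* Compilation strategies, tried in order. Later strategies are more
+ * conservative and improve the chances of register allocation succeeding.
+ */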
+ static const char *strategies[] = {
+ "default",
+ "disable TMU pipelining",
+ "fallback scheduler"
+ };
+
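+ /* Try each strategy in turn, moving on to the next one only if register
+ * allocation failed with the previous one.
+ */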
+ for (int i = 0; i < ARRAY_SIZE(strategies); i++) {
c = vir_compile_init(compiler, key, s,
debug_output, debug_output_data,
program_id, variant_id,
- i > 0 /* fallback_scheduler */);
+ i > 0, /* disable_tmu_pipelining */
+ i > 1 /* fallback_scheduler */);
v3d_attempt_compile(c);
- if (i > 0 ||
+ if (i >= ARRAY_SIZE(strategies) - 1 ||
c->compilation_result !=
- V3D_COMPILATION_FAILED_REGISTER_ALLOCATION)
+ V3D_COMPILATION_FAILED_REGISTER_ALLOCATION) {
break;
+ }
+ /* Report the strategy we are falling back to in the debug output. */
char *debug_msg;
int ret = asprintf(&debug_msg,
- "Using fallback scheduler for %s",
+ "Falling back to strategy '%s' for %s",
+ strategies[i + 1],
vir_get_stage_name(c));
if (ret >= 0) {