From b6946d35c832979833559993121d1333482092f5 Mon Sep 17 00:00:00 2001
From: Alyssa Rosenzweig
Date: Thu, 25 Jul 2019 08:44:53 -0700
Subject: [PATCH] pan/midgard: Implement texture RA
MIME-Version: 1.0
Content-Type: text/plain; charset=utf8
Content-Transfer-Encoding: 8bit

total instructions in shared programs: 3916 -> 3665 (-6.41%)
instructions in affected programs: 1405 -> 1154 (-17.86%)
helped: 35
HURT: 0
helped stats (abs) min: 1 max: 21 x̄: 7.17 x̃: 3
helped stats (rel) min: 3.00% max: 28.57% x̄: 20.11% x̃: 21.74%
95% mean confidence interval for instructions value: -9.35 -4.99
95% mean confidence interval for instructions %-change: -22.75% -17.46%
Instructions are helped.

total bundles in shared programs: 2472 -> 2256 (-8.74%)
bundles in affected programs: 906 -> 690 (-23.84%)
helped: 32
HURT: 0
helped stats (abs) min: 1 max: 18 x̄: 6.75 x̃: 3
helped stats (rel) min: 5.56% max: 32.26% x̄: 20.83% x̃: 16.67%
95% mean confidence interval for bundles value: -9.09 -4.41
95% mean confidence interval for bundles %-change: -23.77% -17.89%
Bundles are helped.

total quadwords in shared programs: 3965 -> 3689 (-6.96%)
quadwords in affected programs: 1568 -> 1292 (-17.60%)
helped: 35
HURT: 0
helped stats (abs) min: 1 max: 21 x̄: 7.89 x̃: 3
helped stats (rel) min: 2.08% max: 28.57% x̄: 19.87% x̃: 20.00%
95% mean confidence interval for quadwords value: -10.38 -5.39
95% mean confidence interval for quadwords %-change: -22.57% -17.17%
Quadwords are helped.

total registers in shared programs: 411 -> 392 (-4.62%)
registers in affected programs: 76 -> 57 (-25.00%)
helped: 15
HURT: 0
helped stats (abs) min: 1 max: 2 x̄: 1.27 x̃: 1
helped stats (rel) min: 9.09% max: 50.00% x̄: 30.97% x̃: 33.33%
95% mean confidence interval for registers value: -1.52 -1.01
95% mean confidence interval for registers %-change: -39.12% -22.82%
Registers are helped.
total threads in shared programs: 426 -> 432 (1.41%)
threads in affected programs: 6 -> 12 (100.00%)
helped: 3
HURT: 0
helped stats (abs) min: 2 max: 2 x̄: 2.00 x̃: 2
helped stats (rel) min: 100.00% max: 100.00% x̄: 100.00% x̃: 100.00%

Signed-off-by: Alyssa Rosenzweig
---
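Notes (editor): the sketch below is illustrative commentary on the diff,
not part of the patch itself; git-am ignores this section. Two conventions
the diff relies on are easy to miss. First, make_compiler_temp() hands out
indices above everything that nir_src_index()/nir_dest_index() can
produce, so compiler-invented temporaries can never alias a NIR value:

    [0, ssa_alloc)                       NIR SSA values
    [ssa_alloc, ssa_alloc + reg_alloc)   NIR registers
    [ssa_alloc + reg_alloc, ...)         compiler temporaries

Second, the shadow indices give overlapping special-register classes
(ldst27 over r27, texw over the texr registers r28/r29) distinct entries
in the ra_regs set; add_shadow_conflicts() marks each base/shadow pair
mutually exclusive, and index_to_reg() folds the shadows back onto the
physical registers. A minimal sketch of that fold, mirroring the
index_to_reg() change below (example_shadow_to_phys is a hypothetical
name, not a function in the tree):

    static unsigned
    example_shadow_to_phys(unsigned phys)
    {
            /* SHADOW_R27..SHADOW_R29 (17..19) alias r27..r29 */
            if (phys >= SHADOW_R27 && phys <= SHADOW_R29)
                    phys += 27 - SHADOW_R27;

            return phys;
    }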
 src/panfrost/midgard/compiler.h         |  16 ++-
 src/panfrost/midgard/midgard_compile.c  | 117 ++++++++----
 src/panfrost/midgard/midgard_compile.h  |   7 +-
 src/panfrost/midgard/midgard_ra.c       | 207 +++++++++++++++++++++++++-------
 src/panfrost/midgard/midgard_schedule.c |  67 +++++++----
 5 files changed, 271 insertions(+), 143 deletions(-)

diff --git a/src/panfrost/midgard/compiler.h b/src/panfrost/midgard/compiler.h
index 32714d7..d3f1004 100644
--- a/src/panfrost/midgard/compiler.h
+++ b/src/panfrost/midgard/compiler.h
@@ -207,6 +207,9 @@ typedef struct compiler_context {
         /* Current NIR function */
         nir_function *func;
 
+        /* Allocated compiler temporary counter */
+        unsigned temp_alloc;
+
         /* Unordered list of midgard_blocks */
         int block_count;
         struct list_head blocks;
@@ -280,10 +283,12 @@ emit_mir_instruction(struct compiler_context *ctx, struct midgard_instruction in
         list_addtail(&(mir_upload_ins(ins))->link, &ctx->current_block->instructions);
 }
 
-static inline void
+static inline struct midgard_instruction *
 mir_insert_instruction_before(struct midgard_instruction *tag, struct midgard_instruction ins)
 {
-        list_addtail(&(mir_upload_ins(ins))->link, &tag->link);
+        struct midgard_instruction *u = mir_upload_ins(ins);
+        list_addtail(&u->link, &tag->link);
+        return u;
 }
 
 static inline void
@@ -342,8 +347,6 @@ mir_next_op(struct midgard_instruction *ins)
         mir_foreach_block(ctx, v_block) \
                 mir_foreach_instr_in_block_safe(v_block, v)
 
-
-
 static inline midgard_instruction *
 mir_last_in_block(struct midgard_block *block)
 {
@@ -454,12 +457,13 @@ struct ra_graph;
 /* Broad types of register classes so we can handle special
  * registers */
 
-#define NR_REG_CLASSES 3
+#define NR_REG_CLASSES 5
 
 #define REG_CLASS_WORK 0
 #define REG_CLASS_LDST 1
 #define REG_CLASS_LDST27 2
-#define REG_CLASS_TEX 3
+#define REG_CLASS_TEXR 3
+#define REG_CLASS_TEXW 4
 
 void mir_lower_special_reads(compiler_context *ctx);
 struct ra_graph* allocate_registers(compiler_context *ctx, bool *spilled);
diff --git a/src/panfrost/midgard/midgard_compile.c b/src/panfrost/midgard/midgard_compile.c
index 3b17d8d..b49f50d 100644
--- a/src/panfrost/midgard/midgard_compile.c
+++ b/src/panfrost/midgard/midgard_compile.c
@@ -304,6 +304,12 @@ nir_dest_index(compiler_context *ctx, nir_dest *dst)
         }
 }
 
+static unsigned
+make_compiler_temp(compiler_context *ctx)
+{
+        return ctx->func->impl->ssa_alloc + ctx->func->impl->reg_alloc + ctx->temp_alloc++;
+}
+
 static int
 sysval_for_instr(compiler_context *ctx, nir_instr *instr, unsigned *dest)
 {
@@ -1538,10 +1544,6 @@ emit_texop_native(compiler_context *ctx, nir_tex_instr *instr,
         //assert (!instr->sampler);
         //assert (!instr->texture_array_size);
 
-        /* Allocate registers via a round robin scheme to alternate between the two registers */
-        int reg = ctx->texture_op_count & 1;
-        int in_reg = reg, out_reg = reg;
-
         int texture_index = instr->texture_index;
         int sampler_index = texture_index;
 
@@ -1549,14 +1551,18 @@ emit_texop_native(compiler_context *ctx, nir_tex_instr *instr,
         midgard_instruction ins = {
                 .type = TAG_TEXTURE_4,
                 .mask = 0xF,
+                .ssa_args = {
+                        .dest = nir_dest_index(ctx, &instr->dest),
+                        .src0 = -1,
+                        .src1 = -1,
+                },
                 .texture = {
                         .op = midgard_texop,
                         .format = midgard_tex_format(instr->sampler_dim),
                         .texture_handle = texture_index,
                         .sampler_handle = sampler_index,
-
-                        /* TODO: Regalloc it in */
                         .swizzle = SWIZZLE_XYZW,
+                        .in_reg_swizzle = SWIZZLE_XYZW,
 
                         /* TODO: half */
                         .in_reg_full = 1,
@@ -1567,13 +1573,36 @@ emit_texop_native(compiler_context *ctx, nir_tex_instr *instr,
         };
 
         for (unsigned i = 0; i < instr->num_srcs; ++i) {
-                int reg = SSA_FIXED_REGISTER(REGISTER_TEXTURE_BASE + in_reg);
                 int index = nir_src_index(ctx, &instr->src[i].src);
-                int nr_comp = nir_src_num_components(instr->src[i].src);
                 midgard_vector_alu_src alu_src = blank_alu_src;
 
                 switch (instr->src[i].src_type) {
                 case nir_tex_src_coord: {
+                        emit_explicit_constant(ctx, index, index);
+
+                        /* Texelfetch coordinates use all four elements
+                         * (xyz/index) regardless of texture dimensionality,
+                         * which means it's necessary to zero the unused
+                         * components to keep everything happy */
+
+                        if (midgard_texop == TEXTURE_OP_TEXEL_FETCH) {
+                                unsigned old_index = index;
+
+                                index = make_compiler_temp(ctx);
+
+                                /* mov index, old_index */
+                                midgard_instruction mov = v_mov(old_index, blank_alu_src, index);
+                                mov.mask = 0x3;
+                                emit_mir_instruction(ctx, mov);
+
+                                /* mov index.zw, #0 */
+                                mov = v_mov(SSA_FIXED_REGISTER(REGISTER_CONSTANT),
+                                                blank_alu_src, index);
+                                mov.has_constants = true;
+                                mov.mask = (1 << COMPONENT_Z) | (1 << COMPONENT_W);
+                                emit_mir_instruction(ctx, mov);
+                        }
+
                         if (instr->sampler_dim == GLSL_SAMPLER_DIM_CUBE) {
                                 /* texelFetch is undefined on samplerCube */
                                 assert(midgard_texop != TEXTURE_OP_TEXEL_FETCH);
@@ -1582,46 +1611,23 @@ emit_texop_native(compiler_context *ctx, nir_tex_instr *instr,
                                  * select the face and copy the xy into the
                                  * texture register */
 
-                                midgard_instruction st = m_st_cubemap_coords(reg, 0);
+                                unsigned temp = make_compiler_temp(ctx);
+
+                                midgard_instruction st = m_st_cubemap_coords(temp, 0);
                                 st.ssa_args.src0 = index;
                                 st.load_store.unknown = 0x24; /* XXX: What is this? */
                                 st.mask = 0x3; /* xy */
                                 st.load_store.swizzle = alu_src.swizzle;
                                 emit_mir_instruction(ctx, st);
 
-                                ins.texture.in_reg_swizzle = swizzle_of(2);
+                                ins.ssa_args.src0 = temp;
                         } else {
-                                ins.texture.in_reg_swizzle = alu_src.swizzle = swizzle_of(nr_comp);
-
-                                midgard_instruction mov = v_mov(index, alu_src, reg);
-                                mov.mask = mask_of(nr_comp);
-                                emit_mir_instruction(ctx, mov);
-
-                                if (midgard_texop == TEXTURE_OP_TEXEL_FETCH) {
-                                        /* Texel fetch opcodes care about the
-                                         * values of z and w, so we actually
-                                         * need to spill into a second register
-                                         * for a texel fetch with register bias
-                                         * (for non-2D). TODO: Implement that
-                                         */
-
-                                        assert(instr->sampler_dim == GLSL_SAMPLER_DIM_2D);
-
-                                        midgard_instruction zero = v_mov(index, alu_src, reg);
-                                        zero.ssa_args.inline_constant = true;
-                                        zero.ssa_args.src1 = SSA_FIXED_REGISTER(REGISTER_CONSTANT);
-                                        zero.has_constants = true;
-                                        zero.mask = ~mov.mask;
-                                        emit_mir_instruction(ctx, zero);
+                                ins.ssa_args.src0 = index;
+                        }
 
-                                        ins.texture.in_reg_swizzle = SWIZZLE_XYZZ;
-                                } else {
-                                        /* Non-texel fetch doesn't need that
-                                         * nonsense. However we do use the Z
-                                         * for array indexing */
-                                        bool is_3d = instr->sampler_dim == GLSL_SAMPLER_DIM_3D;
-                                        ins.texture.in_reg_swizzle = is_3d ? SWIZZLE_XYZZ : SWIZZLE_XYXZ;
-                                }
+                        if (instr->sampler_dim == GLSL_SAMPLER_DIM_2D) {
+                                /* Array component in w but NIR wants it in z */
+                                ins.texture.in_reg_swizzle = SWIZZLE_XYZZ;
                         }
 
                         break;
@@ -1635,27 +1641,9 @@ emit_texop_native(compiler_context *ctx, nir_tex_instr *instr,
                         if (!is_txf && pan_attach_constant_bias(ctx, instr->src[i].src, &ins.texture))
                                 break;
 
-                        /* Otherwise we use a register. To keep RA simple, we
-                         * put the bias/LOD into the w component of the input
-                         * source, which is otherwise in xy */
-
-                        alu_src.swizzle = SWIZZLE_XXXX;
-
-                        midgard_instruction mov = v_mov(index, alu_src, reg);
-                        mov.mask = 1 << COMPONENT_W;
-                        emit_mir_instruction(ctx, mov);
-
                         ins.texture.lod_register = true;
-
-                        midgard_tex_register_select sel = {
-                                .select = in_reg,
-                                .full = 1,
-                                .component = COMPONENT_W,
-                        };
-
-                        uint8_t packed;
-                        memcpy(&packed, &sel, sizeof(packed));
-                        ins.texture.bias = packed;
+                        ins.ssa_args.src1 = index;
+                        emit_explicit_constant(ctx, index, index);
 
                         break;
                 };
@@ -1665,16 +1653,8 @@ emit_texop_native(compiler_context *ctx, nir_tex_instr *instr,
                 }
         }
 
-        /* Set registers to read and write from the same place */
-        ins.texture.in_reg_select = in_reg;
-        ins.texture.out_reg_select = out_reg;
-
         emit_mir_instruction(ctx, ins);
 
-        int o_reg = REGISTER_TEXTURE_BASE + out_reg, o_index = nir_dest_index(ctx, &instr->dest);
-        midgard_instruction ins2 = v_mov(SSA_FIXED_REGISTER(o_reg), blank_alu_src, o_index);
-        emit_mir_instruction(ctx, ins2);
-
         /* Used for .cont and .last hinting */
         ctx->texture_op_count++;
 }
@@ -2290,6 +2270,7 @@ midgard_compile_shader_nir(struct midgard_screen *screen, nir_shader *nir, midga
                 .nir = nir,
                 .screen = screen,
                 .stage = nir->info.stage,
+                .temp_alloc = 0,
 
                 .is_blend = is_blend,
                 .blend_constant_offset = 0,
diff --git a/src/panfrost/midgard/midgard_compile.h b/src/panfrost/midgard/midgard_compile.h
index b6cd2af..389dd1e3 100644
--- a/src/panfrost/midgard/midgard_compile.h
+++ b/src/panfrost/midgard/midgard_compile.h
@@ -41,10 +41,11 @@ struct midgard_screen {
 
         struct ra_regs *regs[9];
 
-        /* Work register classes corresponds to the above register
-         * sets. 12 per set for 4 classes per work/ldst/tex */
+        /* Work register classes correspond to the above register sets. 20 per
+         * set, 4 per class for work/ldst/ldst27/texr/texw. TODO: Unify with
+         * compiler.h */
 
-        unsigned reg_classes[9][12];
+        unsigned reg_classes[9][4 * 5];
 };
 
 /* Define the general compiler entry point */
diff --git a/src/panfrost/midgard/midgard_ra.c b/src/panfrost/midgard/midgard_ra.c
index cca167c..dfce044 100644
--- a/src/panfrost/midgard/midgard_ra.c
+++ b/src/panfrost/midgard/midgard_ra.c
@@ -44,7 +44,13 @@
  */
 
 #define WORK_STRIDE 10
+
+/* We have overlapping register classes for special registers, handled via
+ * shadows */
+
 #define SHADOW_R27 17
+#define SHADOW_R28 18
+#define SHADOW_R29 19
 
 /* Prepacked masks/swizzles for virtual register types */
 static unsigned reg_type_to_mask[WORK_STRIDE] = {
@@ -149,8 +155,8 @@ index_to_reg(compiler_context *ctx, struct ra_graph *g, int reg)
 
         /* Apply shadow registers */
 
-        if (phys == SHADOW_R27)
-                phys = 27;
+        if (phys >= SHADOW_R27 && phys <= SHADOW_R29)
+                phys += 27 - SHADOW_R27;
 
         struct phys_reg r = {
                 .reg = phys,
@@ -171,6 +177,21 @@ index_to_reg(compiler_context *ctx, struct ra_graph *g, int reg)
  * work registers, although it is also used to create the register set for
  * special register allocation */
 
+static void
+add_shadow_conflicts (struct ra_regs *regs, unsigned base, unsigned shadow)
+{
+        for (unsigned a = 0; a < WORK_STRIDE; ++a) {
+                unsigned reg_a = (WORK_STRIDE * base) + a;
+
+                for (unsigned b = 0; b < WORK_STRIDE; ++b) {
+                        unsigned reg_b = (WORK_STRIDE * shadow) + b;
+
+                        ra_add_reg_conflict(regs, reg_a, reg_b);
+                        ra_add_reg_conflict(regs, reg_b, reg_a);
+                }
+        }
+}
+
 static struct ra_regs *
 create_register_set(unsigned work_count, unsigned *classes)
 {
@@ -199,7 +220,9 @@ create_register_set(unsigned work_count, unsigned *classes)
                 unsigned first_reg =
                         (c == REG_CLASS_LDST)   ? 26 :
                         (c == REG_CLASS_LDST27) ? SHADOW_R27 :
-                        (c == REG_CLASS_TEX)    ? 28 : 0;
+                        (c == REG_CLASS_TEXR)   ? 28 :
+                        (c == REG_CLASS_TEXW)   ? SHADOW_R28 :
+                        0;
 
                 /* Add the full set of work registers */
                 for (unsigned i = first_reg; i < (first_reg + count); ++i) {
@@ -232,19 +255,10 @@ create_register_set(unsigned work_count, unsigned *classes)
                 }
         }
 
-        /* All of the r27 registers in in LDST conflict with all of the
-         * registers in LD27 (pseudo/shadow register) */
-
-        for (unsigned a = 0; a < WORK_STRIDE; ++a) {
-                unsigned reg_a = (WORK_STRIDE * 27) + a;
-
-                for (unsigned b = 0; b < WORK_STRIDE; ++b) {
-                        unsigned reg_b = (WORK_STRIDE * SHADOW_R27) + b;
-
-                        ra_add_reg_conflict(regs, reg_a, reg_b);
-                        ra_add_reg_conflict(regs, reg_b, reg_a);
-                }
-        }
+        /* We have duplicate classes */
+        add_shadow_conflicts(regs, 27, SHADOW_R27);
+        add_shadow_conflicts(regs, 28, SHADOW_R28);
+        add_shadow_conflicts(regs, 29, SHADOW_R29);
 
         /* We're done setting up */
         ra_set_finalize(regs, NULL);
@@ -343,8 +357,37 @@ check_read_class(unsigned *classes, unsigned tag, unsigned node)
         case REG_CLASS_LDST:
         case REG_CLASS_LDST27:
                 return (tag == TAG_LOAD_STORE_4);
-        default:
+        case REG_CLASS_TEXR:
+                return (tag == TAG_TEXTURE_4);
+        case REG_CLASS_TEXW:
                 return (tag != TAG_LOAD_STORE_4);
+        case REG_CLASS_WORK:
+                return (tag == TAG_ALU_4);
+        default:
+                unreachable("Invalid class");
+        }
+}
+
+static bool
+check_write_class(unsigned *classes, unsigned tag, unsigned node)
+{
+        /* Non-nodes are implicitly ok */
+        if ((node < 0) || (node >= SSA_FIXED_MINIMUM))
+                return true;
+
+        unsigned current_class = classes[node] >> 2;
+
+        switch (current_class) {
+        case REG_CLASS_TEXR:
+                return true;
+        case REG_CLASS_TEXW:
+                return (tag == TAG_TEXTURE_4);
+        case REG_CLASS_LDST:
+        case REG_CLASS_LDST27:
+        case REG_CLASS_WORK:
+                return (tag == TAG_ALU_4) || (tag == TAG_LOAD_STORE_4);
+        default:
+                unreachable("Invalid class");
         }
 }
 
@@ -359,21 +402,6 @@ mark_node_class (unsigned *bitfield, unsigned node)
                 BITSET_SET(bitfield, node);
 }
 
-static midgard_instruction *
-mir_find_last_write(compiler_context *ctx, unsigned i)
-{
-        midgard_instruction *last_write = NULL;
-
-        mir_foreach_instr_global(ctx, ins) {
-                if (ins->compact_branch) continue;
-
-                if (ins->ssa_args.dest == i)
-                        last_write = ins;
-        }
-
-        return last_write;
-}
-
 void
 mir_lower_special_reads(compiler_context *ctx)
 {
@@ -382,6 +410,7 @@ mir_lower_special_reads(compiler_context *ctx)
         /* Bitfields for the various types of registers we could have */
 
         unsigned *alur = calloc(sz, 1);
+        unsigned *aluw = calloc(sz, 1);
         unsigned *ldst = calloc(sz, 1);
         unsigned *texr = calloc(sz, 1);
         unsigned *texw = calloc(sz, 1);
@@ -393,8 +422,12 @@ mir_lower_special_reads(compiler_context *ctx)
 
                 switch (ins->type) {
                 case TAG_ALU_4:
+                        mark_node_class(aluw, ins->ssa_args.dest);
                         mark_node_class(alur, ins->ssa_args.src0);
-                        mark_node_class(alur, ins->ssa_args.src1);
+
+                        if (!ins->ssa_args.inline_constant)
+                                mark_node_class(alur, ins->ssa_args.src1);
+
                         break;
                 case TAG_LOAD_STORE_4:
                         mark_node_class(ldst, ins->ssa_args.src0);
@@ -420,6 +453,7 @@ mir_lower_special_reads(compiler_context *ctx)
 
         for (unsigned i = 0; i < ctx->temp_count; ++i) {
                 bool is_alur = BITSET_TEST(alur, i);
+                bool is_aluw = BITSET_TEST(aluw, i);
                 bool is_ldst = BITSET_TEST(ldst, i);
                 bool is_texr = BITSET_TEST(texr, i);
                 bool is_texw = BITSET_TEST(texw, i);
@@ -434,7 +468,7 @@ mir_lower_special_reads(compiler_context *ctx)
                         (is_alur && (is_ldst || is_texr)) ||
                         (is_ldst && (is_alur || is_texr || is_texw)) ||
                         (is_texr && (is_alur || is_ldst)) ||
-                        (is_texw && (is_ldst));
+                        (is_texw && (is_aluw || is_ldst));
 
                 if (!collision)
                         continue;
@@ -442,19 +476,54 @@ mir_lower_special_reads(compiler_context *ctx)
                 /* Use the index as-is as the work copy. Emit copies for
                  * special uses */
 
-                if (is_ldst) {
+                unsigned classes[] = { TAG_LOAD_STORE_4, TAG_TEXTURE_4, TAG_TEXTURE_4 };
+                bool collisions[] = { is_ldst, is_texr, is_texw && is_aluw };
+
+                for (unsigned j = 0; j < ARRAY_SIZE(collisions); ++j) {
+                        if (!collisions[j]) continue;
+
+                        /* When the hazard is from reading, we move and rewrite
+                         * sources (typical case). When it's from writing, we
+                         * flip the move and rewrite destinations (obscure,
+                         * only from control flow -- impossible in SSA) */
+
+                        bool hazard_write = (j == 2);
+
                         unsigned idx = spill_idx++;
-                        midgard_instruction m = v_mov(i, blank_alu_src, idx);
-                        midgard_instruction *use = mir_next_op(mir_find_last_write(ctx, i));
-                        assert(use);
-                        mir_insert_instruction_before(use, m);
+
+                        midgard_instruction m = hazard_write ?
+                                v_mov(idx, blank_alu_src, i) :
+                                v_mov(i, blank_alu_src, idx);
+
+                        /* Insert move after each write */
+                        mir_foreach_instr_global_safe(ctx, pre_use) {
+                                if (pre_use->compact_branch) continue;
+                                if (pre_use->ssa_args.dest != i)
+                                        continue;
+
+                                /* If the hazard is writing, we need to
+                                 * specifically insert moves for the contentious
+                                 * class. If the hazard is reading, we insert
+                                 * moves whenever it is written */
+
+                                if (hazard_write && pre_use->type != classes[j])
+                                        continue;
+
+                                midgard_instruction *use = mir_next_op(pre_use);
+                                assert(use);
+                                mir_insert_instruction_before(use, m);
+                        }
 
                         /* Rewrite to use */
-                        mir_rewrite_index_src_tag(ctx, i, idx, TAG_LOAD_STORE_4);
+                        if (hazard_write)
+                                mir_rewrite_index_dst_tag(ctx, i, idx, classes[j]);
+                        else
+                                mir_rewrite_index_src_tag(ctx, i, idx, classes[j]);
                 }
         }
 
         free(alur);
+        free(aluw);
         free(ldst);
         free(texr);
         free(texw);
@@ -530,6 +599,10 @@ allocate_registers(compiler_context *ctx, bool *spilled)
                                 force_vec4(found_class, ins->ssa_args.src0);
                                 force_vec4(found_class, ins->ssa_args.src1);
                         }
+                } else if (ins->type == TAG_TEXTURE_4) {
+                        set_class(found_class, ins->ssa_args.dest, REG_CLASS_TEXW);
+                        set_class(found_class, ins->ssa_args.src0, REG_CLASS_TEXR);
+                        set_class(found_class, ins->ssa_args.src1, REG_CLASS_TEXR);
                 }
         }
 
@@ -537,9 +610,11 @@ allocate_registers(compiler_context *ctx, bool *spilled)
         mir_foreach_instr_global(ctx, ins) {
                 if (ins->compact_branch) continue;
 
-                /* Non-load-store cannot read load/store */
+                assert(check_write_class(found_class, ins->type, ins->ssa_args.dest));
                 assert(check_read_class(found_class, ins->type, ins->ssa_args.src0));
-                assert(check_read_class(found_class, ins->type, ins->ssa_args.src1));
+
+                if (!ins->ssa_args.inline_constant)
+                        assert(check_read_class(found_class, ins->type, ins->ssa_args.src1));
         }
 
         for (unsigned i = 0; i < ctx->temp_count; ++i) {
@@ -704,8 +779,6 @@ install_registers_instr(
 
                 if (OP_IS_STORE_R26(ins->load_store.op) && fixed) {
                         ins->load_store.reg = SSA_REG_FROM_FIXED(args.src0);
-                } else if (ins->load_store.op == midgard_op_st_cubemap_coords) {
-                        ins->load_store.reg = SSA_REG_FROM_FIXED(args.dest);
                 } else if (OP_IS_STORE_VARY(ins->load_store.op)) {
                         struct phys_reg src = index_to_reg(ctx, g, args.src0);
                         assert(src.reg == 26 || src.reg == 27);
@@ -718,8 +791,13 @@ install_registers_instr(
                          * whether we are loading or storing -- think about the
                          * logical dataflow */
 
-                        unsigned r = OP_IS_STORE(ins->load_store.op) ?
+                        bool encodes_src =
+                                OP_IS_STORE(ins->load_store.op) &&
+                                ins->load_store.op != midgard_op_st_cubemap_coords;
+
+                        unsigned r = encodes_src ?
                                 args.src0 : args.dest;
+
                         struct phys_reg src = index_to_reg(ctx, g, r);
 
                         ins->load_store.reg = src.reg;
@@ -735,6 +813,45 @@ install_registers_instr(
                 break;
         }
 
+        case TAG_TEXTURE_4: {
+                /* Grab RA results */
+                struct phys_reg dest = index_to_reg(ctx, g, args.dest);
+                struct phys_reg coord = index_to_reg(ctx, g, args.src0);
+                struct phys_reg lod = index_to_reg(ctx, g, args.src1);
+
+                assert(dest.reg == 28 || dest.reg == 29);
+                assert(coord.reg == 28 || coord.reg == 29);
+
+                /* First, install the texture coordinate */
+                ins->texture.in_reg_full = 1;
+                ins->texture.in_reg_upper = 0;
+                ins->texture.in_reg_select = coord.reg - 28;
+                ins->texture.in_reg_swizzle =
+                        compose_swizzle(ins->texture.in_reg_swizzle, 0xF, coord, dest);
+
+                /* Next, install the destination */
+                ins->texture.out_full = 1;
+                ins->texture.out_upper = 0;
+                ins->texture.out_reg_select = dest.reg - 28;
+                ins->texture.swizzle = dest.swizzle;
+                ins->texture.mask = dest.mask;
+
+                /* If there is a register LOD/bias, use it */
+                if (args.src1 > -1) {
+                        midgard_tex_register_select sel = {
+                                .select = lod.reg,
+                                .full = 1,
+                                .component = lod.swizzle & 3,
+                        };
+
+                        uint8_t packed;
+                        memcpy(&packed, &sel, sizeof(packed));
+                        ins->texture.bias = packed;
+                }
+
+                break;
+        }
+
         default:
                 break;
         }
diff --git a/src/panfrost/midgard/midgard_schedule.c b/src/panfrost/midgard/midgard_schedule.c
index 1fe9f94..a2c0c76 100644
--- a/src/panfrost/midgard/midgard_schedule.c
+++ b/src/panfrost/midgard/midgard_schedule.c
@@ -685,6 +685,12 @@ schedule_program(compiler_context *ctx)
         mir_squeeze_index(ctx);
         mir_lower_special_reads(ctx);
 
+        /* Lowering can introduce some dead moves */
+
+        mir_foreach_block(ctx, block) {
+                midgard_opt_dead_move_eliminate(ctx, block);
+        }
+
         do {
                 /* If we spill, find the best spill node and spill it */
 
@@ -716,24 +722,35 @@ schedule_program(compiler_context *ctx)
                  * registers */
 
                 unsigned class = ra_get_node_class(g, spill_node);
                 bool is_special = (class >> 2) != REG_CLASS_WORK;
+                bool is_special_w = (class >> 2) == REG_CLASS_TEXW;
 
                 /* Allocate TLS slot (maybe) */
                 unsigned spill_slot = !is_special ? spill_count++ : 0;
+                midgard_instruction *spill_move = NULL;
 
                 /* For TLS, replace all stores to the spilled node. For
-                 * special, just keep as-is; the class will be demoted
-                 * implicitly */
+                 * special reads, just keep as-is; the class will be demoted
+                 * implicitly. For special writes, spill to a work register */
 
-                if (!is_special) {
+                if (!is_special || is_special_w) {
                         mir_foreach_instr_global_safe(ctx, ins) {
                                 if (ins->compact_branch) continue;
                                 if (ins->ssa_args.dest != spill_node) continue;
-                                ins->ssa_args.dest = SSA_FIXED_REGISTER(26);
-                                midgard_instruction st = v_load_store_scratch(ins->ssa_args.dest, spill_slot, true, ins->mask);
-                                mir_insert_instruction_before(mir_next_op(ins), st);
+                                midgard_instruction st;
+
+                                if (is_special_w) {
+                                        spill_slot = spill_index++;
+                                        st = v_mov(spill_node, blank_alu_src, spill_slot);
+                                } else {
+                                        ins->ssa_args.dest = SSA_FIXED_REGISTER(26);
+                                        st = v_load_store_scratch(ins->ssa_args.dest, spill_slot, true, ins->mask);
+                                }
 
-                                ctx->spills++;
+                                spill_move = mir_insert_instruction_before(mir_next_op(ins), st);
+
+                                if (!is_special)
+                                        ctx->spills++;
                         }
                 }
 
@@ -753,6 +770,9 @@ schedule_program(compiler_context *ctx)
 
                 mir_foreach_instr_in_block(block, ins) {
                         if (ins->compact_branch) continue;
+
+                        /* We can't rewrite the move used to spill in the first place */
+                        if (ins == spill_move) continue;
 
                         if (!mir_has_arg(ins, spill_node)) {
                                 consecutive_skip = false;
@@ -765,27 +785,32 @@ schedule_program(compiler_context *ctx)
                                 continue;
                         }
 
-                        consecutive_index = ++spill_index;
+                        if (!is_special_w) {
+                                consecutive_index = ++spill_index;
+
+                                midgard_instruction *before = ins;
 
-                        midgard_instruction *before = ins;
+                                /* For a csel, go back one more not to break up the bundle */
+                                if (ins->type == TAG_ALU_4 && OP_IS_CSEL(ins->alu.op))
+                                        before = mir_prev_op(before);
 
-                        /* For a csel, go back one more not to break up the bundle */
-                        if (ins->type == TAG_ALU_4 && OP_IS_CSEL(ins->alu.op))
-                                before = mir_prev_op(before);
+                                midgard_instruction st;
 
-                        midgard_instruction st;
+                                if (is_special) {
+                                        /* Move */
+                                        st = v_mov(spill_node, blank_alu_src, consecutive_index);
+                                } else {
+                                        /* TLS load */
+                                        st = v_load_store_scratch(consecutive_index, spill_slot, false, 0xF);
+                                }
 
-                        if (is_special) {
-                                /* Move */
-                                st = v_mov(spill_node, blank_alu_src, consecutive_index);
+                                mir_insert_instruction_before(before, st);
+                                // consecutive_skip = true;
                         } else {
-                                /* TLS load */
-                                st = v_load_store_scratch(consecutive_index, spill_slot, false, 0xF);
+                                /* Special writes already have their move spilled in */
+                                consecutive_index = spill_slot;
                         }
 
-                        mir_insert_instruction_before(before, st);
-                        // consecutive_skip = true;
-
                         /* Rewrite to use */
                         mir_rewrite_index_src_single(ins, spill_node, consecutive_index);
 
-- 
2.7.4