/*
 * Copyright 2010 Christoph Bumiller
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
 * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF
 * OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
/* #define NV50PC_DEBUG */

#include "nv50_pc.h"
#define DESCEND_ARBITRARY(j, f)                                 \
do {                                                            \
   b->pass_seq = ctx->pc->pass_seq;                             \
                                                                \
   for (j = 0; j < 2; ++j)                                      \
      if (b->out[j] && b->out[j]->pass_seq < ctx->pc->pass_seq) \
         f(ctx, b->out[j]);                                     \
} while (0)
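/* Typical use at the tail of a recursive pass function (a usage sketch,
 * not code from this file):
 *
 *    static int
 *    nv_pass_foo(struct nv_pass *ctx, struct nv_basic_block *b)
 *    {
 *       int j;
 *       ... process the instructions of b ...
 *       DESCEND_ARBITRARY(j, nv_pass_foo);
 *       return 0;
 *    }
 *
 * pc->pass_seq serves as the visited marker, so each block of the CFG is
 * handled exactly once per pass, in arbitrary (non-dominance) order.
 */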
extern unsigned nv50_inst_min_size(struct nv_instruction *);
static INLINE boolean
values_equal(struct nv_value *a, struct nv_value *b)
{
   return (a->reg.file == b->reg.file && a->join->reg.id == b->join->reg.id);
}
static INLINE boolean
inst_commutation_check(struct nv_instruction *a,
                       struct nv_instruction *b)
{
   int si, di;

   for (di = 0; di < 4; ++di) {
      if (!a->def[di])
         break;
      for (si = 0; si < 5; ++si) {
         if (!b->src[si])
            continue;
         if (values_equal(a->def[di], b->src[si]->value))
            return FALSE;
      }
   }

   if (b->flags_src && b->flags_src->value == a->flags_def)
      return FALSE;

   return TRUE;
}
/* Check whether we can swap the order of the instructions,
 * where a & b may be either the earlier or the later one.
 */
static boolean
inst_commutation_legal(struct nv_instruction *a,
                       struct nv_instruction *b)
{
   return inst_commutation_check(a, b) && inst_commutation_check(b, a);
}
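/* Example: "add $r0 $r1 $r2" and "mov $r3 $r0" must not be swapped, since
 * the MOV reads a value the ADD defines; checking both directions catches
 * the dependency no matter which of the two instructions comes first.
 */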
static INLINE boolean
inst_cullable(struct nv_instruction *nvi)
{
   return (!(nvi->is_terminator || nvi->is_join ||
             nvi->target ||
             nvi->fixed ||
             nv_nvi_refcount(nvi)));
}
static INLINE boolean
nvi_isnop(struct nv_instruction *nvi)
{
   if (nvi->opcode == NV_OP_EXPORT || nvi->opcode == NV_OP_UNDEF)
      return TRUE;

   /* NOTE: 'fixed' now only means that it shouldn't be optimized away,
    * but we can still remove it if it is a no-op move.
    */
   if (/* nvi->fixed || */
       /* nvi->flags_src || */ /* cond. MOV to same register is still NOP */
       nvi->flags_def ||
       nvi->is_terminator ||
       nvi->is_join)
      return FALSE;

   if (nvi->def[0] && nvi->def[0]->join->reg.id < 0)
      return TRUE;

   if (nvi->opcode != NV_OP_MOV && nvi->opcode != NV_OP_SELECT)
      return FALSE;

   if (nvi->def[0]->reg.file != nvi->src[0]->value->reg.file)
      return FALSE;

   if (nvi->src[0]->value->join->reg.id < 0) {
      NV50_DBGMSG("nvi_isnop: orphaned value detected\n");
      return TRUE;
   }

   if (nvi->opcode == NV_OP_SELECT)
      if (!values_equal(nvi->def[0], nvi->src[1]->value))
         return FALSE;

   return values_equal(nvi->def[0], nvi->src[0]->value);
}
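/* E.g. after register allocation has coalesced a copy, "mov $r2 $r2"
 * passes the final values_equal() check above and is culled as a no-op.
 */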
struct nv_pass {
   struct nv_pc *pc;
   int n;
   void *priv;
};

static int
nv_pass_flatten(struct nv_pass *ctx, struct nv_basic_block *b);
static void
nv_pc_pass_pre_emission(void *priv, struct nv_basic_block *b)
{
   struct nv_pc *pc = (struct nv_pc *)priv;
   struct nv_basic_block *in;
   struct nv_instruction *nvi, *next;
   int j;
   uint size, n32 = 0;

   for (j = pc->num_blocks - 1; j >= 0 && !pc->bb_list[j]->bin_size; --j);
   if (j >= 0) {
      in = pc->bb_list[j];

      /* check for no-op branches (BRA $PC+8) */
      if (in->exit && in->exit->opcode == NV_OP_BRA && in->exit->target == b) {
         in->bin_size -= 8;
         pc->bin_size -= 8;

         for (++j; j < pc->num_blocks; ++j)
            pc->bb_list[j]->bin_pos -= 8;

         nv_nvi_delete(in->exit);
      }
      b->bin_pos = in->bin_pos + in->bin_size;
   }
   pc->bb_list[pc->num_blocks++] = b;

   /* delete no-op instructions first */
   for (nvi = b->entry; nvi; nvi = next) {
      next = nvi->next;
      if (nvi_isnop(nvi))
         nv_nvi_delete(nvi);
   }

   for (nvi = b->entry; nvi; nvi = next) {
      next = nvi->next;

      size = nv50_inst_min_size(nvi);
      if (nvi->next && size < 8)
         ++n32;
      else
      if ((n32 & 1) && nvi->next &&
          nv50_inst_min_size(nvi->next) == 4 &&
          inst_commutation_legal(nvi, nvi->next)) {
         ++n32;
         nv_nvi_permute(nvi, nvi->next);
         next = nvi;
      } else {
         nvi->is_long = 1;
         b->bin_size += n32 & 1;
         if (n32 & 1)
            nvi->prev->is_long = 1;
         n32 = 0;
      }
      b->bin_size += 1 + nvi->is_long;
   }

   if (!b->entry) {
      NV50_DBGMSG("block %p is now empty\n", b);
   } else
   if (!b->exit->is_long) {
      assert(n32);
      b->exit->is_long = 1;
      b->bin_size += 1;

      /* might have del'd a whole tail of instructions */
      if (!b->exit->prev->is_long && !(n32 & 1)) {
         b->bin_size += 1;
         b->exit->prev->is_long = 1;
      }
   }
   assert(!b->entry || (b->exit && b->exit->is_long));

   pc->bin_size += b->bin_size *= 4;
}
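/* Sizes above are tracked in 32-bit units and converted to bytes at the
 * end (*= 4). A short instruction occupies 1 unit, a long one 2; two
 * adjacent short instructions can share one 64-bit slot, which is what
 * the commutation check and permutation above try to arrange, lengthening
 * an instruction only when an odd 32-bit half would be left dangling.
 */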
static int
nv_pc_pass2(struct nv_pc *pc, struct nv_basic_block *root)
{
   struct nv_pass pass;

   pass.pc = pc;

   pc->pass_seq++;
   nv_pass_flatten(&pass, root);

   nv_pc_pass_in_order(root, nv_pc_pass_pre_emission, pc);

   return 0;
}
int
nv_pc_exec_pass2(struct nv_pc *pc)
{
   int i, ret;

   NV50_DBGMSG("preparing %u blocks for emission\n", pc->num_blocks);

   pc->bb_list = CALLOC(pc->num_blocks, sizeof(pc->bb_list[0]));
   pc->num_blocks = 0; /* rebuilt by nv_pc_pass_pre_emission */

   for (i = 0; i < pc->num_subroutines + 1; ++i)
      if (pc->root[i] && (ret = nv_pc_pass2(pc, pc->root[i])))
         return ret;
   return 0;
}
static INLINE boolean
is_cmem_load(struct nv_instruction *nvi)
{
   return (nvi->opcode == NV_OP_LDA &&
           nvi->src[0]->value->reg.file >= NV_FILE_MEM_C(0) &&
           nvi->src[0]->value->reg.file <= NV_FILE_MEM_C(15));
}
static INLINE boolean
is_smem_load(struct nv_instruction *nvi)
{
   return (nvi->opcode == NV_OP_LDA &&
           (nvi->src[0]->value->reg.file == NV_FILE_MEM_S ||
            nvi->src[0]->value->reg.file == NV_FILE_MEM_P));
}
static INLINE boolean
is_immd_move(struct nv_instruction *nvi)
{
   return (nvi->opcode == NV_OP_MOV &&
           nvi->src[0]->value->reg.file == NV_FILE_IMM);
}
static void
check_swap_src_0_1(struct nv_instruction *nvi)
{
   static const ubyte cc_swapped[8] = { 0, 4, 2, 6, 1, 5, 3, 7 };

   struct nv_ref *src0 = nvi->src[0], *src1 = nvi->src[1];

   if (!nv_op_commutative(nvi->opcode))
      return;
   assert(src0 && src1);

   if (src1->value->reg.file == NV_FILE_IMM)
      return;

   if (is_cmem_load(src0->value->insn)) {
      if (!is_cmem_load(src1->value->insn)) {
         nvi->src[0] = src1;
         nvi->src[1] = src0;
         /* debug_printf("swapping cmem load to 1\n"); */
      }
   } else
   if (is_smem_load(src1->value->insn)) {
      if (!is_smem_load(src0->value->insn)) {
         nvi->src[0] = src1;
         nvi->src[1] = src0;
         /* debug_printf("swapping smem load to 0\n"); */
      }
   }

   if (nvi->opcode == NV_OP_SET && nvi->src[0] != src0)
      nvi->set_cond = cc_swapped[nvi->set_cond];
}
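/* cc_swapped mirrors the 3-bit condition mask (bit 0 = LT, bit 1 = EQ,
 * bit 2 = GT) for swapped operands by exchanging the LT and GT bits:
 * LT (1) <-> GT (4), LE (3) <-> GE (6), while EQ (2), NE (5) and
 * always (7) map onto themselves.
 */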
static int
nv_pass_fold_stores(struct nv_pass *ctx, struct nv_basic_block *b)
{
   struct nv_instruction *nvi, *sti, *next;
   int j;

   for (sti = b->entry; sti; sti = next) {
      next = sti->next;

      /* only handling MOV to $oX here */
      if (!sti->def[0] || sti->def[0]->reg.file != NV_FILE_OUT)
         continue;
      if (sti->opcode != NV_OP_MOV && sti->opcode != NV_OP_STA)
         continue;

      nvi = sti->src[0]->value->insn;
      if (!nvi || nvi->opcode == NV_OP_PHI || nv_is_vector_op(nvi->opcode))
         continue;
      assert(nvi->def[0] == sti->src[0]->value);

      if (nvi->def[0]->refc > 1)
         continue;

      /* cannot write to $oX when using immediate */
      for (j = 0; j < 4 && nvi->src[j]; ++j)
         if (nvi->src[j]->value->reg.file == NV_FILE_IMM)
            break;
      if (j < 4 && nvi->src[j])
         continue;

      nvi->def[0] = sti->def[0];
      nvi->fixed = sti->fixed;

      nv_nvi_delete(sti);
   }
   DESCEND_ARBITRARY(j, nv_pass_fold_stores);

   return 0;
}
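/* E.g. "mul $r0 $r1 $r2" + "mov $o0 $r0" becomes a single
 * "mul $o0 $r1 $r2", provided $r0 has no other readers and the producer
 * uses no immediate sources.
 */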
static int
nv_pass_fold_loads(struct nv_pass *ctx, struct nv_basic_block *b)
{
   struct nv_instruction *nvi, *ld;
   int j;

   for (nvi = b->entry; nvi; nvi = nvi->next) {
      check_swap_src_0_1(nvi);

      for (j = 0; j < 3; ++j) {
         if (!nvi->src[j])
            break;
         ld = nvi->src[j]->value->insn;
         if (!ld)
            continue;

         if (is_immd_move(ld) && nv50_nvi_can_use_imm(nvi, j)) {
            nv_reference(ctx->pc, &nvi->src[j], ld->src[0]->value);
            continue;
         }

         if (ld->opcode != NV_OP_LDA)
            continue;
         if (!nv50_nvi_can_load(nvi, j, ld->src[0]->value))
            continue;

         if (j == 0 && ld->src[4]) /* can't load shared mem */
            continue;

         /* fold it ! */ /* XXX: ref->insn */
         nv_reference(ctx->pc, &nvi->src[j], ld->src[0]->value);
         if (ld->src[4])
            nv_reference(ctx->pc, &nvi->src[4], ld->src[4]->value);

         if (!nv_nvi_refcount(ld))
            nv_nvi_delete(ld);
      }
   }
   DESCEND_ARBITRARY(j, nv_pass_fold_loads);

   return 0;
}
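/* E.g. "lda $r4 c0[0x10]" + "add $r0 $r1 $r4" folds into
 * "add $r0 $r1 c0[0x10]" when the consumer can take a memory operand in
 * that slot; the load is deleted once its refcount drops to zero.
 */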
static int
nv_pass_lower_mods(struct nv_pass *ctx, struct nv_basic_block *b)
{
   int j;
   struct nv_instruction *nvi, *mi, *next;
   ubyte mod;

   for (nvi = b->entry; nvi; nvi = next) {
      next = nvi->next;
      if (nvi->opcode == NV_OP_SUB) {
         nvi->opcode = NV_OP_ADD;
         nvi->src[1]->mod ^= NV_MOD_NEG;
      }

      /* should not put any modifiers on NEG and ABS */
      assert(nvi->opcode != NV_OP_NEG || !nvi->src[0]->mod);
      assert(nvi->opcode != NV_OP_ABS || !nvi->src[0]->mod);

      for (j = 0; j < 4; ++j) {
         if (!nvi->src[j])
            break;

         mi = nvi->src[j]->value->insn;
         if (!mi)
            continue;
         if (mi->def[0]->refc > 1)
            continue;

         if (mi->opcode == NV_OP_NEG) mod = NV_MOD_NEG;
         else
         if (mi->opcode == NV_OP_ABS) mod = NV_MOD_ABS;
         else
            continue;

         if (nvi->opcode == NV_OP_ABS)
            mod &= ~(NV_MOD_NEG | NV_MOD_ABS);
         else
         if (nvi->opcode == NV_OP_NEG && mod == NV_MOD_NEG) {
            nvi->opcode = NV_OP_MOV;
            mod = 0;
         }

         if (!(nv50_supported_src_mods(nvi->opcode, j) & mod))
            continue;

         nv_reference(ctx->pc, &nvi->src[j], mi->src[0]->value);

         nvi->src[j]->mod ^= mod;
      }

      if (nvi->opcode == NV_OP_SAT) {
         mi = nvi->src[0]->value->insn;

         if (mi->opcode == NV_OP_MAD && !mi->flags_def) {
            mi->saturate = 1;
            mi->def[0] = nvi->def[0];
            nv_nvi_delete(nvi);
         }
      }
   }
   DESCEND_ARBITRARY(j, nv_pass_lower_mods);

   return 0;
}
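/* E.g. "neg $r1 $r0" feeding "add $r2 $r3 $r1" becomes
 * "add $r2 $r3 -$r0"; NEG of NEG degenerates to a MOV, and SAT of a MAD
 * result is absorbed into the MAD's saturate flag.
 */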
#define SRC_IS_MUL(s) ((s)->insn && (s)->insn->opcode == NV_OP_MUL)
static void
modifiers_apply(uint32_t *val, ubyte type, ubyte mod)
{
   if (mod & NV_MOD_ABS) {
      if (type == NV_TYPE_F32)
         *val &= 0x7fffffff;
      else
      if ((*val) & (1 << 31))
         *val = ~(*val) + 1;
   }

   if (mod & NV_MOD_NEG) {
      if (type == NV_TYPE_F32)
         *val ^= 0x80000000;
      else
         *val = ~(*val) + 1;
   }
}
static INLINE uint
modifiers_opcode(ubyte mod)
{
   switch (mod) {
   case NV_MOD_NEG: return NV_OP_NEG;
   case NV_MOD_ABS: return NV_OP_ABS;
   case 0:
      return NV_OP_MOV;
   default:
      return NV_OP_NOP;
   }
}
static void
constant_expression(struct nv_pc *pc, struct nv_instruction *nvi,
                    struct nv_value *src0, struct nv_value *src1)
{
   struct nv_value *val;
   union {
      float f32;
      uint32_t u32;
      int32_t s32;
   } u0, u1, u;
   ubyte type;

   if (!nvi->def[0])
      return;
   type = nvi->def[0]->reg.type;

   u.u32 = 0;
   u0.u32 = src0->reg.imm.u32;
   u1.u32 = src1->reg.imm.u32;

   modifiers_apply(&u0.u32, type, nvi->src[0]->mod);
   modifiers_apply(&u1.u32, type, nvi->src[1]->mod);

   switch (nvi->opcode) {
   case NV_OP_MAD:
      if (nvi->src[2]->value->reg.file != NV_FILE_GPR)
         return;
      /* fall through */
   case NV_OP_MUL:
      switch (type) {
      case NV_TYPE_F32: u.f32 = u0.f32 * u1.f32; break;
      case NV_TYPE_U32: u.u32 = u0.u32 * u1.u32; break;
      case NV_TYPE_S32: u.s32 = u0.s32 * u1.s32; break;
      default:
         assert(0);
         return;
      }
      break;
   case NV_OP_ADD:
      switch (type) {
      case NV_TYPE_F32: u.f32 = u0.f32 + u1.f32; break;
      case NV_TYPE_U32: u.u32 = u0.u32 + u1.u32; break;
      case NV_TYPE_S32: u.s32 = u0.s32 + u1.s32; break;
      default:
         assert(0);
         return;
      }
      break;
   case NV_OP_SUB:
      switch (type) {
      case NV_TYPE_F32: u.f32 = u0.f32 - u1.f32; break;
      case NV_TYPE_U32: u.u32 = u0.u32 - u1.u32; break;
      case NV_TYPE_S32: u.s32 = u0.s32 - u1.s32; break;
      default:
         assert(0);
         return;
      }
      break;
   default:
      return;
   }

   nvi->opcode = NV_OP_MOV;

   val = new_value(pc, NV_FILE_IMM, type);
   val->reg.imm.u32 = u.u32;

   nv_reference(pc, &nvi->src[1], NULL);
   nv_reference(pc, &nvi->src[0], val);

   if (nvi->src[2]) { /* from MAD */
      nvi->src[1] = nvi->src[0];
      nvi->src[0] = nvi->src[2];
      nvi->src[2] = NULL;
      nvi->opcode = NV_OP_ADD;
   }
}
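/* E.g. "mad $r0 (2.0) (3.0) $r7" has its multiply evaluated at compile
 * time and is rewritten as "add $r0 $r7 (6.0)", reusing the MAD's third
 * source as the remaining summand.
 */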
static void
constant_operand(struct nv_pc *pc,
                 struct nv_instruction *nvi, struct nv_value *val, int s)
{
   union {
      float f32;
      uint32_t u32;
      int32_t s32;
   } u;
   int t = s ? 0 : 1;
   uint op;
   ubyte type;

   if (!nvi->def[0])
      return;
   type = nvi->def[0]->reg.type;

   u.u32 = val->reg.imm.u32;
   modifiers_apply(&u.u32, type, nvi->src[s]->mod);

   switch (nvi->opcode) {
   case NV_OP_MUL:
      if ((type == NV_TYPE_F32 && u.f32 == 1.0f) ||
          (NV_TYPE_ISINT(type) && u.u32 == 1)) {
         if ((op = modifiers_opcode(nvi->src[t]->mod)) == NV_OP_NOP)
            break;
         nvi->opcode = op;
         nv_reference(pc, &nvi->src[s], NULL);
         nvi->src[0] = nvi->src[t];
         nvi->src[1] = NULL;
      } else
      if ((type == NV_TYPE_F32 && u.f32 == 2.0f) ||
          (NV_TYPE_ISINT(type) && u.u32 == 2)) {
         nvi->opcode = NV_OP_ADD;
         nv_reference(pc, &nvi->src[s], nvi->src[t]->value);
         nvi->src[s]->mod = nvi->src[t]->mod;
      } else
      if (type == NV_TYPE_F32 && u.f32 == -1.0f) {
         if (nvi->src[t]->mod & NV_MOD_NEG)
            nvi->opcode = NV_OP_MOV;
         else
            nvi->opcode = NV_OP_NEG;
         nv_reference(pc, &nvi->src[s], NULL);
         nvi->src[0] = nvi->src[t];
         nvi->src[1] = NULL;
      } else
      if (type == NV_TYPE_F32 && u.f32 == -2.0f) {
         nvi->opcode = NV_OP_ADD;
         nv_reference(pc, &nvi->src[s], nvi->src[t]->value);
         nvi->src[s]->mod = (nvi->src[t]->mod ^= NV_MOD_NEG);
      } else
      if (u.u32 == 0) {
         nvi->opcode = NV_OP_MOV;
         nv_reference(pc, &nvi->src[t], NULL);
         if (s) {
            nvi->src[0] = nvi->src[1];
            nvi->src[1] = NULL;
         }
      }
      break;
   case NV_OP_ADD:
      if (u.u32 == 0) {
         if ((op = modifiers_opcode(nvi->src[t]->mod)) == NV_OP_NOP)
            break;
         nvi->opcode = op;
         nv_reference(pc, &nvi->src[s], NULL);
         nvi->src[0] = nvi->src[t];
         nvi->src[1] = NULL;
      }
      break;
   case NV_OP_RCP:
      u.f32 = 1.0f / u.f32;
      (val = new_value(pc, NV_FILE_IMM, NV_TYPE_F32))->reg.imm.f32 = u.f32;
      nvi->opcode = NV_OP_MOV;
      assert(s == 0);
      nv_reference(pc, &nvi->src[0], val);
      break;
   case NV_OP_RSQ:
      u.f32 = 1.0f / sqrtf(u.f32);
      (val = new_value(pc, NV_FILE_IMM, NV_TYPE_F32))->reg.imm.f32 = u.f32;
      nvi->opcode = NV_OP_MOV;
      assert(s == 0);
      nv_reference(pc, &nvi->src[0], val);
      break;
   default:
      break;
   }

   if (nvi->opcode == NV_OP_MOV && nvi->flags_def) {
      /* a MOV cannot write flags, so insert a CVT to set them */
      struct nv_instruction *cvt = new_instruction_at(pc, nvi, NV_OP_CVT);

      nv_reference(pc, &cvt->src[0], nvi->def[0]);

      cvt->flags_def = nvi->flags_def;
      nvi->flags_def = NULL;
   }
}
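/* Algebraic identities handled above, e.g.: MUL by 1.0 -> MOV (or
 * NEG/ABS if the other source carried a modifier), MUL by 2.0 ->
 * ADD x, x, MUL by 0 -> MOV 0, ADD of 0 -> MOV, and RCP/RSQ of an
 * immediate are evaluated directly.
 */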
static int
nv_pass_lower_arith(struct nv_pass *ctx, struct nv_basic_block *b)
{
   struct nv_instruction *nvi, *next;
   int j;

   for (nvi = b->entry; nvi; nvi = next) {
      struct nv_value *src0, *src1, *src;
      int mod;

      next = nvi->next;

      src0 = nvcg_find_immediate(nvi->src[0]);
      src1 = nvcg_find_immediate(nvi->src[1]);

      if (src0 && src1)
         constant_expression(ctx->pc, nvi, src0, src1);
      else
      if (src0)
         constant_operand(ctx->pc, nvi, src0, 0);
      else
      if (src1)
         constant_operand(ctx->pc, nvi, src1, 1);

      /* try to combine MUL, ADD into MAD */
      if (nvi->opcode != NV_OP_ADD)
         continue;

      src0 = nvi->src[0]->value;
      src1 = nvi->src[1]->value;

      if (SRC_IS_MUL(src0) && src0->refc == 1)
         src = src0;
      else
      if (SRC_IS_MUL(src1) && src1->refc == 1)
         src = src1;
      else
         continue;

      nvi->opcode = NV_OP_MAD;
      mod = nvi->src[(src == src0) ? 0 : 1]->mod;
      nv_reference(ctx->pc, &nvi->src[(src == src0) ? 0 : 1], NULL);
      nvi->src[2] = nvi->src[(src == src0) ? 1 : 0];

      assert(!(mod & ~NV_MOD_NEG));
      nvi->src[0] = new_ref(ctx->pc, src->insn->src[0]->value);
      nvi->src[1] = new_ref(ctx->pc, src->insn->src[1]->value);
      nvi->src[0]->mod = src->insn->src[0]->mod ^ mod;
      nvi->src[1]->mod = src->insn->src[1]->mod;
   }
   DESCEND_ARBITRARY(j, nv_pass_lower_arith);

   return 0;
}
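/* E.g. with "mul $r4 $r1 $r2" (single use) feeding "add $r0 $r4 $r3",
 * the ADD is rewritten as "mad $r0 $r1 $r2 $r3"; a NEG modifier on the
 * ADD's multiplied operand is pushed onto the MAD's first factor, and
 * the now-dead MUL is left for DCE to collect.
 */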
/* TODO: redundant store elimination */

struct load_record {
   struct load_record *next;
   uint64_t data;
   struct nv_value *value;
};

#define LOAD_RECORD_POOL_SIZE 1024

struct nv_pass_reld_elim {
   struct nv_pc *pc;

   struct load_record *imm;
   struct load_record *mem_s;
   struct load_record *mem_v;
   struct load_record *mem_c[16];
   struct load_record *mem_l;

   struct load_record pool[LOAD_RECORD_POOL_SIZE];
   int alloc;
};
static int
nv_pass_reload_elim(struct nv_pass_reld_elim *ctx, struct nv_basic_block *b)
{
   struct load_record **rec, *it;
   struct nv_instruction *ld, *next;
   uint64_t data;
   struct nv_value *val;
   int j;

   for (ld = b->entry; ld; ld = next) {
      next = ld->next;
      if (!ld->src[0])
         continue;
      val = ld->src[0]->value;
      rec = NULL;

      if (ld->opcode == NV_OP_LINTERP || ld->opcode == NV_OP_PINTERP) {
         data = val->reg.id;
         rec = &ctx->mem_v;
      } else
      if (ld->opcode == NV_OP_LDA) {
         data = val->reg.id;
         if (val->reg.file >= NV_FILE_MEM_C(0) &&
             val->reg.file <= NV_FILE_MEM_C(15))
            rec = &ctx->mem_c[val->reg.file - NV_FILE_MEM_C(0)];
         else
         if (val->reg.file == NV_FILE_MEM_S)
            rec = &ctx->mem_s;
         else
         if (val->reg.file == NV_FILE_MEM_L)
            rec = &ctx->mem_l;
      } else
      if ((ld->opcode == NV_OP_MOV) && (val->reg.file == NV_FILE_IMM)) {
         data = val->reg.imm.u32;
         rec = &ctx->imm;
      }

      if (!rec || !ld->def[0]->refc)
         continue;

      for (it = *rec; it; it = it->next)
         if (it->data == data)
            break;

      if (it) {
         if (ld->def[0]->reg.id >= 0)
            it->value = ld->def[0];
         else
            nvcg_replace_value(ctx->pc, ld->def[0], it->value);
      } else {
         if (ctx->alloc == LOAD_RECORD_POOL_SIZE)
            continue;
         it = &ctx->pool[ctx->alloc++];
         it->next = *rec;
         it->data = data;
         it->value = ld->def[0];
         *rec = it;
      }
   }

   ctx->imm = NULL;
   ctx->mem_s = NULL;
   ctx->mem_v = NULL;
   for (j = 0; j < 16; ++j)
      ctx->mem_c[j] = NULL;
   ctx->mem_l = NULL;
   ctx->alloc = 0;

   DESCEND_ARBITRARY(j, nv_pass_reload_elim);

   return 0;
}
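/* E.g. a second "lda $rX c0[0x20]" keyed by the same address finds the
 * first load's record and has its uses rewritten to that result. The
 * record lists are flushed before descending into successor blocks, so
 * the elimination stays block-local and conservative.
 */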
static int
nv_pass_tex_mask(struct nv_pass *ctx, struct nv_basic_block *b)
{
   int i, c, j;

   for (i = 0; i < ctx->pc->num_instructions; ++i) {
      struct nv_instruction *nvi = &ctx->pc->instructions[i];
      struct nv_value *def[4];

      if (!nv_is_vector_op(nvi->opcode))
         continue;
      nvi->tex_mask = 0;

      for (c = 0; c < 4; ++c) {
         if (nvi->def[c]->refc)
            nvi->tex_mask |= 1 << c;
         def[c] = nvi->def[c];
      }

      j = 0;
      for (c = 0; c < 4; ++c)
         if (nvi->tex_mask & (1 << c))
            nvi->def[j++] = def[c];
      for (c = 0; c < 4; ++c)
         if (!(nvi->tex_mask & (1 << c)))
            nvi->def[j++] = def[c];
      assert(j == 4);
   }
   return 0;
}
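/* E.g. if only .xy of a TEX result are ever read, tex_mask becomes 0x3
 * and the two live defs are packed first, so the TEX only needs to
 * produce (and register allocation only needs to back) two components.
 */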
struct nv_pass_dce {
   struct nv_pc *pc;
   uint removed;
};

static int
nv_pass_dce(struct nv_pass_dce *ctx, struct nv_basic_block *b)
{
   int j;
   struct nv_instruction *nvi, *next;

   for (nvi = b->phi ? b->phi : b->entry; nvi; nvi = next) {
      next = nvi->next;

      if (inst_cullable(nvi)) {
         nv_nvi_delete(nvi);
         ++ctx->removed;
      }
   }
   DESCEND_ARBITRARY(j, nv_pass_dce);

   return 0;
}
/* Register allocation inserted ELSE blocks for all IF/ENDIF without ELSE.
 * Returns TRUE if @bb initiates an IF/ELSE/ENDIF clause, or is an IF with
 * BREAK and dummy ELSE block.
 */
static INLINE boolean
bb_is_if_else_endif(struct nv_basic_block *bb)
{
   if (!bb->out[0] || !bb->out[1])
      return FALSE;

   if (bb->out[0]->out_kind[0] == CFG_EDGE_LOOP_LEAVE) {
      return (bb->out[0]->out[1] == bb->out[1]->out[0] &&
              !bb->out[1]->out[1]);
   } else {
      return (bb->out[0]->out[0] == bb->out[1]->out[0] &&
              !bb->out[0]->out[1] &&
              !bb->out[1]->out[1]);
   }
}
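/* Shape being matched (with the dummy ELSE block included):
 *
 *           bb (IF, conditional BRA)
 *           /                    \
 *      out[0] (THEN)         out[1] (ELSE)
 *           \                    /
 *           common successor (ENDIF)
 */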
/* predicate instructions and remove branch at the end */
static void
predicate_instructions(struct nv_pc *pc, struct nv_basic_block *b,
                       struct nv_value *p, ubyte cc)
{
   struct nv_instruction *nvi;

   if (!b->entry)
      return;
   for (nvi = b->entry; nvi->next; nvi = nvi->next) {
      if (!nvi_isnop(nvi)) {
         nvi->cc = cc;
         nv_reference(pc, &nvi->flags_src, p);
      }
   }

   if (nvi->opcode == NV_OP_BRA)
      nv_nvi_delete(nvi);
   else
   if (!nvi_isnop(nvi)) {
      nvi->cc = cc;
      nv_reference(pc, &nvi->flags_src, p);
   }
}
/* NOTE: Run this after register allocation, we can just cut out the cflow
 * instructions and hook the predicates to the conditional OPs if they are
 * not using immediates; better than inserting SELECT to join definitions.
 *
 * NOTE: Should adapt prior optimization to make this possible more often.
 */
static int
nv_pass_flatten(struct nv_pass *ctx, struct nv_basic_block *b)
{
   struct nv_instruction *nvi;
   struct nv_value *pred;
   int i;
   int n0 = 0, n1 = 0;

   if (bb_is_if_else_endif(b)) {
      NV50_DBGMSG("pass_flatten: IF/ELSE/ENDIF construct at BB:%i\n", b->id);

      for (n0 = 0, nvi = b->out[0]->entry; nvi; nvi = nvi->next, ++n0)
         if (!nv50_nvi_can_predicate(nvi))
            break;
      if (!nvi)
         for (n1 = 0, nvi = b->out[1]->entry; nvi; nvi = nvi->next, ++n1)
            if (!nv50_nvi_can_predicate(nvi))
               break;
#ifdef NV50PC_DEBUG
      if (nvi) {
         debug_printf("cannot predicate: "); nv_print_instruction(nvi);
      }
#endif

      if (!nvi && n0 < 12 && n1 < 12) { /* 12 as arbitrary limit */
         assert(b->exit && b->exit->flags_src);
         pred = b->exit->flags_src->value;

         predicate_instructions(ctx->pc, b->out[0], pred, NV_CC_NE | NV_CC_U);
         predicate_instructions(ctx->pc, b->out[1], pred, NV_CC_EQ);

         assert(b->exit && b->exit->opcode == NV_OP_BRA);
         nv_nvi_delete(b->exit);

         if (b->exit && b->exit->opcode == NV_OP_JOINAT)
            nv_nvi_delete(b->exit);

         i = (b->out[0]->out_kind[0] == CFG_EDGE_LOOP_LEAVE) ? 1 : 0;

         if ((nvi = b->out[0]->out[i]->entry)) {
            nvi->is_join = 0;
            if (nvi->opcode == NV_OP_JOIN)
               nv_nvi_delete(nvi);
         }
      }
   }
   DESCEND_ARBITRARY(i, nv_pass_flatten);

   return 0;
}
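/* E.g. a short IF/ELSE/ENDIF
 *
 *    bra $c0.eq ELSE;  add ...;  bra ENDIF;  ELSE: mov ...;  ENDIF:
 *
 * flattens to straight-line code under inverse predicates:
 *
 *    add(ne.u $c0) ...;  mov(eq $c0) ...;
 *
 * (NV_CC_NE | NV_CC_U selecting "not equal or unordered" for the THEN
 * side), with the BRA/JOINAT/JOIN control flow removed.
 */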
/* local common subexpression elimination, stupid O(n^2) implementation */
static int
nv_pass_cse(struct nv_pass *ctx, struct nv_basic_block *b)
{
   struct nv_instruction *ir, *ik, *next;
   struct nv_instruction *entry = b->phi ? b->phi : b->entry;
   int s;
   unsigned int reps;

   do {
      reps = 0;
      for (ir = entry; ir; ir = next) {
         next = ir->next;
         for (ik = entry; ik != ir; ik = ik->next) {
            if (ir->opcode != ik->opcode || ir->fixed)
               continue;

            if (!ir->def[0] || !ik->def[0] ||
                ik->opcode == NV_OP_LDA ||
                ik->opcode == NV_OP_STA ||
                ik->opcode == NV_OP_MOV ||
                nv_is_vector_op(ik->opcode))
               continue; /* ignore loads, stores & moves */

            if (ik->src[4] || ir->src[4])
               continue; /* don't mess with address registers */

            if (ik->flags_src || ir->flags_src ||
                ik->flags_def || ir->flags_def)
               continue; /* and also not with flags, for now */

            if (ik->def[0]->reg.file == NV_FILE_OUT ||
                ir->def[0]->reg.file == NV_FILE_OUT ||
                !values_equal(ik->def[0], ir->def[0]))
               continue;

            for (s = 0; s < 3; ++s) {
               struct nv_value *a, *b;

               if (!ik->src[s]) {
                  if (ir->src[s])
                     break;
                  continue;
               }
               if (ik->src[s]->mod != ir->src[s]->mod)
                  break;
               a = ik->src[s]->value;
               b = ir->src[s]->value;
               if (a == b)
                  continue;
               if (a->reg.file != b->reg.file ||
                   a->reg.id < 0 ||
                   a->reg.id != b->reg.id)
                  break;
            }
            if (s == 3) {
               nvcg_replace_value(ctx->pc, ir->def[0], ik->def[0]);
               nv_nvi_delete(ir);
               ++reps;
               break;
            }
         }
      }
   } while (reps);

   DESCEND_ARBITRARY(s, nv_pass_cse);

   return 0;
}
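/* E.g. two ADDs in the same block with identical sources and modifiers
 * collapse into one; the outer loop repeats until no replacement occurs
 * (reps == 0), since each merge can expose further identical pairs.
 */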
static int
nv_pc_pass0(struct nv_pc *pc, struct nv_basic_block *root)
{
   struct nv_pass_reld_elim *reldelim;
   struct nv_pass pass;
   struct nv_pass_dce dce;
   int ret;

   pass.n = 0;
   pass.pc = pc;

   /* Do this first, so we don't have to pay attention
    * to whether sources are supported memory loads.
    */
   pc->pass_seq++;
   ret = nv_pass_lower_arith(&pass, root);
   if (ret)
      return ret;

   pc->pass_seq++;
   ret = nv_pass_fold_loads(&pass, root);
   if (ret)
      return ret;

   pc->pass_seq++;
   ret = nv_pass_fold_stores(&pass, root);
   if (ret)
      return ret;

   reldelim = CALLOC_STRUCT(nv_pass_reld_elim);
   reldelim->pc = pc;
   pc->pass_seq++;
   ret = nv_pass_reload_elim(reldelim, root);
   FREE(reldelim);
   if (ret)
      return ret;

   pc->pass_seq++;
   ret = nv_pass_cse(&pass, root);
   if (ret)
      return ret;

   pc->pass_seq++;
   ret = nv_pass_lower_mods(&pass, root);
   if (ret)
      return ret;

   dce.pc = pc;
   do {
      dce.removed = 0;
      pc->pass_seq++;
      ret = nv_pass_dce(&dce, root);
      if (ret)
         return ret;
   } while (dce.removed);

   ret = nv_pass_tex_mask(&pass, root);

   return ret;
}
int
nv_pc_exec_pass0(struct nv_pc *pc)
{
   int i, ret;

   for (i = 0; i < pc->num_subroutines + 1; ++i)
      if (pc->root[i] && (ret = nv_pc_pass0(pc, pc->root[i])))
         return ret;
   return 0;
}