 * Copyright 2010 Christoph Bumiller
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
 * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF
 * OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
#define DESCEND_ARBITRARY(j, f) \
   b->pass_seq = ctx->pc->pass_seq; \
   for (j = 0; j < 2; ++j) \
      if (b->out[j] && b->out[j]->pass_seq < ctx->pc->pass_seq) \
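/* DESCEND_ARBITRARY recurses into both CFG successors of the current basic
 * block, using pass_seq as a visited marker so each block is processed only
 * once per pass; the elided lines presumably wrap this in a do { } while (0)
 * and invoke f(ctx, ...) on the not-yet-visited successors.
 */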
extern unsigned nv50_inst_min_size(struct nv_instruction *);
values_equal(struct nv_value *a, struct nv_value *b)
   return (a->reg.file == b->reg.file && a->join->reg.id == b->join->reg.id);
inst_commutation_check(struct nv_instruction *a,
                       struct nv_instruction *b)
   for (di = 0; di < 4; ++di) {
      for (si = 0; si < 5; ++si) {
         if (values_equal(a->def[di], b->src[si]->value))
   if (b->flags_src && b->flags_src->value == a->flags_def)
/* Check whether we can swap the order of the instructions,
 * where a & b may be either the earlier or the later one.
inst_commutation_legal(struct nv_instruction *a,
                       struct nv_instruction *b)
   return inst_commutation_check(a, b) && inst_commutation_check(b, a);
inst_cullable(struct nv_instruction *nvi)
   return (!(nvi->is_terminator ||
             nv_nvi_refcount(nvi)));
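/* nvi_isnop: detect MOV/SELECT instructions that became no-ops, i.e. whose
 * source and destination were joined into the same register, so the copy can
 * simply be deleted before emission.
 */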
nvi_isnop(struct nv_instruction *nvi)
   if (nvi->opcode == NV_OP_EXPORT)
   if (nvi->def[0]->join->reg.id < 0)
   if (nvi->opcode != NV_OP_MOV && nvi->opcode != NV_OP_SELECT)
   if (nvi->def[0]->reg.file != nvi->src[0]->value->reg.file)
   if (nvi->src[0]->value->join->reg.id < 0) {
      debug_printf("nvi_isnop: orphaned value detected\n");
   if (nvi->opcode == NV_OP_SELECT)
      if (!values_equal(nvi->def[0], nvi->src[1]->value))
   return values_equal(nvi->def[0], nvi->src[0]->value);
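/* Pre-emission pass: walks the CFG in emission order, removes no-op MOVs and
 * branches that merely skip to the immediately following block, pairs short
 * instructions so they fill complete slots, and accumulates each block's
 * bin_pos/bin_size (the final *= 4 suggests sizes are tracked in 32-bit words
 * and converted to bytes at the end).
 */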
nv_pc_pass_pre_emission(struct nv_pc *pc, struct nv_basic_block *b)
   struct nv_basic_block *in;
   struct nv_instruction *nvi, *next;
   for (j = pc->num_blocks - 1; j >= 0 && !pc->bb_list[j]->bin_size; --j);
      /* check for no-op branches (BRA $PC+8) */
      if (in->exit && in->exit->opcode == NV_OP_BRA && in->exit->target == b) {
         for (++j; j < pc->num_blocks; ++j)
            pc->bb_list[j]->bin_pos -= 8;
         nv_nvi_delete(in->exit);
      b->bin_pos = in->bin_pos + in->bin_size;
   pc->bb_list[pc->num_blocks++] = b;
   for (nvi = b->entry; nvi; nvi = next) {
   for (nvi = b->entry; nvi; nvi = next) {
      size = nv50_inst_min_size(nvi);
      if (nvi->next && size < 8)
      if ((n32 & 1) && nvi->next &&
          nv50_inst_min_size(nvi->next) == 4 &&
          inst_commutation_legal(nvi, nvi->next)) {
         debug_printf("permuting: ");
         nv_print_instruction(nvi);
         nv_print_instruction(nvi->next);
         nv_nvi_permute(nvi, nvi->next);
         b->bin_size += n32 & 1;
            nvi->prev->is_long = 1;
      b->bin_size += 1 + nvi->is_long;
      debug_printf("block %p is now empty\n", b);
   if (!b->exit->is_long) {
      b->exit->is_long = 1;
      /* might have deleted a whole tail of instructions */
      if (!b->exit->prev->is_long && !(n32 & 1)) {
         b->exit->prev->is_long = 1;
   assert(!b->entry || (b->exit && b->exit->is_long));
   pc->bin_size += b->bin_size *= 4;
   if (!b->out[1] && ++(b->out[0]->priv) != b->out[0]->num_in)
   for (j = 0; j < 2; ++j)
      if (b->out[j] && b->out[j] != b)
         nv_pc_pass_pre_emission(pc, b->out[j]);
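/* Pass 2 entry point: allocates the block list and runs the pre-emission
 * pass over the whole program, starting at the root basic block.
 */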
nv_pc_exec_pass2(struct nv_pc *pc)
   debug_printf("preparing %u blocks for emission\n", pc->num_blocks);
   pc->bb_list = CALLOC(pc->num_blocks, sizeof(struct nv_basic_block *));
   nv_pc_pass_pre_emission(pc, pc->root);
static INLINE boolean
is_cmem_load(struct nv_instruction *nvi)
   return (nvi->opcode == NV_OP_LDA &&
           nvi->src[0]->value->reg.file >= NV_FILE_MEM_C(0) &&
           nvi->src[0]->value->reg.file <= NV_FILE_MEM_C(15));
static INLINE boolean
is_smem_load(struct nv_instruction *nvi)
   return (nvi->opcode == NV_OP_LDA &&
           (nvi->src[0]->value->reg.file == NV_FILE_MEM_S ||
            nvi->src[0]->value->reg.file == NV_FILE_MEM_P));
static INLINE boolean
is_immd_move(struct nv_instruction *nvi)
   return (nvi->opcode == NV_OP_MOV &&
           nvi->src[0]->value->reg.file == NV_FILE_IMM);
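/* For commutative ops, swap sources so that loads end up in the operand slot
 * they can be encoded in: c[] (constant memory) loads are moved to source 1
 * and s[]/p[] loads to source 0, presumably because of encoding restrictions.
 * For SET, the condition code is mirrored accordingly via cc_swapped.
 */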
check_swap_src_0_1(struct nv_instruction *nvi)
   static const ubyte cc_swapped[8] = { 0, 4, 2, 6, 1, 5, 3, 7 };
   struct nv_ref *src0 = nvi->src[0], *src1 = nvi->src[1];
   if (!nv_op_commutative(nvi->opcode))
   assert(src0 && src1);
   if (is_cmem_load(src0->value->insn)) {
      if (!is_cmem_load(src1->value->insn)) {
         /* debug_printf("swapping cmem load to 1\n"); */
   if (is_smem_load(src1->value->insn)) {
      if (!is_smem_load(src0->value->insn)) {
         /* debug_printf("swapping smem load to 0\n"); */
   if (nvi->opcode == NV_OP_SET && nvi->src[0] != src0)
      nvi->set_cond = cc_swapped[nvi->set_cond];
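/* Fold stores: if the only use of an instruction's result is a MOV to an
 * output register ($oX), write the result to the output directly and drop the
 * copy (immediates excluded, since they cannot be moved to $oX directly).
 */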
nv_pass_fold_stores(struct nv_pass *ctx, struct nv_basic_block *b)
   struct nv_instruction *nvi, *sti;
   for (sti = b->entry; sti; sti = sti->next) {
      if (!sti->def[0] || sti->def[0]->reg.file != NV_FILE_OUT)
      /* only handling MOV to $oX here */
      if (sti->opcode != NV_OP_MOV && sti->opcode != NV_OP_STA)
      nvi = sti->src[0]->value->insn;
      if (!nvi || nvi->opcode == NV_OP_PHI)
      assert(nvi->def[0] == sti->src[0]->value);
      if (nvi->def[0]->refc > 1)
      /* cannot MOV immediate to $oX */
      if (nvi->src[0]->value->reg.file == NV_FILE_IMM)
      nvi->def[0] = sti->def[0];
      nvi->fixed = sti->fixed;
   DESCEND_ARBITRARY(j, nv_pass_fold_stores);
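/* Fold loads: replace sources that come from MOVs of immediates or from LDA
 * loads with the immediate / memory operand itself, where
 * nv50_nvi_can_use_imm / nv50_nvi_can_load say the instruction can encode it
 * directly.
 */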
nv_pass_fold_loads(struct nv_pass *ctx, struct nv_basic_block *b)
   struct nv_instruction *nvi, *ld;
   for (nvi = b->entry; nvi; nvi = nvi->next) {
      check_swap_src_0_1(nvi);
      for (j = 0; j < 3; ++j) {
         ld = nvi->src[j]->value->insn;
         if (is_immd_move(ld) && nv50_nvi_can_use_imm(nvi, j)) {
            nv_reference(ctx->pc, &nvi->src[j], ld->src[0]->value);
            debug_printf("folded immediate %i\n", ld->def[0]->n);
         if (ld->opcode != NV_OP_LDA)
         if (!nv50_nvi_can_load(nvi, j, ld->src[0]->value))
         if (j == 0 && ld->src[4]) /* can't load shared mem */
         /* fold it! */ /* XXX: ref->insn */
         nv_reference(ctx->pc, &nvi->src[j], ld->src[0]->value);
            nv_reference(ctx->pc, &nvi->src[4], ld->src[4]->value);
   DESCEND_ARBITRARY(j, nv_pass_fold_loads);
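/* Lower modifiers: turn SUB into ADD with a negated second source, fold
 * single-use NEG/ABS instructions into the NV_MOD_NEG/NV_MOD_ABS source
 * modifiers of their users (where the target opcode supports them), and merge
 * a following SAT into the MAD that produces its source.
 */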
nv_pass_lower_mods(struct nv_pass *ctx, struct nv_basic_block *b)
   struct nv_instruction *nvi, *mi, *next;
   for (nvi = b->entry; nvi; nvi = next) {
      if (nvi->opcode == NV_OP_SUB) {
         nvi->opcode = NV_OP_ADD;
         nvi->src[1]->mod ^= NV_MOD_NEG;
      /* should not put any modifiers on NEG and ABS */
      assert(nvi->opcode != NV_OP_NEG || !nvi->src[0]->mod);
      assert(nvi->opcode != NV_OP_ABS || !nvi->src[0]->mod);
      for (j = 0; j < 4; ++j) {
         mi = nvi->src[j]->value->insn;
         if (mi->def[0]->refc > 1)
         if (mi->opcode == NV_OP_NEG) mod = NV_MOD_NEG;
         if (mi->opcode == NV_OP_ABS) mod = NV_MOD_ABS;
         if (nvi->opcode == NV_OP_ABS)
            mod &= ~(NV_MOD_NEG | NV_MOD_ABS);
         if (nvi->opcode == NV_OP_NEG && mod == NV_MOD_NEG) {
            nvi->opcode = NV_OP_MOV;
         if (!(nv50_supported_src_mods(nvi->opcode, j) & mod))
         nv_reference(ctx->pc, &nvi->src[j], mi->src[0]->value);
         nvi->src[j]->mod ^= mod;
      if (nvi->opcode == NV_OP_SAT) {
         mi = nvi->src[0]->value->insn;
         if ((mi->opcode == NV_OP_MAD) && !mi->flags_def) {
            mi->def[0] = nvi->def[0];
   DESCEND_ARBITRARY(j, nv_pass_lower_mods);
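/* Arithmetic lowering: find_immediate/constant_operand below do simple
 * strength reduction on operations with an immediate operand (e.g. x * 1 ->
 * MOV, x * 2 -> ADD x, x, x * 0 -> MOV 0, x + 0 -> MOV), and
 * nv_pass_lower_arith additionally fuses a single-use MUL feeding an ADD into
 * one MAD.
 */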
#define SRC_IS_MUL(s) ((s)->insn && (s)->insn->opcode == NV_OP_MUL)
static struct nv_value *
find_immediate(struct nv_ref *ref)
   struct nv_value *src;
   while (src->insn && src->insn->opcode == NV_OP_MOV) {
      assert(!src->insn->src[0]->mod);
      src = src->insn->src[0]->value;
   return (src->reg.file == NV_FILE_IMM) ? src : NULL;
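/* constant_operand: source s of nvi is known to be the immediate val;
 * strength-reduce the operation based on the immediate's value (the other
 * source index t is presumably derived from s in an elided line).
 */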
constant_operand(struct nv_pc *pc,
                 struct nv_instruction *nvi, struct nv_value *val, int s)
   type = nvi->def[0]->reg.type;
   switch (nvi->opcode) {
      if ((type == NV_TYPE_F32 && val->reg.imm.f32 == 1.0f) ||
          (NV_TYPE_ISINT(type) && val->reg.imm.u32 == 1)) {
         nvi->opcode = NV_OP_MOV;
         nv_reference(pc, &nvi->src[s], NULL);
            nvi->src[0] = nvi->src[1];
      if ((type == NV_TYPE_F32 && val->reg.imm.f32 == 2.0f) ||
          (NV_TYPE_ISINT(type) && val->reg.imm.u32 == 2)) {
         nvi->opcode = NV_OP_ADD;
         nv_reference(pc, &nvi->src[s], nvi->src[t]->value);
      if (type == NV_TYPE_F32 && val->reg.imm.f32 == -1.0f) {
         nvi->opcode = NV_OP_NEG;
         nv_reference(pc, &nvi->src[s], NULL);
         nvi->src[0] = nvi->src[t];
      if (type == NV_TYPE_F32 && val->reg.imm.f32 == -2.0f) {
         nvi->opcode = NV_OP_ADD;
         assert(!nvi->src[s]->mod);
         nv_reference(pc, &nvi->src[s], nvi->src[t]->value);
         nvi->src[t]->mod ^= NV_MOD_NEG;
         nvi->src[s]->mod |= NV_MOD_NEG;
      if (val->reg.imm.u32 == 0) {
         nvi->opcode = NV_OP_MOV;
         nv_reference(pc, &nvi->src[t], NULL);
            nvi->src[0] = nvi->src[1];
      if (val->reg.imm.u32 == 0) {
         nvi->opcode = NV_OP_MOV;
         nv_reference(pc, &nvi->src[s], NULL);
         nvi->src[0] = nvi->src[t];
nv_pass_lower_arith(struct nv_pass *ctx, struct nv_basic_block *b)
   struct nv_instruction *nvi, *next;
   for (nvi = b->entry; nvi; nvi = next) {
      struct nv_value *src0, *src1, *src;
      if ((src = find_immediate(nvi->src[0])) != NULL)
         constant_operand(ctx->pc, nvi, src, 0);
      if ((src = find_immediate(nvi->src[1])) != NULL)
         constant_operand(ctx->pc, nvi, src, 1);
      /* try to combine MUL, ADD into MAD */
      if (nvi->opcode != NV_OP_ADD)
      src0 = nvi->src[0]->value;
      src1 = nvi->src[1]->value;
      if (SRC_IS_MUL(src0) && src0->refc == 1)
      if (SRC_IS_MUL(src1) && src1->refc == 1)
      nvi->opcode = NV_OP_MAD;
      mod = nvi->src[(src == src0) ? 0 : 1]->mod;
      nv_reference(ctx->pc, &nvi->src[(src == src0) ? 0 : 1], NULL);
      nvi->src[2] = nvi->src[(src == src0) ? 1 : 0];
      assert(!(mod & ~NV_MOD_NEG));
      nvi->src[0] = new_ref(ctx->pc, src->insn->src[0]->value);
      nvi->src[1] = new_ref(ctx->pc, src->insn->src[1]->value);
      nvi->src[0]->mod = src->insn->src[0]->mod ^ mod;
      nvi->src[1]->mod = src->insn->src[1]->mod;
   DESCEND_ARBITRARY(j, nv_pass_lower_arith);
set $r2 g f32 $r2 $r3
cvt abs rn f32 $r2 s32 $r2
cvt f32 $c0 # f32 $r2
nv_pass_lower_cond(struct nv_pass *ctx, struct nv_basic_block *b)
   /* XXX: easier in IR builder for now */
/* TODO: redundant store elimination */
   struct load_record *next;
   struct nv_value *value;
#define LOAD_RECORD_POOL_SIZE 1024
struct nv_pass_reld_elim {
   struct load_record *imm;
   struct load_record *mem_s;
   struct load_record *mem_v;
   struct load_record *mem_c[16];
   struct load_record *mem_l;
   struct load_record pool[LOAD_RECORD_POOL_SIZE];
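/* Reload elimination: remember, per memory file (immediates, s[], c[0..15],
 * l[], ...), which values have already been loaded in this block; a later
 * load of the same address or immediate is rewritten into a MOV from the
 * existing value.  Records come from a fixed pool of LOAD_RECORD_POOL_SIZE
 * entries.
 */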
nv_pass_reload_elim(struct nv_pass_reld_elim *ctx, struct nv_basic_block *b)
   struct load_record **rec, *it;
   struct nv_instruction *ld, *next;
   struct nv_value *val;
   for (ld = b->entry; ld; ld = next) {
      val = ld->src[0]->value;
      if (ld->opcode == NV_OP_LINTERP || ld->opcode == NV_OP_PINTERP) {
      if (ld->opcode == NV_OP_LDA) {
         if (val->reg.file >= NV_FILE_MEM_C(0) &&
             val->reg.file <= NV_FILE_MEM_C(15))
            rec = &ctx->mem_c[val->reg.file - NV_FILE_MEM_C(0)];
         if (val->reg.file == NV_FILE_MEM_S)
         if (val->reg.file == NV_FILE_MEM_L)
      if ((ld->opcode == NV_OP_MOV) && (val->reg.file == NV_FILE_IMM)) {
         data = val->reg.imm.u32;
      if (!rec || !ld->def[0]->refc)
      for (it = *rec; it; it = it->next)
         if (it->data == data)
         nvcg_replace_value(ctx->pc, ld->def[0], it->value);
         ld->opcode = NV_OP_MOV;
         nv_reference(ctx->pc, &ld->src[0], it->value);
      if (ctx->alloc == LOAD_RECORD_POOL_SIZE)
      it = &ctx->pool[ctx->alloc++];
      it->value = ld->def[0];
   for (j = 0; j < 16; ++j)
      ctx->mem_c[j] = NULL;
   DESCEND_ARBITRARY(j, nv_pass_reload_elim);
nv_pass_tex_mask(struct nv_pass *ctx, struct nv_basic_block *b)
   for (i = 0; i < ctx->pc->num_instructions; ++i) {
      struct nv_instruction *nvi = &ctx->pc->instructions[i];
      struct nv_value *def[4];
      if (!nv_is_vector_op(nvi->opcode))
      for (c = 0; c < 4; ++c) {
         if (nvi->def[c]->refc)
            nvi->tex_mask |= 1 << c;
         def[c] = nvi->def[c];
      for (c = 0; c < 4; ++c)
         if (nvi->tex_mask & (1 << c))
            nvi->def[j++] = def[c];
      for (c = 0; c < 4; ++c)
         if (!(nvi->tex_mask & (1 << c)))
            nvi->def[j++] = def[c];
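/* Dead code elimination: delete instructions whose results are unreferenced
 * and that have no side effects (inst_cullable); exec_pass0 below repeats
 * this pass until nothing more is removed.
 */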
nv_pass_dce(struct nv_pass_dce *ctx, struct nv_basic_block *b)
   struct nv_instruction *nvi, *next;
   for (nvi = b->entry; nvi; nvi = next) {
      if (inst_cullable(nvi)) {
   DESCEND_ARBITRARY(j, nv_pass_dce);
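/* bb_simple_if_endif detects a block ending in a two-way branch where one
 * successor falls straight through into the other, i.e. a simple IF/ENDIF
 * without an ELSE.  In the lines shown here, nv_pass_flatten only counts such
 * constructs (debug output); any actual flattening would happen in elided
 * code.
 */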
static INLINE boolean
bb_simple_if_endif(struct nv_basic_block *bb)
   return (bb->out[0] && bb->out[1] &&
           bb->out[0]->out[0] == bb->out[1] &&
           !bb->out[0]->out[1]);
nv_pass_flatten(struct nv_pass *ctx, struct nv_basic_block *b)
   if (bb_simple_if_endif(b)) {
      debug_printf("nv_pass_flatten: total IF/ENDIF constructs: %i\n", ctx->n);
   DESCEND_ARBITRARY(j, nv_pass_flatten);
/* local common subexpression elimination, stupid O(n^2) implementation */
nv_pass_cse(struct nv_pass *ctx, struct nv_basic_block *b)
   struct nv_instruction *ir, *ik, *next;
   struct nv_instruction *entry = b->phi ? b->phi : b->entry;
   for (ir = entry; ir; ir = next) {
      for (ik = entry; ik != ir; ik = ik->next) {
         if (ir->opcode != ik->opcode)
         if (ik->opcode == NV_OP_LDA ||
             ik->opcode == NV_OP_STA ||
             ik->opcode == NV_OP_MOV ||
             nv_is_vector_op(ik->opcode))
            continue; /* ignore loads, stores & moves */
         if (ik->src[4] || ir->src[4])
            continue; /* don't mess with address registers */
         for (s = 0; s < 3; ++s) {
            struct nv_value *a, *b;
            if (ik->src[s]->mod != ir->src[s]->mod)
            a = ik->src[s]->value;
            b = ir->src[s]->value;
            if (a->reg.file != b->reg.file ||
                a->reg.id != b->reg.id)
         nvcg_replace_value(ctx->pc, ir->def[0], ik->def[0]);
   DESCEND_ARBITRARY(s, nv_pass_cse);
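/* Pass 0 entry point: runs the optimization passes over the program in a
 * fixed order (flatten, lower_arith, fold_loads, fold_stores, reload_elim,
 * cse, lower_mods, then DCE in a loop until it stops removing instructions,
 * and finally tex_mask).
 */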
nv_pc_exec_pass0(struct nv_pc *pc)
   struct nv_pass_reld_elim *reldelim;
   struct nv_pass_dce dce;
   ret = nv_pass_flatten(&pass, pc->root);
   /* Do this first, so we don't have to pay attention
    * to whether sources are supported memory loads.
   ret = nv_pass_lower_arith(&pass, pc->root);
   ret = nv_pass_fold_loads(&pass, pc->root);
   ret = nv_pass_fold_stores(&pass, pc->root);
   reldelim = CALLOC_STRUCT(nv_pass_reld_elim);
   ret = nv_pass_reload_elim(reldelim, pc->root);
   ret = nv_pass_cse(&pass, pc->root);
   ret = nv_pass_lower_mods(&pass, pc->root);
      ret = nv_pass_dce(&dce, pc->root);
   } while (dce.removed);
   ret = nv_pass_tex_mask(&pass, pc->root);