2 * Copyright 2010 Christoph Bumiller
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
18 * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF
19 * OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
25 #include "pipe/p_shader_tokens.h"
26 #include "tgsi/tgsi_parse.h"
27 #include "tgsi/tgsi_util.h"
28 #include "tgsi/tgsi_dump.h"
29 #include "util/u_dynarray.h"
32 #include "nvc0_program.h"
34 /* Arbitrary internal limits. */
35 #define BLD_MAX_TEMPS 64
36 #define BLD_MAX_ADDRS 4
37 #define BLD_MAX_PREDS 4
38 #define BLD_MAX_IMMDS 128
39 #define BLD_MAX_OUTPS PIPE_MAX_SHADER_OUTPUTS
41 #define BLD_MAX_COND_NESTING 8
42 #define BLD_MAX_LOOP_NESTING 4
43 #define BLD_MAX_CALL_NESTING 2
45 /* This structure represents a TGSI register. */
47 struct nv_value *current;
48 /* collect all SSA values assigned to it */
49 struct util_dynarray vals;
50 /* 1 bit per loop level, indicates if used/defd, reset when loop ends */
55 static INLINE struct nv_value **
56 bld_register_access(struct bld_register *reg, unsigned i)
58 return util_dynarray_element(®->vals, struct nv_value *, i);
62 bld_register_add_val(struct bld_register *reg, struct nv_value *val)
64 struct nv_basic_block *bb = val->insn->bb;
67 (util_dynarray_top(®->vals, struct nv_value *))->insn->bb == bb)
68 *(util_dynarray_top_ptr(®->vals, struct nv_value *)) = val;
70 util_dynarray_append(®->vals, struct nv_value *, val);
74 bld_register_del_val(struct bld_register *reg, struct nv_value *val)
78 for (i = reg->vals.size / sizeof(struct nv_value *); i > 0; --i)
79 if (*bld_register_access(reg, i - 1) == val)
84 if (i != reg->vals.size / sizeof(struct nv_value *))
85 *bld_register_access(reg, i - 1) = util_dynarray_pop(®->vals,
88 reg->vals.size -= sizeof(struct nv_value *);
94 struct nvc0_translation_info *ti;
97 struct nv_basic_block *b;
99 struct tgsi_parse_context parse[BLD_MAX_CALL_NESTING];
102 struct nv_basic_block *cond_bb[BLD_MAX_COND_NESTING];
103 struct nv_basic_block *join_bb[BLD_MAX_COND_NESTING];
104 struct nv_basic_block *else_bb[BLD_MAX_COND_NESTING];
106 struct nv_basic_block *loop_bb[BLD_MAX_LOOP_NESTING];
107 struct nv_basic_block *brkt_bb[BLD_MAX_LOOP_NESTING];
110 ubyte out_kind; /* CFG_EDGE_FORWARD, or FAKE in case of BREAK/CONT */
112 struct bld_register tvs[BLD_MAX_TEMPS][4]; /* TGSI_FILE_TEMPORARY */
113 struct bld_register avs[BLD_MAX_ADDRS][4]; /* TGSI_FILE_ADDRESS */
114 struct bld_register pvs[BLD_MAX_PREDS][4]; /* TGSI_FILE_PREDICATE */
115 struct bld_register ovs[BLD_MAX_OUTPS][4]; /* TGSI_FILE_OUTPUT, FP only */
117 uint32_t outputs_written[(PIPE_MAX_SHADER_OUTPUTS + 7) / 8];
120 struct nv_value *zero;
121 struct nv_value *frag_coord[4];
124 struct nv_value *saved_sysvals[4];
125 struct nv_value *saved_addr[4][2];
126 struct nv_value *saved_inputs[PIPE_MAX_SHADER_INPUTS][4];
127 struct nv_value *saved_immd[BLD_MAX_IMMDS];
132 bld_register_file(struct bld_context *bld, struct bld_register *reg)
134 if (reg >= &bld->pvs[0][0] &&
135 reg < &bld->ovs[0][0])
140 static INLINE struct nv_value *
141 bld_fetch(struct bld_context *bld, struct bld_register *regs, int i, int c)
143 regs[i * 4 + c].loop_use |= 1 << bld->loop_lvl;
144 return regs[i * 4 + c].current;
147 static struct nv_value *
148 bld_loop_phi(struct bld_context *, struct bld_register *, struct nv_value *);
150 /* If a variable is defined in a loop without prior use, we don't need
151 * a phi in the loop header to account for backwards flow.
153 * However, if this variable is then also used outside the loop, we do
154 * need a phi after all. But we must not use this phi's def inside the
155 * loop, so we can eliminate the phi if it is unused later.
158 bld_store(struct bld_context *bld,
159 struct bld_register *regs, int i, int c, struct nv_value *val)
161 const uint16_t m = 1 << bld->loop_lvl;
162 struct bld_register *reg = ®s[i * 4 + c];
164 if (bld->loop_lvl && !(m & (reg->loop_def | reg->loop_use)))
165 bld_loop_phi(bld, reg, val);
168 bld_register_add_val(reg, reg->current);
170 reg->loop_def |= 1 << bld->loop_lvl;
173 #define FETCH_TEMP(i, c) bld_fetch(bld, &bld->tvs[0][0], i, c)
174 #define STORE_TEMP(i, c, v) bld_store(bld, &bld->tvs[0][0], i, c, (v))
175 #define FETCH_ADDR(i, c) bld_fetch(bld, &bld->avs[0][0], i, c)
176 #define STORE_ADDR(i, c, v) bld_store(bld, &bld->avs[0][0], i, c, (v))
177 #define FETCH_PRED(i, c) bld_fetch(bld, &bld->pvs[0][0], i, c)
178 #define STORE_PRED(i, c, v) bld_store(bld, &bld->pvs[0][0], i, c, (v))
179 #define STORE_OUTP(i, c, v) \
181 bld_store(bld, &bld->ovs[0][0], i, c, (v)); \
182 bld->outputs_written[(i) / 8] |= 1 << (((i) * 4 + (c)) % 32); \
186 bld_clear_def_use(struct bld_register *regs, int n, int lvl)
189 const uint16_t mask = ~(1 << lvl);
191 for (i = 0; i < n * 4; ++i) {
192 regs[i].loop_def &= mask;
193 regs[i].loop_use &= mask;
198 bld_warn_uninitialized(struct bld_context *bld, int kind,
199 struct bld_register *reg, struct nv_basic_block *b)
201 #if NV50_DEBUG & NV50_DEBUG_SHADER
202 long i = (reg - &bld->tvs[0][0]) / 4;
203 long c = (reg - &bld->tvs[0][0]) & 3;
207 debug_printf("WARNING: TEMP[%li].%c %s used uninitialized in BB:%i\n",
208 i, (int)('x' + c), kind ? "may be" : "is", b->id);
212 static INLINE struct nv_value *
213 bld_def(struct nv_instruction *i, int c, struct nv_value *value)
220 static INLINE struct nv_value *
221 find_by_bb(struct bld_register *reg, struct nv_basic_block *b)
225 if (reg->current && reg->current->insn->bb == b)
228 for (i = 0; i < reg->vals.size / sizeof(struct nv_value *); ++i)
229 if ((*bld_register_access(reg, i))->insn->bb == b)
230 return *bld_register_access(reg, i);
234 /* Fetch value from register that was defined in the specified BB,
235 * or search for first definitions in all of its predecessors.
238 fetch_by_bb(struct bld_register *reg,
239 struct nv_value **vals, int *n,
240 struct nv_basic_block *b)
243 struct nv_value *val;
245 assert(*n < 16); /* MAX_COND_NESTING */
247 val = find_by_bb(reg, b);
249 for (i = 0; i < *n; ++i)
255 for (i = 0; i < b->num_in; ++i)
256 if (!IS_WALL_EDGE(b->in_kind[i]))
257 fetch_by_bb(reg, vals, n, b->in[i]);
260 static INLINE boolean
261 nvc0_bblock_is_terminated(struct nv_basic_block *bb)
263 return bb->exit && bb->exit->terminator;
266 static INLINE struct nv_value *
267 bld_load_imm_u32(struct bld_context *bld, uint32_t u);
269 static INLINE struct nv_value *
270 bld_undef(struct bld_context *bld, ubyte file)
272 struct nv_instruction *nvi = new_instruction(bld->pc, NV_OP_UNDEF);
274 return bld_def(nvi, 0, new_value(bld->pc, file, 4));
277 static struct nv_value *
278 bld_phi(struct bld_context *bld, struct nv_basic_block *b,
279 struct bld_register *reg)
281 struct nv_basic_block *in;
282 struct nv_value *vals[16] = { NULL };
283 struct nv_value *val;
284 struct nv_instruction *phi;
289 fetch_by_bb(reg, vals, &n, b);
292 bld_warn_uninitialized(bld, 0, reg, b);
297 if (nvc0_bblock_dominated_by(b, vals[0]->insn->bb))
300 bld_warn_uninitialized(bld, 1, reg, b);
302 /* back-tracking to insert missing value of other path */
305 if (in->num_in == 1) {
308 if (!nvc0_bblock_reachable_by(in->in[0], vals[0]->insn->bb, b))
311 if (!nvc0_bblock_reachable_by(in->in[1], vals[0]->insn->bb, b))
317 bld->pc->current_block = in;
319 /* should make this a no-op */
320 bld_register_add_val(reg, bld_undef(bld, vals[0]->reg.file));
324 for (i = 0; i < n; ++i) {
325 /* if value dominates b, continue to the redefinitions */
326 if (nvc0_bblock_dominated_by(b, vals[i]->insn->bb))
329 /* if value dominates any in-block, b should be the dom frontier */
330 for (j = 0; j < b->num_in; ++j)
331 if (nvc0_bblock_dominated_by(b->in[j], vals[i]->insn->bb))
333 /* otherwise, find the dominance frontier and put the phi there */
334 if (j == b->num_in) {
335 in = nvc0_bblock_dom_frontier(vals[i]->insn->bb);
336 val = bld_phi(bld, in, reg);
337 bld_register_add_val(reg, val);
343 bld->pc->current_block = b;
348 phi = new_instruction(bld->pc, NV_OP_PHI);
350 bld_def(phi, 0, new_value(bld->pc, vals[0]->reg.file, vals[0]->reg.size));
351 for (i = 0; i < n; ++i)
352 nv_reference(bld->pc, phi, i, vals[i]);
357 /* Insert a phi function in the loop header.
358 * For nested loops, we need to insert phi functions in all the outer
359 * loop headers if they don't have one yet.
361 * @def: redefinition from inside loop, or NULL if to be replaced later
363 static struct nv_value *
364 bld_loop_phi(struct bld_context *bld, struct bld_register *reg,
365 struct nv_value *def)
367 struct nv_instruction *phi;
368 struct nv_basic_block *bb = bld->pc->current_block;
369 struct nv_value *val = NULL;
371 if (bld->ti->require_stores) /* XXX: actually only for INDEXABLE_TEMP */
374 if (bld->loop_lvl > 1) {
376 if (!((reg->loop_def | reg->loop_use) & (1 << bld->loop_lvl)))
377 val = bld_loop_phi(bld, reg, NULL);
382 val = bld_phi(bld, bld->pc->current_block, reg); /* old definition */
384 bld->pc->current_block = bld->loop_bb[bld->loop_lvl - 1]->in[0];
385 val = bld_undef(bld, bld_register_file(bld, reg));
388 bld->pc->current_block = bld->loop_bb[bld->loop_lvl - 1];
390 phi = new_instruction(bld->pc, NV_OP_PHI);
392 bld_def(phi, 0, new_value_like(bld->pc, val));
396 bld_register_add_val(reg, phi->def[0]);
398 phi->target = (struct nv_basic_block *)reg; /* cheat */
400 nv_reference(bld->pc, phi, 0, val);
401 nv_reference(bld->pc, phi, 1, def);
403 bld->pc->current_block = bb;
408 static INLINE struct nv_value *
409 bld_fetch_global(struct bld_context *bld, struct bld_register *reg)
411 const uint16_t m = 1 << bld->loop_lvl;
412 const uint16_t use = reg->loop_use;
416 /* If neither used nor def'd inside the loop, build a phi in foresight,
417 * so we don't have to replace stuff later on, which requires tracking.
419 if (bld->loop_lvl && !((use | reg->loop_def) & m))
420 return bld_loop_phi(bld, reg, NULL);
422 return bld_phi(bld, bld->pc->current_block, reg);
425 static INLINE struct nv_value *
426 bld_imm_u32(struct bld_context *bld, uint32_t u)
429 unsigned n = bld->num_immds;
431 for (i = 0; i < n; ++i)
432 if (bld->saved_immd[i]->reg.imm.u32 == u)
433 return bld->saved_immd[i];
435 assert(n < BLD_MAX_IMMDS);
438 bld->saved_immd[n] = new_value(bld->pc, NV_FILE_IMM, 4);
439 bld->saved_immd[n]->reg.imm.u32 = u;
440 return bld->saved_immd[n];
444 bld_replace_value(struct nv_pc *, struct nv_basic_block *, struct nv_value *,
447 /* Replace the source of the phi in the loop header by the last assignment,
448 * or eliminate the phi function if there is no assignment inside the loop.
450 * Redundancy situation 1 - (used) but (not redefined) value:
451 * %3 = phi %0, %3 = %3 is used
452 * %3 = phi %0, %4 = is new definition
454 * Redundancy situation 2 - (not used) but (redefined) value:
455 * %3 = phi %0, %2 = %2 is used, %3 could be used outside, deleted by DCE
458 bld_loop_end(struct bld_context *bld, struct nv_basic_block *bb)
460 struct nv_basic_block *save = bld->pc->current_block;
461 struct nv_instruction *phi, *next;
462 struct nv_value *val;
463 struct bld_register *reg;
466 for (phi = bb->phi; phi && phi->opcode == NV_OP_PHI; phi = next) {
469 reg = (struct bld_register *)phi->target;
472 /* start with s == 1, src[0] is from outside the loop */
473 for (s = 1, n = 0; n < bb->num_in; ++n) {
474 if (bb->in_kind[n] != CFG_EDGE_BACK)
478 bld->pc->current_block = bb->in[n];
479 val = bld_fetch_global(bld, reg);
481 for (i = 0; i < 4; ++i)
482 if (phi->src[i] && phi->src[i]->value == val)
485 /* skip values we do not want to replace */
486 for (; phi->src[s] && phi->src[s]->value != phi->def[0]; ++s);
487 nv_reference(bld->pc, phi, s++, val);
490 bld->pc->current_block = save;
492 if (phi->src[0]->value == phi->def[0] ||
493 phi->src[0]->value == phi->src[1]->value)
496 if (phi->src[1]->value == phi->def[0])
502 /* eliminate the phi */
503 bld_register_del_val(reg, phi->def[0]);
506 bld_replace_value(bld->pc, bb, phi->def[0], phi->src[s]->value);
508 nvc0_insn_delete(phi);
513 static INLINE struct nv_value *
514 bld_imm_f32(struct bld_context *bld, float f)
516 return bld_imm_u32(bld, fui(f));
519 static struct nv_value *
520 bld_insn_1(struct bld_context *bld, uint opcode, struct nv_value *src0)
522 struct nv_instruction *insn = new_instruction(bld->pc, opcode);
524 nv_reference(bld->pc, insn, 0, src0);
526 return bld_def(insn, 0, new_value(bld->pc, NV_FILE_GPR, src0->reg.size));
529 static struct nv_value *
530 bld_insn_2(struct bld_context *bld, uint opcode,
531 struct nv_value *src0, struct nv_value *src1)
533 struct nv_instruction *insn = new_instruction(bld->pc, opcode);
535 nv_reference(bld->pc, insn, 0, src0);
536 nv_reference(bld->pc, insn, 1, src1);
538 return bld_def(insn, 0, new_value(bld->pc, NV_FILE_GPR, src0->reg.size));
541 static struct nv_value *
542 bld_insn_3(struct bld_context *bld, uint opcode,
543 struct nv_value *src0, struct nv_value *src1,
544 struct nv_value *src2)
546 struct nv_instruction *insn = new_instruction(bld->pc, opcode);
548 nv_reference(bld->pc, insn, 0, src0);
549 nv_reference(bld->pc, insn, 1, src1);
550 nv_reference(bld->pc, insn, 2, src2);
552 return bld_def(insn, 0, new_value(bld->pc, NV_FILE_GPR, src0->reg.size));
556 bld_src_predicate(struct bld_context *bld,
557 struct nv_instruction *nvi, int s, struct nv_value *val)
560 nv_reference(bld->pc, nvi, s, val);
564 bld_src_pointer(struct bld_context *bld,
565 struct nv_instruction *nvi, int s, struct nv_value *val)
568 nv_reference(bld->pc, nvi, s, val);
572 bld_lmem_store(struct bld_context *bld, struct nv_value *ptr, int ofst,
573 struct nv_value *val)
575 struct nv_instruction *insn = new_instruction(bld->pc, NV_OP_ST);
576 struct nv_value *loc;
578 loc = new_value(bld->pc, NV_FILE_MEM_L, nv_type_sizeof(NV_TYPE_U32));
580 loc->reg.address = ofst * 4;
582 nv_reference(bld->pc, insn, 0, loc);
583 nv_reference(bld->pc, insn, 1, val);
585 bld_src_pointer(bld, insn, 2, ptr);
588 static struct nv_value *
589 bld_lmem_load(struct bld_context *bld, struct nv_value *ptr, int ofst)
591 struct nv_value *loc, *val;
593 loc = new_value(bld->pc, NV_FILE_MEM_L, nv_type_sizeof(NV_TYPE_U32));
595 loc->reg.address = ofst * 4;
597 val = bld_insn_1(bld, NV_OP_LD, loc);
599 bld_src_pointer(bld, val->insn, 1, ptr);
604 static struct nv_value *
605 bld_pow(struct bld_context *bld, struct nv_value *x, struct nv_value *e)
607 struct nv_value *val;
609 val = bld_insn_1(bld, NV_OP_LG2, x);
610 val = bld_insn_2(bld, NV_OP_MUL_F32, e, val);
612 val = bld_insn_1(bld, NV_OP_PREEX2, val);
613 val = bld_insn_1(bld, NV_OP_EX2, val);
618 static INLINE struct nv_value *
619 bld_load_imm_f32(struct bld_context *bld, float f)
623 return bld_insn_1(bld, NV_OP_MOV, bld_imm_f32(bld, f));
626 static INLINE struct nv_value *
627 bld_load_imm_u32(struct bld_context *bld, uint32_t u)
631 return bld_insn_1(bld, NV_OP_MOV, bld_imm_u32(bld, u));
634 static INLINE struct nv_value *
635 bld_setp(struct bld_context *bld, uint op, uint8_t cc,
636 struct nv_value *src0, struct nv_value *src1)
638 struct nv_value *val = bld_insn_2(bld, op, src0, src1);
640 val->reg.file = NV_FILE_PRED;
642 val->insn->set_cond = cc & 0xf;
646 static INLINE struct nv_value *
647 bld_cvt(struct bld_context *bld, uint8_t dt, uint8_t st, struct nv_value *src)
649 struct nv_value *val = bld_insn_1(bld, NV_OP_CVT, src);
650 val->insn->ext.cvt.d = dt;
651 val->insn->ext.cvt.s = st;
656 bld_kil(struct bld_context *bld, struct nv_value *src)
658 struct nv_instruction *nvi;
660 src = bld_setp(bld, NV_OP_SET_F32, NV_CC_LT, src, bld->zero);
662 nvi = new_instruction(bld->pc, NV_OP_KIL);
665 bld_src_predicate(bld, nvi, 0, src);
669 bld_flow(struct bld_context *bld, uint opcode,
670 struct nv_value *pred, uint8_t cc, struct nv_basic_block *target,
673 struct nv_instruction *nvi;
676 new_instruction(bld->pc, NV_OP_JOINAT)->fixed = 1;
678 nvi = new_instruction(bld->pc, opcode);
679 nvi->target = target;
683 bld_src_predicate(bld, nvi, 0, pred);
688 translate_setcc(unsigned opcode)
691 case TGSI_OPCODE_SLT: return NV_CC_LT;
692 case TGSI_OPCODE_SGE: return NV_CC_GE;
693 case TGSI_OPCODE_SEQ: return NV_CC_EQ;
694 case TGSI_OPCODE_SGT: return NV_CC_GT;
695 case TGSI_OPCODE_SLE: return NV_CC_LE;
696 case TGSI_OPCODE_SNE: return NV_CC_NE | NV_CC_U;
697 case TGSI_OPCODE_STR: return NV_CC_TR;
698 case TGSI_OPCODE_SFL: return NV_CC_FL;
700 case TGSI_OPCODE_ISLT: return NV_CC_LT;
701 case TGSI_OPCODE_ISGE: return NV_CC_GE;
702 case TGSI_OPCODE_USEQ: return NV_CC_EQ;
703 case TGSI_OPCODE_USGE: return NV_CC_GE;
704 case TGSI_OPCODE_USLT: return NV_CC_LT;
705 case TGSI_OPCODE_USNE: return NV_CC_NE;
713 translate_opcode(uint opcode)
716 case TGSI_OPCODE_ABS: return NV_OP_ABS_F32;
717 case TGSI_OPCODE_ADD: return NV_OP_ADD_F32;
718 case TGSI_OPCODE_SUB: return NV_OP_SUB_F32;
719 case TGSI_OPCODE_UADD: return NV_OP_ADD_B32;
720 case TGSI_OPCODE_AND: return NV_OP_AND;
721 case TGSI_OPCODE_EX2: return NV_OP_EX2;
722 case TGSI_OPCODE_CEIL: return NV_OP_CEIL;
723 case TGSI_OPCODE_FLR: return NV_OP_FLOOR;
724 case TGSI_OPCODE_TRUNC: return NV_OP_TRUNC;
725 case TGSI_OPCODE_COS: return NV_OP_COS;
726 case TGSI_OPCODE_SIN: return NV_OP_SIN;
727 case TGSI_OPCODE_DDX: return NV_OP_DFDX;
728 case TGSI_OPCODE_DDY: return NV_OP_DFDY;
729 case TGSI_OPCODE_F2I:
730 case TGSI_OPCODE_F2U:
731 case TGSI_OPCODE_I2F:
732 case TGSI_OPCODE_U2F: return NV_OP_CVT;
733 case TGSI_OPCODE_INEG: return NV_OP_NEG_S32;
734 case TGSI_OPCODE_LG2: return NV_OP_LG2;
735 case TGSI_OPCODE_ISHR: return NV_OP_SAR;
736 case TGSI_OPCODE_USHR: return NV_OP_SHR;
737 case TGSI_OPCODE_MAD: return NV_OP_MAD_F32;
738 case TGSI_OPCODE_MAX: return NV_OP_MAX_F32;
739 case TGSI_OPCODE_IMAX: return NV_OP_MAX_S32;
740 case TGSI_OPCODE_UMAX: return NV_OP_MAX_U32;
741 case TGSI_OPCODE_MIN: return NV_OP_MIN_F32;
742 case TGSI_OPCODE_IMIN: return NV_OP_MIN_S32;
743 case TGSI_OPCODE_UMIN: return NV_OP_MIN_U32;
744 case TGSI_OPCODE_MUL: return NV_OP_MUL_F32;
745 case TGSI_OPCODE_UMUL: return NV_OP_MUL_B32;
746 case TGSI_OPCODE_OR: return NV_OP_OR;
747 case TGSI_OPCODE_RCP: return NV_OP_RCP;
748 case TGSI_OPCODE_RSQ: return NV_OP_RSQ;
749 case TGSI_OPCODE_SAD: return NV_OP_SAD;
750 case TGSI_OPCODE_SHL: return NV_OP_SHL;
751 case TGSI_OPCODE_SLT:
752 case TGSI_OPCODE_SGE:
753 case TGSI_OPCODE_SEQ:
754 case TGSI_OPCODE_SGT:
755 case TGSI_OPCODE_SLE:
756 case TGSI_OPCODE_SNE: return NV_OP_FSET_F32;
757 case TGSI_OPCODE_ISLT:
758 case TGSI_OPCODE_ISGE: return NV_OP_SET_S32;
759 case TGSI_OPCODE_USEQ:
760 case TGSI_OPCODE_USGE:
761 case TGSI_OPCODE_USLT:
762 case TGSI_OPCODE_USNE: return NV_OP_SET_U32;
763 case TGSI_OPCODE_TEX: return NV_OP_TEX;
764 case TGSI_OPCODE_TXP: return NV_OP_TEX;
765 case TGSI_OPCODE_TXB: return NV_OP_TXB;
766 case TGSI_OPCODE_TXL: return NV_OP_TXL;
767 case TGSI_OPCODE_XOR: return NV_OP_XOR;
775 infer_src_type(unsigned opcode)
778 case TGSI_OPCODE_MOV:
779 case TGSI_OPCODE_AND:
781 case TGSI_OPCODE_XOR:
782 case TGSI_OPCODE_SAD:
783 case TGSI_OPCODE_U2F:
784 case TGSI_OPCODE_UADD:
785 case TGSI_OPCODE_UDIV:
786 case TGSI_OPCODE_UMOD:
787 case TGSI_OPCODE_UMAD:
788 case TGSI_OPCODE_UMUL:
789 case TGSI_OPCODE_UMAX:
790 case TGSI_OPCODE_UMIN:
791 case TGSI_OPCODE_USEQ:
792 case TGSI_OPCODE_USGE:
793 case TGSI_OPCODE_USLT:
794 case TGSI_OPCODE_USNE:
795 case TGSI_OPCODE_USHR:
797 case TGSI_OPCODE_I2F:
798 case TGSI_OPCODE_IDIV:
799 case TGSI_OPCODE_IMAX:
800 case TGSI_OPCODE_IMIN:
801 case TGSI_OPCODE_INEG:
802 case TGSI_OPCODE_ISGE:
803 case TGSI_OPCODE_ISHR:
804 case TGSI_OPCODE_ISLT:
812 infer_dst_type(unsigned opcode)
815 case TGSI_OPCODE_MOV:
816 case TGSI_OPCODE_F2U:
817 case TGSI_OPCODE_AND:
819 case TGSI_OPCODE_XOR:
820 case TGSI_OPCODE_SAD:
821 case TGSI_OPCODE_UADD:
822 case TGSI_OPCODE_UDIV:
823 case TGSI_OPCODE_UMOD:
824 case TGSI_OPCODE_UMAD:
825 case TGSI_OPCODE_UMUL:
826 case TGSI_OPCODE_UMAX:
827 case TGSI_OPCODE_UMIN:
828 case TGSI_OPCODE_USEQ:
829 case TGSI_OPCODE_USGE:
830 case TGSI_OPCODE_USLT:
831 case TGSI_OPCODE_USNE:
832 case TGSI_OPCODE_USHR:
834 case TGSI_OPCODE_F2I:
835 case TGSI_OPCODE_IDIV:
836 case TGSI_OPCODE_IMAX:
837 case TGSI_OPCODE_IMIN:
838 case TGSI_OPCODE_INEG:
839 case TGSI_OPCODE_ISGE:
840 case TGSI_OPCODE_ISHR:
841 case TGSI_OPCODE_ISLT:
850 emit_store(struct bld_context *bld, const struct tgsi_full_instruction *inst,
851 unsigned chan, struct nv_value *res)
853 const struct tgsi_full_dst_register *reg = &inst->Dst[0];
854 struct nv_instruction *nvi;
855 struct nv_value *mem;
856 struct nv_value *ptr = NULL;
859 idx = reg->Register.Index;
862 if (reg->Register.Indirect)
863 ptr = FETCH_ADDR(reg->Indirect.Index,
864 tgsi_util_get_src_register_swizzle(®->Indirect, 0));
866 switch (inst->Instruction.Saturate) {
869 case TGSI_SAT_ZERO_ONE:
870 res = bld_insn_1(bld, NV_OP_SAT, res);
872 case TGSI_SAT_MINUS_PLUS_ONE:
873 res = bld_insn_2(bld, NV_OP_MAX_F32, res, bld_load_imm_f32(bld, -1.0f));
874 res = bld_insn_2(bld, NV_OP_MIN_F32, res, bld_load_imm_f32(bld, 1.0f));
878 switch (reg->Register.File) {
879 case TGSI_FILE_OUTPUT:
881 res = bld_insn_1(bld, NV_OP_MOV, res);
883 if (bld->pc->is_fragprog) {
885 STORE_OUTP(idx, chan, res);
887 nvi = new_instruction(bld->pc, NV_OP_EXPORT);
888 mem = new_value(bld->pc, bld->ti->output_file, res->reg.size);
889 nv_reference(bld->pc, nvi, 0, mem);
890 nv_reference(bld->pc, nvi, 1, res);
892 mem->reg.address = bld->ti->output_loc[idx][chan];
894 mem->reg.address = 0x80 + idx * 16 + chan * 4;
898 case TGSI_FILE_TEMPORARY:
899 assert(idx < BLD_MAX_TEMPS);
900 if (!res->insn || res->insn->bb != bld->pc->current_block)
901 res = bld_insn_1(bld, NV_OP_MOV, res);
903 assert(res->reg.file == NV_FILE_GPR);
905 if (bld->ti->require_stores)
906 bld_lmem_store(bld, ptr, idx * 4 + chan, res);
908 STORE_TEMP(idx, chan, res);
910 case TGSI_FILE_ADDRESS:
911 assert(idx < BLD_MAX_ADDRS);
912 STORE_ADDR(idx, chan, res);
917 static INLINE uint32_t
918 bld_is_output_written(struct bld_context *bld, int i, int c)
921 return bld->outputs_written[i / 8] & (0xf << ((i * 4) % 32));
922 return bld->outputs_written[i / 8] & (1 << ((i * 4 + c) % 32));
926 bld_append_vp_ucp(struct bld_context *bld)
928 struct nv_value *res[6];
929 struct nv_value *ucp, *vtx, *out;
930 struct nv_instruction *insn;
933 assert(bld->ti->prog->vp.num_ucps <= 6);
935 for (c = 0; c < 4; ++c) {
936 vtx = bld_fetch_global(bld, &bld->ovs[bld->hpos_index][c]);
938 for (i = 0; i < bld->ti->prog->vp.num_ucps; ++i) {
939 ucp = new_value(bld->pc, NV_FILE_MEM_C(15), 4);
940 ucp->reg.address = i * 16 + c * 4;
943 res[i] = bld_insn_2(bld, NV_OP_MUL_F32, vtx, ucp);
945 res[i] = bld_insn_3(bld, NV_OP_MAD_F32, vtx, ucp, res[i]);
949 for (i = 0; i < bld->ti->prog->vp.num_ucps; ++i) {
950 (out = new_value(bld->pc, NV_FILE_MEM_V, 4))->reg.address = 0x2c0 + i * 4;
951 (insn = new_instruction(bld->pc, NV_OP_EXPORT))->fixed = 1;
952 nv_reference(bld->pc, insn, 0, out);
953 nv_reference(bld->pc, insn, 1, res[i]);
958 bld_export_fp_outputs(struct bld_context *bld)
960 struct nv_value *vals[4];
961 struct nv_instruction *nvi;
964 for (i = 0; i < PIPE_MAX_SHADER_OUTPUTS; ++i) {
965 if (!bld_is_output_written(bld, i, -1))
967 for (n = 0, c = 0; c < 4; ++c) {
968 if (!bld_is_output_written(bld, i, c))
970 vals[n] = bld_fetch_global(bld, &bld->ovs[i][c]);
972 vals[n] = bld_insn_1(bld, NV_OP_MOV, vals[n]);
973 vals[n++]->reg.id = bld->ti->output_loc[i][c];
977 (nvi = new_instruction(bld->pc, NV_OP_EXPORT))->fixed = 1;
978 for (c = 0; c < n; ++c)
979 nv_reference(bld->pc, nvi, c, vals[c]);
984 bld_new_block(struct bld_context *bld, struct nv_basic_block *b)
988 bld->pc->current_block = b;
990 for (i = 0; i < 4; ++i)
991 bld->saved_addr[i][0] = NULL;
992 for (i = 0; i < PIPE_MAX_SHADER_INPUTS; ++i)
993 for (c = 0; c < 4; ++c)
994 bld->saved_inputs[i][c] = NULL;
996 bld->out_kind = CFG_EDGE_FORWARD;
999 static struct nv_value *
1000 bld_interp(struct bld_context *bld, unsigned mode, struct nv_value *val)
1002 unsigned cent = mode & NVC0_INTERP_CENTROID;
1004 mode &= ~NVC0_INTERP_CENTROID;
1006 if (val->reg.address == 0x3fc) {
1007 /* gl_FrontFacing: 0/~0 to -1.0/+1.0 */
1008 val = bld_insn_1(bld, NV_OP_LINTERP, val);
1009 val->insn->flat = 1;
1010 val = bld_insn_2(bld, NV_OP_SHL, val, bld_imm_u32(bld, 31));
1011 val = bld_insn_2(bld, NV_OP_XOR, val, bld_imm_f32(bld, -1.0f));
1014 if (mode == NVC0_INTERP_PERSPECTIVE) {
1015 val = bld_insn_2(bld, NV_OP_PINTERP, val, bld->frag_coord[3]);
1017 val = bld_insn_1(bld, NV_OP_LINTERP, val);
1020 val->insn->flat = mode == NVC0_INTERP_FLAT ? 1 : 0;
1021 val->insn->centroid = cent ? 1 : 0;
1025 static struct nv_value *
1026 emit_fetch(struct bld_context *bld, const struct tgsi_full_instruction *insn,
1027 const unsigned s, const unsigned chan)
1029 const struct tgsi_full_src_register *src = &insn->Src[s];
1030 struct nv_value *res = NULL;
1031 struct nv_value *ptr = NULL;
1032 int idx, ind_idx, dim_idx;
1033 unsigned swz, ind_swz, sgn;
1035 idx = src->Register.Index;
1036 swz = tgsi_util_get_full_src_register_swizzle(src, chan);
1038 if (src->Register.Indirect) {
1039 ind_idx = src->Indirect.Index;
1040 ind_swz = tgsi_util_get_src_register_swizzle(&src->Indirect, 0);
1042 ptr = FETCH_ADDR(ind_idx, ind_swz);
1045 if (src->Register.Dimension)
1046 dim_idx = src->Dimension.Index;
1050 switch (src->Register.File) {
1051 case TGSI_FILE_CONSTANT:
1052 assert(dim_idx < 14);
1053 res = new_value(bld->pc, NV_FILE_MEM_C(dim_idx), 4);
1054 res->reg.address = idx * 16 + swz * 4;
1055 res = bld_insn_1(bld, NV_OP_LD, res);
1057 bld_src_pointer(bld, res->insn, 1, ptr);
1059 case TGSI_FILE_IMMEDIATE: /* XXX: type for MOV TEMP[0], -IMM[0] */
1060 assert(idx < bld->ti->immd32_nr);
1061 res = bld_load_imm_u32(bld, bld->ti->immd32[idx * 4 + swz]);
1063 case TGSI_FILE_INPUT:
1064 assert(!src->Register.Dimension);
1066 res = bld->saved_inputs[idx][swz];
1070 res = new_value(bld->pc, bld->ti->input_file, 4);
1072 res->reg.address = 0x80 + idx * 16 + swz * 4;
1074 res->reg.address = bld->ti->input_loc[idx][swz];
1076 if (bld->pc->is_fragprog)
1077 res = bld_interp(bld, bld->ti->interp_mode[idx], res);
1079 res = bld_insn_1(bld, NV_OP_VFETCH, res);
1082 bld_src_pointer(bld, res->insn, res->insn->src[1] ? 2 : 1, ptr);
1084 bld->saved_inputs[idx][swz] = res;
1086 case TGSI_FILE_TEMPORARY:
1087 if (bld->ti->require_stores)
1088 res = bld_lmem_load(bld, ptr, idx * 4 + swz);
1090 res = bld_fetch_global(bld, &bld->tvs[idx][swz]);
1092 case TGSI_FILE_ADDRESS:
1093 res = bld_fetch_global(bld, &bld->avs[idx][swz]);
1095 case TGSI_FILE_PREDICATE:
1096 res = bld_fetch_global(bld, &bld->pvs[idx][swz]);
1098 case TGSI_FILE_SYSTEM_VALUE:
1099 assert(bld->ti->sysval_loc[idx] < 0xf00); /* >= would mean special reg */
1100 res = new_value(bld->pc,
1101 bld->pc->is_fragprog ? NV_FILE_MEM_V : NV_FILE_MEM_A, 4);
1102 res->reg.address = bld->ti->sysval_loc[idx];
1104 if (res->reg.file == NV_FILE_MEM_A)
1105 res = bld_insn_1(bld, NV_OP_VFETCH, res);
1107 res = bld_interp(bld, NVC0_INTERP_FLAT, res);
1109 /* mesa doesn't do real integers yet :-(and in GL this should be S32) */
1110 res = bld_cvt(bld, NV_TYPE_F32, NV_TYPE_U32, res);
1113 NOUVEAU_ERR("illegal/unhandled src reg file: %d\n", src->Register.File);
1118 return bld_undef(bld, NV_FILE_GPR);
1120 sgn = tgsi_util_get_full_src_register_sign_mode(src, chan);
1123 case TGSI_UTIL_SIGN_KEEP:
1125 case TGSI_UTIL_SIGN_CLEAR:
1126 res = bld_insn_1(bld, NV_OP_ABS_F32, res);
1128 case TGSI_UTIL_SIGN_TOGGLE:
1129 res = bld_insn_1(bld, NV_OP_NEG_F32, res);
1131 case TGSI_UTIL_SIGN_SET:
1132 res = bld_insn_1(bld, NV_OP_ABS_F32, res);
1133 res = bld_insn_1(bld, NV_OP_NEG_F32, res);
1136 NOUVEAU_ERR("illegal/unhandled src reg sign mode\n");
1145 bld_lit(struct bld_context *bld, struct nv_value *dst0[4],
1146 const struct tgsi_full_instruction *insn)
1148 struct nv_value *val0 = NULL;
1149 unsigned mask = insn->Dst[0].Register.WriteMask;
1151 if (mask & ((1 << 0) | (1 << 3)))
1152 dst0[3] = dst0[0] = bld_load_imm_f32(bld, 1.0f);
1154 if (mask & (3 << 1)) {
1155 val0 = bld_insn_2(bld, NV_OP_MAX, emit_fetch(bld, insn, 0, 0), bld->zero);
1156 if (mask & (1 << 1))
1160 if (mask & (1 << 2)) {
1161 struct nv_value *val1, *val3, *src1, *src3, *pred;
1162 struct nv_value *pos128 = bld_load_imm_f32(bld, 127.999999f);
1163 struct nv_value *neg128 = bld_load_imm_f32(bld, -127.999999f);
1165 src1 = emit_fetch(bld, insn, 0, 1);
1166 src3 = emit_fetch(bld, insn, 0, 3);
1168 pred = bld_setp(bld, NV_OP_SET_F32, NV_CC_LE, val0, bld->zero);
1170 val1 = bld_insn_2(bld, NV_OP_MAX_F32, src1, bld->zero);
1171 val3 = bld_insn_2(bld, NV_OP_MAX_F32, src3, neg128);
1172 val3 = bld_insn_2(bld, NV_OP_MIN_F32, val3, pos128);
1173 val3 = bld_pow(bld, val1, val3);
1175 dst0[2] = bld_insn_1(bld, NV_OP_MOV, bld->zero);
1176 bld_src_predicate(bld, dst0[2]->insn, 1, pred);
1178 dst0[2] = bld_insn_2(bld, NV_OP_SELECT, val3, dst0[2]);
1183 describe_texture_target(unsigned target, int *dim,
1184 int *array, int *cube, int *shadow)
1186 *dim = *array = *cube = *shadow = 0;
1189 case TGSI_TEXTURE_1D:
1192 case TGSI_TEXTURE_SHADOW1D:
1195 case TGSI_TEXTURE_UNKNOWN:
1196 case TGSI_TEXTURE_2D:
1197 case TGSI_TEXTURE_RECT:
1200 case TGSI_TEXTURE_SHADOW2D:
1201 case TGSI_TEXTURE_SHADOWRECT:
1205 case TGSI_TEXTURE_3D:
1208 case TGSI_TEXTURE_CUBE:
1212 case TGSI_TEXTURE_1D_ARRAY:
1215 case TGSI_TEXTURE_2D_ARRAY:
1220 case TGSI_TEXTURE_SHADOW1D_ARRAY:
1221 *dim = *array = *shadow = 1;
1223 case TGSI_TEXTURE_SHADOW2D_ARRAY:
1225 *array = *shadow = 1;
1227 case TGSI_TEXTURE_CUBE_ARRAY:
1238 static struct nv_value *
1239 bld_clone(struct bld_context *bld, struct nv_instruction *nvi)
1241 struct nv_instruction *dupi = new_instruction(bld->pc, nvi->opcode);
1242 struct nv_instruction *next, *prev;
1253 for (c = 0; c < 5 && nvi->def[c]; ++c)
1254 bld_def(dupi, c, new_value_like(bld->pc, nvi->def[c]));
1256 for (c = 0; c < 6 && nvi->src[c]; ++c) {
1257 dupi->src[c] = NULL;
1258 nv_reference(bld->pc, dupi, c, nvi->src[c]->value);
1261 return dupi->def[0];
1264 /* NOTE: proj(t0) = (t0 / w) / (tc3 / w) = tc0 / tc2 handled by optimizer */
1266 load_proj_tex_coords(struct bld_context *bld,
1267 struct nv_value *t[4], int dim, int shadow,
1268 const struct tgsi_full_instruction *insn)
1271 unsigned mask = (1 << dim) - 1;
1274 mask |= 4; /* depth comparison value */
1276 t[3] = emit_fetch(bld, insn, 0, 3);
1277 if (t[3]->insn->opcode == NV_OP_PINTERP) {
1278 t[3] = bld_clone(bld, t[3]->insn);
1279 t[3]->insn->opcode = NV_OP_LINTERP;
1280 nv_reference(bld->pc, t[3]->insn, 1, NULL);
1282 t[3] = bld_insn_1(bld, NV_OP_RCP, t[3]);
1284 for (c = 0; c < 4; ++c) {
1285 if (!(mask & (1 << c)))
1287 t[c] = emit_fetch(bld, insn, 0, c);
1289 if (t[c]->insn->opcode != NV_OP_PINTERP)
1293 t[c] = bld_clone(bld, t[c]->insn);
1294 nv_reference(bld->pc, t[c]->insn, 1, t[3]);
1299 t[3] = emit_fetch(bld, insn, 0, 3);
1300 t[3] = bld_insn_1(bld, NV_OP_RCP, t[3]);
1302 for (c = 0; c < 4; ++c)
1303 if (mask & (1 << c))
1304 t[c] = bld_insn_2(bld, NV_OP_MUL_F32, t[c], t[3]);
1307 /* For a quad of threads / top left, top right, bottom left, bottom right
1308 * pixels, do a different operation, and take src0 from a specific thread.
1315 #define QOP(a, b, c, d) \
1316 ((QOP_##a << 0) | (QOP_##b << 2) | (QOP_##c << 4) | (QOP_##d << 6))
1318 static INLINE struct nv_value *
1319 bld_quadop(struct bld_context *bld, ubyte qop, struct nv_value *src0, int lane,
1320 struct nv_value *src1, boolean wp)
1322 struct nv_value *val = bld_insn_2(bld, NV_OP_QUADOP, src0, src1);
1323 val->insn->lanes = lane;
1324 val->insn->quadop = qop;
1326 assert(!"quadop predicate write");
1331 /* order of TGSI operands: x y z layer shadow lod/bias */
1332 /* order of native operands: layer x y z | lod/bias shadow */
1333 static struct nv_instruction *
1334 emit_tex(struct bld_context *bld, uint opcode, int tic, int tsc,
1335 struct nv_value *dst[4], struct nv_value *arg[4],
1336 int dim, int array, int cube, int shadow)
1338 struct nv_value *src[4];
1339 struct nv_instruction *nvi, *bnd;
1342 boolean lodbias = opcode == NV_OP_TXB || opcode == NV_OP_TXL;
1345 arg[dim] = bld_cvt(bld, NV_TYPE_U32, NV_TYPE_F32, arg[dim]);
1347 /* bind { layer x y z } and { lod/bias shadow } to adjacent regs */
1349 bnd = new_instruction(bld->pc, NV_OP_BIND);
1351 src[s] = new_value(bld->pc, NV_FILE_GPR, 4);
1352 bld_def(bnd, s, src[s]);
1353 nv_reference(bld->pc, bnd, s++, arg[dim + cube]);
1355 for (c = 0; c < dim + cube; ++c, ++s) {
1356 src[s] = bld_def(bnd, s, new_value(bld->pc, NV_FILE_GPR, 4));
1357 nv_reference(bld->pc, bnd, s, arg[c]);
1360 if (shadow || lodbias) {
1361 bnd = new_instruction(bld->pc, NV_OP_BIND);
1364 src[s] = new_value(bld->pc, NV_FILE_GPR, 4);
1365 bld_def(bnd, 0, src[s++]);
1366 nv_reference(bld->pc, bnd, 0, arg[dim + cube + array + shadow]);
1369 src[s] = new_value(bld->pc, NV_FILE_GPR, 4);
1370 bld_def(bnd, lodbias, src[s++]);
1371 nv_reference(bld->pc, bnd, lodbias, arg[dim + cube + array]);
1375 nvi = new_instruction(bld->pc, opcode);
1376 for (c = 0; c < 4; ++c)
1377 dst[c] = bld_def(nvi, c, new_value(bld->pc, NV_FILE_GPR, 4));
1378 for (c = 0; c < s; ++c)
1379 nv_reference(bld->pc, nvi, c, src[c]);
1381 nvi->ext.tex.t = tic;
1382 nvi->ext.tex.s = tsc;
1383 nvi->tex_mask = 0xf;
1384 nvi->tex_cube = cube;
1386 nvi->tex_cube = cube;
1387 nvi->tex_shadow = shadow;
1388 nvi->tex_array = array;
1395 bld_tex(struct bld_context *bld, struct nv_value *dst0[4],
1396 const struct tgsi_full_instruction *insn)
1398 struct nv_value *t[4], *s[3];
1399 uint opcode = translate_opcode(insn->Instruction.Opcode);
1400 int c, dim, array, cube, shadow;
1401 const int lodbias = opcode == NV_OP_TXB || opcode == NV_OP_TXL;
1402 const int tic = insn->Src[1].Register.Index;
1403 const int tsc = tic;
1405 describe_texture_target(insn->Texture.Texture, &dim, &array, &cube, &shadow);
1407 assert(dim + array + shadow + lodbias <= 5);
1409 if (!cube && !array && insn->Instruction.Opcode == TGSI_OPCODE_TXP)
1410 load_proj_tex_coords(bld, t, dim, shadow, insn);
1412 for (c = 0; c < dim + cube + array; ++c)
1413 t[c] = emit_fetch(bld, insn, 0, c);
1415 t[c] = emit_fetch(bld, insn, 0, MAX2(c, 2));
1419 for (c = 0; c < 3; ++c)
1420 s[c] = bld_insn_1(bld, NV_OP_ABS_F32, t[c]);
1422 s[0] = bld_insn_2(bld, NV_OP_MAX_F32, s[0], s[1]);
1423 s[0] = bld_insn_2(bld, NV_OP_MAX_F32, s[0], s[2]);
1424 s[0] = bld_insn_1(bld, NV_OP_RCP, s[0]);
1426 for (c = 0; c < 3; ++c)
1427 t[c] = bld_insn_2(bld, NV_OP_MUL_F32, t[c], s[0]);
1431 t[dim + cube + array + shadow] = emit_fetch(bld, insn, 0, 3);
1433 emit_tex(bld, opcode, tic, tsc, dst0, t, dim, array, cube, shadow);
1436 static INLINE struct nv_value *
1437 bld_dot(struct bld_context *bld, const struct tgsi_full_instruction *insn,
1440 struct nv_value *dotp, *src0, *src1;
1443 src0 = emit_fetch(bld, insn, 0, 0);
1444 src1 = emit_fetch(bld, insn, 1, 0);
1445 dotp = bld_insn_2(bld, NV_OP_MUL_F32, src0, src1);
1447 for (c = 1; c < n; ++c) {
1448 src0 = emit_fetch(bld, insn, 0, c);
1449 src1 = emit_fetch(bld, insn, 1, c);
1450 dotp = bld_insn_3(bld, NV_OP_MAD_F32, src0, src1, dotp);
1455 #define FOR_EACH_DST0_ENABLED_CHANNEL(chan, inst) \
1456 for (chan = 0; chan < 4; ++chan) \
1457 if ((inst)->Dst[0].Register.WriteMask & (1 << chan))
1460 bld_instruction(struct bld_context *bld,
1461 const struct tgsi_full_instruction *insn)
1463 struct nv_value *src0;
1464 struct nv_value *src1;
1465 struct nv_value *src2;
1466 struct nv_value *dst0[4] = { NULL };
1467 struct nv_value *temp;
1469 uint opcode = translate_opcode(insn->Instruction.Opcode);
1470 uint8_t mask = insn->Dst[0].Register.WriteMask;
1472 #if NV50_DEBUG & NV50_DEBUG_PROG_IR
1473 debug_printf("bld_instruction:"); tgsi_dump_instruction(insn, 1);
1476 switch (insn->Instruction.Opcode) {
1477 case TGSI_OPCODE_ADD:
1478 case TGSI_OPCODE_MAX:
1479 case TGSI_OPCODE_MIN:
1480 case TGSI_OPCODE_MUL:
1481 FOR_EACH_DST0_ENABLED_CHANNEL(c, insn) {
1482 src0 = emit_fetch(bld, insn, 0, c);
1483 src1 = emit_fetch(bld, insn, 1, c);
1484 dst0[c] = bld_insn_2(bld, opcode, src0, src1);
1487 case TGSI_OPCODE_ARL:
1488 src1 = bld_imm_u32(bld, 4);
1489 FOR_EACH_DST0_ENABLED_CHANNEL(c, insn) {
1490 src0 = emit_fetch(bld, insn, 0, c);
1491 src0 = bld_insn_1(bld, NV_OP_FLOOR, src0);
1492 src0->insn->ext.cvt.d = NV_TYPE_S32;
1493 src0->insn->ext.cvt.s = NV_TYPE_F32;
1494 dst0[c] = bld_insn_2(bld, NV_OP_SHL, src0, src1);
1497 case TGSI_OPCODE_CMP:
1498 FOR_EACH_DST0_ENABLED_CHANNEL(c, insn) {
1499 src0 = emit_fetch(bld, insn, 0, c);
1500 src1 = emit_fetch(bld, insn, 1, c);
1501 src2 = emit_fetch(bld, insn, 2, c);
1502 dst0[c] = bld_insn_3(bld, NV_OP_SLCT_F32, src1, src2, src0);
1503 dst0[c]->insn->set_cond = NV_CC_LT;
1506 case TGSI_OPCODE_COS:
1507 case TGSI_OPCODE_SIN:
1508 src0 = emit_fetch(bld, insn, 0, 0);
1509 temp = bld_insn_1(bld, NV_OP_PRESIN, src0);
1510 if (insn->Dst[0].Register.WriteMask & 7)
1511 temp = bld_insn_1(bld, opcode, temp);
1512 for (c = 0; c < 3; ++c)
1513 if (insn->Dst[0].Register.WriteMask & (1 << c))
1515 if (!(insn->Dst[0].Register.WriteMask & (1 << 3)))
1517 src0 = emit_fetch(bld, insn, 0, 3);
1518 temp = bld_insn_1(bld, NV_OP_PRESIN, src0);
1519 dst0[3] = bld_insn_1(bld, opcode, temp);
1521 case TGSI_OPCODE_DP2:
1522 temp = bld_dot(bld, insn, 2);
1523 FOR_EACH_DST0_ENABLED_CHANNEL(c, insn)
1526 case TGSI_OPCODE_DP3:
1527 temp = bld_dot(bld, insn, 3);
1528 FOR_EACH_DST0_ENABLED_CHANNEL(c, insn)
1531 case TGSI_OPCODE_DP4:
1532 temp = bld_dot(bld, insn, 4);
1533 FOR_EACH_DST0_ENABLED_CHANNEL(c, insn)
1536 case TGSI_OPCODE_DPH:
1537 src0 = bld_dot(bld, insn, 3);
1538 src1 = emit_fetch(bld, insn, 1, 3);
1539 temp = bld_insn_2(bld, NV_OP_ADD_F32, src0, src1);
1540 FOR_EACH_DST0_ENABLED_CHANNEL(c, insn)
1543 case TGSI_OPCODE_DST:
1544 if (insn->Dst[0].Register.WriteMask & 1)
1545 dst0[0] = bld_imm_f32(bld, 1.0f);
1546 if (insn->Dst[0].Register.WriteMask & 2) {
1547 src0 = emit_fetch(bld, insn, 0, 1);
1548 src1 = emit_fetch(bld, insn, 1, 1);
1549 dst0[1] = bld_insn_2(bld, NV_OP_MUL_F32, src0, src1);
1551 if (insn->Dst[0].Register.WriteMask & 4)
1552 dst0[2] = emit_fetch(bld, insn, 0, 2);
1553 if (insn->Dst[0].Register.WriteMask & 8)
1554 dst0[3] = emit_fetch(bld, insn, 1, 3);
1556 case TGSI_OPCODE_EXP:
1557 src0 = emit_fetch(bld, insn, 0, 0);
1558 temp = bld_insn_1(bld, NV_OP_FLOOR, src0);
1560 if (insn->Dst[0].Register.WriteMask & 2)
1561 dst0[1] = bld_insn_2(bld, NV_OP_SUB_F32, src0, temp);
1562 if (insn->Dst[0].Register.WriteMask & 1) {
1563 temp = bld_insn_1(bld, NV_OP_PREEX2, temp);
1564 dst0[0] = bld_insn_1(bld, NV_OP_EX2, temp);
1566 if (insn->Dst[0].Register.WriteMask & 4) {
1567 temp = bld_insn_1(bld, NV_OP_PREEX2, src0);
1568 dst0[2] = bld_insn_1(bld, NV_OP_EX2, temp);
1570 if (insn->Dst[0].Register.WriteMask & 8)
1571 dst0[3] = bld_imm_f32(bld, 1.0f);
1573 case TGSI_OPCODE_EX2:
1574 src0 = emit_fetch(bld, insn, 0, 0);
1575 temp = bld_insn_1(bld, NV_OP_PREEX2, src0);
1576 temp = bld_insn_1(bld, NV_OP_EX2, temp);
1577 FOR_EACH_DST0_ENABLED_CHANNEL(c, insn)
1580 case TGSI_OPCODE_FRC:
1581 FOR_EACH_DST0_ENABLED_CHANNEL(c, insn) {
1582 src0 = emit_fetch(bld, insn, 0, c);
1583 dst0[c] = bld_insn_1(bld, NV_OP_FLOOR, src0);
1584 dst0[c] = bld_insn_2(bld, NV_OP_SUB_F32, src0, dst0[c]);
1587 case TGSI_OPCODE_KIL:
1588 for (c = 0; c < 4; ++c)
1589 bld_kil(bld, emit_fetch(bld, insn, 0, c));
1591 case TGSI_OPCODE_KILP:
1592 (new_instruction(bld->pc, NV_OP_KIL))->fixed = 1;
1594 case TGSI_OPCODE_IF:
1596 struct nv_basic_block *b = new_basic_block(bld->pc);
1597 struct nv_value *pred = emit_fetch(bld, insn, 0, 0);
1599 assert(bld->cond_lvl < BLD_MAX_COND_NESTING);
1601 nvc0_bblock_attach(bld->pc->current_block, b, CFG_EDGE_FORWARD);
1603 bld->join_bb[bld->cond_lvl] = bld->pc->current_block;
1604 bld->cond_bb[bld->cond_lvl] = bld->pc->current_block;
1606 if (pred->insn && NV_BASEOP(pred->insn->opcode) == NV_OP_SET) {
1607 pred = bld_clone(bld, pred->insn);
1609 pred->reg.file = NV_FILE_PRED;
1610 if (pred->insn->opcode == NV_OP_FSET_F32)
1611 pred->insn->opcode = NV_OP_SET_F32;
1613 pred = bld_setp(bld, NV_OP_SET_U32, NV_CC_NE | NV_CC_U,
1618 bld_flow(bld, NV_OP_BRA, pred, NV_CC_NOT_P, NULL, (bld->cond_lvl == 0));
1621 bld_new_block(bld, b);
1624 case TGSI_OPCODE_ELSE:
1626 struct nv_basic_block *b = new_basic_block(bld->pc);
1629 nvc0_bblock_attach(bld->join_bb[bld->cond_lvl], b, CFG_EDGE_FORWARD);
1631 bld->cond_bb[bld->cond_lvl]->exit->target = b;
1632 bld->cond_bb[bld->cond_lvl] = bld->pc->current_block;
1634 new_instruction(bld->pc, NV_OP_BRA)->terminator = 1;
1637 bld_new_block(bld, b);
1640 case TGSI_OPCODE_ENDIF:
1642 struct nv_basic_block *b = new_basic_block(bld->pc);
1644 if (!nvc0_bblock_is_terminated(bld->pc->current_block))
1645 bld_flow(bld, NV_OP_BRA, NULL, NV_CC_P, b, FALSE);
1648 nvc0_bblock_attach(bld->pc->current_block, b, bld->out_kind);
1649 nvc0_bblock_attach(bld->cond_bb[bld->cond_lvl], b, CFG_EDGE_FORWARD);
1651 bld->cond_bb[bld->cond_lvl]->exit->target = b;
1653 bld_new_block(bld, b);
1655 if (!bld->cond_lvl && bld->join_bb[bld->cond_lvl]) {
1656 bld->join_bb[bld->cond_lvl]->exit->prev->target = b;
1657 new_instruction(bld->pc, NV_OP_JOIN)->join = 1;
1661 case TGSI_OPCODE_BGNLOOP:
1663 struct nv_basic_block *bl = new_basic_block(bld->pc);
1664 struct nv_basic_block *bb = new_basic_block(bld->pc);
1666 assert(bld->loop_lvl < BLD_MAX_LOOP_NESTING);
1668 bld->loop_bb[bld->loop_lvl] = bl;
1669 bld->brkt_bb[bld->loop_lvl] = bb;
1671 nvc0_bblock_attach(bld->pc->current_block, bl, CFG_EDGE_LOOP_ENTER);
1673 bld_new_block(bld, bld->loop_bb[bld->loop_lvl++]);
1675 if (bld->loop_lvl == bld->pc->loop_nesting_bound)
1676 bld->pc->loop_nesting_bound++;
1678 bld_clear_def_use(&bld->tvs[0][0], BLD_MAX_TEMPS, bld->loop_lvl);
1679 bld_clear_def_use(&bld->avs[0][0], BLD_MAX_ADDRS, bld->loop_lvl);
1680 bld_clear_def_use(&bld->pvs[0][0], BLD_MAX_PREDS, bld->loop_lvl);
1683 case TGSI_OPCODE_BRK:
1685 struct nv_basic_block *bb = bld->brkt_bb[bld->loop_lvl - 1];
1687 bld_flow(bld, NV_OP_BRA, NULL, NV_CC_P, bb, FALSE);
1689 if (bld->out_kind == CFG_EDGE_FORWARD) /* else we already had BRK/CONT */
1690 nvc0_bblock_attach(bld->pc->current_block, bb, CFG_EDGE_LOOP_LEAVE);
1692 bld->out_kind = CFG_EDGE_FAKE;
1695 case TGSI_OPCODE_CONT:
1697 struct nv_basic_block *bb = bld->loop_bb[bld->loop_lvl - 1];
1699 bld_flow(bld, NV_OP_BRA, NULL, NV_CC_P, bb, FALSE);
1701 nvc0_bblock_attach(bld->pc->current_block, bb, CFG_EDGE_BACK);
1703 if ((bb = bld->join_bb[bld->cond_lvl - 1])) {
1704 bld->join_bb[bld->cond_lvl - 1] = NULL;
1705 nvc0_insn_delete(bb->exit->prev);
1707 bld->out_kind = CFG_EDGE_FAKE;
1710 case TGSI_OPCODE_ENDLOOP:
1712 struct nv_basic_block *bb = bld->loop_bb[bld->loop_lvl - 1];
1714 if (bld->out_kind != CFG_EDGE_FAKE) { /* else we already had BRK/CONT */
1715 bld_flow(bld, NV_OP_BRA, NULL, NV_CC_P, bb, FALSE);
1717 nvc0_bblock_attach(bld->pc->current_block, bb, CFG_EDGE_BACK);
1720 bld_loop_end(bld, bb); /* replace loop-side operand of the phis */
1722 bld_new_block(bld, bld->brkt_bb[--bld->loop_lvl]);
1725 case TGSI_OPCODE_ABS:
1726 case TGSI_OPCODE_CEIL:
1727 case TGSI_OPCODE_FLR:
1728 case TGSI_OPCODE_TRUNC:
1729 case TGSI_OPCODE_DDX:
1730 case TGSI_OPCODE_DDY:
1731 FOR_EACH_DST0_ENABLED_CHANNEL(c, insn) {
1732 src0 = emit_fetch(bld, insn, 0, c);
1733 dst0[c] = bld_insn_1(bld, opcode, src0);
1736 case TGSI_OPCODE_LIT:
1737 bld_lit(bld, dst0, insn);
1739 case TGSI_OPCODE_LRP:
1740 FOR_EACH_DST0_ENABLED_CHANNEL(c, insn) {
1741 src0 = emit_fetch(bld, insn, 0, c);
1742 src1 = emit_fetch(bld, insn, 1, c);
1743 src2 = emit_fetch(bld, insn, 2, c);
1744 dst0[c] = bld_insn_2(bld, NV_OP_SUB_F32, src1, src2);
1745 dst0[c] = bld_insn_3(bld, NV_OP_MAD_F32, dst0[c], src0, src2);
1748 case TGSI_OPCODE_MOV:
1749 FOR_EACH_DST0_ENABLED_CHANNEL(c, insn)
1750 dst0[c] = emit_fetch(bld, insn, 0, c);
1752 case TGSI_OPCODE_MAD:
1753 FOR_EACH_DST0_ENABLED_CHANNEL(c, insn) {
1754 src0 = emit_fetch(bld, insn, 0, c);
1755 src1 = emit_fetch(bld, insn, 1, c);
1756 src2 = emit_fetch(bld, insn, 2, c);
1757 dst0[c] = bld_insn_3(bld, opcode, src0, src1, src2);
1760 case TGSI_OPCODE_POW:
1761 src0 = emit_fetch(bld, insn, 0, 0);
1762 src1 = emit_fetch(bld, insn, 1, 0);
1763 temp = bld_pow(bld, src0, src1);
1764 FOR_EACH_DST0_ENABLED_CHANNEL(c, insn)
1767 case TGSI_OPCODE_LOG:
1768 src0 = emit_fetch(bld, insn, 0, 0);
1769 src0 = bld_insn_1(bld, NV_OP_ABS_F32, src0);
1770 temp = bld_insn_1(bld, NV_OP_LG2, src0);
1772 if (insn->Dst[0].Register.WriteMask & 3) {
1773 temp = bld_insn_1(bld, NV_OP_FLOOR, temp);
1776 if (insn->Dst[0].Register.WriteMask & 2) {
1777 temp = bld_insn_1(bld, NV_OP_PREEX2, temp);
1778 temp = bld_insn_1(bld, NV_OP_EX2, temp);
1779 temp = bld_insn_1(bld, NV_OP_RCP, temp);
1780 dst0[1] = bld_insn_2(bld, NV_OP_MUL_F32, src0, temp);
1782 if (insn->Dst[0].Register.WriteMask & 8)
1783 dst0[3] = bld_imm_f32(bld, 1.0f);
1785 case TGSI_OPCODE_RCP:
1786 case TGSI_OPCODE_LG2:
1787 src0 = emit_fetch(bld, insn, 0, 0);
1788 temp = bld_insn_1(bld, opcode, src0);
1789 FOR_EACH_DST0_ENABLED_CHANNEL(c, insn)
1792 case TGSI_OPCODE_RSQ:
1793 src0 = emit_fetch(bld, insn, 0, 0);
1794 temp = bld_insn_1(bld, NV_OP_ABS_F32, src0);
1795 temp = bld_insn_1(bld, NV_OP_RSQ, temp);
1796 FOR_EACH_DST0_ENABLED_CHANNEL(c, insn)
1799 case TGSI_OPCODE_SLT:
1800 case TGSI_OPCODE_SGE:
1801 case TGSI_OPCODE_SEQ:
1802 case TGSI_OPCODE_SGT:
1803 case TGSI_OPCODE_SLE:
1804 case TGSI_OPCODE_SNE:
1805 case TGSI_OPCODE_ISLT:
1806 case TGSI_OPCODE_ISGE:
1807 case TGSI_OPCODE_USEQ:
1808 case TGSI_OPCODE_USGE:
1809 case TGSI_OPCODE_USLT:
1810 case TGSI_OPCODE_USNE:
1811 FOR_EACH_DST0_ENABLED_CHANNEL(c, insn) {
1812 src0 = emit_fetch(bld, insn, 0, c);
1813 src1 = emit_fetch(bld, insn, 1, c);
1814 dst0[c] = bld_insn_2(bld, opcode, src0, src1);
1815 dst0[c]->insn->set_cond = translate_setcc(insn->Instruction.Opcode);
1818 case TGSI_OPCODE_SCS:
1819 if (insn->Dst[0].Register.WriteMask & 0x3) {
1820 src0 = emit_fetch(bld, insn, 0, 0);
1821 temp = bld_insn_1(bld, NV_OP_PRESIN, src0);
1822 if (insn->Dst[0].Register.WriteMask & 0x1)
1823 dst0[0] = bld_insn_1(bld, NV_OP_COS, temp);
1824 if (insn->Dst[0].Register.WriteMask & 0x2)
1825 dst0[1] = bld_insn_1(bld, NV_OP_SIN, temp);
1827 if (insn->Dst[0].Register.WriteMask & 0x4)
1828 dst0[2] = bld_imm_f32(bld, 0.0f);
1829 if (insn->Dst[0].Register.WriteMask & 0x8)
1830 dst0[3] = bld_imm_f32(bld, 1.0f);
1832 case TGSI_OPCODE_SSG:
1833 FOR_EACH_DST0_ENABLED_CHANNEL(c, insn) { /* XXX: set lt, set gt, sub */
1834 src0 = emit_fetch(bld, insn, 0, c);
1835 src1 = bld_insn_2(bld, NV_OP_FSET_F32, src0, bld->zero);
1836 src2 = bld_insn_2(bld, NV_OP_FSET_F32, src0, bld->zero);
1837 src1->insn->set_cond = NV_CC_GT;
1838 src2->insn->set_cond = NV_CC_LT;
1839 dst0[c] = bld_insn_2(bld, NV_OP_SUB_F32, src1, src2);
1842 case TGSI_OPCODE_SUB:
1843 FOR_EACH_DST0_ENABLED_CHANNEL(c, insn) {
1844 src0 = emit_fetch(bld, insn, 0, c);
1845 src1 = emit_fetch(bld, insn, 1, c);
1846 dst0[c] = bld_insn_2(bld, NV_OP_SUB_F32, src0, src1);
1849 case TGSI_OPCODE_TEX:
1850 case TGSI_OPCODE_TXB:
1851 case TGSI_OPCODE_TXL:
1852 case TGSI_OPCODE_TXP:
1853 bld_tex(bld, dst0, insn);
1855 case TGSI_OPCODE_XPD:
1856 FOR_EACH_DST0_ENABLED_CHANNEL(c, insn) {
1858 dst0[3] = bld_imm_f32(bld, 1.0f);
1861 src0 = emit_fetch(bld, insn, 1, (c + 1) % 3);
1862 src1 = emit_fetch(bld, insn, 0, (c + 2) % 3);
1863 dst0[c] = bld_insn_2(bld, NV_OP_MUL_F32, src0, src1);
1865 src0 = emit_fetch(bld, insn, 0, (c + 1) % 3);
1866 src1 = emit_fetch(bld, insn, 1, (c + 2) % 3);
1867 dst0[c] = bld_insn_3(bld, NV_OP_MAD_F32, src0, src1, dst0[c]);
1869 dst0[c]->insn->src[2]->mod ^= NV_MOD_NEG;
1872 case TGSI_OPCODE_RET:
1873 (new_instruction(bld->pc, NV_OP_RET))->fixed = 1;
1875 case TGSI_OPCODE_END:
1876 /* VP outputs are exported in-place as scalars, optimization later */
1877 if (bld->pc->is_fragprog)
1878 bld_export_fp_outputs(bld);
1879 if (bld->ti->append_ucp)
1880 bld_append_vp_ucp(bld);
1883 NOUVEAU_ERR("unhandled opcode %u\n", insn->Instruction.Opcode);
1888 if (insn->Dst[0].Register.File == TGSI_FILE_OUTPUT &&
1889 !bld->pc->is_fragprog) {
1890 struct nv_instruction *mi = NULL;
1893 if (bld->ti->append_ucp) {
1894 if (bld->ti->output_loc[insn->Dst[0].Register.Index][0] == 0x70) {
1895 bld->hpos_index = insn->Dst[0].Register.Index;
1896 for (c = 0; c < 4; ++c)
1897 if (mask & (1 << c))
1898 STORE_OUTP(insn->Dst[0].Register.Index, c, dst0[c]);
1902 for (c = 0; c < 4; ++c)
1903 if (mask & (1 << c))
1904 if ((dst0[c]->reg.file == NV_FILE_IMM) ||
1905 (dst0[c]->reg.file == NV_FILE_GPR && dst0[c]->reg.id == 63))
1906 dst0[c] = bld_insn_1(bld, NV_OP_MOV, dst0[c]);
1909 if ((mask & 0x3) == 0x3) {
1912 mi = bld_insn_2(bld, NV_OP_BIND, dst0[0], dst0[1])->insn;
1914 if ((mask & 0xc) == 0xc) {
1918 nv_reference(bld->pc, mi, 2, dst0[2]);
1919 nv_reference(bld->pc, mi, 3, dst0[3]);
1923 mi = bld_insn_2(bld, NV_OP_BIND, dst0[2], dst0[3])->insn;
1926 if (mi && (mask & 0x4)) {
1929 nv_reference(bld->pc, mi, 2, dst0[2]);
1933 struct nv_instruction *ex = new_instruction(bld->pc, NV_OP_EXPORT);
1936 nv_reference(bld->pc, ex, 0, new_value(bld->pc, NV_FILE_MEM_V, 4));
1937 nv_reference(bld->pc, ex, 1, mi->def[0]);
1939 for (s = 1; s < size / 4; ++s) {
1940 bld_def(mi, s, new_value(bld->pc, NV_FILE_GPR, 4));
1941 nv_reference(bld->pc, ex, s + 1, mi->def[s]);
1945 ex->src[0]->value->reg.size = size;
1946 ex->src[0]->value->reg.address =
1947 bld->ti->output_loc[insn->Dst[0].Register.Index][c];
1951 for (c = 0; c < 4; ++c)
1952 if (mask & (1 << c))
1953 emit_store(bld, insn, c, dst0[c]);
1957 bld_free_registers(struct bld_register *base, int n)
1961 for (i = 0; i < n; ++i)
1962 for (c = 0; c < 4; ++c)
1963 util_dynarray_fini(&base[i * 4 + c].vals);
1967 nvc0_tgsi_to_nc(struct nv_pc *pc, struct nvc0_translation_info *ti)
1969 struct bld_context *bld = CALLOC_STRUCT(bld_context);
1972 pc->root[0] = pc->current_block = new_basic_block(pc);
1977 pc->loop_nesting_bound = 1;
1979 bld->zero = new_value(pc, NV_FILE_GPR, 4);
1980 bld->zero->reg.id = 63;
1982 if (pc->is_fragprog) {
1983 struct nv_value *mem = new_value(pc, NV_FILE_MEM_V, 4);
1984 mem->reg.address = 0x7c;
1986 bld->frag_coord[3] = bld_insn_1(bld, NV_OP_LINTERP, mem);
1987 bld->frag_coord[3] = bld_insn_1(bld, NV_OP_RCP, bld->frag_coord[3]);
1990 for (ip = 0; ip < ti->num_insns; ++ip)
1991 bld_instruction(bld, &ti->insns[ip]);
1993 bld_free_registers(&bld->tvs[0][0], BLD_MAX_TEMPS);
1994 bld_free_registers(&bld->avs[0][0], BLD_MAX_ADDRS);
1995 bld_free_registers(&bld->pvs[0][0], BLD_MAX_PREDS);
1996 bld_free_registers(&bld->ovs[0][0], PIPE_MAX_SHADER_OUTPUTS);
2002 /* If a variable is assigned in a loop, replace all references to the value
2003 * from outside the loop with a phi value.
2006 bld_replace_value(struct nv_pc *pc, struct nv_basic_block *b,
2007 struct nv_value *old_val,
2008 struct nv_value *new_val)
2010 struct nv_instruction *nvi;
2012 for (nvi = b->phi ? b->phi : b->entry; nvi; nvi = nvi->next) {
2014 for (s = 0; s < 6 && nvi->src[s]; ++s)
2015 if (nvi->src[s]->value == old_val)
2016 nv_reference(pc, nvi, s, new_val);
2019 b->pass_seq = pc->pass_seq;
2021 if (b->out[0] && b->out[0]->pass_seq < pc->pass_seq)
2022 bld_replace_value(pc, b->out[0], old_val, new_val);
2024 if (b->out[1] && b->out[1]->pass_seq < pc->pass_seq)
2025 bld_replace_value(pc, b->out[1], old_val, new_val);