1 #include "pipe/p_context.h"
2 #include "pipe/p_defines.h"
3 #include "pipe/p_state.h"
4 #include "util/u_inlines.h"
6 #include "pipe/p_shader_tokens.h"
7 #include "tgsi/tgsi_parse.h"
8 #include "tgsi/tgsi_util.h"
10 #include "nvfx_context.h"
11 #include "nvfx_shader.h"
/* Upper bound on entries in the compiler-side constant table (fpc->consts). */
13 #define MAX_CONSTS 128
/* Per-compile state for the fragment program translator.
 * NOTE(review): the enclosing "struct nvfx_fpc {" opener and several fields
 * (r_temps bitmask, nr_consts, consts[], inst_offset, have_const, num_regs,
 * nr_imm -- all referenced below) are elided from this chunk. */
16 struct nvfx_fragment_program *fp;
/* Maps TGSI input index -> hardware input source (NVFX_FP_OP_INPUT_SRC_*). */
18 uint attrib_map[PIPE_MAX_SHADER_INPUTS];
/* Temps allocated for the current instruction only; freed by release_temps(). */
21 unsigned r_temps_discard;
/* Hardware register assigned to each TGSI OUTPUT index. */
22 struct nvfx_sreg r_result[PIPE_MAX_SHADER_OUTPUTS];
/* Hardware temp assigned to each TGSI TEMPORARY index (allocated in prepare). */
23 struct nvfx_sreg *r_temp;
/* Constant-table slots holding TGSI immediates.
 * NOTE(review): the MAX_IMM definition is not visible in this chunk. */
36 struct nvfx_sreg imm[MAX_IMM];
/* Allocate the lowest free hardware temp from the r_temps bitmask and mark it
 * for automatic release at the end of the current TGSI instruction (via
 * r_temps_discard / release_temps()).  On exhaustion it logs and returns
 * temp 0 -- callers do not recover from this.
 * NOTE(review): the "idx < 0" check around the error path is elided here. */
40 static INLINE struct nvfx_sreg
41 temp(struct nvfx_fpc *fpc)
43 int idx = ffs(~fpc->r_temps) - 1;
46 NOUVEAU_ERR("out of temps!!\n");
48 return nvfx_sr(NVFXSR_TEMP, 0);
51 fpc->r_temps |= (1 << idx);
52 fpc->r_temps_discard |= (1 << idx);
53 return nvfx_sr(NVFXSR_TEMP, idx);
/* Free all temps allocated with temp() since the last call; called after each
 * TGSI instruction is emitted.  Temps reserved for TGSI TEMPORARY/OUTPUT regs
 * are never in r_temps_discard and so survive. */
57 release_temps(struct nvfx_fpc *fpc)
59 fpc->r_temps &= ~fpc->r_temps_discard;
60 fpc->r_temps_discard = 0;
/* Reserve a slot in the compiler's constant table.
 * pipe >= 0: the value comes from the bound constant buffer at that index
 *            (patched at validate time); vals is NULL in that case.
 * pipe == -1: an immediate; vals[4] holds the literal data.
 * NOTE(review): as shown, the memcpy below would dereference vals even for
 * pipe constants where tgsi_src() passes NULL -- a guard (presumably
 * "if (pipe == -1)") appears to be elided from this chunk; confirm. */
63 static INLINE struct nvfx_sreg
64 constant(struct nvfx_fpc *fpc, int pipe, float vals[4])
68 if (fpc->nr_consts == MAX_CONSTS)
70 idx = fpc->nr_consts++;
72 fpc->consts[idx].pipe = pipe;
74 memcpy(fpc->consts[idx].vals, vals, 4 * sizeof(float));
75 return nvfx_sr(NVFXSR_CONST, idx);
/* Convenience wrappers that paste the opcode name onto NVFX_FP_OP_OPCODE_*. */
78 #define arith(cc,s,o,d,m,s0,s1,s2) \
79 	nvfx_fp_arith((cc), (s), NVFX_FP_OP_OPCODE_##o, \
80 			(d), (m), (s0), (s1), (s2))
/* NOTE(review): tex() accepts s1/s2 but deliberately discards them, passing
 * the caller's local "none" register instead -- it therefore only expands
 * correctly inside a scope that declares "none" (see parse_instruction). */
81 #define tex(cc,s,o,u,d,m,s0,s1,s2) \
82 	nvfx_fp_tex((cc), (s), NVFX_FP_OP_OPCODE_##o, (u), \
83 			(d), (m), (s0), none, none)
/* Grow the instruction store by "size" 32-bit words.
 * NOTE(review): the line incrementing fp->insn_len by size is elided here.
 * NOTE(review): "p = realloc(p, ...)" overwrites the only pointer and the
 * result is unchecked -- on allocation failure this leaks and the caller
 * writes through NULL.  Worth fixing when the full function is in view. */
86 grow_insns(struct nvfx_fpc *fpc, int size)
88 struct nvfx_fragment_program *fp = fpc->fp;
91 fp->insn = realloc(fp->insn, sizeof(uint32_t) * fp->insn_len);
/* Encode source operand "pos" (0..2) of the instruction at fpc->inst_offset.
 * Builds the per-source descriptor word "sr" (type, register, negate, swizzle)
 * and, for constants, appends/fills the 4 inline constant words that follow
 * the 4 instruction words.
 * NOTE(review): the switch on src.type and several case/break lines are
 * elided from this chunk. */
95 emit_src(struct nvfx_fpc *fpc, int pos, struct nvfx_sreg src)
97 struct nvfx_fragment_program *fp = fpc->fp;
98 uint32_t *hw = &fp->insn[fpc->inst_offset];
/* INPUT: source register index lives in instruction word 0, not in sr. */
103 sr |= (NVFX_FP_REG_TYPE_INPUT << NVFX_FP_REG_TYPE_SHIFT);
104 hw[0] |= (src.index << NVFX_FP_OP_INPUT_SRC_SHIFT);
107 sr |= NVFX_FP_REG_SRC_HALF;
110 sr |= (NVFX_FP_REG_TYPE_TEMP << NVFX_FP_REG_TYPE_SHIFT);
111 sr |= (src.index << NVFX_FP_OP_SRC_SHIFT);
/* CONST: first constant reference grows the insn store by 4 words (elided
 * grow_insns call) -- hence hw must be re-fetched after possible realloc. */
114 if (!fpc->have_const) {
119 hw = &fp->insn[fpc->inst_offset];
/* pipe >= 0: record a patch site so validate() can copy the user constant
 * buffer value into the inline slot; otherwise copy the immediate now. */
120 if (fpc->consts[src.index].pipe >= 0) {
121 struct nvfx_fragment_program_data *fpd;
/* NOTE(review): unchecked "p = realloc(p, ...)" -- leaks / NULL-deref on
 * allocation failure; same pattern as grow_insns. */
123 fp->consts = realloc(fp->consts, ++fp->nr_consts *
125 fpd = &fp->consts[fp->nr_consts - 1];
126 fpd->offset = fpc->inst_offset + 4;
127 fpd->index = fpc->consts[src.index].pipe;
128 memset(&fp->insn[fpd->offset], 0, sizeof(uint32_t) * 4);
130 memcpy(&fp->insn[fpc->inst_offset + 4],
131 fpc->consts[src.index].vals,
132 sizeof(uint32_t) * 4);
135 sr |= (NVFX_FP_REG_TYPE_CONST << NVFX_FP_REG_TYPE_SHIFT);
/* NONE is encoded as an INPUT-type source (elided case label above). */
138 sr |= (NVFX_FP_REG_TYPE_INPUT << NVFX_FP_REG_TYPE_SHIFT);
145 sr |= NVFX_FP_REG_NEGATE;
/* Per-source absolute-value flag: bits 29..31 of word 1 for pos 0..2.
 * Presumably guarded by "if (src.abs)" on an elided line -- confirm. */
148 hw[1] |= (1 << (29 + pos));
150 sr |= ((src.swz[0] << NVFX_FP_REG_SWZ_X_SHIFT) |
151 (src.swz[1] << NVFX_FP_REG_SWZ_Y_SHIFT) |
152 (src.swz[2] << NVFX_FP_REG_SWZ_Z_SHIFT) |
153 (src.swz[3] << NVFX_FP_REG_SWZ_W_SHIFT));
/* Encode the destination of the instruction at fpc->inst_offset.
 * Tracks the high-water mark of temp usage in fpc->num_regs (consumed by
 * translate() for the hardware TEMP_COUNT field).
 * NOTE(review): the switch on dst.type and its case labels are elided. */
159 emit_dst(struct nvfx_fpc *fpc, struct nvfx_sreg dst)
161 struct nvfx_fragment_program *fp = fpc->fp;
162 uint32_t *hw = &fp->insn[fpc->inst_offset];
166 if (fpc->num_regs < (dst.index + 1))
167 fpc->num_regs = dst.index + 1;
/* OUTPUT index 1: sets bits 0xe in fp_control -- presumably the depth-output
 * enable for this hardware; confirm against the NV34TCL_FP_CONTROL layout. */
170 if (dst.index == 1) {
171 fp->fp_control |= 0xe;
173 hw[0] |= NVFX_FP_OP_OUT_REG_HALF;
183 hw[0] |= (dst.index << NVFX_FP_OP_OUT_REG_SHIFT);
/* Emit one 4-word ALU instruction: opcode, write mask, destination scale,
 * optional saturation, condition-code update/test state, then up to three
 * encoded sources.  Unused sources are passed as NVFXSR_NONE.
 * NOTE(review): the grow_insns(fpc, 4) call and the conditionals guarding
 * the sat / cc_update / cc_test fields are elided from this chunk. */
187 nvfx_fp_arith(struct nvfx_fpc *fpc, int sat, int op,
188 struct nvfx_sreg dst, int mask,
189 struct nvfx_sreg s0, struct nvfx_sreg s1, struct nvfx_sreg s2)
191 struct nvfx_fragment_program *fp = fpc->fp;
/* New instruction goes at the current end of the stream. */
194 fpc->inst_offset = fp->insn_len;
197 hw = &fp->insn[fpc->inst_offset];
198 memset(hw, 0, sizeof(uint32_t) * 4);
/* KIL anywhere in the program must be reflected in the FP control word. */
200 if (op == NVFX_FP_OP_OPCODE_KIL)
201 fp->fp_control |= NV34TCL_FP_CONTROL_USES_KIL;
202 hw[0] |= (op << NVFX_FP_OP_OPCODE_SHIFT);
203 hw[0] |= (mask << NVFX_FP_OP_OUTMASK_SHIFT);
204 hw[2] |= (dst.dst_scale << NVFX_FP_OP_DST_SCALE_SHIFT);
207 hw[0] |= NVFX_FP_OP_OUT_SAT;
/* Condition-code write plus the test condition and its swizzle (word 1). */
210 hw[0] |= NVFX_FP_OP_COND_WRITE_ENABLE;
211 hw[1] |= (dst.cc_test << NVFX_FP_OP_COND_SHIFT);
212 hw[1] |= ((dst.cc_swz[0] << NVFX_FP_OP_COND_SWZ_X_SHIFT) |
213 (dst.cc_swz[1] << NVFX_FP_OP_COND_SWZ_Y_SHIFT) |
214 (dst.cc_swz[2] << NVFX_FP_OP_COND_SWZ_Z_SHIFT) |
215 (dst.cc_swz[3] << NVFX_FP_OP_COND_SWZ_W_SHIFT));
218 emit_src(fpc, 0, s0);
219 emit_src(fpc, 1, s1);
220 emit_src(fpc, 2, s2);
/* Emit a texture instruction: same encoding as an ALU op, plus the sampler
 * unit in word 0 and a note of the unit in fp->samplers (consumed by
 * validate() for NV34TCL_TX_UNITS_ENABLE). */
224 nvfx_fp_tex(struct nvfx_fpc *fpc, int sat, int op, int unit,
225 struct nvfx_sreg dst, int mask,
226 struct nvfx_sreg s0, struct nvfx_sreg s1, struct nvfx_sreg s2)
228 struct nvfx_fragment_program *fp = fpc->fp;
230 nvfx_fp_arith(fpc, sat, op, dst, mask, s0, s1, s2);
232 fp->insn[fpc->inst_offset] |= (unit << NVFX_FP_OP_TEX_UNIT_SHIFT);
233 fp->samplers |= (1 << unit);
/* Translate a TGSI full source register into an nvfx_sreg, then layer the
 * per-use modifiers (abs, negate, swizzle) on top.
 * NOTE(review): the break statements between cases are elided here. */
236 static INLINE struct nvfx_sreg
237 tgsi_src(struct nvfx_fpc *fpc, const struct tgsi_full_src_register *fsrc)
239 struct nvfx_sreg src;
241 switch (fsrc->Register.File) {
242 case TGSI_FILE_INPUT:
243 src = nvfx_sr(NVFXSR_INPUT,
244 fpc->attrib_map[fsrc->Register.Index]);
/* Pipe constants get a fresh table slot each use; vals is NULL because the
 * data is patched in at validate time (see constant()). */
246 case TGSI_FILE_CONSTANT:
247 src = constant(fpc, fsrc->Register.Index, NULL);
249 case TGSI_FILE_IMMEDIATE:
250 assert(fsrc->Register.Index < fpc->nr_imm);
251 src = fpc->imm[fsrc->Register.Index];
253 case TGSI_FILE_TEMPORARY:
254 src = fpc->r_temp[fsrc->Register.Index];
256 /* NV40 fragprog result regs are just temps, so this is simple */
257 case TGSI_FILE_OUTPUT:
258 src = fpc->r_result[fsrc->Register.Index];
261 NOUVEAU_ERR("bad src file\n");
265 src.abs = fsrc->Register.Absolute;
266 src.negate = fsrc->Register.Negate;
267 src.swz[0] = fsrc->Register.SwizzleX;
268 src.swz[1] = fsrc->Register.SwizzleY;
269 src.swz[2] = fsrc->Register.SwizzleZ;
270 src.swz[3] = fsrc->Register.SwizzleW;
/* Translate a TGSI destination register into the nvfx_sreg assigned during
 * prepare(); unknown files log an error and fall back to NVFXSR_NONE.
 * NOTE(review): the case label above the first plain return (presumably
 * TGSI_FILE_NULL) is elided from this chunk. */
274 static INLINE struct nvfx_sreg
275 tgsi_dst(struct nvfx_fpc *fpc, const struct tgsi_full_dst_register *fdst) {
276 switch (fdst->Register.File) {
277 case TGSI_FILE_OUTPUT:
278 return fpc->r_result[fdst->Register.Index];
279 case TGSI_FILE_TEMPORARY:
280 return fpc->r_temp[fdst->Register.Index];
282 return nvfx_sr(NVFXSR_NONE, 0);
284 NOUVEAU_ERR("bad dst file %d\n", fdst->Register.File);
285 return nvfx_sr(NVFXSR_NONE, 0);
/* Body of tgsi_mask(): convert a TGSI write mask to the hardware mask bits.
 * NOTE(review): the function header and return are elided from this chunk. */
294 if (tgsi & TGSI_WRITEMASK_X) mask |= NVFX_FP_MASK_X;
295 if (tgsi & TGSI_WRITEMASK_Y) mask |= NVFX_FP_MASK_Y;
296 if (tgsi & TGSI_WRITEMASK_Z) mask |= NVFX_FP_MASK_Z;
297 if (tgsi & TGSI_WRITEMASK_W) mask |= NVFX_FP_MASK_W;
/* Translate one TGSI instruction into one or more hardware instructions.
 * Returns boolean success (elided return statements in this chunk).
 * NOTE(review): many structural lines (braces, breaks, some conditionals)
 * are elided throughout this function in the visible chunk. */
302 nvfx_fragprog_parse_instruction(struct nvfx_context* nvfx, struct nvfx_fpc *fpc,
303 const struct tgsi_full_instruction *finst)
305 const struct nvfx_sreg none = nvfx_sr(NVFXSR_NONE, 0);
306 struct nvfx_sreg src[3], dst, tmp;
/* ai/ci/ii remember which single INPUT / CONSTANT / IMMEDIATE register this
 * instruction already reads directly; see the coalescing loop below. */
308 int ai = -1, ci = -1, ii = -1;
311 if (finst->Instruction.Opcode == TGSI_OPCODE_END)
/* Pass 1: resolve TEMPORARY sources first, before any MOV-to-temp fixups
 * below can allocate scratch temps. */
314 for (i = 0; i < finst->Instruction.NumSrcRegs; i++) {
315 const struct tgsi_full_src_register *fsrc;
317 fsrc = &finst->Src[i];
318 if (fsrc->Register.File == TGSI_FILE_TEMPORARY) {
319 src[i] = tgsi_src(fpc, fsrc);
/* Pass 2: the hardware can read only one distinct INPUT and one distinct
 * CONSTANT/IMMEDIATE per instruction.  The first such register is used
 * directly; any additional distinct one is copied into a scratch temp with
 * a MOV (the elided lines presumably assign src[i] = temp(fpc) first). */
323 for (i = 0; i < finst->Instruction.NumSrcRegs; i++) {
324 const struct tgsi_full_src_register *fsrc;
326 fsrc = &finst->Src[i];
328 switch (fsrc->Register.File) {
329 case TGSI_FILE_INPUT:
330 if (ai == -1 || ai == fsrc->Register.Index) {
331 ai = fsrc->Register.Index;
332 src[i] = tgsi_src(fpc, fsrc);
335 arith(fpc, 0, MOV, src[i], NVFX_FP_MASK_ALL,
336 tgsi_src(fpc, fsrc), none, none);
339 case TGSI_FILE_CONSTANT:
340 if ((ci == -1 && ii == -1) ||
341 ci == fsrc->Register.Index) {
342 ci = fsrc->Register.Index;
343 src[i] = tgsi_src(fpc, fsrc);
346 arith(fpc, 0, MOV, src[i], NVFX_FP_MASK_ALL,
347 tgsi_src(fpc, fsrc), none, none);
350 case TGSI_FILE_IMMEDIATE:
351 if ((ci == -1 && ii == -1) ||
352 ii == fsrc->Register.Index) {
353 ii = fsrc->Register.Index;
354 src[i] = tgsi_src(fpc, fsrc);
357 arith(fpc, 0, MOV, src[i], NVFX_FP_MASK_ALL,
358 tgsi_src(fpc, fsrc), none, none);
/* Temporaries were handled in pass 1; samplers just record the unit. */
361 case TGSI_FILE_TEMPORARY:
364 case TGSI_FILE_SAMPLER:
365 unit = fsrc->Register.Index;
367 case TGSI_FILE_OUTPUT:
370 NOUVEAU_ERR("bad src file\n");
375 dst = tgsi_dst(fpc, &finst->Dst[0]);
376 mask = tgsi_mask(finst->Dst[0].Register.WriteMask);
377 sat = (finst->Instruction.Saturate == TGSI_SAT_ZERO_ONE);
/* Opcode lowering.  Many TGSI ops map 1:1; the rest are expanded into short
 * sequences using scratch temps and the condition-code register. */
379 switch (finst->Instruction.Opcode) {
380 case TGSI_OPCODE_ABS:
381 arith(fpc, sat, MOV, dst, mask, abs(src[0]), none, none);
383 case TGSI_OPCODE_ADD:
384 arith(fpc, sat, ADD, dst, mask, src[0], src[1], none);
/* CMP: write src[0] into CC, then predicate two MOVs on GE / LT.
 * NOTE(review): the cc_update assignment between these lines is elided. */
386 case TGSI_OPCODE_CMP:
387 tmp = nvfx_sr(NVFXSR_NONE, 0);
389 arith(fpc, 0, MOV, tmp, 0xf, src[0], none, none);
390 dst.cc_test = NVFX_COND_GE;
391 arith(fpc, sat, MOV, dst, mask, src[2], none, none);
392 dst.cc_test = NVFX_COND_LT;
393 arith(fpc, sat, MOV, dst, mask, src[1], none, none);
395 case TGSI_OPCODE_COS:
396 arith(fpc, sat, COS, dst, mask, src[0], none, none);
/* DDX/DDY: when Z/W results are requested, compute them pairwise through
 * X/Y of a temp (derivative ops here operate on two components at a time). */
398 case TGSI_OPCODE_DDX:
399 if (mask & (NVFX_FP_MASK_Z | NVFX_FP_MASK_W)) {
401 arith(fpc, sat, DDX, tmp, NVFX_FP_MASK_X | NVFX_FP_MASK_Y,
402 swz(src[0], Z, W, Z, W), none, none);
403 arith(fpc, 0, MOV, tmp, NVFX_FP_MASK_Z | NVFX_FP_MASK_W,
404 swz(tmp, X, Y, X, Y), none, none);
405 arith(fpc, sat, DDX, tmp, NVFX_FP_MASK_X | NVFX_FP_MASK_Y, src[0],
407 arith(fpc, 0, MOV, dst, mask, tmp, none, none);
409 arith(fpc, sat, DDX, dst, mask, src[0], none, none);
412 case TGSI_OPCODE_DDY:
413 if (mask & (NVFX_FP_MASK_Z | NVFX_FP_MASK_W)) {
415 arith(fpc, sat, DDY, tmp, NVFX_FP_MASK_X | NVFX_FP_MASK_Y,
416 swz(src[0], Z, W, Z, W), none, none);
417 arith(fpc, 0, MOV, tmp, NVFX_FP_MASK_Z | NVFX_FP_MASK_W,
418 swz(tmp, X, Y, X, Y), none, none);
419 arith(fpc, sat, DDY, tmp, NVFX_FP_MASK_X | NVFX_FP_MASK_Y, src[0],
421 arith(fpc, 0, MOV, dst, mask, tmp, none, none);
423 arith(fpc, sat, DDY, dst, mask, src[0], none, none);
426 case TGSI_OPCODE_DP3:
427 arith(fpc, sat, DP3, dst, mask, src[0], src[1], none);
429 case TGSI_OPCODE_DP4:
430 arith(fpc, sat, DP4, dst, mask, src[0], src[1], none);
/* DPH = dp3(a,b) + b.w */
432 case TGSI_OPCODE_DPH:
434 arith(fpc, 0, DP3, tmp, NVFX_FP_MASK_X, src[0], src[1], none);
435 arith(fpc, sat, ADD, dst, mask, swz(tmp, X, X, X, X),
436 swz(src[1], W, W, W, W), none);
438 case TGSI_OPCODE_DST:
439 arith(fpc, sat, DST, dst, mask, src[0], src[1], none);
441 case TGSI_OPCODE_EX2:
442 arith(fpc, sat, EX2, dst, mask, src[0], none, none);
444 case TGSI_OPCODE_FLR:
445 arith(fpc, sat, FLR, dst, mask, src[0], none, none);
447 case TGSI_OPCODE_FRC:
448 arith(fpc, sat, FRC, dst, mask, src[0], none, none);
/* KILP: unconditional kill; KIL: write src[0] to CC, kill where < 0. */
450 case TGSI_OPCODE_KILP:
451 arith(fpc, 0, KIL, none, 0, none, none, none);
453 case TGSI_OPCODE_KIL:
454 dst = nvfx_sr(NVFXSR_NONE, 0);
456 arith(fpc, 0, MOV, dst, NVFX_FP_MASK_ALL, src[0], none, none);
457 dst.cc_update = 0; dst.cc_test = NVFX_COND_LT;
458 arith(fpc, 0, KIL, dst, 0, none, none, none);
460 case TGSI_OPCODE_LG2:
461 arith(fpc, sat, LG2, dst, mask, src[0], none, none);
463 // case TGSI_OPCODE_LIT:
/* LRP: native on NV30 (LRP_NV30); otherwise MAD(-a,c,c) then MAD(a,b,tmp).
 * NOTE(review): the is_nv4x branch condition is elided here -- confirm. */
464 case TGSI_OPCODE_LRP:
466 arith(fpc, sat, LRP_NV30, dst, mask, src[0], src[1], src[2]);
469 arith(fpc, 0, MAD, tmp, mask, neg(src[0]), src[2], src[2]);
470 arith(fpc, sat, MAD, dst, mask, src[0], src[1], tmp);
473 case TGSI_OPCODE_MAD:
474 arith(fpc, sat, MAD, dst, mask, src[0], src[1], src[2]);
476 case TGSI_OPCODE_MAX:
477 arith(fpc, sat, MAX, dst, mask, src[0], src[1], none);
479 case TGSI_OPCODE_MIN:
480 arith(fpc, sat, MIN, dst, mask, src[0], src[1], none);
482 case TGSI_OPCODE_MOV:
483 arith(fpc, sat, MOV, dst, mask, src[0], none, none);
485 case TGSI_OPCODE_MUL:
486 arith(fpc, sat, MUL, dst, mask, src[0], src[1], none);
/* POW: native on NV30; otherwise EX2(LG2(a.x) * b.x). */
488 case TGSI_OPCODE_POW:
490 arith(fpc, sat, POW_NV30, dst, mask, src[0], src[1], none);
493 arith(fpc, 0, LG2, tmp, NVFX_FP_MASK_X,
494 swz(src[0], X, X, X, X), none, none);
495 arith(fpc, 0, MUL, tmp, NVFX_FP_MASK_X, swz(tmp, X, X, X, X),
496 swz(src[1], X, X, X, X), none);
497 arith(fpc, sat, EX2, dst, mask,
498 swz(tmp, X, X, X, X), none, none);
501 case TGSI_OPCODE_RCP:
502 arith(fpc, sat, RCP, dst, mask, src[0], none, none);
504 case TGSI_OPCODE_RET:
/* RFL: native on NV30; otherwise reflect = 2*(dot(n,i)/dot(n,n))*n - i. */
507 case TGSI_OPCODE_RFL:
509 arith(fpc, 0, RFL_NV30, dst, mask, src[0], src[1], none);
512 arith(fpc, 0, DP3, tmp, NVFX_FP_MASK_X, src[0], src[0], none);
513 arith(fpc, 0, DP3, tmp, NVFX_FP_MASK_Y, src[0], src[1], none);
514 arith(fpc, 0, DIV, scale(tmp, 2X), NVFX_FP_MASK_Z,
515 swz(tmp, Y, Y, Y, Y), swz(tmp, X, X, X, X), none);
516 arith(fpc, sat, MAD, dst, mask,
517 swz(tmp, Z, Z, Z, Z), src[0], neg(src[1]));
/* RSQ: native on NV30; otherwise EX2(-0.5 * LG2(|a.x|)). */
520 case TGSI_OPCODE_RSQ:
522 arith(fpc, sat, RSQ_NV30, dst, mask, abs(swz(src[0], X, X, X, X)), none, none);
525 arith(fpc, 0, LG2, scale(tmp, INV_2X), NVFX_FP_MASK_X,
526 abs(swz(src[0], X, X, X, X)), none, none);
527 arith(fpc, sat, EX2, dst, mask,
528 neg(swz(tmp, X, X, X, X)), none, none);
531 case TGSI_OPCODE_SCS:
532 /* avoid overwriting the source */
533 if(src[0].swz[NVFX_SWZ_X] != NVFX_SWZ_X)
/* Order of the SIN/COS pair depends on whether dst.x aliases src.x. */
535 if (mask & NVFX_FP_MASK_X) {
536 arith(fpc, sat, COS, dst, NVFX_FP_MASK_X,
537 swz(src[0], X, X, X, X), none, none);
539 if (mask & NVFX_FP_MASK_Y) {
540 arith(fpc, sat, SIN, dst, NVFX_FP_MASK_Y,
541 swz(src[0], X, X, X, X), none, none);
546 if (mask & NVFX_FP_MASK_Y) {
547 arith(fpc, sat, SIN, dst, NVFX_FP_MASK_Y,
548 swz(src[0], X, X, X, X), none, none);
550 if (mask & NVFX_FP_MASK_X) {
551 arith(fpc, sat, COS, dst, NVFX_FP_MASK_X,
552 swz(src[0], X, X, X, X), none, none);
556 case TGSI_OPCODE_SEQ:
557 arith(fpc, sat, SEQ, dst, mask, src[0], src[1], none);
559 case TGSI_OPCODE_SFL:
560 arith(fpc, sat, SFL, dst, mask, src[0], src[1], none);
562 case TGSI_OPCODE_SGE:
563 arith(fpc, sat, SGE, dst, mask, src[0], src[1], none);
565 case TGSI_OPCODE_SGT:
566 arith(fpc, sat, SGT, dst, mask, src[0], src[1], none);
568 case TGSI_OPCODE_SIN:
569 arith(fpc, sat, SIN, dst, mask, src[0], none, none);
571 case TGSI_OPCODE_SLE:
572 arith(fpc, sat, SLE, dst, mask, src[0], src[1], none);
574 case TGSI_OPCODE_SLT:
575 arith(fpc, sat, SLT, dst, mask, src[0], src[1], none);
577 case TGSI_OPCODE_SNE:
578 arith(fpc, sat, SNE, dst, mask, src[0], src[1], none);
580 case TGSI_OPCODE_STR:
581 arith(fpc, sat, STR, dst, mask, src[0], src[1], none);
583 case TGSI_OPCODE_SUB:
584 arith(fpc, sat, ADD, dst, mask, src[0], neg(src[1]), none);
586 case TGSI_OPCODE_TEX:
587 tex(fpc, sat, TEX, unit, dst, mask, src[0], none, none);
589 case TGSI_OPCODE_TXB:
590 tex(fpc, sat, TXB, unit, dst, mask, src[0], none, none);
592 case TGSI_OPCODE_TXP:
593 tex(fpc, sat, TXP, unit, dst, mask, src[0], none, none);
/* XPD: cross product via MUL then MAD; W is masked off the final write. */
595 case TGSI_OPCODE_XPD:
597 arith(fpc, 0, MUL, tmp, mask,
598 swz(src[0], Z, X, Y, Y), swz(src[1], Y, Z, X, X), none);
599 arith(fpc, sat, MAD, dst, (mask & ~NVFX_FP_MASK_W),
600 swz(src[0], Y, Z, X, X), swz(src[1], Z, X, Y, Y),
604 NOUVEAU_ERR("invalid opcode %d\n", finst->Instruction.Opcode);
/* Map one TGSI input declaration to a hardware input slot and record it in
 * attrib_map for tgsi_src().  Returns boolean success (returns elided).
 * NOTE(review): break statements and the error-path returns are elided. */
613 nvfx_fragprog_parse_decl_attrib(struct nvfx_context* nvfx, struct nvfx_fpc *fpc,
614 const struct tgsi_full_declaration *fdec)
618 switch (fdec->Semantic.Name) {
619 case TGSI_SEMANTIC_POSITION:
620 hw = NVFX_FP_OP_INPUT_SRC_POSITION;
622 case TGSI_SEMANTIC_COLOR:
623 if (fdec->Semantic.Index == 0) {
624 hw = NVFX_FP_OP_INPUT_SRC_COL0;
626 if (fdec->Semantic.Index == 1) {
627 hw = NVFX_FP_OP_INPUT_SRC_COL1;
629 NOUVEAU_ERR("bad colour semantic index\n");
633 case TGSI_SEMANTIC_FOG:
634 hw = NVFX_FP_OP_INPUT_SRC_FOGC;
/* GENERIC 0..7 map onto the eight texcoord inputs. */
636 case TGSI_SEMANTIC_GENERIC:
637 if (fdec->Semantic.Index <= 7) {
638 hw = NVFX_FP_OP_INPUT_SRC_TC(fdec->Semantic.
641 NOUVEAU_ERR("bad generic semantic index\n");
646 NOUVEAU_ERR("bad input semantic\n");
650 fpc->attrib_map[fdec->Range.First] = hw;
/* Map one TGSI output declaration to a hardware result register and reserve
 * it in the temp bitmask (result regs double as temps on this hardware --
 * see the note in tgsi_src()).  Returns boolean success (returns elided).
 * NOTE(review): the POSITION case body (hw assignment) is elided here. */
655 nvfx_fragprog_parse_decl_output(struct nvfx_context* nvfx, struct nvfx_fpc *fpc,
656 const struct tgsi_full_declaration *fdec)
658 unsigned idx = fdec->Range.First;
661 switch (fdec->Semantic.Name) {
662 case TGSI_SEMANTIC_POSITION:
665 case TGSI_SEMANTIC_COLOR:
/* Render targets 0..3 live in hw regs 0,2,3,4 (reg 1 is special-cased in
 * emit_dst()); only NV4x exposes all four. */
667 switch (fdec->Semantic.Index) {
668 case 0: hw = 0; break;
669 case 1: hw = 2; break;
670 case 2: hw = 3; break;
671 case 3: hw = 4; break;
673 if(hw > ((nvfx->is_nv4x) ? 4 : 2)) {
674 NOUVEAU_ERR("bad rcol index\n");
679 NOUVEAU_ERR("bad output semantic\n");
683 fpc->r_result[idx] = nvfx_sr(NVFXSR_OUTPUT, hw);
684 fpc->r_temps |= (1 << hw);
/* Pre-pass over the TGSI token stream: resolve input/output declarations,
 * load immediates into the constant table, and pre-allocate one hardware
 * temp per declared TGSI temporary.  Returns boolean success.
 * NOTE(review): tgsi_parse_free, error returns and several braces are
 * elided from this chunk. */
689 nvfx_fragprog_prepare(struct nvfx_context* nvfx, struct nvfx_fpc *fpc)
691 struct tgsi_parse_context p;
692 int high_temp = -1, i;
694 tgsi_parse_init(&p, fpc->fp->pipe.tokens);
695 while (!tgsi_parse_end_of_tokens(&p)) {
696 const union tgsi_full_token *tok = &p.FullToken;
698 tgsi_parse_token(&p);
699 switch(tok->Token.Type) {
700 case TGSI_TOKEN_TYPE_DECLARATION:
702 const struct tgsi_full_declaration *fdec;
703 fdec = &p.FullToken.FullDeclaration;
704 switch (fdec->Declaration.File) {
705 case TGSI_FILE_INPUT:
706 if (!nvfx_fragprog_parse_decl_attrib(nvfx, fpc, fdec))
709 case TGSI_FILE_OUTPUT:
710 if (!nvfx_fragprog_parse_decl_output(nvfx, fpc, fdec))
/* Track the highest declared temporary index. */
713 case TGSI_FILE_TEMPORARY:
714 if (fdec->Range.Last > high_temp) {
724 case TGSI_TOKEN_TYPE_IMMEDIATE:
726 struct tgsi_full_immediate *imm;
729 imm = &p.FullToken.FullImmediate;
730 assert(imm->Immediate.DataType == TGSI_IMM_FLOAT32);
731 assert(fpc->nr_imm < MAX_IMM);
/* Immediates become pipe==-1 constants baked inline at emit time. */
733 vals[0] = imm->u[0].Float;
734 vals[1] = imm->u[1].Float;
735 vals[2] = imm->u[2].Float;
736 vals[3] = imm->u[3].Float;
737 fpc->imm[fpc->nr_imm++] = constant(fpc, -1, vals);
/* NOTE(review): as shown, high_temp holds the highest *index* but is used
 * as a count -- presumably a "++high_temp" guard line is elided; confirm
 * before treating this as an off-by-one bug. */
747 fpc->r_temp = CALLOC(high_temp, sizeof(struct nvfx_sreg));
748 for (i = 0; i < high_temp; i++)
749 fpc->r_temp[i] = temp(fpc);
750 fpc->r_temps_discard = 0;
/* Full TGSI -> hardware translation driver: prepare(), walk instructions,
 * fill in the FP control word, mark the last real instruction, and append a
 * trailing NOP+END.  Sets fp->translated on success.
 * NOTE(review): CALLOC failure check, the release_temps() call per
 * instruction, the is_nv4x branch around fp_control, the grow_insns call
 * before the trailing NOP, and cleanup labels are elided here. */
763 nvfx_fragprog_translate(struct nvfx_context *nvfx,
764 struct nvfx_fragment_program *fp)
766 struct tgsi_parse_context parse;
767 struct nvfx_fpc *fpc = NULL;
769 fpc = CALLOC(1, sizeof(struct nvfx_fpc));
775 if (!nvfx_fragprog_prepare(nvfx, fpc)) {
780 tgsi_parse_init(&parse, fp->pipe.tokens);
782 while (!tgsi_parse_end_of_tokens(&parse)) {
783 tgsi_parse_token(&parse);
785 switch (parse.FullToken.Token.Type) {
786 case TGSI_TOKEN_TYPE_INSTRUCTION:
788 const struct tgsi_full_instruction *finst;
790 finst = &parse.FullToken.FullInstruction;
791 if (!nvfx_fragprog_parse_instruction(nvfx, fpc, finst))
/* Two encodings of the temp count: packed low bits (NV30 form) vs the
 * NV40 TEMP_COUNT field -- presumably selected by an elided is_nv4x test. */
801 fp->fp_control |= (fpc->num_regs-1)/2;
803 fp->fp_control |= fpc->num_regs << NV40TCL_FP_CONTROL_TEMP_COUNT_SHIFT;
805 /* Terminate final instruction */
807 fp->insn[fpc->inst_offset] |= 0x00000001;
809 /* Append NOP + END instruction, may or may not be necessary. */
810 fpc->inst_offset = fp->insn_len;
812 fp->insn[fpc->inst_offset + 0] = 0x00000001;
813 fp->insn[fpc->inst_offset + 1] = 0x00000000;
814 fp->insn[fpc->inst_offset + 2] = 0x00000000;
815 fp->insn[fpc->inst_offset + 3] = 0x00000000;
817 fp->translated = TRUE;
819 tgsi_parse_free(&parse);
/* Copy the assembled instruction words into the GPU-visible buffer, with a
 * 16-bit halfword swap on big-endian hosts (runtime-detected via "le").
 * NOTE(review): the map NULL-check and the #ifdef around the debug dump loop
 * are elided here -- as shown, the NOUVEAU_ERR dump would run on every
 * upload; confirm it is guarded (e.g. by NVFX_DUMP) in the full file. */
826 nvfx_fragprog_upload(struct nvfx_context *nvfx,
827 struct nvfx_fragment_program *fp)
829 struct pipe_screen *pscreen = nvfx->pipe.screen;
830 const uint32_t le = 1;
834 map = pipe_buffer_map(pscreen, fp->buffer, PIPE_BUFFER_USAGE_CPU_WRITE);
837 for (i = 0; i < fp->insn_len; i++) {
838 fflush(stdout); fflush(stderr);
839 NOUVEAU_ERR("%d 0x%08x\n", i, fp->insn[i]);
840 fflush(stdout); fflush(stderr);
/* Little-endian host: straight copy. */
844 if ((*(const uint8_t *)&le)) {
845 for (i = 0; i < fp->insn_len; i++) {
846 map[i] = fp->insn[i];
849 /* Weird swapping for big-endian chips */
850 for (i = 0; i < fp->insn_len; i++) {
851 map[i] = ((fp->insn[i] & 0xffff) << 16) |
852 ((fp->insn[i] >> 16) & 0xffff);
856 pipe_buffer_unmap(pscreen, fp->buffer);
/* State-tracker validate hook: translate the program on first use (falling
 * back to software rasterization on failure), build its state object, then
 * patch any pipe constants whose values changed and re-upload if needed.
 * NOTE(review): the "if (fp->translated) goto update_constants" guard, the
 * so_ref/so_free handling around fp->so, the update_constants label, the
 * map declaration, and the return are elided from this chunk. */
860 nvfx_fragprog_validate(struct nvfx_context *nvfx)
862 struct nvfx_fragment_program *fp = nvfx->fragprog;
863 struct pipe_buffer *constbuf =
864 nvfx->constbuf[PIPE_SHADER_FRAGMENT];
865 struct pipe_screen *pscreen = nvfx->pipe.screen;
866 struct nouveau_stateobj *so;
867 boolean new_consts = FALSE;
871 goto update_constants;
873 nvfx->fallback_swrast &= ~NVFX_NEW_FRAGPROG;
874 nvfx_fragprog_translate(nvfx, fp);
875 if (!fp->translated) {
876 nvfx->fallback_swrast |= NVFX_NEW_FRAGPROG;
/* First successful translation: allocate the GPU buffer and upload. */
880 fp->buffer = pscreen->buffer_create(pscreen, 0x100, 0, fp->insn_len * 4);
881 nvfx_fragprog_upload(nvfx, fp);
883 so = so_new(4, 4, 1);
884 so_method(so, nvfx->screen->eng3d, NV34TCL_FP_ACTIVE_PROGRAM, 1);
885 so_reloc (so, nouveau_bo(fp->buffer), 0, NOUVEAU_BO_VRAM |
886 NOUVEAU_BO_GART | NOUVEAU_BO_RD | NOUVEAU_BO_LOW |
887 NOUVEAU_BO_OR, NV34TCL_FP_ACTIVE_PROGRAM_DMA0,
888 NV34TCL_FP_ACTIVE_PROGRAM_DMA1);
889 so_method(so, nvfx->screen->eng3d, NV34TCL_FP_CONTROL, 1);
890 so_data (so, fp->fp_control);
892 so_method(so, nvfx->screen->eng3d, NV34TCL_FP_REG_CONTROL, 1);
893 so_data (so, (1<<16)|0x4);
894 so_method(so, nvfx->screen->eng3d, NV34TCL_TX_UNITS_ENABLE, 1);
895 so_data (so, fp->samplers);
/* Compare each recorded patch site against the current constant buffer and
 * copy in any changed vec4; re-upload only if something actually changed. */
905 map = pipe_buffer_map(pscreen, constbuf,
906 PIPE_BUFFER_USAGE_CPU_READ);
907 for (i = 0; i < fp->nr_consts; i++) {
908 struct nvfx_fragment_program_data *fpd = &fp->consts[i];
909 uint32_t *p = &fp->insn[fpd->offset];
910 uint32_t *cb = (uint32_t *)&map[fpd->index * 4];
912 if (!memcmp(p, cb, 4 * sizeof(float)))
914 memcpy(p, cb, 4 * sizeof(float));
917 pipe_buffer_unmap(pscreen, constbuf);
920 nvfx_fragprog_upload(nvfx, fp);
923 if (new_consts || fp->so != nvfx->state.hw[NVFX_STATE_FRAGPROG]) {
924 so_ref(fp->so, &nvfx->state.hw[NVFX_STATE_FRAGPROG]);
/* Release the program's GPU resources: the instruction buffer and the
 * cached state object.  (Freeing of fp->insn / fp->consts is not visible
 * in this chunk.) */
932 nvfx_fragprog_destroy(struct nvfx_context *nvfx,
933 struct nvfx_fragment_program *fp)
936 pipe_buffer_reference(&fp->buffer, NULL);
939 so_ref(NULL, &fp->so);
/* State-tracker registration: dirty-flag and hw-slot bindings for the
 * fragment program validate hook. */
945 struct nvfx_state_entry nvfx_state_fragprog = {
946 .validate = nvfx_fragprog_validate,
948 .pipe = NVFX_NEW_FRAGPROG,
949 .hw = NVFX_STATE_FRAGPROG