2 Copyright (C) Intel Corp. 2006. All Rights Reserved.
3 Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
4 develop this 3D driver.
6 Permission is hereby granted, free of charge, to any person obtaining
7 a copy of this software and associated documentation files (the
8 "Software"), to deal in the Software without restriction, including
9 without limitation the rights to use, copy, modify, merge, publish,
10 distribute, sublicense, and/or sell copies of the Software, and to
11 permit persons to whom the Software is furnished to do so, subject to
12 the following conditions:
14 The above copyright notice and this permission notice (including the
15 next paragraph) shall be included in all copies or substantial
16 portions of the Software.
18 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
19 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
20 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
21 IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
22 LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
23 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
24 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
26 **********************************************************************/
29 * Keith Whitwell <keith@tungstengraphics.com>
33 #include "main/macros.h"
34 #include "brw_context.h"
/* Whether the PLN instruction can be used to interpolate with the given
 * delta (barycentric) register pair: the two deltas must occupy
 * consecutive registers, and pre-gen6 the first delta register number
 * must additionally be even ((nr & 1) == 0).
 * NOTE(review): this listing is missing interior lines (no return
 * statements visible) -- confirm body against the full source.
 */
37 static GLboolean can_do_pln(struct intel_context *intel,
38 const struct brw_reg *deltas)
40 struct brw_context *brw = brw_context(&intel->ctx);
45 if (deltas[1].nr != deltas[0].nr + 1)
48 if (intel->gen < 6 && ((deltas[0].nr & 1) != 0))
54 /* Not quite sure how correct this is - need to understand horiz
55 * vs. vertical strides a little better.
/* Returns a register reference for the second half of a SIMD16 value
 * (used with BRW_COMPRESSION_2NDHALF below).  Body not visible in this
 * listing -- presumably bumps reg.nr when the region is wider than
 * vec8; confirm against the full source.
 */
57 static INLINE struct brw_reg sechalf( struct brw_reg reg )
64 /* Return the SrcReg index of the channels that can be immediate float operands
65 * instead of usage of PROGRAM_CONSTANT values through push/pull.
/* Per-opcode table: stores (source index + 1) of the argument that may
 * be an immediate, so 0 means "none".  Table contents are elided in
 * this listing.
 */
68 brw_wm_arg_can_be_immediate(enum prog_opcode opcode, int arg)
70 int opcode_array[] = {
89 /* These opcodes get broken down in a way that allow two
90 * args to be immediates.
92 if (opcode == OPCODE_MAD || opcode == OPCODE_LRP) {
93 if (arg == 1 || arg == 2)
/* NOTE(review): bound check looks off by one -- opcode ==
 * ARRAY_SIZE(opcode_array) passes this test yet would index one past
 * the end below; ">=" seems intended.  Confirm against upstream.
 */
97 if (opcode > ARRAY_SIZE(opcode_array))
/* Undo the +1 encoding of the table. */
100 return arg == opcode_array[opcode] - 1;
104 * Computes the screen-space x,y position of the pixels.
106 * This will be used by emit_delta_xy() or emit_wpos_xy() for
107 * interpolation of attributes..
111 * R0.0 -- pixel mask, one bit for each of 4 pixels in 4 tiles,
112 * corresponding to each of the 16 execution channels.
114 * R1.0 -- triangle vertex 0.X
115 * R1.1 -- triangle vertex 0.Y
116 * R1.2 -- tile 0 x,y coords (2 packed uwords)
117 * R1.3 -- tile 1 x,y coords (2 packed uwords)
118 * R1.4 -- tile 2 x,y coords (2 packed uwords)
119 * R1.5 -- tile 3 x,y coords (2 packed uwords)
124 void emit_pixel_xy(struct brw_wm_compile *c,
125 const struct brw_reg *dst,
128 struct brw_compile *p = &c->func;
129 struct brw_reg r1 = brw_vec1_grf(1, 0);
130 struct brw_reg r1_uw = retype(r1, BRW_REGISTER_TYPE_UW);
131 struct brw_reg dst0_uw, dst1_uw;
/* Run uncompressed: dst is written as UW, so a single instruction
 * covers all 16 channels even in SIMD16. */
133 brw_push_insn_state(p);
134 brw_set_compression_control(p, BRW_COMPRESSION_NONE);
136 if (c->dispatch_width == 16) {
137 dst0_uw = vec16(retype(dst[0], BRW_REGISTER_TYPE_UW));
138 dst1_uw = vec16(retype(dst[1], BRW_REGISTER_TYPE_UW));
140 dst0_uw = vec8(retype(dst[0], BRW_REGISTER_TYPE_UW));
141 dst1_uw = vec8(retype(dst[1], BRW_REGISTER_TYPE_UW));
144 /* Calculate pixel centers by adding 1 or 0 to each of the
145 * micro-tile coordinates passed in r1.
147 if (mask & WRITEMASK_X) {
/* X: immediate vector packs the 0,1,0,1 per-pixel offsets. */
150 stride(suboffset(r1_uw, 4), 2, 4, 0),
151 brw_imm_v(0x10101010));
154 if (mask & WRITEMASK_Y) {
/* Y: immediate vector packs the 0,0,1,1 per-pixel offsets. */
157 stride(suboffset(r1_uw,5), 2, 4, 0),
158 brw_imm_v(0x11001100));
160 brw_pop_insn_state(p);
164 * Computes the screen-space x,y distance of the pixels from the start
167 * This will be used in linterp or pinterp with the start vertex value
168 * and the Cx, Cy, and C0 coefficients passed in from the setup engine
169 * to produce interpolated attribute values.
171 void emit_delta_xy(struct brw_compile *p,
172 const struct brw_reg *dst,
174 const struct brw_reg *arg0)
176 struct intel_context *intel = &p->brw->intel;
177 struct brw_reg r1 = brw_vec1_grf(1, 0);
/* Both X and Y are always produced together. */
182 assert(mask == WRITEMASK_XY);
184 if (intel->gen >= 6) {
185 /* XXX Gen6 WM doesn't have Xstart/Ystart in payload r1.0/r1.1.
186 Just add them with 0.0 for dst reg.. */
187 r1 = brw_imm_v(0x00000000);
190 retype(arg0[0], BRW_REGISTER_TYPE_UW),
194 retype(arg0[1], BRW_REGISTER_TYPE_UW),
199 /* Calc delta X,Y by subtracting origin in r1 from the pixel
200 * centers produced by emit_pixel_xy().
204 retype(arg0[0], BRW_REGISTER_TYPE_UW),
/* r1.0/r1.1 hold triangle vertex 0's X/Y (see emit_pixel_xy docs). */
208 retype(arg0[1], BRW_REGISTER_TYPE_UW),
209 negate(suboffset(r1,1)));
213 * Computes the pixel offset from the window origin for gl_FragCoord().
215 void emit_wpos_xy(struct brw_wm_compile *c,
216 const struct brw_reg *dst,
218 const struct brw_reg *arg0)
220 struct brw_compile *p = &c->func;
/* X: integer pixel centers pass straight through; otherwise the
 * half-pixel center offset is applied (elided lines). */
222 if (mask & WRITEMASK_X) {
223 if (c->fp->program.PixelCenterInteger) {
227 retype(arg0[0], BRW_REGISTER_TYPE_W));
232 retype(arg0[0], BRW_REGISTER_TYPE_W),
/* Y: with an upper-left origin Y passes through (plus center offset);
 * with a lower-left origin it is flipped about the drawable height. */
237 if (mask & WRITEMASK_Y) {
238 if (c->fp->program.OriginUpperLeft) {
239 if (c->fp->program.PixelCenterInteger) {
243 retype(arg0[1], BRW_REGISTER_TYPE_W));
248 retype(arg0[1], BRW_REGISTER_TYPE_W),
252 float center_offset = c->fp->program.PixelCenterInteger ? 0.0 : 0.5;
254 /* Y' = (height - 1) - Y + center */
257 negate(retype(arg0[1], BRW_REGISTER_TYPE_W)),
258 brw_imm_f(c->key.drawable_height - 1 + center_offset));
/* Computes pixel W (1 / interpolated 1/w): linterp of wpos[3] straight
 * into a message register, then an INV math message into dst[3].
 * Pre-gen6 only (asserted below).
 */
264 void emit_pixel_w(struct brw_wm_compile *c,
265 const struct brw_reg *dst,
267 const struct brw_reg *arg0,
268 const struct brw_reg *deltas)
270 struct brw_compile *p = &c->func;
271 struct intel_context *intel = &p->brw->intel;
273 struct brw_reg temp_dst;
278 temp_dst = brw_message_reg(2);
280 assert(intel->gen < 6);
282 /* Don't need this if all you are doing is interpolating color, for
285 if (mask & WRITEMASK_W) {
/* interp3 = the W-plane coefficients of this attribute's pair of
 * coefficient registers. */
286 struct brw_reg interp3 = brw_vec1_grf(arg0[0].nr+1, 4);
288 /* Calc 1/w - just linterp wpos[3] optimized by putting the
289 * result straight into a message reg.
291 if (can_do_pln(intel, deltas)) {
292 brw_PLN(p, temp_dst, interp3, deltas[0]);
294 brw_LINE(p, brw_null_reg(), interp3, deltas[0]);
295 brw_MAC(p, temp_dst, suboffset(interp3, 1), deltas[1]);
302 src = brw_null_reg();
/* SIMD16 uses the paired math-16 helper; SIMD8 path (elided) uses the
 * single math message. */
304 if (c->dispatch_width == 16) {
305 brw_math_16(p, dst[3],
306 BRW_MATH_FUNCTION_INV,
307 BRW_MATH_SATURATE_NONE,
309 BRW_MATH_PRECISION_FULL);
312 BRW_MATH_FUNCTION_INV,
313 BRW_MATH_SATURATE_NONE,
315 BRW_MATH_DATA_VECTOR,
316 BRW_MATH_PRECISION_FULL);
/* Linear (non-perspective) interpolation of up to four channels of an
 * attribute.  The four per-channel plane coefficients live at
 * <nr,0> <nr,4> <nr+1,0> <nr+1,4>; result is coeff.x*dx + coeff.y*dy
 * + coeff.c via PLN, or LINE+MAC when PLN's register-alignment rules
 * aren't met (see can_do_pln()).
 */
321 void emit_linterp(struct brw_compile *p,
322 const struct brw_reg *dst,
324 const struct brw_reg *arg0,
325 const struct brw_reg *deltas)
327 struct intel_context *intel = &p->brw->intel;
328 struct brw_reg interp[4];
329 GLuint nr = arg0[0].nr;
332 interp[0] = brw_vec1_grf(nr, 0);
333 interp[1] = brw_vec1_grf(nr, 4);
334 interp[2] = brw_vec1_grf(nr+1, 0);
335 interp[3] = brw_vec1_grf(nr+1, 4);
337 for (i = 0; i < 4; i++) {
339 if (intel->gen >= 6) {
/* Gen6: barycentrics are in the fixed payload at g2, not in deltas. */
340 brw_PLN(p, dst[i], interp[i], brw_vec8_grf(2, 0));
341 } else if (can_do_pln(intel, deltas)) {
342 brw_PLN(p, dst[i], interp[i], deltas[0]);
344 brw_LINE(p, brw_null_reg(), interp[i], deltas[0]);
345 brw_MAC(p, dst[i], suboffset(interp[i],1), deltas[1]);
/* Perspective-correct interpolation: linear interpolation of
 * attribute/w followed by multiplication by the per-pixel W in w[3].
 * On gen6 the hardware payload already provides perspective
 * barycentrics, so plain linterp suffices.
 */
352 void emit_pinterp(struct brw_compile *p,
353 const struct brw_reg *dst,
355 const struct brw_reg *arg0,
356 const struct brw_reg *deltas,
357 const struct brw_reg *w)
359 struct intel_context *intel = &p->brw->intel;
360 struct brw_reg interp[4];
361 GLuint nr = arg0[0].nr;
364 if (intel->gen >= 6) {
/* NOTE(review): interp is uninitialized here; harmless only because
 * emit_linterp() ignores its deltas argument on gen>=6 -- passing
 * deltas (or NULL) would be clearer.  Confirm against upstream. */
365 emit_linterp(p, dst, mask, arg0, interp);
369 interp[0] = brw_vec1_grf(nr, 0);
370 interp[1] = brw_vec1_grf(nr, 4);
371 interp[2] = brw_vec1_grf(nr+1, 0);
372 interp[3] = brw_vec1_grf(nr+1, 4);
374 for (i = 0; i < 4; i++) {
376 if (can_do_pln(intel, deltas)) {
377 brw_PLN(p, dst[i], interp[i], deltas[0]);
379 brw_LINE(p, brw_null_reg(), interp[i], deltas[0]);
380 brw_MAC(p, dst[i], suboffset(interp[i],1), deltas[1]);
/* Second pass: multiply each interpolated channel by pixel W. */
384 for (i = 0; i < 4; i++) {
386 brw_MUL(p, dst[i], dst[i], w[3]);
/* Constant (flat) interpolation: copies the C0 term (suboffset 3 of
 * each coefficient pair) of the attribute into each dst channel --
 * no per-pixel math needed.
 */
392 void emit_cinterp(struct brw_compile *p,
393 const struct brw_reg *dst,
395 const struct brw_reg *arg0)
397 struct brw_reg interp[4];
398 GLuint nr = arg0[0].nr;
401 interp[0] = brw_vec1_grf(nr, 0);
402 interp[1] = brw_vec1_grf(nr, 4);
403 interp[2] = brw_vec1_grf(nr+1, 0);
404 interp[3] = brw_vec1_grf(nr+1, 4);
406 for (i = 0; i < 4; i++) {
408 brw_MOV(p, dst[i], suboffset(interp[i],3)); /* TODO: optimize away like other moves */
413 /* Sets the destination channels to 1.0 or 0.0 according to glFrontFacing. */
414 void emit_frontfacing(struct brw_compile *p,
415 const struct brw_reg *dst,
/* r1.6:ud carries the front/back-face flag in its high bit. */
418 struct brw_reg r1_6ud = retype(brw_vec1_grf(1, 6), BRW_REGISTER_TYPE_UD);
421 if (!(mask & WRITEMASK_XYZW))
/* Write 0.0 everywhere first, then predicate-overwrite with 1.0 for
 * front-facing pixels. */
424 for (i = 0; i < 4; i++) {
426 brw_MOV(p, dst[i], brw_imm_f(0.0));
430 /* bit 31 is "primitive is back face", so checking < (1 << 31) gives
/* NOTE(review): `1 << 31` shifts into the sign bit of int (UB in C);
 * `1u << 31` would be well-defined. */
433 brw_CMP(p, brw_null_reg(), BRW_CONDITIONAL_L, r1_6ud, brw_imm_ud(1 << 31));
434 for (i = 0; i < 4; i++) {
436 brw_MOV(p, dst[i], brw_imm_f(1.0));
/* Restore the flag register to all-ones for subsequent code. */
439 brw_set_predicate_control_flag_value(p, 0xff);
442 /* For OPCODE_DDX and OPCODE_DDY, per channel of output we've got input
445 * arg0: ss0.tl ss0.tr ss0.bl ss0.br ss1.tl ss1.tr ss1.bl ss1.br
447 * and we're trying to produce:
450 * dst: (ss0.tr - ss0.tl) (ss0.tl - ss0.bl)
451 * (ss0.tr - ss0.tl) (ss0.tr - ss0.br)
452 * (ss0.br - ss0.bl) (ss0.tl - ss0.bl)
453 * (ss0.br - ss0.bl) (ss0.tr - ss0.br)
454 * (ss1.tr - ss1.tl) (ss1.tl - ss1.bl)
455 * (ss1.tr - ss1.tl) (ss1.tr - ss1.br)
456 * (ss1.br - ss1.bl) (ss1.tl - ss1.bl)
457 * (ss1.br - ss1.bl) (ss1.tr - ss1.br)
459 * and add another set of two more subspans if in 16-pixel dispatch mode.
461 * For DDX, it ends up being easy: width = 2, horiz=0 gets us the same result
462 * for each pair, and vertstride = 2 jumps us 2 elements after processing a
463 * pair. But for DDY, it's harder, as we want to produce the pairs swizzled
464 * between each other. We could probably do it like ddx and swizzle the right
465 * order later, but bail for now and just produce
466 * ((ss0.tl - ss0.bl)x4 (ss1.tl - ss1.bl)x4)
468 void emit_ddxy(struct brw_compile *p,
469 const struct brw_reg *dst,
472 const struct brw_reg *arg0)
475 struct brw_reg src0, src1;
/* NOTE(review): unconditional saturate around the subtraction looks
 * suspicious for derivatives (which can be negative) -- the enable is
 * presumably conditioned on elided lines; confirm against upstream. */
478 brw_set_saturate(p, 1);
479 for (i = 0; i < 4; i++ ) {
/* DDX regions (per the comment above): subtract left pixel from right
 * within each subspan pair. */
482 src0 = brw_reg(arg0[i].file, arg0[i].nr, 1,
484 BRW_VERTICAL_STRIDE_2,
486 BRW_HORIZONTAL_STRIDE_0,
487 BRW_SWIZZLE_XYZW, WRITEMASK_XYZW);
488 src1 = brw_reg(arg0[i].file, arg0[i].nr, 0,
490 BRW_VERTICAL_STRIDE_2,
492 BRW_HORIZONTAL_STRIDE_0,
493 BRW_SWIZZLE_XYZW, WRITEMASK_XYZW);
/* DDY regions: subtract bottom-left from top-left, replicated x4 per
 * subspan (the simplified form described above). */
495 src0 = brw_reg(arg0[i].file, arg0[i].nr, 0,
497 BRW_VERTICAL_STRIDE_4,
499 BRW_HORIZONTAL_STRIDE_0,
500 BRW_SWIZZLE_XYZW, WRITEMASK_XYZW);
501 src1 = brw_reg(arg0[i].file, arg0[i].nr, 2,
503 BRW_VERTICAL_STRIDE_4,
505 BRW_HORIZONTAL_STRIDE_0,
506 BRW_SWIZZLE_XYZW, WRITEMASK_XYZW);
508 brw_ADD(p, dst[i], src0, negate(src1));
512 brw_set_saturate(p, 0);
/* Emit a one-source ALU instruction (via the supplied emitter callback)
 * for each enabled dst channel; saturate enable is driven by the mask
 * (enable/restore lines partially elided in this listing).
 */
515 void emit_alu1(struct brw_compile *p,
516 struct brw_instruction *(*func)(struct brw_compile *,
519 const struct brw_reg *dst,
521 const struct brw_reg *arg0)
526 brw_set_saturate(p, 1);
528 for (i = 0; i < 4; i++) {
530 func(p, dst[i], arg0[i]);
535 brw_set_saturate(p, 0);
/* Emit a two-source ALU instruction (via the supplied emitter callback)
 * for each enabled dst channel; mirrors emit_alu1() above.
 */
539 void emit_alu2(struct brw_compile *p,
540 struct brw_instruction *(*func)(struct brw_compile *,
544 const struct brw_reg *dst,
546 const struct brw_reg *arg0,
547 const struct brw_reg *arg1)
552 brw_set_saturate(p, 1);
554 for (i = 0; i < 4; i++) {
556 func(p, dst[i], arg0[i], arg1[i]);
561 brw_set_saturate(p, 0);
/* MAD: dst = arg0 * arg1 + arg2, per channel.  Implemented as MUL then
 * ADD (pre-gen6 has no 3-src MAD); saturate applies only to the final
 * ADD so the intermediate product is unclamped.
 */
565 void emit_mad(struct brw_compile *p,
566 const struct brw_reg *dst,
568 const struct brw_reg *arg0,
569 const struct brw_reg *arg1,
570 const struct brw_reg *arg2)
574 for (i = 0; i < 4; i++) {
576 brw_MUL(p, dst[i], arg0[i], arg1[i]);
578 brw_set_saturate(p, (mask & SATURATE) ? 1 : 0);
579 brw_ADD(p, dst[i], dst[i], arg2[i]);
580 brw_set_saturate(p, 0);
/* LRP: dst = arg0*arg1 + (1-arg0)*arg2, per channel, computed as
 * (1-arg0) into dst, MUL into the accumulator, then MAC -- dst doubles
 * as the temporary, so it must not alias the sources read afterwards.
 */
585 void emit_lrp(struct brw_compile *p,
586 const struct brw_reg *dst,
588 const struct brw_reg *arg0,
589 const struct brw_reg *arg1,
590 const struct brw_reg *arg2)
594 /* Uses dst as a temporary:
596 for (i = 0; i < 4; i++) {
598 /* Can I use the LINE instruction for this?
600 brw_ADD(p, dst[i], negate(arg0[i]), brw_imm_f(1.0));
/* MUL to the null reg: result lands in the accumulator for the MAC. */
601 brw_MUL(p, brw_null_reg(), dst[i], arg2[i]);
603 brw_set_saturate(p, (mask & SATURATE) ? 1 : 0);
604 brw_MAC(p, dst[i], arg0[i], arg1[i]);
605 brw_set_saturate(p, 0);
/* Shared body for the SLT/SLE/SGT/SGE/SEQ/SNE opcodes: per channel,
 * compare arg0 against arg1 with the given conditional, write 0.0
 * unpredicated, then overwrite with 1.0 under the resulting predicate.
 */
610 void emit_sop(struct brw_compile *p,
611 const struct brw_reg *dst,
614 const struct brw_reg *arg0,
615 const struct brw_reg *arg1)
619 for (i = 0; i < 4; i++) {
621 brw_push_insn_state(p);
622 brw_CMP(p, brw_null_reg(), cond, arg0[i], arg1[i]);
623 brw_set_predicate_control(p, BRW_PREDICATE_NONE);
624 brw_MOV(p, dst[i], brw_imm_f(0));
625 brw_set_predicate_control(p, BRW_PREDICATE_NORMAL);
626 brw_MOV(p, dst[i], brw_imm_f(1.0));
627 brw_pop_insn_state(p);
/* SLT: dst = (arg0 < arg1) ? 1.0 : 0.0 */
632 static void emit_slt( struct brw_compile *p,
633 const struct brw_reg *dst,
635 const struct brw_reg *arg0,
636 const struct brw_reg *arg1 )
638 emit_sop(p, dst, mask, BRW_CONDITIONAL_L, arg0, arg1);
/* SLE: dst = (arg0 <= arg1) ? 1.0 : 0.0 */
641 static void emit_sle( struct brw_compile *p,
642 const struct brw_reg *dst,
644 const struct brw_reg *arg0,
645 const struct brw_reg *arg1 )
647 emit_sop(p, dst, mask, BRW_CONDITIONAL_LE, arg0, arg1);
/* SGT: dst = (arg0 > arg1) ? 1.0 : 0.0 */
650 static void emit_sgt( struct brw_compile *p,
651 const struct brw_reg *dst,
653 const struct brw_reg *arg0,
654 const struct brw_reg *arg1 )
656 emit_sop(p, dst, mask, BRW_CONDITIONAL_G, arg0, arg1);
/* SGE: dst = (arg0 >= arg1) ? 1.0 : 0.0 */
659 static void emit_sge( struct brw_compile *p,
660 const struct brw_reg *dst,
662 const struct brw_reg *arg0,
663 const struct brw_reg *arg1 )
665 emit_sop(p, dst, mask, BRW_CONDITIONAL_GE, arg0, arg1);
/* SEQ: dst = (arg0 == arg1) ? 1.0 : 0.0 */
668 static void emit_seq( struct brw_compile *p,
669 const struct brw_reg *dst,
671 const struct brw_reg *arg0,
672 const struct brw_reg *arg1 )
674 emit_sop(p, dst, mask, BRW_CONDITIONAL_EQ, arg0, arg1);
/* SNE: dst = (arg0 != arg1) ? 1.0 : 0.0 */
677 static void emit_sne( struct brw_compile *p,
678 const struct brw_reg *dst,
680 const struct brw_reg *arg0,
681 const struct brw_reg *arg1 )
683 emit_sop(p, dst, mask, BRW_CONDITIONAL_NEQ, arg0, arg1);
/* CMP: dst = (arg0 < 0) ? arg1 : arg2, per channel, via a predicated
 * SEL after comparing arg0 against 0.
 */
686 void emit_cmp(struct brw_compile *p,
687 const struct brw_reg *dst,
689 const struct brw_reg *arg0,
690 const struct brw_reg *arg1,
691 const struct brw_reg *arg2)
695 for (i = 0; i < 4; i++) {
697 brw_CMP(p, brw_null_reg(), BRW_CONDITIONAL_L, arg0[i], brw_imm_f(0));
699 brw_set_saturate(p, (mask & SATURATE) ? 1 : 0);
700 brw_SEL(p, dst[i], arg1[i], arg2[i]);
701 brw_set_saturate(p, 0);
/* Restore the flag register to all-ones for subsequent code. */
702 brw_set_predicate_control_flag_value(p, 0xff);
/* SSG: dst = sign(arg0) in {-1, 0, 1}, per channel: start from 0.0,
 * predicate-write -1.0 where arg0 < 0, then 1.0 where arg0 > 0.
 */
707 void emit_sign(struct brw_compile *p,
708 const struct brw_reg *dst,
710 const struct brw_reg *arg0)
714 for (i = 0; i < 4; i++) {
716 brw_MOV(p, dst[i], brw_imm_f(0.0));
718 brw_CMP(p, brw_null_reg(), BRW_CONDITIONAL_L, arg0[i], brw_imm_f(0));
719 brw_MOV(p, dst[i], brw_imm_f(-1.0));
720 brw_set_predicate_control(p, BRW_PREDICATE_NONE);
722 brw_CMP(p, brw_null_reg(), BRW_CONDITIONAL_G, arg0[i], brw_imm_f(0));
723 brw_MOV(p, dst[i], brw_imm_f(1.0));
724 brw_set_predicate_control(p, BRW_PREDICATE_NONE);
/* MAX: dst = max(arg0, arg1), per channel, via CMP(>=) + predicated SEL. */
729 void emit_max(struct brw_compile *p,
730 const struct brw_reg *dst,
732 const struct brw_reg *arg0,
733 const struct brw_reg *arg1)
737 for (i = 0; i < 4; i++) {
739 brw_CMP(p, brw_null_reg(), BRW_CONDITIONAL_GE, arg0[i], arg1[i]);
741 brw_set_saturate(p, (mask & SATURATE) ? 1 : 0);
742 brw_SEL(p, dst[i], arg0[i], arg1[i]);
743 brw_set_saturate(p, 0);
/* Restore the flag register to all-ones for subsequent code. */
744 brw_set_predicate_control_flag_value(p, 0xff);
/* MIN: dst = min(arg0, arg1), per channel, via CMP(<) + predicated SEL. */
749 void emit_min(struct brw_compile *p,
750 const struct brw_reg *dst,
752 const struct brw_reg *arg0,
753 const struct brw_reg *arg1)
757 for (i = 0; i < 4; i++) {
759 brw_CMP(p, brw_null_reg(), BRW_CONDITIONAL_L, arg0[i], arg1[i]);
761 brw_set_saturate(p, (mask & SATURATE) ? 1 : 0);
762 brw_SEL(p, dst[i], arg0[i], arg1[i]);
763 brw_set_saturate(p, 0);
/* Restore the flag register to all-ones for subsequent code. */
764 brw_set_predicate_control_flag_value(p, 0xff);
/* DP2: scalar two-component dot product written to the single enabled
 * dst channel; MUL to null feeds the accumulator, MAC completes it.
 */
770 void emit_dp2(struct brw_compile *p,
771 const struct brw_reg *dst,
773 const struct brw_reg *arg0,
774 const struct brw_reg *arg1)
776 int dst_chan = _mesa_ffs(mask & WRITEMASK_XYZW) - 1;
778 if (!(mask & WRITEMASK_XYZW))
779 return; /* Do not emit dead code */
/* Scalar result: exactly one dst channel may be enabled. */
781 assert(is_power_of_two(mask & WRITEMASK_XYZW));
783 brw_MUL(p, brw_null_reg(), arg0[0], arg1[0]);
785 brw_set_saturate(p, (mask & SATURATE) ? 1 : 0);
786 brw_MAC(p, dst[dst_chan], arg0[1], arg1[1]);
787 brw_set_saturate(p, 0);
/* DP3: three-component dot product accumulated via MUL + 2x MAC;
 * saturate applies only to the final MAC.
 */
791 void emit_dp3(struct brw_compile *p,
792 const struct brw_reg *dst,
794 const struct brw_reg *arg0,
795 const struct brw_reg *arg1)
797 int dst_chan = _mesa_ffs(mask & WRITEMASK_XYZW) - 1;
799 if (!(mask & WRITEMASK_XYZW))
800 return; /* Do not emit dead code */
/* Scalar result: exactly one dst channel may be enabled. */
802 assert(is_power_of_two(mask & WRITEMASK_XYZW));
804 brw_MUL(p, brw_null_reg(), arg0[0], arg1[0]);
805 brw_MAC(p, brw_null_reg(), arg0[1], arg1[1]);
807 brw_set_saturate(p, (mask & SATURATE) ? 1 : 0);
808 brw_MAC(p, dst[dst_chan], arg0[2], arg1[2]);
809 brw_set_saturate(p, 0);
/* DP4: four-component dot product accumulated via MUL + 3x MAC;
 * saturate applies only to the final MAC.
 */
813 void emit_dp4(struct brw_compile *p,
814 const struct brw_reg *dst,
816 const struct brw_reg *arg0,
817 const struct brw_reg *arg1)
819 int dst_chan = _mesa_ffs(mask & WRITEMASK_XYZW) - 1;
821 if (!(mask & WRITEMASK_XYZW))
822 return; /* Do not emit dead code */
/* Scalar result: exactly one dst channel may be enabled. */
824 assert(is_power_of_two(mask & WRITEMASK_XYZW));
826 brw_MUL(p, brw_null_reg(), arg0[0], arg1[0]);
827 brw_MAC(p, brw_null_reg(), arg0[1], arg1[1]);
828 brw_MAC(p, brw_null_reg(), arg0[2], arg1[2]);
830 brw_set_saturate(p, (mask & SATURATE) ? 1 : 0);
831 brw_MAC(p, dst[dst_chan], arg0[3], arg1[3]);
832 brw_set_saturate(p, 0);
/* DPH: homogeneous dot product, dot3(arg0, arg1) + arg1.w -- the
 * three-term MAC chain lands in dst, then arg1[3] is added on top;
 * saturate applies only to the final ADD.
 */
836 void emit_dph(struct brw_compile *p,
837 const struct brw_reg *dst,
839 const struct brw_reg *arg0,
840 const struct brw_reg *arg1)
842 const int dst_chan = _mesa_ffs(mask & WRITEMASK_XYZW) - 1;
844 if (!(mask & WRITEMASK_XYZW))
845 return; /* Do not emit dead code */
/* Scalar result: exactly one dst channel may be enabled. */
847 assert(is_power_of_two(mask & WRITEMASK_XYZW));
849 brw_MUL(p, brw_null_reg(), arg0[0], arg1[0]);
850 brw_MAC(p, brw_null_reg(), arg0[1], arg1[1]);
851 brw_MAC(p, dst[dst_chan], arg0[2], arg1[2]);
853 brw_set_saturate(p, (mask & SATURATE) ? 1 : 0);
854 brw_ADD(p, dst[dst_chan], dst[dst_chan], arg1[3]);
855 brw_set_saturate(p, 0);
/* XPD: cross product, dst.i = arg0[i1]*arg1[i2] - arg0[i2]*arg1[i1]
 * for the cyclic index pair (i1, i2) of channel i (computed on elided
 * lines).  Only X/Y/Z may be written -- W asserted out below.
 */
859 void emit_xpd(struct brw_compile *p,
860 const struct brw_reg *dst,
862 const struct brw_reg *arg0,
863 const struct brw_reg *arg1)
867 assert((mask & WRITEMASK_W) != WRITEMASK_W);
869 for (i = 0 ; i < 3; i++) {
/* Negated MUL to null primes the accumulator with -arg0[i2]*arg1[i1]. */
874 brw_MUL(p, brw_null_reg(), negate(arg0[i2]), arg1[i1]);
876 brw_set_saturate(p, (mask & SATURATE) ? 1 : 0);
877 brw_MAC(p, dst[i], arg0[i1], arg1[i2]);
878 brw_set_saturate(p, 0);
/* Emits a one-source math-unit operation (RCP/RSQ/EXP/LOG/...) into the
 * single enabled dst channel.  Pre-gen6 this is a send to the shared
 * math box (two uncompressed messages for SIMD16); the gen6 path works
 * around the hardware's stride-1 source requirement by copying a
 * scalar source first.
 */
884 void emit_math1(struct brw_wm_compile *c,
886 const struct brw_reg *dst,
888 const struct brw_reg *arg0)
890 struct brw_compile *p = &c->func;
891 struct intel_context *intel = &p->brw->intel;
892 int dst_chan = _mesa_ffs(mask & WRITEMASK_XYZW) - 1;
893 GLuint saturate = ((mask & SATURATE) ?
894 BRW_MATH_SATURATE_SATURATE :
895 BRW_MATH_SATURATE_NONE);
898 if (intel->gen >= 6 && arg0[0].hstride == BRW_HORIZONTAL_STRIDE_0) {
899 /* Gen6 math requires that source and dst horizontal stride be 1.
903 brw_MOV(p, src, arg0[0]);
908 if (!(mask & WRITEMASK_XYZW))
909 return; /* Do not emit dead code */
/* Scalar result: exactly one dst channel may be enabled. */
911 assert(is_power_of_two(mask & WRITEMASK_XYZW));
913 /* Send two messages to perform all 16 operations:
915 brw_push_insn_state(p);
916 brw_set_compression_control(p, BRW_COMPRESSION_NONE);
923 BRW_MATH_DATA_VECTOR,
924 BRW_MATH_PRECISION_FULL);
/* SIMD16: second math message covers the upper 8 channels, writing
 * one register past the first half's destination. */
926 if (c->dispatch_width == 16) {
927 brw_set_compression_control(p, BRW_COMPRESSION_2NDHALF);
929 offset(dst[dst_chan],1),
934 BRW_MATH_DATA_VECTOR,
935 BRW_MATH_PRECISION_FULL);
937 brw_pop_insn_state(p);
/* Emits a two-source math-unit operation (POW, INT_DIV, ...) into the
 * single enabled dst channel.  Gen6 executes math as a regular
 * instruction but needs stride-1 sources, so scalar (hstride 0)
 * operands are first copied into temporaries; pre-gen6 sends message
 * pairs through MRFs to the shared math box, one per SIMD8 half.
 */
941 void emit_math2(struct brw_wm_compile *c,
943 const struct brw_reg *dst,
945 const struct brw_reg *arg0,
946 const struct brw_reg *arg1)
948 struct brw_compile *p = &c->func;
949 struct intel_context *intel = &p->brw->intel;
950 int dst_chan = _mesa_ffs(mask & WRITEMASK_XYZW) - 1;
952 if (!(mask & WRITEMASK_XYZW))
953 return; /* Do not emit dead code */
/* Scalar result: exactly one dst channel may be enabled. */
955 assert(is_power_of_two(mask & WRITEMASK_XYZW));
957 brw_push_insn_state(p);
959 /* math can only operate on up to a vec8 at a time, so in
960 * dispatch_width==16 we have to do the second half manually.
962 if (intel->gen >= 6) {
963 struct brw_reg src0 = arg0[0];
964 struct brw_reg src1 = arg1[0];
965 struct brw_reg temp_dst = dst[dst_chan];
967 if (arg0[0].hstride == BRW_HORIZONTAL_STRIDE_0) {
968 if (arg1[0].hstride == BRW_HORIZONTAL_STRIDE_0) {
969 /* Both scalar arguments. Do scalar calc. */
970 src0.hstride = BRW_HORIZONTAL_STRIDE_1;
971 src1.hstride = BRW_HORIZONTAL_STRIDE_1;
972 temp_dst.hstride = BRW_HORIZONTAL_STRIDE_1;
973 temp_dst.width = BRW_WIDTH_1;
975 if (arg0[0].subnr != 0) {
976 brw_MOV(p, temp_dst, src0);
979 /* Ouch. We've used the temp as a dst, and we still
980 * need a temp to store arg1 in, because src and dst
981 * offsets have to be equal. Leaving this up to
982 * glsl2-965 to handle correctly.
984 assert(arg1[0].subnr == 0);
985 } else if (arg1[0].subnr != 0) {
986 brw_MOV(p, temp_dst, src1);
990 brw_MOV(p, temp_dst, src0);
993 } else if (arg1[0].hstride == BRW_HORIZONTAL_STRIDE_0) {
994 brw_MOV(p, temp_dst, src1);
998 brw_set_saturate(p, (mask & SATURATE) ? 1 : 0);
999 brw_set_compression_control(p, BRW_COMPRESSION_NONE);
1005 if (c->dispatch_width == 16) {
1006 brw_set_compression_control(p, BRW_COMPRESSION_2NDHALF);
1014 /* Splat a scalar result into all the channels. */
1015 if (arg0[0].hstride == BRW_HORIZONTAL_STRIDE_0 &&
1016 arg1[0].hstride == BRW_HORIZONTAL_STRIDE_0) {
1017 temp_dst.hstride = BRW_HORIZONTAL_STRIDE_0;
1018 temp_dst.vstride = BRW_VERTICAL_STRIDE_0;
1019 brw_MOV(p, dst[dst_chan], temp_dst);
/* Pre-gen6 path: second operand goes through MRFs (m3, and m5 for the
 * second SIMD8 half), then math messages are sent per half. */
1022 GLuint saturate = ((mask & SATURATE) ?
1023 BRW_MATH_SATURATE_SATURATE :
1024 BRW_MATH_SATURATE_NONE);
1026 brw_set_compression_control(p, BRW_COMPRESSION_NONE);
1027 brw_MOV(p, brw_message_reg(3), arg1[0]);
1028 if (c->dispatch_width == 16) {
1029 brw_set_compression_control(p, BRW_COMPRESSION_2NDHALF);
1030 brw_MOV(p, brw_message_reg(5), sechalf(arg1[0]));
1033 brw_set_compression_control(p, BRW_COMPRESSION_NONE);
1040 BRW_MATH_DATA_VECTOR,
1041 BRW_MATH_PRECISION_FULL);
1043 /* Send two messages to perform all 16 operations:
1045 if (c->dispatch_width == 16) {
1046 brw_set_compression_control(p, BRW_COMPRESSION_2NDHALF);
1048 offset(dst[dst_chan],1),
1053 BRW_MATH_DATA_VECTOR,
1054 BRW_MATH_PRECISION_FULL);
1057 brw_pop_insn_state(p);
/* Emits a plain texture sample (TEX): builds the u,v,r[,shadow-ref]
 * payload in MRFs starting at m2 -- one MRF per coordinate in SIMD8,
 * two in SIMD16 -- then sends a sampler message returning 4 (SIMD8) or
 * 8 (SIMD16) response registers into dst.
 */
1061 void emit_tex(struct brw_wm_compile *c,
1062 struct brw_reg *dst,
1064 struct brw_reg *arg,
1065 struct brw_reg depth_payload,
1070 struct brw_compile *p = &c->func;
1071 struct intel_context *intel = &p->brw->intel;
1072 struct brw_reg dst_retyped;
1073 GLuint cur_mrf = 2, response_length;
1074 GLuint i, nr_texcoords;
1077 GLuint mrf_per_channel;
1080 if (c->dispatch_width == 16) {
1081 mrf_per_channel = 2;
1082 response_length = 8;
1083 dst_retyped = retype(vec16(dst[0]), BRW_REGISTER_TYPE_UW);
1084 simd_mode = BRW_SAMPLER_SIMD_MODE_SIMD16;
1086 mrf_per_channel = 1;
1087 response_length = 4;
1088 dst_retyped = retype(vec8(dst[0]), BRW_REGISTER_TYPE_UW);
1089 simd_mode = BRW_SAMPLER_SIMD_MODE_SIMD8;
1092 /* How many input regs are there?
1095 case TEXTURE_1D_INDEX:
1099 case TEXTURE_2D_INDEX:
1100 case TEXTURE_RECT_INDEX:
1101 emit = WRITEMASK_XY;
1104 case TEXTURE_3D_INDEX:
1105 case TEXTURE_CUBE_INDEX:
1106 emit = WRITEMASK_XYZ;
1110 /* unexpected target */
1114 /* Pre-Ironlake, the 8-wide sampler always took u,v,r. */
1115 if (intel->gen < 5 && c->dispatch_width == 8)
1118 /* For shadow comparisons, we have to supply u,v,r. */
1122 /* Emit the texcoords. */
1123 for (i = 0; i < nr_texcoords; i++) {
/* Coordinates not produced by the shader are padded with 0.0. */
1125 brw_MOV(p, brw_message_reg(cur_mrf), arg[i]);
1127 brw_MOV(p, brw_message_reg(cur_mrf), brw_imm_f(0));
1128 cur_mrf += mrf_per_channel;
1131 /* Fill in the shadow comparison reference value. */
1133 if (intel->gen >= 5) {
1134 /* Fill in the cube map array index value. */
1135 brw_MOV(p, brw_message_reg(cur_mrf), brw_imm_f(0));
1136 cur_mrf += mrf_per_channel;
1137 } else if (c->dispatch_width == 8) {
1138 /* Fill in the LOD bias value. */
1139 brw_MOV(p, brw_message_reg(cur_mrf), brw_imm_f(0));
1140 cur_mrf += mrf_per_channel;
/* Shadow comparitor comes from the third coordinate register. */
1142 brw_MOV(p, brw_message_reg(cur_mrf), arg[2]);
1143 cur_mrf += mrf_per_channel;
1146 if (intel->gen >= 5) {
1148 msg_type = BRW_SAMPLER_MESSAGE_SAMPLE_COMPARE_GEN5;
1150 msg_type = BRW_SAMPLER_MESSAGE_SAMPLE_GEN5;
1152 /* Note that G45 and older determines shadow compare and dispatch width
1153 * from message length for most messages.
1155 if (c->dispatch_width == 16 && shadow)
1156 msg_type = BRW_SAMPLER_MESSAGE_SIMD16_SAMPLE_COMPARE;
1158 msg_type = BRW_SAMPLER_MESSAGE_SIMD16_SAMPLE;
/* Send the sampler message; header comes from the depth payload. */
1164 retype(depth_payload, BRW_REGISTER_TYPE_UW),
1165 SURF_INDEX_TEXTURE(sampler),
1167 dst_flags & WRITEMASK_XYZW,
/* Emits a LOD-biased texture sample (TXB): u,v,r payload in m2..,
 * bias in the fourth slot, always sent as a SIMD16-style message on
 * G45 and older (see the workaround comment below).
 */
1177 void emit_txb(struct brw_wm_compile *c,
1178 struct brw_reg *dst,
1180 struct brw_reg *arg,
1181 struct brw_reg depth_payload,
1185 struct brw_compile *p = &c->func;
1186 struct intel_context *intel = &p->brw->intel;
1189 GLuint mrf_per_channel;
1190 GLuint response_length;
1191 struct brw_reg dst_retyped;
1193 /* The G45 and older chipsets don't support 8-wide dispatch for LOD biased
1194 * samples, so we'll use the 16-wide instruction, leave the second halves
1195 * undefined, and trust the execution mask to keep the undefined pixels
1198 if (c->dispatch_width == 16 || intel->gen < 5) {
1199 if (intel->gen >= 5)
1200 msg_type = BRW_SAMPLER_MESSAGE_SAMPLE_BIAS_GEN5;
1202 msg_type = BRW_SAMPLER_MESSAGE_SIMD16_SAMPLE_BIAS;
1203 mrf_per_channel = 2;
1204 dst_retyped = retype(vec16(dst[0]), BRW_REGISTER_TYPE_UW);
1205 response_length = 8;
1207 msg_type = BRW_SAMPLER_MESSAGE_SAMPLE_BIAS_GEN5;
1208 mrf_per_channel = 1;
1209 dst_retyped = retype(vec8(dst[0]), BRW_REGISTER_TYPE_UW);
1210 response_length = 4;
1213 /* Shadow ignored for txb. */
/* Coordinate payload: unused coordinates are padded with 0.0. */
1215 case TEXTURE_1D_INDEX:
1216 brw_MOV(p, brw_message_reg(2 + 0 * mrf_per_channel), arg[0]);
1217 brw_MOV(p, brw_message_reg(2 + 1 * mrf_per_channel), brw_imm_f(0));
1218 brw_MOV(p, brw_message_reg(2 + 2 * mrf_per_channel), brw_imm_f(0));
1220 case TEXTURE_2D_INDEX:
1221 case TEXTURE_RECT_INDEX:
1222 brw_MOV(p, brw_message_reg(2 + 0 * mrf_per_channel), arg[0]);
1223 brw_MOV(p, brw_message_reg(2 + 1 * mrf_per_channel), arg[1]);
1224 brw_MOV(p, brw_message_reg(2 + 2 * mrf_per_channel), brw_imm_f(0));
1226 case TEXTURE_3D_INDEX:
1227 case TEXTURE_CUBE_INDEX:
1228 brw_MOV(p, brw_message_reg(2 + 0 * mrf_per_channel), arg[0]);
1229 brw_MOV(p, brw_message_reg(2 + 1 * mrf_per_channel), arg[1]);
1230 brw_MOV(p, brw_message_reg(2 + 2 * mrf_per_channel), arg[2]);
1233 /* unexpected target */
/* LOD bias goes in the fourth payload slot (arg[3]). */
1237 brw_MOV(p, brw_message_reg(2 + 3 * mrf_per_channel), arg[3]);
1238 msgLength = 2 + 4 * mrf_per_channel - 1;
1243 retype(depth_payload, BRW_REGISTER_TYPE_UW),
1244 SURF_INDEX_TEXTURE(sampler),
1246 dst_flags & WRITEMASK_XYZW,
1252 BRW_SAMPLER_SIMD_MODE_SIMD16);
/* LIT opcode: Y = clamp(src.x), Z = pow(...) when requested, then both
 * are forced to 0 where src.x <= 0 via a predicated cleanup (the X and
 * W outputs are constant 1.0 and handled elsewhere -- asserted out).
 */
1256 static void emit_lit(struct brw_wm_compile *c,
1257 const struct brw_reg *dst,
1259 const struct brw_reg *arg0)
1261 struct brw_compile *p = &c->func;
1263 assert((mask & WRITEMASK_XW) == 0);
1265 if (mask & WRITEMASK_Y) {
1266 brw_set_saturate(p, (mask & SATURATE) ? 1 : 0);
1267 brw_MOV(p, dst[1], arg0[0]);
1268 brw_set_saturate(p, 0);
1271 if (mask & WRITEMASK_Z) {
1272 emit_math2(c, BRW_MATH_FUNCTION_POW,
1274 WRITEMASK_X | (mask & SATURATE),
1279 /* Ordinarily you'd use an iff statement to skip or shortcircuit
1280 * some of the POW calculations above, but 16-wide iff statements
1281 * seem to lock c1 hardware, so this is a nasty workaround:
1283 brw_CMP(p, brw_null_reg(), BRW_CONDITIONAL_LE, arg0[0], brw_imm_f(0));
1285 if (mask & WRITEMASK_Y)
1286 brw_MOV(p, dst[1], brw_imm_f(0));
1288 if (mask & WRITEMASK_Z)
1289 brw_MOV(p, dst[2], brw_imm_f(0));
1291 brw_set_predicate_control(p, BRW_PREDICATE_NONE);
1295 /* Kill pixel - set execution mask to zero for those pixels which
/* Implements KIL: for each distinct source channel, compare >= 0 and
 * AND the resulting flag bits into the pixel mask held in r0.0:uw,
 * permanently disabling failing pixels.
 */
1298 static void emit_kil( struct brw_wm_compile *c,
1299 struct brw_reg *arg0)
1301 struct brw_compile *p = &c->func;
1302 struct brw_reg r0uw = retype(brw_vec1_grf(0, 0), BRW_REGISTER_TYPE_UW);
1305 for (i = 0; i < 4; i++) {
1306 /* Check if we've already done the comparison for this reg
1307 * -- common when someone does KIL TEMP.wwww.
1309 for (j = 0; j < i; j++) {
1310 if (memcmp(&arg0[j], &arg0[i], sizeof(arg0[0])) == 0)
1316 brw_push_insn_state(p);
/* Pixels with channel < 0 clear their flag bit... */
1317 brw_CMP(p, brw_null_reg(), BRW_CONDITIONAL_GE, arg0[i], brw_imm_f(0));
1318 brw_set_predicate_control_flag_value(p, 0xff);
1319 brw_set_compression_control(p, BRW_COMPRESSION_NONE);
/* ...and the AND folds those cleared bits into the execution mask. */
1320 brw_AND(p, r0uw, brw_flag_reg(), r0uw);
1321 brw_pop_insn_state(p);
1325 /* KIL_NV kills the pixels that are currently executing, not based on a test
/* Unconditional kill: AND the inverted current execution mask (IMASK)
 * into the pixel mask in r0.0:uw, with per-channel masking disabled so
 * the whole mask word is updated.
 */
1328 void emit_kil_nv( struct brw_wm_compile *c )
1330 struct brw_compile *p = &c->func;
1331 struct brw_reg r0uw = retype(brw_vec1_grf(0, 0), BRW_REGISTER_TYPE_UW);
1333 brw_push_insn_state(p);
1334 brw_set_mask_control(p, BRW_MASK_DISABLE);
1335 brw_NOT(p, c->emit_mask_reg, brw_mask_reg(1)); /* IMASK */
1336 brw_AND(p, r0uw, c->emit_mask_reg, r0uw);
1337 brw_pop_insn_state(p);
/* Sends the actual framebuffer-write message after emit_fb_write() has
 * assembled the MRF payload.  Pre-gen6 copies r1 into the message
 * header first; gen6 sends headerless.
 */
1340 static void fire_fb_write( struct brw_wm_compile *c,
1346 struct brw_compile *p = &c->func;
1347 struct intel_context *intel = &p->brw->intel;
/* FB writes have no register response; dst is the null register. */
1350 if (c->dispatch_width == 16)
1351 dst = retype(vec16(brw_null_reg()), BRW_REGISTER_TYPE_UW);
1353 dst = retype(vec8(brw_null_reg()), BRW_REGISTER_TYPE_UW);
1355 /* Pass through control information:
1357 /* mov (8) m1.0<1>:ud r1.0<8;8,1>:ud { Align1 NoMask } */
1358 if (intel->gen < 6) /* gen6, use headerless for fb write */
1360 brw_push_insn_state(p);
1361 brw_set_mask_control(p, BRW_MASK_DISABLE); /* ? */
1362 brw_set_compression_control(p, BRW_COMPRESSION_NONE);
1364 brw_message_reg(base_reg + 1),
1365 brw_vec8_grf(1, 0));
1366 brw_pop_insn_state(p);
1369 /* Send framebuffer write message: */
1370 /* send (16) null.0<1>:uw m0 r0.0<8;8,1>:uw 0x85a04000:ud { Align1 EOT } */
1375 retype(brw_vec8_grf(0, 0), BRW_REGISTER_TYPE_UW),
/* Copies the antialiasing destination-stencil/coverage value from its
 * payload location (register index = reg/2, half = reg%2) into the
 * given message register ahead of the FB write.
 */
1383 static void emit_aa( struct brw_wm_compile *c,
1384 struct brw_reg *arg1,
1387 struct brw_compile *p = &c->func;
1388 GLuint comp = c->key.aa_dest_stencil_reg / 2;
1389 GLuint off = c->key.aa_dest_stencil_reg % 2;
1390 struct brw_reg aa = offset(arg1[comp], off);
1392 brw_push_insn_state(p);
1393 brw_set_compression_control(p, BRW_COMPRESSION_NONE); /* ?? */
1394 brw_MOV(p, brw_message_reg(reg), aa);
1395 brw_pop_insn_state(p);
1399 /* Post-fragment-program processing. Send the results to the
1401 * \param arg0 the fragment color
1402 * \param arg1 the pass-through depth value
1403 * \param arg2 the shader-computed depth value
/* Assembles the render-target write message payload in the MRF file
 * (colors, then optional source depth and destination depth/stencil), and
 * then issues the FB write via fire_fb_write().
 *
 * NOTE(review): many lines of the original function are elided in this
 * extract (the remaining parameters such as target/eot, the declarations
 * of "nr" and "channel", several braces and else-arms, and the "nr"
 * increments mentioned in the comments below) — verify against the full
 * source before modifying.
 */
1405 void emit_fb_write(struct brw_wm_compile *c,
1406 struct brw_reg *arg0,
1407 struct brw_reg *arg1,
1408 struct brw_reg *arg2,
1412 struct brw_compile *p = &c->func;
1413 struct brw_context *brw = p->brw;
1414 struct intel_context *intel = &brw->intel;
1417 int base_reg; /* For gen6 fb write with no header, starting from color payload directly!. */
1419 /* Reserve a space for AA - may not be needed:
1421 if (c->key.aa_dest_stencil_reg)
1424 /* I don't really understand how this achieves the color interleave
1425 * (ie RGBARGBA) in the result: [Do the saturation here]
1427 brw_push_insn_state(p);
1429 if (intel->gen >= 6)
/* Move the four color channels into consecutive message registers.
 * Three layouts below: gen6 headerless, pre-gen6 SIMD16 with COMPR4,
 * and the plain per-half fallback. */
1434 for (channel = 0; channel < 4; channel++) {
1435 if (intel->gen >= 6) {
1436 /* gen6 SIMD16 single source DP write looks like:
/* gen6: SIMD16 uses two MRFs per channel, SIMD8 uses one. */
1446 if (c->dispatch_width == 16) {
1447 brw_MOV(p, brw_message_reg(nr + channel * 2), arg0[channel]);
1449 brw_MOV(p, brw_message_reg(nr + channel), arg0[channel]);
1451 } else if (c->dispatch_width == 16 && brw->has_compr4) {
1452 /* pre-gen6 SIMD16 single source DP write looks like:
1462 * By setting the high bit of the MRF register number, we indicate
1463 * that we want COMPR4 mode - instead of doing the usual destination
1464 * + 1 for the second half we get destination + 4.
1467 brw_message_reg(nr + channel + BRW_MRF_COMPR4),
/* Fallback: emit each SIMD8 half explicitly, low half then high half. */
1470 /* mov (8) m2.0<1>:ud r28.0<8;8,1>:ud { Align1 } */
1471 /* mov (8) m6.0<1>:ud r29.0<8;8,1>:ud { Align1 SecHalf } */
1472 brw_set_compression_control(p, BRW_COMPRESSION_NONE);
1474 brw_message_reg(nr + channel),
1477 if (c->dispatch_width == 16) {
1478 brw_set_compression_control(p, BRW_COMPRESSION_2NDHALF);
/* Second half of a SIMD16 channel lands 4 MRFs up. */
1480 brw_message_reg(nr + channel + 4),
1481 sechalf(arg0[channel]));
1485 /* skip over the regs populated above:
1487 if (c->dispatch_width == 16)
1492 brw_pop_insn_state(p);
/* Optional source-depth payload: shader-computed depth if the program
 * writes gl_FragDepth, otherwise the interpolated depth passed through
 * in arg1. */
1494 if (c->key.source_depth_to_render_target)
1496 if (c->key.computes_depth)
1497 brw_MOV(p, brw_message_reg(nr), arg2[2]);
1499 brw_MOV(p, brw_message_reg(nr), arg1[1]); /* ? */
/* Optional destination depth/stencil payload; same /2 and %2 packing
 * as in emit_aa() to locate the value in the payload. */
1504 if (c->key.dest_depth_reg)
1506 GLuint comp = c->key.dest_depth_reg / 2;
1507 GLuint off = c->key.dest_depth_reg % 2;
1510 brw_push_insn_state(p);
1511 brw_set_compression_control(p, BRW_COMPRESSION_NONE);
1513 brw_MOV(p, brw_message_reg(nr), offset(arg1[comp],1));
1515 brw_MOV(p, brw_message_reg(nr+1), arg1[comp+1]);
1516 brw_pop_insn_state(p);
1519 brw_MOV(p, brw_message_reg(nr), arg1[comp]);
1524 if (intel->gen >= 6) {
1525 /* Subtract off the message header, since we send headerless. */
/* If the AA-emit decision is known at compile time, emit a single
 * unconditional FB write. */
1529 if (!c->key.runtime_check_aads_emit) {
1530 if (c->key.aa_dest_stencil_reg)
1531 emit_aa(c, arg1, 2);
1533 fire_fb_write(c, base_reg, nr, target, eot);
/* Otherwise branch at runtime on a payload bit (r1.6) to choose
 * between the with-AA and without-AA write paths. */
1536 struct brw_reg v1_null_ud = vec1(retype(brw_null_reg(), BRW_REGISTER_TYPE_UD));
1537 struct brw_reg ip = brw_ip_reg();
1538 struct brw_instruction *jmp;
1540 brw_set_compression_control(p, BRW_COMPRESSION_NONE);
1541 brw_set_conditionalmod(p, BRW_CONDITIONAL_Z);
1544 get_element_ud(brw_vec8_grf(1,0), 6),
1547 jmp = brw_JMPI(p, ip, ip, brw_imm_w(0));
1549 emit_aa(c, arg1, 2);
1550 fire_fb_write(c, 0, nr, target, eot);
1551 /* note - thread killed in subroutine */
1553 brw_land_fwd_jump(p, jmp);
1555 /* ELSE: Shuffle up one register to fill in the hole left for AA:
1557 fire_fb_write(c, 1, nr-1, target, eot);
1562 * Move a GPR to scratch memory.
/* Writes the GRF "reg" out to the thread's scratch space at "slot" using a
 * data-port scratch-write message: first copy the value into m2, then send
 * with r0 as the message header.
 *
 * NOTE(review): the "reg"/"slot" parameter lines, the slot-offset setup,
 * and the brw_dp_WRITE_16()/send call are elided in this extract — the
 * asm comments below (mov r0.2, send ... 0x053003ff) describe the elided
 * instructions.
 */
1564 static void emit_spill( struct brw_wm_compile *c,
1568 struct brw_compile *p = &c->func;
1571 mov (16) m2.0<1>:ud r2.0<8;8,1>:ud { Align1 Compr }
1573 brw_MOV(p, brw_message_reg(2), reg);
1576 mov (1) r0.2<1>:d 0x00000080:d { Align1 NoMask }
1577 send (16) null.0<1>:uw m1 r0.0<8;8,1>:uw 0x053003ff:ud { Align1 }
1580 retype(vec16(brw_vec8_grf(0, 0)), BRW_REGISTER_TYPE_UW),
1586 * Load a GPR from scratch memory.
/* Reads a previously spilled value back from scratch space into GRF "reg"
 * via a data-port scratch-read message.
 *
 * NOTE(review): the "reg"/"slot" parameters and the send construction are
 * elided in this extract; the asm comments below (mov r0.2, send ...
 * 0x041243ff) describe the elided read message.
 */
1588 static void emit_unspill( struct brw_wm_compile *c,
1592 struct brw_compile *p = &c->func;
1594 /* Slot 0 is the undef value.
/* Slot 0 means "never written": materialize an explicit 0.0 instead of
 * reading uninitialized scratch memory. */
1597 brw_MOV(p, reg, brw_imm_f(0));
1602 mov (1) r0.2<1>:d 0x000000c0:d { Align1 NoMask }
1603 send (16) r110.0<1>:uw m1 r0.0<8;8,1>:uw 0x041243ff:ud { Align1 }
1607 retype(vec16(reg), BRW_REGISTER_TYPE_UW),
1613 * Retrieve up to 4 GEN4 register pairs for the given wm reg:
1614 * Args with unspill_reg != 0 will be loaded from scratch memory.
/* Fills regs[0..3] with the hardware registers for one source operand's
 * four components. A component that was spilled (unspill_reg != 0) is
 * first reloaded from scratch into its unspill GRF; an absent component
 * yields the null register.
 *
 * NOTE(review): the emit_unspill() call site, the null-arg check, and the
 * if/else braces are elided in this extract.
 */
1616 static void get_argument_regs( struct brw_wm_compile *c,
1617 struct brw_wm_ref *arg[],
1618 struct brw_reg *regs )
1622 for (i = 0; i < 4; i++) {
1624 if (arg[i]->unspill_reg)
1626 brw_vec8_grf(arg[i]->unspill_reg, 0),
1627 arg[i]->value->spill_slot);
1629 regs[i] = arg[i]->hw_reg;
1632 regs[i] = brw_null_reg();
1639 * For values that have a spill_slot!=0, write those regs to scratch memory.
/* Walks "nr" entries of a value array and emits a scratch write for each
 * one the register allocator marked with a nonzero spill_slot. */
1641 static void spill_values( struct brw_wm_compile *c,
1642 struct brw_wm_value *values,
1647 for (i = 0; i < nr; i++)
1648 if (values[i].spill_slot)
1649 emit_spill(c, values[i].hw_reg, values[i].spill_slot);
1653 /* Emit the fragment program instructions here.
/* Main code-generation loop: translates the compiled WM instruction list
 * (c->instruction[]) into native GEN instructions, dispatching each opcode
 * to the matching emit_* helper, spilling/unspilling around register
 * pressure, and running pre-gen6 MRF cleanup passes afterward.
 *
 * NOTE(review): this extract elides many lines (case labels, break
 * statements, braces, and the function's closing lines after the disasm
 * loop) — the visible lines are annotated as-is.
 */
1655 void brw_wm_emit( struct brw_wm_compile *c )
1657 struct brw_compile *p = &c->func;
1658 struct intel_context *intel = &p->brw->intel;
/* Default to compressed (SIMD16) instruction emission. */
1661 brw_set_compression_control(p, BRW_COMPRESSION_COMPRESSED);
1662 if (intel->gen >= 6)
1663 brw_set_acc_write_control(p, 1);
1665 /* Check if any of the payload regs need to be spilled:
1667 spill_values(c, c->payload.depth, 4);
1668 spill_values(c, c->creg, c->nr_creg);
1669 spill_values(c, c->payload.input_interp, FRAG_ATTRIB_MAX);
/* Per-instruction translation loop. */
1672 for (insn = 0; insn < c->nr_insns; insn++) {
1674 struct brw_wm_instruction *inst = &c->instruction[insn];
1675 struct brw_reg args[3][4], dst[4];
1676 GLuint i, dst_flags;
1678 /* Get argument regs:
1680 for (i = 0; i < 3; i++)
1681 get_argument_regs(c, inst->src[i], args[i]);
/* Destination regs: null reg for unwritten components. */
1685 for (i = 0; i < 4; i++)
1687 dst[i] = inst->dst[i]->hw_reg;
1689 dst[i] = brw_null_reg();
/* dst_flags carries the writemask, plus SATURATE when requested. */
1693 dst_flags = inst->writemask;
1695 dst_flags |= SATURATE;
1697 switch (inst->opcode) {
1698 /* Generated instructions for calculating triangle interpolants:
1701 emit_pixel_xy(c, dst, dst_flags);
1705 emit_delta_xy(p, dst, dst_flags, args[0]);
1709 emit_wpos_xy(c, dst, dst_flags, args[0]);
1713 emit_pixel_w(c, dst, dst_flags, args[0], args[1]);
1717 emit_linterp(p, dst, dst_flags, args[0], args[1]);
1721 emit_pinterp(p, dst, dst_flags, args[0], args[1], args[2]);
1725 emit_cinterp(p, dst, dst_flags, args[0]);
1729 emit_fb_write(c, args[0], args[1], args[2], inst->target, inst->eot);
1732 case WM_FRONTFACING:
1733 emit_frontfacing(p, dst, dst_flags);
1736 /* Straightforward arithmetic:
1739 emit_alu2(p, brw_ADD, dst, dst_flags, args[0], args[1]);
1743 emit_alu1(p, brw_FRC, dst, dst_flags, args[0]);
1747 emit_alu1(p, brw_RNDD, dst, dst_flags, args[0]);
1751 emit_ddxy(p, dst, dst_flags, GL_TRUE, args[0]);
1755 emit_ddxy(p, dst, dst_flags, GL_FALSE, args[0]);
1759 emit_dp2(p, dst, dst_flags, args[0], args[1]);
1763 emit_dp3(p, dst, dst_flags, args[0], args[1]);
1767 emit_dp4(p, dst, dst_flags, args[0], args[1]);
1771 emit_dph(p, dst, dst_flags, args[0], args[1]);
1775 emit_alu1(p, brw_RNDZ, dst, dst_flags, args[0]);
1779 emit_lrp(p, dst, dst_flags, args[0], args[1], args[2]);
1783 emit_mad(p, dst, dst_flags, args[0], args[1], args[2]);
1788 emit_alu1(p, brw_MOV, dst, dst_flags, args[0]);
1792 emit_alu2(p, brw_MUL, dst, dst_flags, args[0], args[1]);
1796 emit_xpd(p, dst, dst_flags, args[0], args[1]);
1799 /* Higher math functions:
1802 emit_math1(c, BRW_MATH_FUNCTION_INV, dst, dst_flags, args[0]);
1806 emit_math1(c, BRW_MATH_FUNCTION_RSQ, dst, dst_flags, args[0]);
1810 emit_math1(c, BRW_MATH_FUNCTION_SIN, dst, dst_flags, args[0]);
1814 emit_math1(c, BRW_MATH_FUNCTION_COS, dst, dst_flags, args[0]);
1818 emit_math1(c, BRW_MATH_FUNCTION_EXP, dst, dst_flags, args[0]);
1822 emit_math1(c, BRW_MATH_FUNCTION_LOG, dst, dst_flags, args[0]);
1826 /* There is an scs math function, but it would need some
1827 * fixup for 16-element execution.
/* OPCODE_SCS: cos goes to the X component, sin to Y (dst+1), each as a
 * separate SIMD math call; note SIN writes WRITEMASK_X of dst+1. */
1829 if (dst_flags & WRITEMASK_X)
1830 emit_math1(c, BRW_MATH_FUNCTION_COS, dst, (dst_flags&SATURATE)|WRITEMASK_X, args[0]);
1831 if (dst_flags & WRITEMASK_Y)
1832 emit_math1(c, BRW_MATH_FUNCTION_SIN, dst+1, (dst_flags&SATURATE)|WRITEMASK_X, args[0]);
1836 emit_math2(c, BRW_MATH_FUNCTION_POW, dst, dst_flags, args[0], args[1]);
1842 emit_cmp(p, dst, dst_flags, args[0], args[1], args[2]);
1846 emit_max(p, dst, dst_flags, args[0], args[1]);
1850 emit_min(p, dst, dst_flags, args[0], args[1]);
1854 emit_slt(p, dst, dst_flags, args[0], args[1]);
1858 emit_sle(p, dst, dst_flags, args[0], args[1]);
1861 emit_sgt(p, dst, dst_flags, args[0], args[1]);
1864 emit_sge(p, dst, dst_flags, args[0], args[1]);
1867 emit_seq(p, dst, dst_flags, args[0], args[1]);
1870 emit_sne(p, dst, dst_flags, args[0], args[1]);
1874 emit_sign(p, dst, dst_flags, args[0]);
1878 emit_lit(c, dst, dst_flags, args[0]);
1881 /* Texturing operations:
1884 emit_tex(c, dst, dst_flags, args[0], c->payload.depth[0].hw_reg,
1885 inst->tex_idx, inst->tex_unit,
1890 emit_txb(c, dst, dst_flags, args[0], c->payload.depth[0].hw_reg,
1891 inst->tex_idx, inst->tex_unit);
1895 emit_kil(c, args[0]);
/* Unknown opcode: report it rather than emit garbage. */
1903 printf("Unsupported opcode %i (%s) in fragment shader\n",
1904 inst->opcode, inst->opcode < MAX_OPCODE ?
1905 _mesa_opcode_string(inst->opcode) :
/* After each instruction, spill any destination components the
 * allocator assigned a scratch slot. */
1909 for (i = 0; i < 4; i++)
1910 if (inst->dst[i] && inst->dst[i]->spill_slot)
1912 inst->dst[i]->hw_reg,
1913 inst->dst[i]->spill_slot);
1916 /* Only properly tested on ILK */
1917 if (p->brw->intel.gen == 5) {
1918 brw_remove_duplicate_mrf_moves(p);
1919 if (c->dispatch_width == 16)
1920 brw_remove_grf_to_mrf_moves(p);
/* With INTEL_DEBUG=wm, disassemble the generated native code. */
1923 if (INTEL_DEBUG & DEBUG_WM) {
1926 printf("wm-native:\n");
1927 for (i = 0; i < p->nr_insn; i++)
1928 brw_disasm(stdout, &p->store[i], p->brw->intel.gen);