2 * Copyright © 2010 Intel Corporation
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
24 /** @file brw_fs_emit.cpp
26 * This file supports emitting code from the FS LIR to the actual
27 * native instructions.
31 #include "main/macros.h"
32 #include "brw_context.h"
37 #include "../glsl/ir_print_visitor.h"
/* Emits the render-target-write SEND for an FB_WRITE instruction.
 *
 * The optional message header is assembled with mask control disabled and
 * compression off so the header MOVs are not predicated per-channel.  On
 * gen6+ g0 is copied into m<base_mrf> as the header; for render targets
 * other than 0 the RT index is also written into the header so the hardware
 * can select the right BLEND_STATE.  Pre-gen6 leaves g0 as an implied
 * header register instead of copying it.
 * NOTE(review): 'eot' is read from the instruction here and is presumably
 * passed to the final send emitted at the end of this function — confirm
 * against the full file, since the send call itself is not visible here.
 */
40 fs_visitor::generate_fb_write(fs_inst *inst)
42    GLboolean eot = inst->eot;
43    struct brw_reg implied_header;
45    /* Header is 2 regs, g0 and g1 are the contents. g0 will be implied
48    brw_push_insn_state(p);
49    brw_set_mask_control(p, BRW_MASK_DISABLE);
50    brw_set_compression_control(p, BRW_COMPRESSION_NONE);
52    if (inst->header_present) {
53       if (intel->gen >= 6) {
         /* gen6+: copy g0 into the message header registers (compressed so
          * both halves of the header are written).
          */
54	 brw_set_compression_control(p, BRW_COMPRESSION_COMPRESSED);
56		 retype(brw_message_reg(inst->base_mrf), BRW_REGISTER_TYPE_UD),
57		 retype(brw_vec8_grf(0, 0), BRW_REGISTER_TYPE_UD));
58	 brw_set_compression_control(p, BRW_COMPRESSION_NONE);
60	 if (inst->target > 0) {
61	    /* Set the render target index for choosing BLEND_STATE. */
62	    brw_MOV(p, retype(brw_vec1_reg(BRW_MESSAGE_REGISTER_FILE,
64				BRW_REGISTER_TYPE_UD),
65		    brw_imm_ud(inst->target));
68	 implied_header = brw_null_reg();
      /* Pre-gen6: g0 acts as the implied header; m<base_mrf+1> gets the
       * explicit part.
       */
70	 implied_header = retype(brw_vec8_grf(0, 0), BRW_REGISTER_TYPE_UW);
73		 brw_message_reg(inst->base_mrf + 1),
   /* No header: the send carries only payload. */
77       implied_header = brw_null_reg();
80    brw_pop_insn_state(p);
90		      inst->header_present);
93 /* Computes the integer pixel x,y values from the origin.
95  * This is the basis of gl_FragCoord computation, but is also used
96  * pre-gen6 for computing the deltas from v0 for computing
/* dst receives per-pixel integer X or Y coordinates.  The subspan origin
 * comes from g1 (read as UW); a packed-nibble immediate vector supplies the
 * per-channel offsets within each 2x2 subspan: 0x10101010 alternates +1/+0
 * for X, 0x11001100 gives the row pattern for Y.
 */
100 fs_visitor::generate_pixel_xy(struct brw_reg dst, bool is_x)
102    struct brw_reg g1_uw = retype(brw_vec1_grf(1, 0), BRW_REGISTER_TYPE_UW);
104    struct brw_reg deltas;
       /* X: origin at g1 word 4, replicated <2;4,0>, plus 0,1,0,1,... deltas. */
107       src = stride(suboffset(g1_uw, 4), 2, 4, 0);
108       deltas = brw_imm_v(0x10101010);
       /* Y: origin at g1 word 5, plus 0,0,1,1 row deltas. */
110       src = stride(suboffset(g1_uw, 5), 2, 4, 0);
111       deltas = brw_imm_v(0x11001100);
114    if (c->dispatch_width == 16) {
118    /* We do this 8 or 16-wide, but since the destination is UW we
119     * don't do compression in the 16-wide case.
121    brw_push_insn_state(p);
122    brw_set_compression_control(p, BRW_COMPRESSION_NONE);
123    brw_ADD(p, dst, src, deltas);
124    brw_pop_insn_state(p);
/* Emits linear interpolation of a varying from its barycentric deltas.
 *
 * src[0]/src[1] are the delta_x/delta_y registers and src[2] the
 * interpolation coefficients.  When the two delta registers are adjacent
 * (delta_y.nr == delta_x.nr + 1) — and, pre-gen6, the pair starts on an
 * even register — the single PLN instruction is used; otherwise the
 * two-instruction LINE + MAC fallback computes the same result.
 */
128 fs_visitor::generate_linterp(fs_inst *inst,
129			     struct brw_reg dst, struct brw_reg *src)
131    struct brw_reg delta_x = src[0];
132    struct brw_reg delta_y = src[1];
133    struct brw_reg interp = src[2];
136        delta_y.nr == delta_x.nr + 1 &&
137        (intel->gen >= 6 || (delta_x.nr & 1) == 0)) {
138       brw_PLN(p, dst, interp, delta_x);
       /* Fallback: LINE accumulates interp.x * delta_x, MAC adds
        * interp.y * delta_y into dst.
        */
140       brw_LINE(p, brw_null_reg(), interp, delta_x);
141       brw_MAC(p, dst, suboffset(interp, 1), delta_y);
/* Emits a math-unit operation (RCP/RSQ/SQRT/EXP/LOG/POW/SIN/COS).
 *
 * The FS opcode is first mapped to a BRW_MATH_FUNCTION_* code.  On gen6+
 * the math instruction takes its operands directly (mlen == 0); POW uses
 * the two-source brw_math2 form.  On gen4/5 operands are passed through
 * message registers starting at base_mrf (mlen >= 1).  In 16-wide dispatch
 * the operation is emitted as two 8-wide halves: the second half uses
 * sechalf() sources/destination under 2NDHALF compression, and compression
 * is restored to COMPRESSED afterwards.
 */
146 fs_visitor::generate_math(fs_inst *inst,
147			  struct brw_reg dst, struct brw_reg *src)
151    switch (inst->opcode) {
153       op = BRW_MATH_FUNCTION_INV;
156       op = BRW_MATH_FUNCTION_RSQ;
159       op = BRW_MATH_FUNCTION_SQRT;
162       op = BRW_MATH_FUNCTION_EXP;
165       op = BRW_MATH_FUNCTION_LOG;
168       op = BRW_MATH_FUNCTION_POW;
171       op = BRW_MATH_FUNCTION_SIN;
174       op = BRW_MATH_FUNCTION_COS;
177       assert(!"not reached: unknown math function");
182    if (intel->gen >= 6) {
      /* gen6+ math reads its sources directly, no message payload. */
183       assert(inst->mlen == 0);
185       if (inst->opcode == FS_OPCODE_POW) {
186	 brw_set_compression_control(p, BRW_COMPRESSION_NONE);
187	 brw_math2(p, dst, op, src[0], src[1]);
189	 if (c->dispatch_width == 16) {
	    /* Second 8-wide half of a 16-wide POW. */
190	    brw_set_compression_control(p, BRW_COMPRESSION_2NDHALF);
191	    brw_math2(p, sechalf(dst), op, sechalf(src[0]), sechalf(src[1]));
192	    brw_set_compression_control(p, BRW_COMPRESSION_COMPRESSED);
195	 brw_set_compression_control(p, BRW_COMPRESSION_NONE);
198		  inst->saturate ? BRW_MATH_SATURATE_SATURATE :
199		  BRW_MATH_SATURATE_NONE,
201		  BRW_MATH_DATA_VECTOR,
202		  BRW_MATH_PRECISION_FULL);
204	 if (c->dispatch_width == 16) {
	    /* Second 8-wide half of a 16-wide single-source math op. */
205	    brw_set_compression_control(p, BRW_COMPRESSION_2NDHALF);
206	    brw_math(p, sechalf(dst),
208		     inst->saturate ? BRW_MATH_SATURATE_SATURATE :
209		     BRW_MATH_SATURATE_NONE,
211		     BRW_MATH_DATA_VECTOR,
212		     BRW_MATH_PRECISION_FULL);
213	    brw_set_compression_control(p, BRW_COMPRESSION_COMPRESSED);
216    } else /* gen <= 5 */{
      /* gen4/5: operands go through the MRF; second half (if 16-wide) uses
       * the next message register and sechalf of the source.
       */
217       assert(inst->mlen >= 1);
219       brw_set_compression_control(p, BRW_COMPRESSION_NONE);
222	       inst->saturate ? BRW_MATH_SATURATE_SATURATE :
223	       BRW_MATH_SATURATE_NONE,
224	       inst->base_mrf, src[0],
225	       BRW_MATH_DATA_VECTOR,
226	       BRW_MATH_PRECISION_FULL);
228       if (c->dispatch_width == 16) {
229	 brw_set_compression_control(p, BRW_COMPRESSION_2NDHALF);
230	 brw_math(p, sechalf(dst),
232		  inst->saturate ? BRW_MATH_SATURATE_SATURATE :
233		  BRW_MATH_SATURATE_NONE,
234		  inst->base_mrf + 1, sechalf(src[0]),
235		  BRW_MATH_DATA_VECTOR,
236		  BRW_MATH_PRECISION_FULL);
238	 brw_set_compression_control(p, BRW_COMPRESSION_COMPRESSED);
/* Emits the sampler SEND for a texture operation (TEX/TXB/TXL/TXD).
 *
 * Chooses the sampler message type from the opcode and whether a shadow
 * comparison is requested: gen5+ has dedicated GEN5_SAMPLER_MESSAGE_*
 * encodings, while G45 and older pick BRW_SAMPLER_MESSAGE_* variants and
 * additionally assert the expected message lengths, since those parts
 * infer shadow-compare and dispatch width from mlen.  simd_mode follows
 * the dispatch width (SIMD8 vs SIMD16).  The destination is written as UW
 * and the surface index comes from SURF_INDEX_TEXTURE(sampler).
 */
244 fs_visitor::generate_tex(fs_inst *inst, struct brw_reg dst, struct brw_reg src)
248    uint32_t simd_mode = BRW_SAMPLER_SIMD_MODE_SIMD8;
250    if (c->dispatch_width == 16)
251       simd_mode = BRW_SAMPLER_SIMD_MODE_SIMD16;
253    if (intel->gen >= 5) {
254       switch (inst->opcode) {
	 /* Plain sample, with or without comparison. */
256	 if (inst->shadow_compare) {
257	    msg_type = GEN5_SAMPLER_MESSAGE_SAMPLE_COMPARE;
259	    msg_type = GEN5_SAMPLER_MESSAGE_SAMPLE;
	 /* Sample with LOD bias. */
263	 if (inst->shadow_compare) {
264	    msg_type = GEN5_SAMPLER_MESSAGE_SAMPLE_BIAS_COMPARE;
266	    msg_type = GEN5_SAMPLER_MESSAGE_SAMPLE_BIAS;
	 /* Sample with explicit LOD. */
270	 if (inst->shadow_compare) {
271	    msg_type = GEN5_SAMPLER_MESSAGE_SAMPLE_LOD_COMPARE;
273	    msg_type = GEN5_SAMPLER_MESSAGE_SAMPLE_LOD;
277	 /* There is no sample_d_c message; comparisons are done manually */
278	 msg_type = GEN5_SAMPLER_MESSAGE_SAMPLE_DERIVS;
      /* Pre-gen5 message selection. */
282       switch (inst->opcode) {
284	 /* Note that G45 and older determines shadow compare and dispatch width
285	  * from message length for most messages.
287	 assert(c->dispatch_width == 8);
288	 msg_type = BRW_SAMPLER_MESSAGE_SIMD8_SAMPLE;
289	 if (inst->shadow_compare) {
290	    assert(inst->mlen == 6);
292	    assert(inst->mlen <= 4);
296	 if (inst->shadow_compare) {
297	    assert(inst->mlen == 6);
298	    msg_type = BRW_SAMPLER_MESSAGE_SIMD8_SAMPLE_BIAS_COMPARE;
300	    assert(inst->mlen == 9);
301	    msg_type = BRW_SAMPLER_MESSAGE_SIMD16_SAMPLE_BIAS;
302	    simd_mode = BRW_SAMPLER_SIMD_MODE_SIMD16;
306	 if (inst->shadow_compare) {
307	    assert(inst->mlen == 6);
308	    msg_type = BRW_SAMPLER_MESSAGE_SIMD8_SAMPLE_LOD_COMPARE;
310	    assert(inst->mlen == 9);
311	    msg_type = BRW_SAMPLER_MESSAGE_SIMD16_SAMPLE_LOD;
312	    simd_mode = BRW_SAMPLER_SIMD_MODE_SIMD16;
316	 /* There is no sample_d_c message; comparisons are done manually */
317	 assert(inst->mlen == 7 || inst->mlen == 10);
318	 msg_type = BRW_SAMPLER_MESSAGE_SIMD8_SAMPLE_GRADIENTS;
   /* Every reachable path above must have chosen a message type. */
322    assert(msg_type != -1);
324    if (simd_mode == BRW_SAMPLER_SIMD_MODE_SIMD16) {
330	      retype(dst, BRW_REGISTER_TYPE_UW),
333	      SURF_INDEX_TEXTURE(inst->sampler),
340	      inst->header_present,
345 /* For OPCODE_DDX and OPCODE_DDY, per channel of output we've got input
348 * arg0: ss0.tl ss0.tr ss0.bl ss0.br ss1.tl ss1.tr ss1.bl ss1.br
350 * and we're trying to produce:
353 * dst: (ss0.tr - ss0.tl) (ss0.tl - ss0.bl)
354 * (ss0.tr - ss0.tl) (ss0.tr - ss0.br)
355 * (ss0.br - ss0.bl) (ss0.tl - ss0.bl)
356 * (ss0.br - ss0.bl) (ss0.tr - ss0.br)
357 * (ss1.tr - ss1.tl) (ss1.tl - ss1.bl)
358 * (ss1.tr - ss1.tl) (ss1.tr - ss1.br)
359 * (ss1.br - ss1.bl) (ss1.tl - ss1.bl)
360 * (ss1.br - ss1.bl) (ss1.tr - ss1.br)
362 * and add another set of two more subspans if in 16-pixel dispatch mode.
364 * For DDX, it ends up being easy: width = 2, horiz=0 gets us the same result
365 * for each pair, and vertstride = 2 jumps us 2 elements after processing a
366 * pair. But for DDY, it's harder, as we want to produce the pairs swizzled
367 * between each other. We could probably do it like ddx and swizzle the right
368 * order later, but bail for now and just produce
369 * ((ss0.tl - ss0.bl)x4 (ss1.tl - ss1.bl)x4)
/* Emits screen-space d/dx: for each subspan pixel pair, subtracts the
 * left pixel from the right one.  src0 selects the odd (right) elements
 * and src1 the even (left) ones via suboffset 1 vs 0 with vstride 2,
 * hstride 0; the ADD of src0 + (-src1) yields the per-pair difference
 * replicated across the pair.  (See the layout comment above.)
 */
372 fs_visitor::generate_ddx(fs_inst *inst, struct brw_reg dst, struct brw_reg src)
374    struct brw_reg src0 = brw_reg(src.file, src.nr, 1,
376				BRW_VERTICAL_STRIDE_2,
378				BRW_HORIZONTAL_STRIDE_0,
379				BRW_SWIZZLE_XYZW, WRITEMASK_XYZW);
380    struct brw_reg src1 = brw_reg(src.file, src.nr, 0,
382				BRW_VERTICAL_STRIDE_2,
384				BRW_HORIZONTAL_STRIDE_0,
385				BRW_SWIZZLE_XYZW, WRITEMASK_XYZW);
386    brw_ADD(p, dst, src0, negate(src1));
/* Emits screen-space d/dy using the simplified form described in the
 * comment above generate_ddx: src0 selects the top-row elements
 * (suboffset 0) and src1 the bottom-row elements (suboffset 2), each with
 * vstride 4, hstride 0, so the (top - bottom) difference is replicated
 * four-wide per subspan rather than swizzled per pixel pair.
 */
390 fs_visitor::generate_ddy(fs_inst *inst, struct brw_reg dst, struct brw_reg src)
392    struct brw_reg src0 = brw_reg(src.file, src.nr, 0,
394				BRW_VERTICAL_STRIDE_4,
396				BRW_HORIZONTAL_STRIDE_0,
397				BRW_SWIZZLE_XYZW, WRITEMASK_XYZW);
398    struct brw_reg src1 = brw_reg(src.file, src.nr, 2,
400				BRW_VERTICAL_STRIDE_4,
402				BRW_HORIZONTAL_STRIDE_0,
403				BRW_SWIZZLE_XYZW, WRITEMASK_XYZW);
404    brw_ADD(p, dst, src0, negate(src1));
/* Emits the fragment-discard sequence, which clears the "pixel still
 * live" bits for the active channels.
 *
 * gen6+: there is no hardware mask register to consult, so the flag reg
 * is loaded with all ones (mask disabled), then an always-false CMP of a
 * register against itself zeroes the flag bits for the currently active
 * channels; ANDing f0 into the g1 pixel-mask word kills those pixels.
 * Pre-gen6: invert the IMASK mask register into f0 and AND it with the
 * pixel mask in g0.
 */
408 fs_visitor::generate_discard(fs_inst *inst)
410    struct brw_reg f0 = brw_flag_reg();
412    if (intel->gen >= 6) {
413       struct brw_reg g1 = retype(brw_vec1_grf(1, 7), BRW_REGISTER_TYPE_UW);
414       struct brw_reg some_register;
416       /* As of gen6, we no longer have the mask register to look at,
417        * so life gets a bit more complicated.
420       /* Load the flag register with all ones. */
421       brw_push_insn_state(p);
422       brw_set_mask_control(p, BRW_MASK_DISABLE);
423       brw_MOV(p, f0, brw_imm_uw(0xffff));
424       brw_pop_insn_state(p);
426       /* Do a comparison that should always fail, to produce 0s in the flag
427        * reg where we have active channels.
429       some_register = retype(brw_vec8_grf(0, 0), BRW_REGISTER_TYPE_UW);
430       brw_CMP(p, retype(brw_null_reg(), BRW_REGISTER_TYPE_UD),
431	       BRW_CONDITIONAL_NZ, some_register, some_register);
433       /* Undo CMP's whacking of predication*/
434       brw_set_predicate_control(p, BRW_PREDICATE_NONE);
436       brw_push_insn_state(p);
437       brw_set_mask_control(p, BRW_MASK_DISABLE);
438       brw_AND(p, g1, f0, g1);
439       brw_pop_insn_state(p);
441       struct brw_reg g0 = retype(brw_vec1_grf(0, 0), BRW_REGISTER_TYPE_UW);
443       brw_push_insn_state(p);
444       brw_set_mask_control(p, BRW_MASK_DISABLE);
445       brw_set_compression_control(p, BRW_COMPRESSION_NONE);
447       /* Unlike the 965, we have the mask reg, so we just need
448        * somewhere to invert that (containing channels to be disabled)
449        * so it can be ANDed with the mask of pixels still to be
450        * written. Use the flag reg for consistency with gen6+.
452       brw_NOT(p, f0, brw_mask_reg(1)); /* IMASK */
453       brw_AND(p, g0, f0, g0);
455       brw_pop_insn_state(p);
/* Spills a register to scratch space: copies src into the message payload
 * at m<base_mrf + 1> (as UD) and emits an oword block write from
 * m<base_mrf>.  mlen must be non-zero since the send carries a payload.
 */
460 fs_visitor::generate_spill(fs_inst *inst, struct brw_reg src)
462    assert(inst->mlen != 0);
465	   retype(brw_message_reg(inst->base_mrf + 1), BRW_REGISTER_TYPE_UD),
466	   retype(src, BRW_REGISTER_TYPE_UD));
467    brw_oword_block_write_scratch(p, brw_message_reg(inst->base_mrf), 1,
/* Reloads a previously spilled register from scratch space via an oword
 * block read into dst.  On gen4 (non-G4x) a dummy MOV of dst to null is
 * emitted both before the send (to clear stale post-destination
 * dependencies the block read would ignore) and after it (errata: a send
 * destination can't be reused as a destination until it has been read).
 */
472 fs_visitor::generate_unspill(fs_inst *inst, struct brw_reg dst)
474    assert(inst->mlen != 0);
476    /* Clear any post destination dependencies that would be ignored by
477     * the block read. See the B-Spec for pre-gen5 send instruction.
479     * This could use a better solution, since texture sampling and
480     * math reads could potentially run into it as well -- anywhere
481     * that we have a SEND with a destination that is a register that
482     * was written but not read within the last N instructions (what's
483     * N? unsure). This is rare because of dead code elimination, but
486    if (intel->gen == 4 && !intel->is_g4x)
487       brw_MOV(p, brw_null_reg(), dst);
489    brw_oword_block_read_scratch(p, dst, brw_message_reg(inst->base_mrf), 1,
492    if (intel->gen == 4 && !intel->is_g4x) {
493       /* gen4 errata: destination from a send can't be used as a
494        * destination until it's been read. Just read it so we don't
497       brw_MOV(p, brw_null_reg(), dst);
/* Loads a pull constant from the fragment constant buffer
 * (SURF_INDEX_FRAG_CONST_BUFFER) at inst->offset via an oword block read
 * into dst.  Mirrors generate_unspill's gen4 (non-G4x) errata handling:
 * dummy MOVs of dst to null before and after the send to satisfy the
 * send-destination dependency rules.
 */
502 fs_visitor::generate_pull_constant_load(fs_inst *inst, struct brw_reg dst)
504    assert(inst->mlen != 0);
506    /* Clear any post destination dependencies that would be ignored by
507     * the block read. See the B-Spec for pre-gen5 send instruction.
509     * This could use a better solution, since texture sampling and
510     * math reads could potentially run into it as well -- anywhere
511     * that we have a SEND with a destination that is a register that
512     * was written but not read within the last N instructions (what's
513     * N? unsure). This is rare because of dead code elimination, but
516    if (intel->gen == 4 && !intel->is_g4x)
517       brw_MOV(p, brw_null_reg(), dst);
519    brw_oword_block_read(p, dst, brw_message_reg(inst->base_mrf),
520			 inst->offset, SURF_INDEX_FRAG_CONST_BUFFER);
522    if (intel->gen == 4 && !intel->is_g4x) {
523       /* gen4 errata: destination from a send can't be used as a
524        * destination until it's been read. Just read it so we don't
527       brw_MOV(p, brw_null_reg(), dst);
/* Translates a virtual fs_reg into the concrete brw_reg the EU assembler
 * consumes.
 *
 * Hardware-register files: smear == -1 yields a full vec8 register,
 * otherwise a vec1 view of the smeared component; the result is retyped
 * to the fs_reg's type and shifted to the second half when required.
 * Immediates map by type to float/signed/unsigned immediate registers.
 * Fixed hardware registers pass through unchanged.  Remaining files fall
 * back to the null register (with asserts on the unreachable ones).
 * Finally, the abs and negate source modifiers are applied.
 */
531 static struct brw_reg
532 brw_reg_from_fs_reg(fs_reg *reg)
534    struct brw_reg brw_reg;
540       if (reg->smear == -1) {
541	 brw_reg = brw_vec8_reg(reg->file,
544	 brw_reg = brw_vec1_reg(reg->file,
545			        reg->hw_reg, reg->smear);
547       brw_reg = retype(brw_reg, reg->type);
549	 brw_reg = sechalf(brw_reg);
      /* Immediate operand: pick the encoding matching the fs_reg type. */
553       case BRW_REGISTER_TYPE_F:
554	 brw_reg = brw_imm_f(reg->imm.f);
556       case BRW_REGISTER_TYPE_D:
557	 brw_reg = brw_imm_d(reg->imm.i);
559       case BRW_REGISTER_TYPE_UD:
560	 brw_reg = brw_imm_ud(reg->imm.u);
563	 assert(!"not reached");
564	 brw_reg = brw_null_reg();
      /* Pre-assigned hardware register: use it verbatim. */
569       brw_reg = reg->fixed_hw_reg;
572       /* Probably unused. */
573       brw_reg = brw_null_reg();
576       assert(!"not reached");
577       brw_reg = brw_null_reg();
580       assert(!"not reached");
581       brw_reg = brw_null_reg();
   /* Apply source modifiers last so they affect the final region. */
585       brw_reg = brw_abs(brw_reg);
587       brw_reg = negate(brw_reg);
593 fs_visitor::generate_code()
595 int last_native_inst = p->nr_insn;
596 const char *last_annotation_string = NULL;
597 ir_instruction *last_annotation_ir = NULL;
599 int loop_stack_array_size = 16;
600 int loop_stack_depth = 0;
601 brw_instruction **loop_stack =
602 rzalloc_array(this->mem_ctx, brw_instruction *, loop_stack_array_size);
603 int *if_depth_in_loop =
604 rzalloc_array(this->mem_ctx, int, loop_stack_array_size);
607 if (unlikely(INTEL_DEBUG & DEBUG_WM)) {
608 printf("Native code for fragment shader %d (%d-wide dispatch):\n",
609 prog->Name, c->dispatch_width);
612 foreach_iter(exec_list_iterator, iter, this->instructions) {
613 fs_inst *inst = (fs_inst *)iter.get();
614 struct brw_reg src[3], dst;
616 if (unlikely(INTEL_DEBUG & DEBUG_WM)) {
617 if (last_annotation_ir != inst->ir) {
618 last_annotation_ir = inst->ir;
619 if (last_annotation_ir) {
621 last_annotation_ir->print();
625 if (last_annotation_string != inst->annotation) {
626 last_annotation_string = inst->annotation;
627 if (last_annotation_string)
628 printf(" %s\n", last_annotation_string);
632 for (unsigned int i = 0; i < 3; i++) {
633 src[i] = brw_reg_from_fs_reg(&inst->src[i]);
635 dst = brw_reg_from_fs_reg(&inst->dst);
637 brw_set_conditionalmod(p, inst->conditional_mod);
638 brw_set_predicate_control(p, inst->predicated);
639 brw_set_predicate_inverse(p, inst->predicate_inverse);
640 brw_set_saturate(p, inst->saturate);
642 if (inst->force_uncompressed || c->dispatch_width == 8) {
643 brw_set_compression_control(p, BRW_COMPRESSION_NONE);
644 } else if (inst->force_sechalf) {
645 brw_set_compression_control(p, BRW_COMPRESSION_2NDHALF);
647 brw_set_compression_control(p, BRW_COMPRESSION_COMPRESSED);
650 switch (inst->opcode) {
652 brw_MOV(p, dst, src[0]);
655 brw_ADD(p, dst, src[0], src[1]);
658 brw_MUL(p, dst, src[0], src[1]);
662 brw_FRC(p, dst, src[0]);
664 case BRW_OPCODE_RNDD:
665 brw_RNDD(p, dst, src[0]);
667 case BRW_OPCODE_RNDE:
668 brw_RNDE(p, dst, src[0]);
670 case BRW_OPCODE_RNDZ:
671 brw_RNDZ(p, dst, src[0]);
675 brw_AND(p, dst, src[0], src[1]);
678 brw_OR(p, dst, src[0], src[1]);
681 brw_XOR(p, dst, src[0], src[1]);
684 brw_NOT(p, dst, src[0]);
687 brw_ASR(p, dst, src[0], src[1]);
690 brw_SHR(p, dst, src[0], src[1]);
693 brw_SHL(p, dst, src[0], src[1]);
697 brw_CMP(p, dst, inst->conditional_mod, src[0], src[1]);
700 brw_SEL(p, dst, src[0], src[1]);
704 if (inst->src[0].file != BAD_FILE) {
705 /* The instruction has an embedded compare (only allowed on gen6) */
706 assert(intel->gen == 6);
707 gen6_IF(p, inst->conditional_mod, src[0], src[1]);
709 brw_IF(p, c->dispatch_width == 16 ? BRW_EXECUTE_16 : BRW_EXECUTE_8);
711 if_depth_in_loop[loop_stack_depth]++;
714 case BRW_OPCODE_ELSE:
717 case BRW_OPCODE_ENDIF:
719 if_depth_in_loop[loop_stack_depth]--;
723 loop_stack[loop_stack_depth++] = brw_DO(p, BRW_EXECUTE_8);
724 if (loop_stack_array_size <= loop_stack_depth) {
725 loop_stack_array_size *= 2;
726 loop_stack = reralloc(this->mem_ctx, loop_stack, brw_instruction *,
727 loop_stack_array_size);
728 if_depth_in_loop = reralloc(this->mem_ctx, if_depth_in_loop, int,
729 loop_stack_array_size);
731 if_depth_in_loop[loop_stack_depth] = 0;
734 case BRW_OPCODE_BREAK:
735 brw_BREAK(p, if_depth_in_loop[loop_stack_depth]);
736 brw_set_predicate_control(p, BRW_PREDICATE_NONE);
738 case BRW_OPCODE_CONTINUE:
739 /* FINISHME: We need to write the loop instruction support still. */
741 gen6_CONT(p, loop_stack[loop_stack_depth - 1]);
743 brw_CONT(p, if_depth_in_loop[loop_stack_depth]);
744 brw_set_predicate_control(p, BRW_PREDICATE_NONE);
747 case BRW_OPCODE_WHILE: {
748 struct brw_instruction *inst0, *inst1;
754 assert(loop_stack_depth > 0);
756 inst0 = inst1 = brw_WHILE(p, loop_stack[loop_stack_depth]);
757 if (intel->gen < 6) {
758 /* patch all the BREAK/CONT instructions from last BGNLOOP */
759 while (inst0 > loop_stack[loop_stack_depth]) {
761 if (inst0->header.opcode == BRW_OPCODE_BREAK &&
762 inst0->bits3.if_else.jump_count == 0) {
763 inst0->bits3.if_else.jump_count = br * (inst1 - inst0 + 1);
765 else if (inst0->header.opcode == BRW_OPCODE_CONTINUE &&
766 inst0->bits3.if_else.jump_count == 0) {
767 inst0->bits3.if_else.jump_count = br * (inst1 - inst0);
782 generate_math(inst, dst, src);
784 case FS_OPCODE_PIXEL_X:
785 generate_pixel_xy(dst, true);
787 case FS_OPCODE_PIXEL_Y:
788 generate_pixel_xy(dst, false);
790 case FS_OPCODE_CINTERP:
791 brw_MOV(p, dst, src[0]);
793 case FS_OPCODE_LINTERP:
794 generate_linterp(inst, dst, src);
800 generate_tex(inst, dst, src[0]);
802 case FS_OPCODE_DISCARD:
803 generate_discard(inst);
806 generate_ddx(inst, dst, src[0]);
809 generate_ddy(inst, dst, src[0]);
812 case FS_OPCODE_SPILL:
813 generate_spill(inst, src[0]);
816 case FS_OPCODE_UNSPILL:
817 generate_unspill(inst, dst);
820 case FS_OPCODE_PULL_CONSTANT_LOAD:
821 generate_pull_constant_load(inst, dst);
824 case FS_OPCODE_FB_WRITE:
825 generate_fb_write(inst);
828 if (inst->opcode < (int)ARRAY_SIZE(brw_opcodes)) {
829 _mesa_problem(ctx, "Unsupported opcode `%s' in FS",
830 brw_opcodes[inst->opcode].name);
832 _mesa_problem(ctx, "Unsupported opcode %d in FS", inst->opcode);
834 fail("unsupported opcode in FS\n");
837 if (unlikely(INTEL_DEBUG & DEBUG_WM)) {
838 for (unsigned int i = last_native_inst; i < p->nr_insn; i++) {
840 printf("0x%08x 0x%08x 0x%08x 0x%08x ",
841 ((uint32_t *)&p->store[i])[3],
842 ((uint32_t *)&p->store[i])[2],
843 ((uint32_t *)&p->store[i])[1],
844 ((uint32_t *)&p->store[i])[0]);
846 brw_disasm(stdout, &p->store[i], intel->gen);
850 last_native_inst = p->nr_insn;
853 if (unlikely(INTEL_DEBUG & DEBUG_WM)) {
857 ralloc_free(loop_stack);
858 ralloc_free(if_depth_in_loop);
862 /* OK, while the INTEL_DEBUG=wm above is very nice for debugging FS
863 * emit issues, it doesn't get the jump distances into the output,
864 * which is often something we want to debug. So this is here in
865 * case you're doing that.
868 if (unlikely(INTEL_DEBUG & DEBUG_WM)) {
869 for (unsigned int i = 0; i < p->nr_insn; i++) {
870 printf("0x%08x 0x%08x 0x%08x 0x%08x ",
871 ((uint32_t *)&p->store[i])[3],
872 ((uint32_t *)&p->store[i])[2],
873 ((uint32_t *)&p->store[i])[1],
874 ((uint32_t *)&p->store[i])[0]);
875 brw_disasm(stdout, &p->store[i], intel->gen);