return new(mem_ctx) fs_inst(BRW_OPCODE_##op, dst, src0, src1); \
}
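+/* Builder for three-source ALU instructions (used for LRP below). */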
+#define ALU3(op) \
+ fs_inst * \
+ fs_visitor::op(fs_reg dst, fs_reg src0, fs_reg src1, fs_reg src2) \
+ { \
+ return new(mem_ctx) fs_inst(BRW_OPCODE_##op, dst, src0, src1, src2);\
+ }
+
ALU1(NOT)
ALU1(MOV)
ALU1(FRC)
ALU2(SHL)
ALU2(SHR)
ALU2(ASR)
+ALU3(LRP)
/** Gen4 predicated IF. */
fs_inst *IF(uint32_t predicate);
fs_inst *IF(fs_reg src0, fs_reg src1, uint32_t condition);
fs_inst *CMP(fs_reg dst, fs_reg src0, fs_reg src1,
uint32_t condition);
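+ /** Three-source linear interpolate: dst = y * a + x * (1 - a). */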
+ fs_inst *LRP(fs_reg dst, fs_reg a, fs_reg y, fs_reg x);
fs_inst *DEP_RESOLVE_MOV(int grf);
int type_size(const struct glsl_type *type);
fs_reg fix_math_operand(fs_reg src);
fs_inst *emit_math(enum opcode op, fs_reg dst, fs_reg src0);
fs_inst *emit_math(enum opcode op, fs_reg dst, fs_reg src0, fs_reg src1);
+ void emit_lrp(fs_reg dst, fs_reg x, fs_reg y, fs_reg a);
void emit_minmax(uint32_t conditionalmod, fs_reg dst,
fs_reg src0, fs_reg src1);
bool try_emit_saturate(ir_expression *ir);
ir_expression *expr = ir->rhs->as_expression();
bool found_vector = false;
unsigned int i, vector_elements = 1;
- ir_variable *op_var[2];
+ ir_variable *op_var[3];
if (!expr)
return visit_continue;
assert(!"not yet supported");
break;
+ case ir_triop_lrp:
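+ /* Scalarize the three-operand lrp: emit one lrp per channel. */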
+ for (i = 0; i < vector_elements; i++) {
+ ir_rvalue *op0 = get_element(op_var[0], i);
+ ir_rvalue *op1 = get_element(op_var[1], i);
+ ir_rvalue *op2 = get_element(op_var[2], i);
+
+ assign(ir, i, new(mem_ctx) ir_expression(expr->operation,
+ element_type,
+ op0,
+ op1,
+ op2));
+ }
+ break;
+
case ir_unop_pack_snorm_2x16:
case ir_unop_pack_snorm_4x8:
case ir_unop_pack_unorm_2x16:
case BRW_OPCODE_LINE:
case BRW_OPCODE_PLN:
case BRW_OPCODE_MAD:
+ case BRW_OPCODE_LRP:
case FS_OPCODE_UNIFORM_PULL_CONSTANT_LOAD:
case FS_OPCODE_CINTERP:
case FS_OPCODE_LINTERP:
brw_set_access_mode(p, BRW_ALIGN_1);
break;
+ case BRW_OPCODE_LRP:
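+ /* Mirror the MAD case above: three-source instructions are emitted in
+ * align16 mode, and a SIMD16 LRP is split into two SIMD8 halves.
+ */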
+ brw_set_access_mode(p, BRW_ALIGN_16);
+ if (dispatch_width == 16) {
+ brw_set_compression_control(p, BRW_COMPRESSION_NONE);
+ brw_LRP(p, dst, src[0], src[1], src[2]);
+ brw_set_compression_control(p, BRW_COMPRESSION_2NDHALF);
+ brw_LRP(p, sechalf(dst), sechalf(src[0]), sechalf(src[1]), sechalf(src[2]));
+ brw_set_compression_control(p, BRW_COMPRESSION_COMPRESSED);
+ } else {
+ brw_LRP(p, dst, src[0], src[1], src[2]);
+ }
+ brw_set_access_mode(p, BRW_ALIGN_1);
+ break;
+
case BRW_OPCODE_FRC:
brw_FRC(p, dst, src[0]);
break;
}
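+/* Emit dst = x * (1 - a) + y * a (ir_triop_lrp), using the LRP instruction
+ * when the hardware and operands allow it.
+ */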
void
+fs_visitor::emit_lrp(fs_reg dst, fs_reg x, fs_reg y, fs_reg a)
+{
+ if (intel->gen < 6 || x.file != GRF || y.file != GRF || a.file != GRF) {
+ /* We can't use the LRP instruction. Emit x*(1-a) + y*a. */
+ fs_reg y_times_a = fs_reg(this, glsl_type::float_type);
+ fs_reg one_minus_a = fs_reg(this, glsl_type::float_type);
+ fs_reg x_times_one_minus_a = fs_reg(this, glsl_type::float_type);
+
+ emit(MUL(y_times_a, y, a));
+
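+ /* Flip a's negate flag so the ADD computes 1.0 + (-a) = 1 - a. */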
+ a.negate = !a.negate;
+ emit(ADD(one_minus_a, fs_reg(1.0f), a));
+ emit(MUL(x_times_one_minus_a, x, one_minus_a));
+
+ emit(ADD(dst, x_times_one_minus_a, y_times_a));
+ } else {
+ /* The LRP instruction actually does op1 * op0 + op2 * (1 - op0), so
+ * we need to reorder the operands.
+ */
+ emit(LRP(dst, a, y, x));
+ }
+}
+
+void
fs_visitor::emit_minmax(uint32_t conditionalmod, fs_reg dst,
fs_reg src0, fs_reg src1)
{
fs_visitor::visit(ir_expression *ir)
{
unsigned int operand;
- fs_reg op[2], temp;
+ fs_reg op[3], temp;
fs_inst *inst;
- assert(ir->get_num_operands() <= 2);
+ assert(ir->get_num_operands() <= 3);
if (try_emit_saturate(ir))
return;
case ir_binop_pack_half_2x16_split:
emit(FS_OPCODE_PACK_HALF_2x16_SPLIT, this->result, op[0], op[1]);
break;
- case ir_binop_ubo_load:
+ case ir_binop_ubo_load: {
/* This IR node takes a constant uniform block and a constant or
* variable byte offset within the block and loads a vector from that.
*/
result.reg_offset = 0;
break;
}
+
+ case ir_triop_lrp:
+ emit_lrp(this->result, op[0], op[1], op[2]);
+ break;
+ }
}
void
*/
brw_lower_packing_builtins(brw, (gl_shader_type) stage, shader->ir);
do_mat_op_to_vec(shader->ir);
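+ /* Only the gen6+ fragment shader backend consumes ir_triop_lrp directly;
+ * lower it to arithmetic for older hardware and for other stages.
+ */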
+ const int lrp_to_arith = (intel->gen < 6 || stage != MESA_SHADER_FRAGMENT)
+ ? LRP_TO_ARITH : 0;
lower_instructions(shader->ir,
MOD_TO_FRACT |
DIV_TO_MUL_RCP |
SUB_TO_ADD_NEG |
EXP_TO_EXP2 |
LOG_TO_LOG2 |
- LRP_TO_ARITH);
+ lrp_to_arith);
/* Pre-gen6 HW can only nest if-statements 16 deep. Beyond this,
* if-statements need to be flattened.