is supported. If it is, DTRUNC/DCEIL/DFLR/DROUND opcodes may be used.
* ``PIPE_SHADER_CAP_DFRACEXP_DLDEXP_SUPPORTED``: Whether DFRACEXP and
DLDEXP are supported.
-* ``PIPE_SHADER_CAP_LDEXP_SUPPORTED``: Whether LDEXP is supported.
* ``PIPE_SHADER_CAP_TGSI_ANY_INOUT_DECL_RANGE``: Whether the driver doesn't
ignore tgsi_declaration_range::Last for shader inputs and outputs.
* ``PIPE_SHADER_CAP_MAX_SHADER_BUFFERS``: Maximum number of memory buffers
.lower_iabs = true,
.lower_fdph = true,
.lower_ffract = true,
+ .lower_ldexp = true,
.lower_pack_half_2x16 = true,
.lower_pack_64_2x32 = true,
.lower_unpack_half_2x16 = true,
bool do_vec_index_to_cond_assign(exec_list *instructions);
bool lower_discard(exec_list *instructions);
void lower_discard_flow(exec_list *instructions);
-bool lower_instructions(exec_list *instructions, bool have_ldexp,
+bool lower_instructions(exec_list *instructions,
bool have_dfrexp, bool have_dround,
bool have_gpu_shader5);
bool lower_clip_cull_distance(struct gl_shader_program *prog,
* rather than in each driver backend.
*
* Currently supported transformations:
- * - LDEXP_TO_ARITH
* - DOPS_TO_DFRAC
*
- * LDEXP_TO_ARITH:
- * -------------
- * Converts ir_binop_ldexp to arithmetic and bit operations for float sources.
- *
* DFREXP_DLDEXP_TO_ARITH:
* ---------------
* Converts ir_binop_ldexp, ir_unop_frexp_sig, and ir_unop_frexp_exp to
#include <math.h>
/* Operations for lower_instructions() */
-#define LDEXP_TO_ARITH 0x80
#define DOPS_TO_DFRAC 0x800
#define DFREXP_DLDEXP_TO_ARITH 0x1000
#define FIND_LSB_TO_FLOAT_CAST 0x20000
private:
unsigned lower; /** Bitfield of which operations to lower */
- void ldexp_to_arith(ir_expression *);
void dldexp_to_arith(ir_expression *);
void dfrexp_sig_to_arith(ir_expression *);
void dfrexp_exp_to_arith(ir_expression *);
#define lowering(x) (this->lower & x)
bool
-lower_instructions(exec_list *instructions, bool have_ldexp, bool have_dfrexp,
+lower_instructions(exec_list *instructions, bool have_dfrexp,
bool have_dround, bool have_gpu_shader5)
{
unsigned what_to_lower =
- (have_ldexp ? 0 : LDEXP_TO_ARITH) |
(have_dfrexp ? 0 : DFREXP_DLDEXP_TO_ARITH) |
(have_dround ? 0 : DOPS_TO_DFRAC) |
/* Assume that if ARB_gpu_shader5 is not supported then all of the
}
void
-lower_instructions_visitor::ldexp_to_arith(ir_expression *ir)
-{
- /* Translates
- * ir_binop_ldexp x exp
- * into
- *
- * extracted_biased_exp = rshift(bitcast_f2i(abs(x)), exp_shift);
- * resulting_biased_exp = min(extracted_biased_exp + exp, 255);
- *
- * if (extracted_biased_exp >= 255)
- * return x; // +/-inf, NaN
- *
- * sign_mantissa = bitcast_f2u(x) & sign_mantissa_mask;
- *
- * if (min(resulting_biased_exp, extracted_biased_exp) < 1)
- * resulting_biased_exp = 0;
- * if (resulting_biased_exp >= 255 ||
- * min(resulting_biased_exp, extracted_biased_exp) < 1) {
- * sign_mantissa &= sign_mask;
- * }
- *
- * return bitcast_u2f(sign_mantissa |
- * lshift(i2u(resulting_biased_exp), exp_shift));
- *
- * which we can't actually implement as such, since the GLSL IR doesn't
- * have vectorized if-statements. We actually implement it without branches
- * using conditional-select:
- *
- * extracted_biased_exp = rshift(bitcast_f2i(abs(x)), exp_shift);
- * resulting_biased_exp = min(extracted_biased_exp + exp, 255);
- *
- * sign_mantissa = bitcast_f2u(x) & sign_mantissa_mask;
- *
- * flush_to_zero = lequal(min(resulting_biased_exp, extracted_biased_exp), 0);
- * resulting_biased_exp = csel(flush_to_zero, 0, resulting_biased_exp)
- * zero_mantissa = logic_or(flush_to_zero,
- * gequal(resulting_biased_exp, 255));
- * sign_mantissa = csel(zero_mantissa, sign_mantissa & sign_mask, sign_mantissa);
- *
- * result = sign_mantissa |
- * lshift(i2u(resulting_biased_exp), exp_shift));
- *
- * return csel(extracted_biased_exp >= 255, x, bitcast_u2f(result));
- *
- * The definition of ldexp in the GLSL spec says:
- *
- * "If this product is too large to be represented in the
- * floating-point type, the result is undefined."
- *
- * However, the definition of ldexp in the GLSL ES spec does not contain
- * this sentence, so we do need to handle overflow correctly.
- *
- * There is additional language limiting the defined range of exp, but this
- * is merely to allow implementations that store 2^exp in a temporary
- * variable.
- */
-
- const unsigned vec_elem = ir->type->vector_elements;
-
- /* Types */
- const glsl_type *ivec = glsl_type::get_instance(GLSL_TYPE_INT, vec_elem, 1);
- const glsl_type *uvec = glsl_type::get_instance(GLSL_TYPE_UINT, vec_elem, 1);
- const glsl_type *bvec = glsl_type::get_instance(GLSL_TYPE_BOOL, vec_elem, 1);
-
- /* Temporary variables */
- ir_variable *x = new(ir) ir_variable(ir->type, "x", ir_var_temporary);
- ir_variable *exp = new(ir) ir_variable(ivec, "exp", ir_var_temporary);
- ir_variable *result = new(ir) ir_variable(uvec, "result", ir_var_temporary);
-
- ir_variable *extracted_biased_exp =
- new(ir) ir_variable(ivec, "extracted_biased_exp", ir_var_temporary);
- ir_variable *resulting_biased_exp =
- new(ir) ir_variable(ivec, "resulting_biased_exp", ir_var_temporary);
-
- ir_variable *sign_mantissa =
- new(ir) ir_variable(uvec, "sign_mantissa", ir_var_temporary);
-
- ir_variable *flush_to_zero =
- new(ir) ir_variable(bvec, "flush_to_zero", ir_var_temporary);
- ir_variable *zero_mantissa =
- new(ir) ir_variable(bvec, "zero_mantissa", ir_var_temporary);
-
- ir_instruction &i = *base_ir;
-
- /* Copy <x> and <exp> arguments. */
- i.insert_before(x);
- i.insert_before(assign(x, ir->operands[0]));
- i.insert_before(exp);
- i.insert_before(assign(exp, ir->operands[1]));
-
- /* Extract the biased exponent from <x>. */
- i.insert_before(extracted_biased_exp);
- i.insert_before(assign(extracted_biased_exp,
- rshift(bitcast_f2i(abs(x)),
- new(ir) ir_constant(23, vec_elem))));
-
- /* The definition of ldexp in the GLSL 4.60 spec says:
- *
- * "If exp is greater than +128 (single-precision) or +1024
- * (double-precision), the value returned is undefined. If exp is less
- * than -126 (single-precision) or -1022 (double-precision), the value
- * returned may be flushed to zero."
- *
- * So we do not have to guard against the possibility of addition overflow,
- * which could happen when exp is close to INT_MAX. Addition underflow
- * cannot happen (the worst case is 0 + (-INT_MAX)).
- */
- i.insert_before(resulting_biased_exp);
- i.insert_before(assign(resulting_biased_exp,
- min2(add(extracted_biased_exp, exp),
- new(ir) ir_constant(255, vec_elem))));
-
- i.insert_before(sign_mantissa);
- i.insert_before(assign(sign_mantissa,
- bit_and(bitcast_f2u(x),
- new(ir) ir_constant(0x807fffffu, vec_elem))));
-
- /* We flush to zero if the original or resulting biased exponent is 0,
- * indicating a +/-0.0 or subnormal input or output.
- *
- * The mantissa is set to 0 if the resulting biased exponent is 255, since
- * an overflow should produce a +/-inf result.
- *
- * Note that NaN inputs are handled separately.
- */
- i.insert_before(flush_to_zero);
- i.insert_before(assign(flush_to_zero,
- lequal(min2(resulting_biased_exp,
- extracted_biased_exp),
- ir_constant::zero(ir, ivec))));
- i.insert_before(assign(resulting_biased_exp,
- csel(flush_to_zero,
- ir_constant::zero(ir, ivec),
- resulting_biased_exp)));
-
- i.insert_before(zero_mantissa);
- i.insert_before(assign(zero_mantissa,
- logic_or(flush_to_zero,
- equal(resulting_biased_exp,
- new(ir) ir_constant(255, vec_elem)))));
- i.insert_before(assign(sign_mantissa,
- csel(zero_mantissa,
- bit_and(sign_mantissa,
- new(ir) ir_constant(0x80000000u, vec_elem)),
- sign_mantissa)));
-
- i.insert_before(result);
- i.insert_before(assign(result,
- bitfield_insert(sign_mantissa,
- i2u(resulting_biased_exp),
- new(ir) ir_constant(23u, vec_elem),
- new(ir) ir_constant(8u, vec_elem))));
-
- ir->operation = ir_triop_csel;
- ir->init_num_operands();
- ir->operands[0] = gequal(extracted_biased_exp,
- new(ir) ir_constant(255, vec_elem));
- ir->operands[1] = new(ir) ir_dereference_variable(x);
- ir->operands[2] = bitcast_u2f(result);
-
- this->progress = true;
-}
-
-void
lower_instructions_visitor::dldexp_to_arith(ir_expression *ir)
{
/* See ldexp_to_arith for structure. Uses frexp_exp to extract the exponent
break;
case ir_binop_ldexp:
- if (lowering(LDEXP_TO_ARITH) && ir->type->is_float())
- ldexp_to_arith(ir);
if (lowering(DFREXP_DLDEXP_TO_ARITH) && ir->type->is_double())
dldexp_to_arith(ir);
break;
return lower_discard(ir);
} else if (sscanf(optimization, "lower_instructions ( %d ) ",
&int_0) == 1) {
- return lower_instructions(ir, false, false, false, false);
+ return lower_instructions(ir, false, false, false);
} else {
printf("Unrecognized optimization %s\n", optimization);
exit(EXIT_FAILURE);
return 1;
case PIPE_SHADER_CAP_DROUND_SUPPORTED:
case PIPE_SHADER_CAP_DFRACEXP_DLDEXP_SUPPORTED:
- case PIPE_SHADER_CAP_LDEXP_SUPPORTED:
case PIPE_SHADER_CAP_MAX_HW_ATOMIC_COUNTERS:
case PIPE_SHADER_CAP_MAX_HW_ATOMIC_COUNTER_BUFFERS:
return 0;
case PIPE_SHADER_CAP_TGSI_SQRT_SUPPORTED:
return 1;
case PIPE_SHADER_CAP_DFRACEXP_DLDEXP_SUPPORTED:
- case PIPE_SHADER_CAP_LDEXP_SUPPORTED:
case PIPE_SHADER_CAP_TGSI_ANY_INOUT_DECL_RANGE:
return 1;
case PIPE_SHADER_CAP_DROUND_SUPPORTED:
case PIPE_SHADER_CAP_INT64_ATOMICS:
case PIPE_SHADER_CAP_DROUND_SUPPORTED:
case PIPE_SHADER_CAP_DFRACEXP_DLDEXP_SUPPORTED:
- case PIPE_SHADER_CAP_LDEXP_SUPPORTED:
case PIPE_SHADER_CAP_TGSI_ANY_INOUT_DECL_RANGE:
return 0;
case PIPE_SHADER_CAP_SUPPORTED_IRS:
return 1 << PIPE_SHADER_IR_NIR;
case PIPE_SHADER_CAP_DROUND_SUPPORTED:
- case PIPE_SHADER_CAP_LDEXP_SUPPORTED:
return 1;
case PIPE_SHADER_CAP_DFRACEXP_DLDEXP_SUPPORTED:
case PIPE_SHADER_CAP_TGSI_ANY_INOUT_DECL_RANGE:
screen->opts.ResourceBindingTier >= D3D12_RESOURCE_BINDING_TIER_3) ?
PIPE_MAX_SHADER_IMAGES : D3D12_PS_CS_UAV_REGISTER_COUNT;
- case PIPE_SHADER_CAP_LDEXP_SUPPORTED:
case PIPE_SHADER_CAP_MAX_HW_ATOMIC_COUNTERS:
case PIPE_SHADER_CAP_MAX_HW_ATOMIC_COUNTER_BUFFERS:
case PIPE_SHADER_CAP_CONT_SUPPORTED:
.fuse_ffma64 = true,
.lower_uadd_carry = true,
.lower_usub_borrow = true,
+ .lower_ldexp = true,
.lower_mul_high = true,
.lower_bitops = true,
.lower_all_io_to_temps = true,
: screen->specs.max_vs_uniforms * sizeof(float[4]);
case PIPE_SHADER_CAP_DROUND_SUPPORTED:
case PIPE_SHADER_CAP_DFRACEXP_DLDEXP_SUPPORTED:
- case PIPE_SHADER_CAP_LDEXP_SUPPORTED:
case PIPE_SHADER_CAP_TGSI_ANY_INOUT_DECL_RANGE:
return false;
case PIPE_SHADER_CAP_SUPPORTED_IRS:
case PIPE_SHADER_CAP_SUBROUTINES:
case PIPE_SHADER_CAP_DROUND_SUPPORTED:
case PIPE_SHADER_CAP_DFRACEXP_DLDEXP_SUPPORTED:
- case PIPE_SHADER_CAP_LDEXP_SUPPORTED:
case PIPE_SHADER_CAP_TGSI_ANY_INOUT_DECL_RANGE:
case PIPE_SHADER_CAP_MAX_HW_ATOMIC_COUNTERS:
case PIPE_SHADER_CAP_MAX_HW_ATOMIC_COUNTER_BUFFERS:
return I915_TEX_UNITS;
case PIPE_SHADER_CAP_DROUND_SUPPORTED:
case PIPE_SHADER_CAP_DFRACEXP_DLDEXP_SUPPORTED:
- case PIPE_SHADER_CAP_LDEXP_SUPPORTED:
case PIPE_SHADER_CAP_TGSI_ANY_INOUT_DECL_RANGE:
case PIPE_SHADER_CAP_MAX_SHADER_BUFFERS:
case PIPE_SHADER_CAP_MAX_SHADER_IMAGES:
return irs;
}
case PIPE_SHADER_CAP_DROUND_SUPPORTED:
- case PIPE_SHADER_CAP_LDEXP_SUPPORTED:
return 1;
case PIPE_SHADER_CAP_DFRACEXP_DLDEXP_SUPPORTED:
case PIPE_SHADER_CAP_TGSI_ANY_INOUT_DECL_RANGE:
case PIPE_SHADER_CAP_GLSL_16BIT_CONSTS:
case PIPE_SHADER_CAP_DROUND_SUPPORTED:
case PIPE_SHADER_CAP_DFRACEXP_DLDEXP_SUPPORTED:
- case PIPE_SHADER_CAP_LDEXP_SUPPORTED:
case PIPE_SHADER_CAP_TGSI_ANY_INOUT_DECL_RANGE:
case PIPE_SHADER_CAP_MAX_SHADER_BUFFERS:
case PIPE_SHADER_CAP_MAX_SHADER_IMAGES:
case PIPE_SHADER_CAP_GLSL_16BIT_CONSTS:
case PIPE_SHADER_CAP_DROUND_SUPPORTED:
case PIPE_SHADER_CAP_DFRACEXP_DLDEXP_SUPPORTED:
- case PIPE_SHADER_CAP_LDEXP_SUPPORTED:
case PIPE_SHADER_CAP_TGSI_ANY_INOUT_DECL_RANGE:
case PIPE_SHADER_CAP_MAX_SHADER_BUFFERS:
case PIPE_SHADER_CAP_MAX_SHADER_IMAGES:
return (1 << PIPE_SHADER_IR_TGSI) | (1 << PIPE_SHADER_IR_NIR);
case PIPE_SHADER_CAP_DROUND_SUPPORTED:
case PIPE_SHADER_CAP_DFRACEXP_DLDEXP_SUPPORTED:
- case PIPE_SHADER_CAP_LDEXP_SUPPORTED:
case PIPE_SHADER_CAP_TGSI_ANY_INOUT_DECL_RANGE:
case PIPE_SHADER_CAP_MAX_HW_ATOMIC_COUNTERS:
case PIPE_SHADER_CAP_MAX_HW_ATOMIC_COUNTER_BUFFERS:
case PIPE_SHADER_CAP_DROUND_SUPPORTED:
return 1;
case PIPE_SHADER_CAP_DFRACEXP_DLDEXP_SUPPORTED:
- case PIPE_SHADER_CAP_LDEXP_SUPPORTED:
case PIPE_SHADER_CAP_TGSI_ANY_INOUT_DECL_RANGE:
case PIPE_SHADER_CAP_INT64_ATOMICS:
case PIPE_SHADER_CAP_FP16:
case PIPE_SHADER_CAP_INT64_ATOMICS:
case PIPE_SHADER_CAP_DROUND_SUPPORTED:
case PIPE_SHADER_CAP_DFRACEXP_DLDEXP_SUPPORTED:
- case PIPE_SHADER_CAP_LDEXP_SUPPORTED:
case PIPE_SHADER_CAP_TGSI_ANY_INOUT_DECL_RANGE:
return 0;
case PIPE_SHADER_CAP_GLSL_16BIT_CONSTS:
case PIPE_SHADER_CAP_DROUND_SUPPORTED:
case PIPE_SHADER_CAP_DFRACEXP_DLDEXP_SUPPORTED:
- case PIPE_SHADER_CAP_LDEXP_SUPPORTED:
case PIPE_SHADER_CAP_MAX_SHADER_BUFFERS:
case PIPE_SHADER_CAP_MAX_SHADER_IMAGES:
case PIPE_SHADER_CAP_MAX_HW_ATOMIC_COUNTERS:
case PIPE_SHADER_CAP_MAX_SAMPLER_VIEWS:
case PIPE_SHADER_CAP_DROUND_SUPPORTED:
case PIPE_SHADER_CAP_DFRACEXP_DLDEXP_SUPPORTED:
- case PIPE_SHADER_CAP_LDEXP_SUPPORTED:
case PIPE_SHADER_CAP_MAX_SHADER_BUFFERS:
case PIPE_SHADER_CAP_MAX_SHADER_IMAGES:
case PIPE_SHADER_CAP_MAX_HW_ATOMIC_COUNTERS:
}
case PIPE_SHADER_CAP_DROUND_SUPPORTED:
case PIPE_SHADER_CAP_DFRACEXP_DLDEXP_SUPPORTED:
- case PIPE_SHADER_CAP_LDEXP_SUPPORTED:
return 0;
case PIPE_SHADER_CAP_MAX_SHADER_BUFFERS:
case PIPE_SHADER_CAP_MAX_SHADER_IMAGES:
.lower_extract_word = true,
.lower_insert_byte = true,
.lower_insert_word = true,
+ .lower_ldexp = true,
.lower_rotate = true,
/* due to a bug in the shader compiler, some loops hang
* if they are not unrolled, see:
case PIPE_SHADER_CAP_INT64_ATOMICS:
case PIPE_SHADER_CAP_TGSI_ANY_INOUT_DECL_RANGE:
case PIPE_SHADER_CAP_DROUND_SUPPORTED:
- case PIPE_SHADER_CAP_LDEXP_SUPPORTED:
case PIPE_SHADER_CAP_DFRACEXP_DLDEXP_SUPPORTED:
case PIPE_SHADER_CAP_INDIRECT_INPUT_ADDR: /* lowered in finalize_nir */
case PIPE_SHADER_CAP_INDIRECT_OUTPUT_ADDR: /* lowered in finalize_nir */
return (1 << PIPE_SHADER_IR_TGSI) | (svgascreen->debug.nir ? (1 << PIPE_SHADER_IR_NIR) : 0);
case PIPE_SHADER_CAP_DROUND_SUPPORTED:
case PIPE_SHADER_CAP_DFRACEXP_DLDEXP_SUPPORTED:
- case PIPE_SHADER_CAP_LDEXP_SUPPORTED:
case PIPE_SHADER_CAP_TGSI_ANY_INOUT_DECL_RANGE:
case PIPE_SHADER_CAP_MAX_SHADER_BUFFERS:
case PIPE_SHADER_CAP_MAX_SHADER_IMAGES:
return (1 << PIPE_SHADER_IR_TGSI) | (svgascreen->debug.nir ? (1 << PIPE_SHADER_IR_NIR) : 0);
case PIPE_SHADER_CAP_DROUND_SUPPORTED:
case PIPE_SHADER_CAP_DFRACEXP_DLDEXP_SUPPORTED:
- case PIPE_SHADER_CAP_LDEXP_SUPPORTED:
case PIPE_SHADER_CAP_TGSI_ANY_INOUT_DECL_RANGE:
case PIPE_SHADER_CAP_MAX_SHADER_BUFFERS:
case PIPE_SHADER_CAP_MAX_SHADER_IMAGES:
return 0;
case PIPE_SHADER_CAP_DROUND_SUPPORTED:
case PIPE_SHADER_CAP_DFRACEXP_DLDEXP_SUPPORTED:
- case PIPE_SHADER_CAP_LDEXP_SUPPORTED:
/* For the above cases, we rely on the GLSL compiler to translate/lower
* the TGIS instruction into other instructions we do support.
*/
.lower_int64_options = nir_lower_imul_2x32_64, \
.lower_fdph = true, \
.lower_flrp64 = true, \
+ .lower_ldexp = true, \
.lower_rotate = true, \
.lower_uniforms_to_ubo = true, \
.lower_vector_cmp = true, \
return emit_dtrunc(emit, inst);
/* The following opcodes should never be seen here. We return zero
- * for all the PIPE_CAP_TGSI_DROUND_SUPPORTED, DFRACEXP_DLDEXP_SUPPORTED,
- * LDEXP_SUPPORTED queries.
+ * for all the PIPE_CAP_TGSI_DROUND_SUPPORTED, DFRACEXP_DLDEXP_SUPPORTED queries.
*/
case TGSI_OPCODE_LDEXP:
case TGSI_OPCODE_DSSG:
case PIPE_SHADER_CAP_GLSL_16BIT_CONSTS:
case PIPE_SHADER_CAP_DROUND_SUPPORTED:
case PIPE_SHADER_CAP_DFRACEXP_DLDEXP_SUPPORTED:
- case PIPE_SHADER_CAP_LDEXP_SUPPORTED:
case PIPE_SHADER_CAP_TGSI_ANY_INOUT_DECL_RANGE:
case PIPE_SHADER_CAP_TGSI_SQRT_SUPPORTED:
case PIPE_SHADER_CAP_MAX_HW_ATOMIC_COUNTERS:
.lower_ldexp = true,
.lower_fneg = true,
.lower_ineg = true,
.lower_rotate = true,
.lower_to_scalar = true,
.lower_umax = true,
case PIPE_SHADER_CAP_GLSL_16BIT_CONSTS:
case PIPE_SHADER_CAP_DROUND_SUPPORTED:
case PIPE_SHADER_CAP_DFRACEXP_DLDEXP_SUPPORTED:
- case PIPE_SHADER_CAP_LDEXP_SUPPORTED:
case PIPE_SHADER_CAP_TGSI_ANY_INOUT_DECL_RANGE:
return 0;
case PIPE_SHADER_CAP_MAX_TEXTURE_SAMPLERS:
}
screen->compiler_options.lower_ffma32 = true;
screen->compiler_options.fuse_ffma32 = false;
+ screen->compiler_options.lower_ldexp = true;
screen->compiler_options.lower_image_offset_to_range_base = true;
screen->compiler_options.lower_atomic_offset_to_range_base = true;
ZINK_MAX_SHADER_IMAGES);
return 0;
- case PIPE_SHADER_CAP_LDEXP_SUPPORTED:
- return 1;
-
case PIPE_SHADER_CAP_MAX_HW_ATOMIC_COUNTERS:
case PIPE_SHADER_CAP_MAX_HW_ATOMIC_COUNTER_BUFFERS:
return 0; /* not implemented */
PIPE_SHADER_CAP_MAX_SHADER_BUFFERS,
PIPE_SHADER_CAP_SUPPORTED_IRS,
PIPE_SHADER_CAP_MAX_SHADER_IMAGES,
- PIPE_SHADER_CAP_LDEXP_SUPPORTED,
PIPE_SHADER_CAP_MAX_HW_ATOMIC_COUNTERS,
PIPE_SHADER_CAP_MAX_HW_ATOMIC_COUNTER_BUFFERS,
};
PIPE_SHADER_CAP_DROUND_SUPPORTED);
bool have_dfrexp = pscreen->get_shader_param(pscreen, ptarget,
PIPE_SHADER_CAP_DFRACEXP_DLDEXP_SUPPORTED);
- bool have_ldexp = pscreen->get_shader_param(pscreen, ptarget,
- PIPE_SHADER_CAP_LDEXP_SUPPORTED);
if (!pscreen->get_param(pscreen, PIPE_CAP_INT64_DIVMOD))
lower_64bit_integer_instructions(ir, DIV64 | MOD64);
lower_blend_equation_advanced(
shader, ctx->Extensions.KHR_blend_equation_advanced_coherent);
- lower_instructions(ir, have_ldexp, have_dfrexp, have_dround,
+ lower_instructions(ir, have_dfrexp, have_dround,
ctx->Extensions.ARB_gpu_shader5);
do_vec_index_to_cond_assign(ir);
.lower_extract_word = true,
.lower_insert_byte = true,
.lower_insert_word = true,
+ .lower_ldexp = true,
.lower_rotate = true,
.lower_pack_half_2x16 = true,