+2004-01-01 Jan Hubicka <jh@suse.cz>
+
+ * expmed.c (store_bit_field, extract_bit_field): Use new named patterns.
+ * expr.c (store_constructor): Use vec_init pattern.
+ * genopinit.c (optabs): Initialize vec_set/vec_extract/vec_init.
+ * optabs.h (optab_index): Add OTI_vec_set/OTI_vec_extract/OTI_vec_init.
+ (vec_set_optab, vec_extract_optab, vec_init_optab): New.
+ * i386.md (vec_setv2df, vec_extractv2df, vec_setv4sf, vec_extractv4sf):
+ New patterns.
+ (sse2_unpc?pd): Fix pattern.
+ (sse2_movlpd): Kill.
+ (sse2_movsd): Deal with movlpd too.
+ * i386.c (ix86_expand_builtin): Use sse2_movsd instead of sse2_movlpd.
+ (ix86_expand_vector_init): New.
+ * emmintrin.h (_mm_set_pd, _mm_set_ps): Use vector extensions.
+ * md.texi (vec_set, vec_extract): Document.
+
2003-12-31 Jan Hubicka <jh@suse.cz>
PR opt/13473
static __inline __m128d
_mm_set_pd (double __Z, double __Y)
{
- union {
- double __a[2];
- __m128d __v;
- } __u;
-
- __u.__a[0] = __Y;
- __u.__a[1] = __Z;
-
- return __u.__v;
+ return (__v2df) {__Y, __Z};
}
/* Create the vector [Y Z]. */
extern rtx ix86_tls_get_addr (void);
extern bool ix86_must_pass_in_stack (enum machine_mode mode, tree);
+extern void ix86_expand_vector_init (rtx, rtx);
/* In winnt.c */
extern int i386_pe_dllexport_name_p (const char *);
extern int i386_pe_dllimport_name_p (const char *);
icode = (fcode == IX86_BUILTIN_LOADHPS ? CODE_FOR_sse_movhps
: fcode == IX86_BUILTIN_LOADLPS ? CODE_FOR_sse_movlps
: fcode == IX86_BUILTIN_LOADHPD ? CODE_FOR_sse2_movhpd
- : CODE_FOR_sse2_movlpd);
+ : CODE_FOR_sse2_movsd);
arg0 = TREE_VALUE (arglist);
arg1 = TREE_VALUE (TREE_CHAIN (arglist));
op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
icode = (fcode == IX86_BUILTIN_STOREHPS ? CODE_FOR_sse_movhps
: fcode == IX86_BUILTIN_STORELPS ? CODE_FOR_sse_movlps
: fcode == IX86_BUILTIN_STOREHPD ? CODE_FOR_sse2_movhpd
- : CODE_FOR_sse2_movlpd);
+ : CODE_FOR_sse2_movsd);
arg0 = TREE_VALUE (arglist);
arg1 = TREE_VALUE (TREE_CHAIN (arglist));
op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
return (!TARGET_64BIT && type && mode == TImode);
}
+/* Initialize vector TARGET via VALS. */
+void
+ix86_expand_vector_init (rtx target, rtx vals)
+{
+ enum machine_mode mode = GET_MODE (target);
+ int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
+ int n_elts = (GET_MODE_SIZE (mode) / elt_size);
+ int i;
+
+ /* Find the highest index whose element is not a constant; I ends at
+    -1 when every element is a CONST_INT or CONST_DOUBLE. */
+ for (i = n_elts - 1; i >= 0; i--)
+ if (GET_CODE (XVECEXP (vals, 0, i)) != CONST_INT
+ && GET_CODE (XVECEXP (vals, 0, i)) != CONST_DOUBLE)
+ break;
+
+ /* Few special cases first...
+ ... constants are best loaded from constant pool. */
+ if (i < 0)
+ {
+ emit_move_insn (target, gen_rtx_CONST_VECTOR (mode, XVEC (vals, 0)));
+ return;
+ }
+
+ /* ... values where only first field is non-constant are best loaded
+ from the pool and overwritten via move later. */
+ if (!i)
+ {
+ rtx op = simplify_gen_subreg (mode, XVECEXP (vals, 0, 0),
+ GET_MODE_INNER (mode), 0);
+
+ op = force_reg (mode, op);
+ /* Replace the variable element by zero, load the now-constant
+    vector from the pool, then merge the scalar into element 0. */
+ XVECEXP (vals, 0, 0) = CONST0_RTX (GET_MODE_INNER (mode));
+ emit_move_insn (target, gen_rtx_CONST_VECTOR (mode, XVEC (vals, 0)));
+ switch (GET_MODE (target))
+ {
+ case V2DFmode:
+ emit_insn (gen_sse2_movsd (target, target, op));
+ break;
+ case V4SFmode:
+ emit_insn (gen_sse_movss (target, target, op));
+ break;
+ default:
+ break;
+ }
+ return;
+ }
+
+ /* And the busy sequence doing rotations. */
+ switch (GET_MODE (target))
+ {
+ case V2DFmode:
+ {
+ rtx vecop0 =
+ simplify_gen_subreg (V2DFmode, XVECEXP (vals, 0, 0), DFmode, 0);
+ rtx vecop1 =
+ simplify_gen_subreg (V2DFmode, XVECEXP (vals, 0, 1), DFmode, 0);
+
+ vecop0 = force_reg (V2DFmode, vecop0);
+ vecop1 = force_reg (V2DFmode, vecop1);
+ /* unpcklpd selects element 0 of each input, giving {elt0, elt1}. */
+ emit_insn (gen_sse2_unpcklpd (target, vecop0, vecop1));
+ }
+ break;
+ case V4SFmode:
+ {
+ rtx vecop0 =
+ simplify_gen_subreg (V4SFmode, XVECEXP (vals, 0, 0), SFmode, 0);
+ rtx vecop1 =
+ simplify_gen_subreg (V4SFmode, XVECEXP (vals, 0, 1), SFmode, 0);
+ rtx vecop2 =
+ simplify_gen_subreg (V4SFmode, XVECEXP (vals, 0, 2), SFmode, 0);
+ rtx vecop3 =
+ simplify_gen_subreg (V4SFmode, XVECEXP (vals, 0, 3), SFmode, 0);
+ rtx tmp1 = gen_reg_rtx (V4SFmode);
+ rtx tmp2 = gen_reg_rtx (V4SFmode);
+
+ vecop0 = force_reg (V4SFmode, vecop0);
+ vecop1 = force_reg (V4SFmode, vecop1);
+ vecop2 = force_reg (V4SFmode, vecop2);
+ vecop3 = force_reg (V4SFmode, vecop3);
+ /* Interleave pairs, then interleave the interleavings, so the
+    four scalars land in elements 0..3 -- NOTE(review): lane order
+    to be confirmed against the sse_unpcklps pattern. */
+ emit_insn (gen_sse_unpcklps (tmp1, vecop1, vecop3));
+ emit_insn (gen_sse_unpcklps (tmp2, vecop0, vecop2));
+ emit_insn (gen_sse_unpcklps (target, tmp2, tmp1));
+ }
+ break;
+ default:
+ abort ();
+ }
+}
+
#include "gt-i386.h"
"TARGET_SSE2 && TARGET_SSE_MATH && TARGET_64BIT"
"x86_emit_floatuns (operands); DONE;")
\f
+;; SSE extract/set expanders
+
+;; Insert DFmode scalar operand 1 into element INTVAL (operands[2]) of
+;; V2DF operand 0.
+(define_expand "vec_setv2df"
+ [(match_operand:V2DF 0 "register_operand" "")
+ (match_operand:DF 1 "register_operand" "")
+ (match_operand 2 "const_int_operand" "")]
+ "TARGET_SSE2"
+{
+ switch (INTVAL (operands[2]))
+ {
+ case 0:
+ /* movsd merges the scalar into the low element.  */
+ emit_insn (gen_sse2_movsd (operands[0], operands[0],
+ simplify_gen_subreg (V2DFmode, operands[1],
+ DFmode, 0)));
+ break;
+ case 1:
+ {
+ rtx op1 = simplify_gen_subreg (V2DFmode, operands[1], DFmode, 0);
+
+ /* unpcklpd keeps element 0 of operand 0 and puts element 0 of
+    OP1 (the scalar) into element 1.  */
+ emit_insn (gen_sse2_unpcklpd (operands[0], operands[0], op1));
+ }
+ break;
+ default:
+ abort ();
+ }
+ DONE;
+})
+
+;; Extract element INTVAL (operands[2]) of V2DF operand 1 into DFmode
+;; operand 0.
+(define_expand "vec_extractv2df"
+ [(match_operand:DF 0 "register_operand" "")
+ (match_operand:V2DF 1 "register_operand" "")
+ (match_operand 2 "const_int_operand" "")]
+ "TARGET_SSE2"
+{
+ switch (INTVAL (operands[2]))
+ {
+ case 0:
+ /* The low element is just the low part of the vector register.  */
+ emit_move_insn (operands[0], gen_lowpart (DFmode, operands[1]));
+ break;
+ case 1:
+ {
+ rtx dest = simplify_gen_subreg (V2DFmode, operands[0], DFmode, 0);
+
+ /* unpckhpd with both inputs equal moves element 1 into element 0
+    of DEST, i.e. into operand 0.  */
+ emit_insn (gen_sse2_unpckhpd (dest, operands[1], operands[1]));
+ }
+ break;
+ default:
+ abort ();
+ }
+ DONE;
+})
+
+;; Initialize V2DF operand 0 from the PARALLEL of element values in
+;; operand 1.
+(define_expand "vec_initv2df"
+ [(match_operand:V2DF 0 "register_operand" "")
+ (match_operand 1 "" "")]
+ "TARGET_SSE2"
+{
+ ix86_expand_vector_init (operands[0], operands[1]);
+ DONE;
+})
+
+;; Insert SFmode scalar operand 1 into element INTVAL (operands[2]) of
+;; V4SF operand 0.
+(define_expand "vec_setv4sf"
+ [(match_operand:V4SF 0 "register_operand" "")
+ (match_operand:SF 1 "register_operand" "")
+ (match_operand 2 "const_int_operand" "")]
+ "TARGET_SSE"
+{
+ switch (INTVAL (operands[2]))
+ {
+ case 0:
+ /* Element 0 can be set directly via movss.  */
+ emit_insn (gen_sse_movss (operands[0], operands[0],
+ simplify_gen_subreg (V4SFmode, operands[1],
+ SFmode, 0)));
+ break;
+ case 1:
+ {
+ rtx op1 = simplify_gen_subreg (V4SFmode, operands[1], SFmode, 0);
+ rtx tmp = gen_reg_rtx (V4SFmode);
+
+ emit_move_insn (tmp, operands[0]);
+ emit_insn (gen_sse_unpcklps (operands[0], operands[0], operands[0]));
+ emit_insn (gen_sse_movss (operands[0], operands[0], op1));
+ emit_insn (gen_sse_shufps (operands[0], operands[0], tmp,
+ GEN_INT (1 + (0<<2) + (2<<4) + (3<<6))));
+ }
+ break;
+ case 2:
+ {
+ rtx op1 = simplify_gen_subreg (V4SFmode, operands[1], SFmode, 0);
+ rtx tmp = gen_reg_rtx (V4SFmode);
+
+ emit_move_insn (tmp, operands[0]);
+ emit_insn (gen_sse_movss (tmp, tmp, op1));
+ emit_insn (gen_sse_shufps (operands[0], operands[0], tmp,
+ GEN_INT (0 + (1<<2) + (0<<4) + (3<<6))));
+ }
+ break;
+ case 3:
+ {
+ rtx op1 = simplify_gen_subreg (V4SFmode, operands[1], SFmode, 0);
+ rtx tmp = gen_reg_rtx (V4SFmode);
+
+ emit_move_insn (tmp, operands[0]);
+ emit_insn (gen_sse_movss (tmp, tmp, op1));
+ emit_insn (gen_sse_shufps (operands[0], operands[0], tmp,
+ GEN_INT (0 + (1<<2) + (2<<4) + (0<<6))));
+ }
+ break;
+ default:
+ abort ();
+ }
+ DONE;
+})
+
+;; Extract element INTVAL (operands[2]) of V4SF operand 1 into SFmode
+;; operand 0.
+(define_expand "vec_extractv4sf"
+ [(match_operand:SF 0 "register_operand" "")
+ (match_operand:V4SF 1 "register_operand" "")
+ (match_operand 2 "const_int_operand" "")]
+ "TARGET_SSE"
+{
+ switch (INTVAL (operands[2]))
+ {
+ case 0:
+ /* The low element is just the low part of the vector register.  */
+ emit_move_insn (operands[0], gen_lowpart (SFmode, operands[1]));
+ break;
+ case 1:
+ {
+ rtx op0 = simplify_gen_subreg (V4SFmode, operands[0], SFmode, 0);
+ rtx tmp = gen_reg_rtx (V4SFmode);
+
+ emit_move_insn (tmp, operands[1]);
+ emit_insn (gen_sse_shufps (op0, tmp, tmp,
+ GEN_INT (1)));
+ }
+ break;
+ case 2:
+ {
+ rtx op0 = simplify_gen_subreg (V4SFmode, operands[0], SFmode, 0);
+ rtx tmp = gen_reg_rtx (V4SFmode);
+
+ emit_move_insn (tmp, operands[1]);
+ emit_insn (gen_sse_unpckhps (op0, tmp, tmp));
+ }
+ break;
+ case 3:
+ {
+ rtx op0 = simplify_gen_subreg (V4SFmode, operands[0], SFmode, 0);
+ rtx tmp = gen_reg_rtx (V4SFmode);
+
+ emit_move_insn (tmp, operands[1]);
+ emit_insn (gen_sse_shufps (op0, tmp, tmp,
+ GEN_INT (3)));
+ }
+ break;
+ default:
+ abort ();
+ }
+ DONE;
+})
+
+;; Initialize V4SF operand 0 from the PARALLEL of element values in
+;; operand 1.
+(define_expand "vec_initv4sf"
+ [(match_operand:V4SF 0 "register_operand" "")
+ (match_operand 1 "" "")]
+ "TARGET_SSE"
+{
+ ix86_expand_vector_init (operands[0], operands[1]);
+ DONE;
+})
+\f
;; Add instructions
;; %%% splits for addsidi3
(vec_select:DF (match_operand:V2DF 1 "register_operand" "0")
(parallel [(const_int 1)]))
(vec_select:DF (match_operand:V2DF 2 "register_operand" "x")
- (parallel [(const_int 0)]))))]
+ (parallel [(const_int 1)]))))]
"TARGET_SSE2"
"unpckhpd\t{%2, %0|%0, %2}"
[(set_attr "type" "ssecvt")
- (set_attr "mode" "TI")])
+ (set_attr "mode" "V2DF")])
(define_insn "sse2_unpcklpd"
[(set (match_operand:V2DF 0 "register_operand" "=x")
(vec_select:DF (match_operand:V2DF 1 "register_operand" "0")
(parallel [(const_int 0)]))
(vec_select:DF (match_operand:V2DF 2 "register_operand" "x")
- (parallel [(const_int 1)]))))]
+ (parallel [(const_int 0)]))))]
"TARGET_SSE2"
"unpcklpd\t{%2, %0|%0, %2}"
[(set_attr "type" "ssecvt")
- (set_attr "mode" "TI")])
+ (set_attr "mode" "V2DF")])
;; MMX pack/unpack insns.
[(set_attr "type" "ssecvt")
(set_attr "mode" "V2DF")])
-(define_insn "sse2_movlpd"
- [(set (match_operand:V2DF 0 "nonimmediate_operand" "=x,m")
- (vec_merge:V2DF
- (match_operand:V2DF 1 "nonimmediate_operand" "0,0")
- (match_operand:V2DF 2 "nonimmediate_operand" "m,x")
- (const_int 1)))]
- "TARGET_SSE2 && (GET_CODE (operands[1]) == MEM || GET_CODE (operands[2]) == MEM)"
- "movlpd\t{%2, %0|%0, %2}"
- [(set_attr "type" "ssecvt")
- (set_attr "mode" "V2DF")])
-
(define_expand "sse2_loadsd"
[(match_operand:V2DF 0 "register_operand" "")
(match_operand:DF 1 "memory_operand" "")]
(set_attr "mode" "DF")])
(define_insn "sse2_movsd"
- [(set (match_operand:V2DF 0 "register_operand" "=x")
+ [(set (match_operand:V2DF 0 "nonimmediate_operand" "=x,x,m")
(vec_merge:V2DF
- (match_operand:V2DF 1 "register_operand" "0")
- (match_operand:V2DF 2 "register_operand" "x")
+ (match_operand:V2DF 1 "nonimmediate_operand" "0,0,0")
+ (match_operand:V2DF 2 "nonimmediate_operand" "x,m,x")
(const_int 1)))]
- "TARGET_SSE2"
- "movsd\t{%2, %0|%0, %2}"
+ "TARGET_SSE2 && ix86_binary_operator_ok (UNKNOWN, V2DFmode, operands)"
+ "@
+ movsd\t{%2, %0|%0, %2}
+ movlpd\t{%2, %0|%0, %2}
+ movlpd\t{%2, %0|%0, %2}"
[(set_attr "type" "ssecvt")
- (set_attr "mode" "DF")])
+ (set_attr "mode" "DF,V2DF,V2DF")])
(define_insn "sse2_storesd"
[(set (match_operand:DF 0 "memory_operand" "=m")
/* Internal data types for implementing the intrinsics. */
typedef int __v4sf __attribute__ ((__mode__(__V4SF__)));
typedef int __v4si __attribute__ ((__mode__(__V4SI__)));
/* Create a selector for use with the SHUFPS instruction. */
#define _MM_SHUFFLE(fp3,fp2,fp1,fp0) \
/* Create the vector [Z Y X W]. */
static __inline __m128
-_mm_set_ps (float __Z, float __Y, float __X, float __W)
+_mm_set_ps (const float __Z, const float __Y, const float __X, const float __W)
{
- union {
- float __a[4];
- __m128 __v;
- } __u;
-
- __u.__a[0] = __W;
- __u.__a[1] = __X;
- __u.__a[2] = __Y;
- __u.__a[3] = __Z;
-
- return __u.__v;
+ return (__v4sf) {__W, __X, __Y, __Z};
}
/* Create the vector [W X Y Z]. */
value = protect_from_queue (value, 0);
+ /* Use vec_set patterns for inserting parts of vectors whenever
+ available. */
+ if (VECTOR_MODE_P (GET_MODE (op0))
+ && GET_CODE (op0) != MEM
+ && (vec_set_optab->handlers[(int)GET_MODE (op0)].insn_code
+ != CODE_FOR_nothing)
+ && fieldmode == GET_MODE_INNER (GET_MODE (op0))
+ && bitsize == GET_MODE_BITSIZE (GET_MODE_INNER (GET_MODE (op0)))
+ && !(bitnum % GET_MODE_BITSIZE (GET_MODE_INNER (GET_MODE (op0)))))
+ {
+ enum machine_mode outermode = GET_MODE (op0);
+ enum machine_mode innermode = GET_MODE_INNER (outermode);
+ int icode = (int) vec_set_optab->handlers[(int) outermode].insn_code;
+ int pos = bitnum / GET_MODE_BITSIZE (innermode);
+ rtx rtxpos = GEN_INT (pos);
+ rtx src = value;
+ rtx dest = op0;
+ rtx pat, seq;
+ enum machine_mode mode0 = insn_data[icode].operand[0].mode;
+ enum machine_mode mode1 = insn_data[icode].operand[1].mode;
+ enum machine_mode mode2 = insn_data[icode].operand[2].mode;
+
+ start_sequence ();
+
+ if (! (*insn_data[icode].operand[1].predicate) (src, mode1))
+ src = copy_to_mode_reg (mode1, src);
+
+ /* The position operand must be copied in its own mode (MODE2),
+ not the source operand's mode. */
+ if (! (*insn_data[icode].operand[2].predicate) (rtxpos, mode2))
+ rtxpos = copy_to_mode_reg (mode2, rtxpos);
+
+ /* We could handle this, but we should always be called with a pseudo
+ for our targets and all insns should take them as outputs. */
+ if (! (*insn_data[icode].operand[0].predicate) (dest, mode0)
+ || ! (*insn_data[icode].operand[1].predicate) (src, mode1)
+ || ! (*insn_data[icode].operand[2].predicate) (rtxpos, mode2))
+ abort ();
+ pat = GEN_FCN (icode) (dest, src, rtxpos);
+ seq = get_insns ();
+ end_sequence ();
+ if (pat)
+ {
+ emit_insn (seq);
+ emit_insn (pat);
+ return dest;
+ }
+ }
+
if (flag_force_mem)
{
int old_generating_concat_p = generating_concat_p;
return op0;
}
+ /* Use vec_extract patterns for extracting parts of vectors whenever
+ available. */
+ if (VECTOR_MODE_P (GET_MODE (op0))
+ && GET_CODE (op0) != MEM
+ && (vec_extract_optab->handlers[(int)GET_MODE (op0)].insn_code
+ != CODE_FOR_nothing)
+ && ((bitsize + bitnum) / GET_MODE_BITSIZE (GET_MODE_INNER (GET_MODE (op0)))
+ == bitsize / GET_MODE_BITSIZE (GET_MODE_INNER (GET_MODE (op0)))))
+ {
+ enum machine_mode outermode = GET_MODE (op0);
+ enum machine_mode innermode = GET_MODE_INNER (outermode);
+ int icode = (int) vec_extract_optab->handlers[(int) outermode].insn_code;
+ int pos = bitnum / GET_MODE_BITSIZE (innermode);
+ rtx rtxpos = GEN_INT (pos);
+ rtx src = op0;
+ rtx dest = NULL, pat, seq;
+ enum machine_mode mode0 = insn_data[icode].operand[0].mode;
+ enum machine_mode mode1 = insn_data[icode].operand[1].mode;
+ enum machine_mode mode2 = insn_data[icode].operand[2].mode;
+
+ if (innermode == tmode || innermode == mode)
+ dest = target;
+
+ if (!dest)
+ dest = gen_reg_rtx (innermode);
+
+ start_sequence ();
+
+ if (! (*insn_data[icode].operand[0].predicate) (dest, mode0))
+ dest = copy_to_mode_reg (mode0, dest);
+
+ if (! (*insn_data[icode].operand[1].predicate) (src, mode1))
+ src = copy_to_mode_reg (mode1, src);
+
+ /* The position operand must be copied in its own mode (MODE2),
+ not the source operand's mode. */
+ if (! (*insn_data[icode].operand[2].predicate) (rtxpos, mode2))
+ rtxpos = copy_to_mode_reg (mode2, rtxpos);
+
+ /* We could handle this, but we should always be called with a pseudo
+ for our targets and all insns should take them as outputs. */
+ if (! (*insn_data[icode].operand[0].predicate) (dest, mode0)
+ || ! (*insn_data[icode].operand[1].predicate) (src, mode1)
+ || ! (*insn_data[icode].operand[2].predicate) (rtxpos, mode2))
+ abort ();
+ pat = GEN_FCN (icode) (dest, src, rtxpos);
+ seq = get_insns ();
+ end_sequence ();
+ if (pat)
+ {
+ emit_insn (seq);
+ emit_insn (pat);
+ return extract_bit_field (dest, bitsize,
+ bitnum - pos * GET_MODE_BITSIZE (innermode),
+ unsignedp, target, mode, tmode, total_size);
+ }
+ }
+
/* Make sure we are playing with integral modes. Pun with subregs
if we aren't. */
{
int const_bounds_p;
HOST_WIDE_INT minelt = 0;
HOST_WIDE_INT maxelt = 0;
+ int icode = 0;
+ rtx *vector = NULL;
+ int elt_size = 0;
+ unsigned n_elts = 0;
/* Vectors are like arrays, but the domain is stored via an array
type indirectly. */
it always will. */
domain = TYPE_DEBUG_REPRESENTATION_TYPE (type);
domain = TYPE_DOMAIN (TREE_TYPE (TYPE_FIELDS (domain)));
+ if (REG_P (target) && VECTOR_MODE_P (GET_MODE (target)))
+ {
+ enum machine_mode mode = GET_MODE (target);
+
+ icode = (int) vec_init_optab->handlers[mode].insn_code;
+ if (icode != CODE_FOR_nothing)
+ {
+ unsigned int i;
+
+ elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
+ n_elts = (GET_MODE_SIZE (mode) / elt_size);
+ /* VECTOR holds one rtx per element; allocate accordingly, not
+ one byte per element. */
+ vector = alloca (n_elts * sizeof (rtx));
+ for (i = 0; i < n_elts; i++)
+ vector [i] = CONST0_RTX (GET_MODE_INNER (mode));
+ }
+ }
}
const_bounds_p = (TYPE_MIN_VALUE (domain)
need_to_clear = 1;
}
- if (need_to_clear && size > 0)
+ if (need_to_clear && size > 0 && !vector)
{
if (! cleared)
{
HOST_WIDE_INT lo, hi, count;
tree position;
+ if (vector)
+ abort ();
+
/* If the range is constant and "small", unroll the loop. */
if (const_bounds_p
&& host_integerp (lo_index, 0)
{
tree position;
+ if (vector)
+ abort ();
+
if (index == 0)
index = ssize_int (1);
xtarget = adjust_address (xtarget, mode, 0);
store_expr (value, xtarget, 0);
}
+ else if (vector)
+ {
+ int pos;
+
+ if (index != 0)
+ pos = tree_low_cst (index, 0) - minelt;
+ else
+ pos = i;
+ vector[pos] = expand_expr (value, NULL_RTX, VOIDmode, 0);
+ }
else
{
if (index != 0)
target = copy_rtx (target);
MEM_KEEP_ALIAS_SET_P (target) = 1;
}
-
- store_constructor_field (target, bitsize, bitpos, mode, value,
- type, cleared, get_alias_set (elttype));
-
+ else
+ store_constructor_field (target, bitsize, bitpos, mode, value,
+ type, cleared, get_alias_set (elttype));
}
}
+ if (vector)
+ {
+ emit_insn (GEN_FCN (icode) (target,
+ gen_rtx_PARALLEL (GET_MODE (target),
+ gen_rtvec_v (n_elts, vector))));
+ }
}
/* Set constructor assignments. */
"movstr_optab[$A] = CODE_FOR_$(movstr$a$)",
"clrstr_optab[$A] = CODE_FOR_$(clrstr$a$)",
"cmpstr_optab[$A] = CODE_FOR_$(cmpstr$a$)",
- "cmpmem_optab[$A] = CODE_FOR_$(cmpmem$a$)" };
+ "cmpmem_optab[$A] = CODE_FOR_$(cmpmem$a$)",
+ "vec_set_optab->handlers[$A].insn_code = CODE_FOR_$(vec_set$a$)",
+ "vec_extract_optab->handlers[$A].insn_code = CODE_FOR_$(vec_extract$a$)",
+ "vec_init_optab->handlers[$A].insn_code = CODE_FOR_$(vec_init$a$)" };
static void gen_insn (rtx);
cstore_optab = init_optab (UNKNOWN);
push_optab = init_optab (UNKNOWN);
+ vec_extract_optab = init_optab (UNKNOWN);
+ vec_set_optab = init_optab (UNKNOWN);
+ vec_init_optab = init_optab (UNKNOWN);
/* Conversions. */
sext_optab = init_convert_optab (SIGN_EXTEND);
zext_optab = init_convert_optab (ZERO_EXTEND);
/* Conditional add instruction. */
OTI_addcc,
+ /* Set specified field of vector operand. */
+ OTI_vec_set,
+ /* Extract specified field of vector operand. */
+ OTI_vec_extract,
+ /* Initialize vector operand. */
+ OTI_vec_init,
+
OTI_MAX
};
#define push_optab (optab_table[OTI_push])
#define addcc_optab (optab_table[OTI_addcc])
+#define vec_set_optab (optab_table[OTI_vec_set])
+#define vec_extract_optab (optab_table[OTI_vec_extract])
+#define vec_init_optab (optab_table[OTI_vec_init])
+
/* Conversion optabs have their own table and indexes. */
enum convert_optab_index
{