[(set_attr "type" "insert")])
; There are also some forms without one of the ANDs.
+; Drop the leading "*" from the name: a named pattern gets a generator
+; function (gen_rotl<mode>3_insert_3), which the V4SI-init expander
+; below now calls directly to emit this insert-type insn.
-(define_insn "*rotl<mode>3_insert_3"
+(define_insn "rotl<mode>3_insert_3"
[(set (match_operand:GPR 0 "gpc_reg_operand" "=r")
(ior:GPR (and:GPR (match_operand:GPR 3 "gpc_reg_operand" "0")
(match_operand:GPR 4 "const_int_operand" "n"))
}
[(set_attr "type" "insert")])
+; plus, ior and xor of a left-shifted value with a value whose nonzero
+; bits all lie below the shift count compute the same result, so one
+; iterator lets a single split handle all three source opcodes.
+(define_code_iterator plus_ior_xor [plus ior xor])
+
+; Split (x << N) {+,|,^} y into (y & ((1 << N) - 1)) | (x << N) — the
+; shape of the rotl<mode>3_insert_3 pattern — whenever nonzero_bits
+; proves every set bit of y lies below bit N.  That condition both
+; makes the three opcodes interchangeable and makes the added AND mask
+; a no-op on y's value.
+(define_split
+ [(set (match_operand:GPR 0 "gpc_reg_operand")
+ (plus_ior_xor:GPR (ashift:GPR (match_operand:GPR 1 "gpc_reg_operand")
+ (match_operand:SI 2 "const_int_operand"))
+ (match_operand:GPR 3 "gpc_reg_operand")))]
+ "nonzero_bits (operands[3], <MODE>mode)
+ < HOST_WIDE_INT_1U << INTVAL (operands[2])"
+ [(set (match_dup 0)
+ (ior:GPR (and:GPR (match_dup 3)
+ (match_dup 4))
+ (ashift:GPR (match_dup 1)
+ (match_dup 2))))]
+{
+ /* operands[4] is the mask covering the low INTVAL (operands[2])
+    bits, i.e. exactly the bits y may occupy.  */
+ operands[4] = GEN_INT ((HOST_WIDE_INT_1U << INTVAL (operands[2])) - 1);
+})
+
(define_insn "*rotl<mode>3_insert_4"
[(set (match_operand:GPR 0 "gpc_reg_operand" "=r")
(ior:GPR (and:GPR (match_operand:GPR 3 "gpc_reg_operand" "0")
(use (match_operand:SI 4 "gpc_reg_operand"))]
"VECTOR_MEM_VSX_P (V4SImode) && TARGET_DIRECT_MOVE_64BIT"
{
- rtx a = gen_reg_rtx (DImode);
- rtx b = gen_reg_rtx (DImode);
- rtx c = gen_reg_rtx (DImode);
- rtx d = gen_reg_rtx (DImode);
- emit_insn (gen_zero_extendsidi2 (a, operands[1]));
- emit_insn (gen_zero_extendsidi2 (b, operands[2]));
- emit_insn (gen_zero_extendsidi2 (c, operands[3]));
- emit_insn (gen_zero_extendsidi2 (d, operands[4]));
+ /* Read each SImode input as the low half of a DImode value via a
+    paradoxical lowpart subreg instead of an explicit zero-extension.
+    The undefined upper 32 bits are harmless: the insert emitted below
+    either masks them off (0xffffffff) or shifts them out (<< 32).  */
+ rtx a = gen_lowpart_SUBREG (DImode, operands[1]);
+ rtx b = gen_lowpart_SUBREG (DImode, operands[2]);
+ rtx c = gen_lowpart_SUBREG (DImode, operands[3]);
+ rtx d = gen_lowpart_SUBREG (DImode, operands[4]);
if (!BYTES_BIG_ENDIAN)
{
std::swap (a, b);
std::swap (c, d);
}
- rtx aa = gen_reg_rtx (DImode);
rtx ab = gen_reg_rtx (DImode);
- rtx cc = gen_reg_rtx (DImode);
rtx cd = gen_reg_rtx (DImode);
- emit_insn (gen_ashldi3 (aa, a, GEN_INT (32)));
- emit_insn (gen_ashldi3 (cc, c, GEN_INT (32)));
- emit_insn (gen_iordi3 (ab, aa, b));
- emit_insn (gen_iordi3 (cd, cc, d));
+ /* Form ab = (a << 32) | b and cd = (c << 32) | d, each as a single
+    insert insn through the now-named rotldi3_insert_3 pattern, in
+    place of the previous separate shift (ashldi3) + or (iordi3) pair
+    per doubleword.  */
+ emit_insn (gen_rotldi3_insert_3 (ab, a, GEN_INT (32), b,
+ GEN_INT (0xffffffff)));
+ emit_insn (gen_rotldi3_insert_3 (cd, c, GEN_INT (32), d,
+ GEN_INT (0xffffffff)));
rtx abcd = gen_reg_rtx (V2DImode);
emit_insn (gen_vsx_concat_v2di (abcd, ab, cd));
--- /dev/null
+/* { dg-do compile } */
+/* { dg-require-effective-target lp64 } */
+/* { dg-require-effective-target powerpc_p8vector_ok } */
+/* { dg-options "-O2 -mdejagnu-cpu=power8" } */
+
+/* Check that we can optimize sldi + or to rldimi for vector int init. */
+
+vector unsigned int
+testu (unsigned int i1, unsigned int i2, unsigned int i3, unsigned int i4)
+{
+ vector unsigned int v = {i1, i2, i3, i4};
+ return v;
+}
+
+vector signed int
+tests (signed int i1, signed int i2, signed int i3, signed int i4)
+{
+ vector signed int v = {i1, i2, i3, i4};
+ return v;
+}
+
+/* With the new split, each function should combine its word pairs with
+   two rldimi insns and no separate shift+or sequence; two functions
+   give four rldimi in total.  */
+/* { dg-final { scan-assembler-not {\msldi\M} } } */
+/* { dg-final { scan-assembler-not {\mor\M} } } */
+/* { dg-final { scan-assembler-times {\mrldimi\M} 4 } } */