{ OPTION_MASK_ISA_AVX2, CODE_FOR_avx2_permv2ti, "__builtin_ia32_permti256", IX86_BUILTIN_VPERMTI256, UNKNOWN, (int) V4DI_FTYPE_V4DI_V4DI_INT },
{ OPTION_MASK_ISA_AVX2, CODE_FOR_avx2_extracti128, "__builtin_ia32_extract128i256", IX86_BUILTIN_VEXTRACT128I256, UNKNOWN, (int) V2DI_FTYPE_V4DI_INT },
{ OPTION_MASK_ISA_AVX2, CODE_FOR_avx2_inserti128, "__builtin_ia32_insert128i256", IX86_BUILTIN_VINSERT128I256, UNKNOWN, (int) V4DI_FTYPE_V4DI_V2DI_INT },
- { OPTION_MASK_ISA_AVX2, CODE_FOR_avx2_lshlvv4di, "__builtin_ia32_psllv4di", IX86_BUILTIN_PSLLVV4DI, UNKNOWN, (int) V4DI_FTYPE_V4DI_V4DI },
- { OPTION_MASK_ISA_AVX2, CODE_FOR_avx2_lshlvv2di, "__builtin_ia32_psllv2di", IX86_BUILTIN_PSLLVV2DI, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
- { OPTION_MASK_ISA_AVX2, CODE_FOR_avx2_lshlvv8si, "__builtin_ia32_psllv8si", IX86_BUILTIN_PSLLVV8SI, UNKNOWN, (int) V8SI_FTYPE_V8SI_V8SI },
- { OPTION_MASK_ISA_AVX2, CODE_FOR_avx2_lshlvv4si, "__builtin_ia32_psllv4si", IX86_BUILTIN_PSLLVV4SI, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
+ { OPTION_MASK_ISA_AVX2, CODE_FOR_avx2_ashlvv4di, "__builtin_ia32_psllv4di", IX86_BUILTIN_PSLLVV4DI, UNKNOWN, (int) V4DI_FTYPE_V4DI_V4DI },
+ { OPTION_MASK_ISA_AVX2, CODE_FOR_avx2_ashlvv2di, "__builtin_ia32_psllv2di", IX86_BUILTIN_PSLLVV2DI, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
+ { OPTION_MASK_ISA_AVX2, CODE_FOR_avx2_ashlvv8si, "__builtin_ia32_psllv8si", IX86_BUILTIN_PSLLVV8SI, UNKNOWN, (int) V8SI_FTYPE_V8SI_V8SI },
+ { OPTION_MASK_ISA_AVX2, CODE_FOR_avx2_ashlvv4si, "__builtin_ia32_psllv4si", IX86_BUILTIN_PSLLVV4SI, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
{ OPTION_MASK_ISA_AVX2, CODE_FOR_avx2_ashrvv8si, "__builtin_ia32_psrav8si", IX86_BUILTIN_PSRAVV8SI, UNKNOWN, (int) V8SI_FTYPE_V8SI_V8SI },
{ OPTION_MASK_ISA_AVX2, CODE_FOR_avx2_ashrvv4si, "__builtin_ia32_psrav4si", IX86_BUILTIN_PSRAVV4SI, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
{ OPTION_MASK_ISA_AVX2, CODE_FOR_avx2_lshrvv4di, "__builtin_ia32_psrlv4di", IX86_BUILTIN_PSRLVV4DI, UNKNOWN, (int) V4DI_FTYPE_V4DI_V4DI },
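For orientation, a minimal sketch (not part of the patch) of how the renamed variable-shift builtins are normally reached from user code; the intrinsic-to-builtin mapping assumed here is the usual avx2intrin.h one, where _mm256_sllv_epi64 wraps __builtin_ia32_psllv4di. Compile with -mavx2.

/* Assumed avx2intrin.h mapping: _mm256_sllv_epi64 -> __builtin_ia32_psllv4di,
   which after this patch expands through CODE_FOR_avx2_ashlvv4di.  */
#include <immintrin.h>

__m256i
shift_lanes_left (__m256i vals, __m256i counts)
{
  /* Each 64-bit lane of VALS is shifted left by the count held in the
     corresponding lane of COUNTS (vpsllvq).  */
  return _mm256_sllv_epi64 (vals, counts);
}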
(define_code_iterator any_shiftrt [lshiftrt ashiftrt])
;; Base name for define_insn
-(define_code_attr shiftrt_insn [(lshiftrt "lshr") (ashiftrt "ashr")])
+(define_code_attr shift_insn
+ [(ashift "ashl") (lshiftrt "lshr") (ashiftrt "ashr")])
;; Base name for insn mnemonic.
-(define_code_attr shiftrt [(lshiftrt "shr") (ashiftrt "sar")])
+(define_code_attr shift [(ashift "sll") (lshiftrt "shr") (ashiftrt "sar")])
;; Mapping of rotate operators
(define_code_iterator any_rotate [rotate rotatert])
;; See comment above `ashl<mode>3' about how this works.
-(define_expand "<shiftrt_insn><mode>3"
+(define_expand "<shift_insn><mode>3"
[(set (match_operand:SDWIM 0 "<shift_operand>" "")
(any_shiftrt:SDWIM (match_operand:SDWIM 1 "<shift_operand>" "")
(match_operand:QI 2 "nonmemory_operand" "")))]
"ix86_expand_binary_operator (<CODE>, <MODE>mode, operands); DONE;")
;; Avoid useless masking of count operand.
-(define_insn_and_split "*<shiftrt_insn><mode>3_mask"
+(define_insn_and_split "*<shift_insn><mode>3_mask"
[(set (match_operand:SWI48 0 "nonimmediate_operand" "=rm")
(any_shiftrt:SWI48
(match_operand:SWI48 1 "nonimmediate_operand" "0")
[(set_attr "type" "ishift")
(set_attr "mode" "<MODE>")])
-(define_insn_and_split "*<shiftrt_insn><mode>3_doubleword"
+(define_insn_and_split "*<shift_insn><mode>3_doubleword"
[(set (match_operand:DWI 0 "register_operand" "=r")
(any_shiftrt:DWI (match_operand:DWI 1 "register_operand" "0")
(match_operand:QI 2 "nonmemory_operand" "<S>c")))
"#"
"(optimize && flag_peephole2) ? epilogue_completed : reload_completed"
[(const_int 0)]
- "ix86_split_<shiftrt_insn> (operands, NULL_RTX, <MODE>mode); DONE;"
+ "ix86_split_<shift_insn> (operands, NULL_RTX, <MODE>mode); DONE;"
[(set_attr "type" "multi")])
;; By default we don't ask for a scratch register, because when DWImode
(match_dup 3)]
"TARGET_CMOVE"
[(const_int 0)]
- "ix86_split_<shiftrt_insn> (operands, operands[3], <DWI>mode); DONE;")
+ "ix86_split_<shift_insn> (operands, operands[3], <DWI>mode); DONE;")
(define_insn "x86_64_shrd"
[(set (match_operand:DI 0 "nonimmediate_operand" "+r*m")
DONE;
})
-(define_insn "*bmi2_<shiftrt_insn><mode>3_1"
+(define_insn "*bmi2_<shift_insn><mode>3_1"
[(set (match_operand:SWI48 0 "register_operand" "=r")
(any_shiftrt:SWI48 (match_operand:SWI48 1 "nonimmediate_operand" "rm")
(match_operand:SWI48 2 "register_operand" "r")))]
"TARGET_BMI2"
- "<shiftrt>x\t{%2, %1, %0|%0, %1, %2}"
+ "<shift>x\t{%2, %1, %0|%0, %1, %2}"
[(set_attr "type" "ishiftx")
(set_attr "mode" "<MODE>")])
-(define_insn "*<shiftrt_insn><mode>3_1"
+(define_insn "*<shift_insn><mode>3_1"
[(set (match_operand:SWI48 0 "nonimmediate_operand" "=rm,r")
(any_shiftrt:SWI48
(match_operand:SWI48 1 "nonimmediate_operand" "0,rm")
default:
if (operands[2] == const1_rtx
&& (TARGET_SHIFT1 || optimize_function_for_size_p (cfun)))
- return "<shiftrt>{<imodesuffix>}\t%0";
+ return "<shift>{<imodesuffix>}\t%0";
else
- return "<shiftrt>{<imodesuffix>}\t{%2, %0|%0, %2}";
+ return "<shift>{<imodesuffix>}\t{%2, %0|%0, %2}";
}
}
[(set_attr "isa" "*,bmi2")
(any_shiftrt:SWI48 (match_dup 1) (match_dup 2)))]
"operands[2] = gen_lowpart (<MODE>mode, operands[2]);")
-(define_insn "*bmi2_<shiftrt_insn>si3_1_zext"
+(define_insn "*bmi2_<shift_insn>si3_1_zext"
[(set (match_operand:DI 0 "register_operand" "=r")
(zero_extend:DI
(any_shiftrt:SI (match_operand:SI 1 "nonimmediate_operand" "rm")
(match_operand:SI 2 "register_operand" "r"))))]
"TARGET_64BIT && TARGET_BMI2"
- "<shiftrt>x\t{%2, %1, %k0|%k0, %1, %2}"
+ "<shift>x\t{%2, %1, %k0|%k0, %1, %2}"
[(set_attr "type" "ishiftx")
(set_attr "mode" "SI")])
-(define_insn "*<shiftrt_insn>si3_1_zext"
+(define_insn "*<shift_insn>si3_1_zext"
[(set (match_operand:DI 0 "register_operand" "=r,r")
(zero_extend:DI
(any_shiftrt:SI (match_operand:SI 1 "nonimmediate_operand" "0,rm")
default:
if (operands[2] == const1_rtx
&& (TARGET_SHIFT1 || optimize_function_for_size_p (cfun)))
- return "<shiftrt>{l}\t%k0";
+ return "<shift>{l}\t%k0";
else
- return "<shiftrt>{l}\t{%2, %k0|%k0, %2}";
+ return "<shift>{l}\t{%2, %k0|%k0, %2}";
}
}
[(set_attr "isa" "*,bmi2")
(zero_extend:DI (any_shiftrt:SI (match_dup 1) (match_dup 2))))]
"operands[2] = gen_lowpart (SImode, operands[2]);")
-(define_insn "*<shiftrt_insn><mode>3_1"
+(define_insn "*<shift_insn><mode>3_1"
[(set (match_operand:SWI12 0 "nonimmediate_operand" "=<r>m")
(any_shiftrt:SWI12
(match_operand:SWI12 1 "nonimmediate_operand" "0")
{
if (operands[2] == const1_rtx
&& (TARGET_SHIFT1 || optimize_function_for_size_p (cfun)))
- return "<shiftrt>{<imodesuffix>}\t%0";
+ return "<shift>{<imodesuffix>}\t%0";
else
- return "<shiftrt>{<imodesuffix>}\t{%2, %0|%0, %2}";
+ return "<shift>{<imodesuffix>}\t{%2, %0|%0, %2}";
}
[(set_attr "type" "ishift")
(set (attr "length_immediate")
(const_string "*")))
(set_attr "mode" "<MODE>")])
-(define_insn "*<shiftrt_insn>qi3_1_slp"
+(define_insn "*<shift_insn>qi3_1_slp"
[(set (strict_low_part (match_operand:QI 0 "nonimmediate_operand" "+qm"))
(any_shiftrt:QI (match_dup 0)
(match_operand:QI 1 "nonmemory_operand" "cI")))
{
if (operands[1] == const1_rtx
&& (TARGET_SHIFT1 || optimize_function_for_size_p (cfun)))
- return "<shiftrt>{b}\t%0";
+ return "<shift>{b}\t%0";
else
- return "<shiftrt>{b}\t{%1, %0|%0, %1}";
+ return "<shift>{b}\t{%1, %0|%0, %1}";
}
[(set_attr "type" "ishift1")
(set (attr "length_immediate")
;; This pattern can't accept a variable shift count, since shifts by
;; zero don't affect the flags. We assume that shifts by constant
;; zero are optimized away.
-(define_insn "*<shiftrt_insn><mode>3_cmp"
+(define_insn "*<shift_insn><mode>3_cmp"
[(set (reg FLAGS_REG)
(compare
(any_shiftrt:SWI
{
if (operands[2] == const1_rtx
&& (TARGET_SHIFT1 || optimize_function_for_size_p (cfun)))
- return "<shiftrt>{<imodesuffix>}\t%0";
+ return "<shift>{<imodesuffix>}\t%0";
else
- return "<shiftrt>{<imodesuffix>}\t{%2, %0|%0, %2}";
+ return "<shift>{<imodesuffix>}\t{%2, %0|%0, %2}";
}
[(set_attr "type" "ishift")
(set (attr "length_immediate")
(const_string "*")))
(set_attr "mode" "<MODE>")])
-(define_insn "*<shiftrt_insn>si3_cmp_zext"
+(define_insn "*<shift_insn>si3_cmp_zext"
[(set (reg FLAGS_REG)
(compare
(any_shiftrt:SI (match_operand:SI 1 "register_operand" "0")
{
if (operands[2] == const1_rtx
&& (TARGET_SHIFT1 || optimize_function_for_size_p (cfun)))
- return "<shiftrt>{l}\t%k0";
+ return "<shift>{l}\t%k0";
else
- return "<shiftrt>{l}\t{%2, %k0|%k0, %2}";
+ return "<shift>{l}\t{%2, %k0|%k0, %2}";
}
[(set_attr "type" "ishift")
(set (attr "length_immediate")
(const_string "*")))
(set_attr "mode" "SI")])
-(define_insn "*<shiftrt_insn><mode>3_cconly"
+(define_insn "*<shift_insn><mode>3_cconly"
[(set (reg FLAGS_REG)
(compare
(any_shiftrt:SWI
{
if (operands[2] == const1_rtx
&& (TARGET_SHIFT1 || optimize_function_for_size_p (cfun)))
- return "<shiftrt>{<imodesuffix>}\t%0";
+ return "<shift>{<imodesuffix>}\t%0";
else
- return "<shiftrt>{<imodesuffix>}\t{%2, %0|%0, %2}";
+ return "<shift>{<imodesuffix>}\t{%2, %0|%0, %2}";
}
[(set_attr "type" "ishift")
(set (attr "length_immediate")
(V2DI "vec") (V4DI "avx2")])
;; Mapping of logic-shift operators
-(define_code_iterator lshift [lshiftrt ashift])
-
-;; Base name for define_insn
-(define_code_attr lshift_insn [(lshiftrt "srl") (ashift "sll")])
-
-;; Base name for insn mnemonic
-(define_code_attr lshift [(lshiftrt "lshr") (ashift "lshl")])
+(define_code_iterator any_lshift [ashift lshiftrt])
(define_mode_attr ssedoublemode
[(V16HI "V16SI") (V8HI "V8SI")])
;; XOP packed shift instructions.
(define_expand "vlshr<mode>3"
- [(match_operand:VI12_128 0 "register_operand" "")
- (match_operand:VI12_128 1 "register_operand" "")
- (match_operand:VI12_128 2 "nonimmediate_operand" "")]
+ [(set (match_operand:VI12_128 0 "register_operand" "")
+ (lshiftrt:VI12_128
+ (match_operand:VI12_128 1 "register_operand" "")
+ (match_operand:VI12_128 2 "nonimmediate_operand" "")))]
"TARGET_XOP"
{
rtx neg = gen_reg_rtx (<MODE>mode);
"TARGET_AVX2")
(define_expand "vashr<mode>3"
- [(match_operand:VI128_128 0 "register_operand" "")
- (match_operand:VI128_128 1 "register_operand" "")
- (match_operand:VI128_128 2 "nonimmediate_operand" "")]
+ [(set (match_operand:VI128_128 0 "register_operand" "")
+ (ashiftrt:VI128_128
+ (match_operand:VI128_128 1 "register_operand" "")
+ (match_operand:VI128_128 2 "nonimmediate_operand" "")))]
"TARGET_XOP"
{
rtx neg = gen_reg_rtx (<MODE>mode);
"TARGET_AVX2")
(define_expand "vashl<mode>3"
- [(match_operand:VI12_128 0 "register_operand" "")
- (match_operand:VI12_128 1 "register_operand" "")
- (match_operand:VI12_128 2 "register_operand" "")]
+ [(set (match_operand:VI12_128 0 "register_operand" "")
+ (ashift:VI12_128
+ (match_operand:VI12_128 1 "register_operand" "")
+ (match_operand:VI12_128 2 "nonimmediate_operand" "")))]
"TARGET_XOP"
{
emit_insn (gen_xop_ashl<mode>3 (operands[0], operands[1], operands[2]));
(define_insn "avx2_ashrv<mode>"
[(set (match_operand:VI4_AVX2 0 "register_operand" "=x")
- (ashiftrt:VI4_AVX2 (match_operand:VI4_AVX2 1 "register_operand" "x")
- (match_operand:VI4_AVX2 2 "nonimmediate_operand"
- "xm")))]
+ (ashiftrt:VI4_AVX2
+ (match_operand:VI4_AVX2 1 "register_operand" "x")
+ (match_operand:VI4_AVX2 2 "nonimmediate_operand" "xm")))]
"TARGET_AVX2"
"vpsravd\t{%2, %1, %0|%0, %1, %2}"
[(set_attr "type" "sseishft")
(set_attr "prefix" "vex")
(set_attr "mode" "<sseinsnmode>")])
-(define_insn "avx2_<lshift>v<mode>"
+(define_insn "avx2_<shift_insn>v<mode>"
[(set (match_operand:VI48_AVX2 0 "register_operand" "=x")
- (lshift:VI48_AVX2 (match_operand:VI48_AVX2 1 "register_operand" "x")
- (match_operand:VI48_AVX2 2 "nonimmediate_operand"
- "xm")))]
+ (any_lshift:VI48_AVX2
+ (match_operand:VI48_AVX2 1 "register_operand" "x")
+ (match_operand:VI48_AVX2 2 "nonimmediate_operand" "xm")))]
"TARGET_AVX2"
- "vp<lshift_insn>v<ssemodesuffix>\t{%2, %1, %0|%0, %1, %2}"
+ "vp<shift>v<ssemodesuffix>\t{%2, %1, %0|%0, %1, %2}"
[(set_attr "type" "sseishft")
(set_attr "prefix" "vex")
(set_attr "mode" "<sseinsnmode>")])