From fe03c9a67877a11f50847ca32a6d0fbc17580410 Mon Sep 17 00:00:00 2001
From: Colin LeMahieu
Date: Wed, 28 Jan 2015 17:37:59 +0000
Subject: [PATCH] [Hexagon] Replacing XTYPE/SHIFT intrinsic patterns. Adding
 missing instructions and tests.

llvm-svn: 227330
---
 llvm/lib/Target/Hexagon/HexagonInstrInfo.td        |   5 +
 llvm/lib/Target/Hexagon/HexagonInstrInfoV5.td      |   4 +
 llvm/lib/Target/Hexagon/HexagonIntrinsics.td       | 385 +++++++------
 llvm/lib/Target/Hexagon/HexagonIntrinsicsV4.td     |  12 +
 llvm/lib/Target/Hexagon/HexagonIntrinsicsV5.td     |  13 +
 .../test/CodeGen/Hexagon/intrinsics/xtype_shift.ll | 635 +++++++++++++++++++++
 llvm/test/MC/Disassembler/Hexagon/xtype_shift.txt  |   4 +
 7 files changed, 864 insertions(+), 194 deletions(-)
 create mode 100644 llvm/test/CodeGen/Hexagon/intrinsics/xtype_shift.ll

diff --git a/llvm/lib/Target/Hexagon/HexagonInstrInfo.td b/llvm/lib/Target/Hexagon/HexagonInstrInfo.td
index 128cb0d..1a4b603 100644
--- a/llvm/lib/Target/Hexagon/HexagonInstrInfo.td
+++ b/llvm/lib/Target/Hexagon/HexagonInstrInfo.td
@@ -3358,6 +3358,11 @@ def S2_asl_i_r_sat : T_S2op_2_ii <"asl", 0b010, 0b010, 1>;
 let isCodeGenOnly = 0 in
 def S2_asr_i_r_rnd : T_S2op_2_ii <"asr", 0b010, 0b000, 0, 1>;
 
+def S2_asr_i_r_rnd_goodsyntax
+  : SInst <(outs IntRegs:$dst), (ins IntRegs:$src, u5Imm:$u5),
+    "$dst = asrrnd($src, #$u5)",
+    [], "", S_2op_tc_1_SLOT23>;
+
 def: Pat<(i32 (sra (i32 (add (i32 (sra I32:$src1, u5ImmPred:$src2)),
                              (i32 1))),
                    (i32 1))),
diff --git a/llvm/lib/Target/Hexagon/HexagonInstrInfoV5.td b/llvm/lib/Target/Hexagon/HexagonInstrInfoV5.td
index 600eced..54c4d37 100644
--- a/llvm/lib/Target/Hexagon/HexagonInstrInfoV5.td
+++ b/llvm/lib/Target/Hexagon/HexagonInstrInfoV5.td
@@ -25,6 +25,10 @@ def S2_asr_i_p_rnd : S_2OpInstImm<"asr", 0b110, 0b111, u6Imm,
     let Inst{13-8} = src2;
   }
 
+def S2_asr_i_p_rnd_goodsyntax
+  : MInst<(outs DoubleRegs:$dst), (ins DoubleRegs:$src1, u6Imm:$src2),
+    "$dst = asrrnd($src1, #$src2)">;
+
 let isCodeGenOnly = 0 in
 def C4_fastcorner9 : T_LOGICAL_2OP<"fastcorner9", 0b000, 0, 0>,
   Requires<[HasV5T]> {
diff --git a/llvm/lib/Target/Hexagon/HexagonIntrinsics.td b/llvm/lib/Target/Hexagon/HexagonIntrinsics.td
index ee30a0b..f11fc7d 100644
--- a/llvm/lib/Target/Hexagon/HexagonIntrinsics.td
+++ b/llvm/lib/Target/Hexagon/HexagonIntrinsics.td
@@ -33,6 +33,10 @@ class T_IR_pat <InstHexagon MI, Intrinsic IntID>
 
+class T_PI_pat <InstHexagon MI, Intrinsic IntID>
+  : Pat<(IntID I64:$Rs, imm:$It),
+        (MI DoubleRegs:$Rs, imm:$It)>;
+
 class T_RR_pat <InstHexagon MI, Intrinsic IntID>
   : Pat <(IntID I32:$Rs, I32:$Rt),
          (MI I32:$Rs, I32:$Rt)>;
@@ -57,14 +61,30 @@ class T_RRI_pat <InstHexagon MI, Intrinsic IntID>
   : Pat <(IntID I32:$Rs, I32:$Rt, imm:$Iu),
          (MI I32:$Rs, I32:$Rt, imm:$Iu)>;
 
+class T_IRI_pat <InstHexagon MI, Intrinsic IntID>
+  : Pat <(IntID imm:$It, I32:$Rs, imm:$Iu),
+         (MI imm:$It, I32:$Rs, imm:$Iu)>;
+
 class T_RRR_pat <InstHexagon MI, Intrinsic IntID>
   : Pat <(IntID I32:$Rs, I32:$Rt, I32:$Ru),
          (MI I32:$Rs, I32:$Rt, I32:$Ru)>;
 
+class T_PPI_pat <InstHexagon MI, Intrinsic IntID>
+  : Pat <(IntID I64:$Rs, I64:$Rt, imm:$Iu),
+         (MI DoubleRegs:$Rs, DoubleRegs:$Rt, imm:$Iu)>;
+
+class T_PPR_pat <InstHexagon MI, Intrinsic IntID>
+  : Pat <(IntID I64:$Rs, I64:$Rt, I32:$Ru),
+         (MI DoubleRegs:$Rs, DoubleRegs:$Rt, I32:$Ru)>;
+
 class T_PRR_pat <InstHexagon MI, Intrinsic IntID>
   : Pat <(IntID I64:$Rs, I32:$Rt, I32:$Ru),
          (MI DoubleRegs:$Rs, I32:$Rt, I32:$Ru)>;
 
+class T_PR_pat <InstHexagon MI, Intrinsic IntID>
+  : Pat <(IntID I64:$Rs, I32:$Rt),
+        (MI DoubleRegs:$Rs, I32:$Rt)>;
+
 //===----------------------------------------------------------------------===//
 // MPYS / Multiply signed/unsigned halfwords
 //Rd=mpy[u](Rs.[H|L],Rt.[H|L])[:<<1][:rnd][:sat]
@@ -310,6 +330,143 @@ def : T_RR_pat;
 def : T_RR_pat;
 def : T_RR_pat;
 
+// Shift and accumulate
+def : T_RRI_pat ;
+def : T_RRI_pat ;
+def : T_RRI_pat ;
+def : T_RRI_pat ;
+def : T_RRI_pat ;
+def : T_RRI_pat ;
+
+def : T_RRI_pat ;
+def : T_RRI_pat ;
+def : T_RRI_pat ;
+def : T_RRI_pat ;
+def : T_RRI_pat ;
+def : T_RRI_pat ;
+def : T_RRI_pat ;
+def : T_RRI_pat ;
+
+def : T_PPI_pat ;
+def : T_PPI_pat ;
+def : T_PPI_pat ;
+def : T_PPI_pat ;
+def : T_PPI_pat ;
+def : T_PPI_pat ;
+
+def : T_PPI_pat ;
+def : T_PPI_pat ;
+def : T_PPI_pat ;
+def : T_PPI_pat ;
+def : T_PPI_pat ;
+def : T_PPI_pat ;
+def : T_PPI_pat ;
+def : T_PPI_pat ;
+
+def : T_RRR_pat ;
+def : T_RRR_pat ;
+def : T_RRR_pat ;
+def : T_RRR_pat ;
+def : T_RRR_pat ;
+def : T_RRR_pat ;
+def : T_RRR_pat ;
+def : T_RRR_pat ;
+
+def : T_RRR_pat ;
+def : T_RRR_pat ;
+def : T_RRR_pat ;
+def : T_RRR_pat ;
+def : T_RRR_pat ;
+def : T_RRR_pat ;
+def : T_RRR_pat ;
+def : T_RRR_pat ;
+
+def : T_PPR_pat ;
+def : T_PPR_pat ;
+def : T_PPR_pat ;
+def : T_PPR_pat ;
+def : T_PPR_pat ;
+def : T_PPR_pat ;
+def : T_PPR_pat ;
+def : T_PPR_pat ;
+
+def : T_PPR_pat ;
+def : T_PPR_pat ;
+def : T_PPR_pat ;
+def : T_PPR_pat ;
+def : T_PPR_pat ;
+def : T_PPR_pat ;
+def : T_PPR_pat ;
+def : T_PPR_pat ;
+
+def : T_RRI_pat ;
+def : T_RRI_pat ;
+def : T_RRI_pat ;
+def : T_RRI_pat ;
+def : T_RRI_pat ;
+def : T_RRI_pat ;
+
+def : T_RRI_pat ;
+def : T_RRI_pat ;
+def : T_RRI_pat ;
+def : T_RRI_pat ;
+def : T_RRI_pat ;
+def : T_RRI_pat ;
+def : T_RRI_pat ;
+def : T_RRI_pat ;
+
+def : T_PPI_pat ;
+def : T_PPI_pat ;
+def : T_PPI_pat ;
+def : T_PPI_pat ;
+def : T_PPI_pat ;
+def : T_PPI_pat ;
+
+def : T_PPI_pat ;
+def : T_PPI_pat ;
+def : T_PPI_pat ;
+def : T_PPI_pat ;
+def : T_PPI_pat ;
+def : T_PPI_pat ;
+def : T_PPI_pat ;
+def : T_PPI_pat ;
+
+def : T_RRR_pat ;
+def : T_RRR_pat ;
+def : T_RRR_pat ;
+def : T_RRR_pat ;
+def : T_RRR_pat ;
+def : T_RRR_pat ;
+def : T_RRR_pat ;
+def : T_RRR_pat ;
+
+def : T_RRR_pat ;
+def : T_RRR_pat ;
+def : T_RRR_pat ;
+def : T_RRR_pat ;
+def : T_RRR_pat ;
+def : T_RRR_pat ;
+def : T_RRR_pat ;
+def : T_RRR_pat ;
+
+def : T_PPR_pat ;
+def : T_PPR_pat ;
+def : T_PPR_pat ;
+def : T_PPR_pat ;
+def : T_PPR_pat ;
+def : T_PPR_pat ;
+def : T_PPR_pat ;
+def : T_PPR_pat ;
+
+def : T_PPR_pat ;
+def : T_PPR_pat ;
+def : T_PPR_pat ;
+def : T_PPR_pat ;
+def : T_PPR_pat ;
+def : T_PPR_pat ;
+def : T_PPR_pat ;
+def : T_PPR_pat ;
+
 /********************************************************************
 * ALU32/ALU *
 *********************************************************************/
@@ -409,6 +566,40 @@ def : T_RRI_pat ;
 // XOR and XOR with destination
 def : T_RRR_pat ;
 
+// Shift by immediate and add
+def : T_RRI_pat <S2_addasl_rrri, int_hexagon_S2_addasl_rrri>;
+
+/********************************************************************
+* STYPE/SHIFT *
+*********************************************************************/
+
+def : T_PI_pat <S2_asr_i_p, int_hexagon_S2_asr_i_p>;
+def : T_PI_pat <S2_lsr_i_p, int_hexagon_S2_lsr_i_p>;
+def : T_PI_pat <S2_asl_i_p, int_hexagon_S2_asl_i_p>;
+
+def : T_PR_pat <S2_asr_r_p, int_hexagon_S2_asr_r_p>;
+def : T_PR_pat <S2_lsr_r_p, int_hexagon_S2_lsr_r_p>;
+def : T_PR_pat <S2_asl_r_p, int_hexagon_S2_asl_r_p>;
+def : T_PR_pat <S2_lsl_r_p, int_hexagon_S2_lsl_r_p>;
+
+def : T_RR_pat <S2_asr_r_r, int_hexagon_S2_asr_r_r>;
+def : T_RR_pat <S2_lsr_r_r, int_hexagon_S2_lsr_r_r>;
+def : T_RR_pat <S2_asl_r_r, int_hexagon_S2_asl_r_r>;
+def : T_RR_pat <S2_lsl_r_r, int_hexagon_S2_lsl_r_r>;
+
+def : T_RR_pat <S2_asr_r_r_sat, int_hexagon_S2_asr_r_r_sat>;
+def : T_RR_pat <S2_asl_r_r_sat, int_hexagon_S2_asl_r_r_sat>;
+
+def : T_RI_pat <S2_asr_i_r, int_hexagon_S2_asr_i_r>;
+def : T_RI_pat <S2_lsr_i_r, int_hexagon_S2_lsr_i_r>;
+def : T_RI_pat <S2_asl_i_r, int_hexagon_S2_asl_i_r>;
+def : T_RI_pat <S2_asr_i_r_rnd, int_hexagon_S2_asr_i_r_rnd>;
+def : T_RI_pat <S2_asr_i_r_rnd_goodsyntax,
+                int_hexagon_S2_asr_i_r_rnd_goodsyntax>;
+
+// Shift left by immediate with saturation.
+def : T_RI_pat <S2_asl_i_r_sat, int_hexagon_S2_asl_i_r_sat>;
+
 //
 // ALU 32 types.
 //
@@ -3144,200 +3335,6 @@ def HEXAGON_C2_tfrrp:
 def HEXAGON_C2_vitpack:
   si_SInst_qiqi <"vitpack",int_hexagon_C2_vitpack>;
 
-
-/********************************************************************
-* STYPE/SHIFT *
-*********************************************************************/
-
-// STYPE / SHIFT / Shift by immediate.
-def HEXAGON_S2_asl_i_r: - si_SInst_siu5 <"asl", int_hexagon_S2_asl_i_r>; -def HEXAGON_S2_asr_i_r: - si_SInst_siu5 <"asr", int_hexagon_S2_asr_i_r>; -def HEXAGON_S2_lsr_i_r: - si_SInst_siu5 <"lsr", int_hexagon_S2_lsr_i_r>; -def HEXAGON_S2_asl_i_p: - di_SInst_diu6 <"asl", int_hexagon_S2_asl_i_p>; -def HEXAGON_S2_asr_i_p: - di_SInst_diu6 <"asr", int_hexagon_S2_asr_i_p>; -def HEXAGON_S2_lsr_i_p: - di_SInst_diu6 <"lsr", int_hexagon_S2_lsr_i_p>; - -// STYPE / SHIFT / Shift by immediate and accumulate. -def HEXAGON_S2_asl_i_r_acc: - si_SInst_sisiu5_acc <"asl", int_hexagon_S2_asl_i_r_acc>; -def HEXAGON_S2_asr_i_r_acc: - si_SInst_sisiu5_acc <"asr", int_hexagon_S2_asr_i_r_acc>; -def HEXAGON_S2_lsr_i_r_acc: - si_SInst_sisiu5_acc <"lsr", int_hexagon_S2_lsr_i_r_acc>; -def HEXAGON_S2_asl_i_r_nac: - si_SInst_sisiu5_nac <"asl", int_hexagon_S2_asl_i_r_nac>; -def HEXAGON_S2_asr_i_r_nac: - si_SInst_sisiu5_nac <"asr", int_hexagon_S2_asr_i_r_nac>; -def HEXAGON_S2_lsr_i_r_nac: - si_SInst_sisiu5_nac <"lsr", int_hexagon_S2_lsr_i_r_nac>; -def HEXAGON_S2_asl_i_p_acc: - di_SInst_didiu6_acc <"asl", int_hexagon_S2_asl_i_p_acc>; -def HEXAGON_S2_asr_i_p_acc: - di_SInst_didiu6_acc <"asr", int_hexagon_S2_asr_i_p_acc>; -def HEXAGON_S2_lsr_i_p_acc: - di_SInst_didiu6_acc <"lsr", int_hexagon_S2_lsr_i_p_acc>; -def HEXAGON_S2_asl_i_p_nac: - di_SInst_didiu6_nac <"asl", int_hexagon_S2_asl_i_p_nac>; -def HEXAGON_S2_asr_i_p_nac: - di_SInst_didiu6_nac <"asr", int_hexagon_S2_asr_i_p_nac>; -def HEXAGON_S2_lsr_i_p_nac: - di_SInst_didiu6_nac <"lsr", int_hexagon_S2_lsr_i_p_nac>; - -// STYPE / SHIFT / Shift by immediate and add. -def HEXAGON_S2_addasl_rrri: - si_SInst_sisiu3 <"addasl", int_hexagon_S2_addasl_rrri>; - -// STYPE / SHIFT / Shift by immediate and logical. -def HEXAGON_S2_asl_i_r_and: - si_SInst_sisiu5_and <"asl", int_hexagon_S2_asl_i_r_and>; -def HEXAGON_S2_asr_i_r_and: - si_SInst_sisiu5_and <"asr", int_hexagon_S2_asr_i_r_and>; -def HEXAGON_S2_lsr_i_r_and: - si_SInst_sisiu5_and <"lsr", int_hexagon_S2_lsr_i_r_and>; - -def HEXAGON_S2_asl_i_r_xacc: - si_SInst_sisiu5_xor <"asl", int_hexagon_S2_asl_i_r_xacc>; -def HEXAGON_S2_lsr_i_r_xacc: - si_SInst_sisiu5_xor <"lsr", int_hexagon_S2_lsr_i_r_xacc>; - -def HEXAGON_S2_asl_i_r_or: - si_SInst_sisiu5_or <"asl", int_hexagon_S2_asl_i_r_or>; -def HEXAGON_S2_asr_i_r_or: - si_SInst_sisiu5_or <"asr", int_hexagon_S2_asr_i_r_or>; -def HEXAGON_S2_lsr_i_r_or: - si_SInst_sisiu5_or <"lsr", int_hexagon_S2_lsr_i_r_or>; - -def HEXAGON_S2_asl_i_p_and: - di_SInst_didiu6_and <"asl", int_hexagon_S2_asl_i_p_and>; -def HEXAGON_S2_asr_i_p_and: - di_SInst_didiu6_and <"asr", int_hexagon_S2_asr_i_p_and>; -def HEXAGON_S2_lsr_i_p_and: - di_SInst_didiu6_and <"lsr", int_hexagon_S2_lsr_i_p_and>; - -def HEXAGON_S2_asl_i_p_xacc: - di_SInst_didiu6_xor <"asl", int_hexagon_S2_asl_i_p_xacc>; -def HEXAGON_S2_lsr_i_p_xacc: - di_SInst_didiu6_xor <"lsr", int_hexagon_S2_lsr_i_p_xacc>; - -def HEXAGON_S2_asl_i_p_or: - di_SInst_didiu6_or <"asl", int_hexagon_S2_asl_i_p_or>; -def HEXAGON_S2_asr_i_p_or: - di_SInst_didiu6_or <"asr", int_hexagon_S2_asr_i_p_or>; -def HEXAGON_S2_lsr_i_p_or: - di_SInst_didiu6_or <"lsr", int_hexagon_S2_lsr_i_p_or>; - -// STYPE / SHIFT / Shift right by immediate with rounding. -def HEXAGON_S2_asr_i_r_rnd: - si_SInst_siu5_rnd <"asr", int_hexagon_S2_asr_i_r_rnd>; -def HEXAGON_S2_asr_i_r_rnd_goodsyntax: - si_SInst_siu5 <"asrrnd", int_hexagon_S2_asr_i_r_rnd_goodsyntax>; - -// STYPE / SHIFT / Shift left by immediate with saturation. 
-def HEXAGON_S2_asl_i_r_sat: - si_SInst_sisi_sat <"asl", int_hexagon_S2_asl_i_r_sat>; - -// STYPE / SHIFT / Shift by register. -def HEXAGON_S2_asl_r_r: - si_SInst_sisi <"asl", int_hexagon_S2_asl_r_r>; -def HEXAGON_S2_asr_r_r: - si_SInst_sisi <"asr", int_hexagon_S2_asr_r_r>; -def HEXAGON_S2_lsl_r_r: - si_SInst_sisi <"lsl", int_hexagon_S2_lsl_r_r>; -def HEXAGON_S2_lsr_r_r: - si_SInst_sisi <"lsr", int_hexagon_S2_lsr_r_r>; -def HEXAGON_S2_asl_r_p: - di_SInst_disi <"asl", int_hexagon_S2_asl_r_p>; -def HEXAGON_S2_asr_r_p: - di_SInst_disi <"asr", int_hexagon_S2_asr_r_p>; -def HEXAGON_S2_lsl_r_p: - di_SInst_disi <"lsl", int_hexagon_S2_lsl_r_p>; -def HEXAGON_S2_lsr_r_p: - di_SInst_disi <"lsr", int_hexagon_S2_lsr_r_p>; - -// STYPE / SHIFT / Shift by register and accumulate. -def HEXAGON_S2_asl_r_r_acc: - si_SInst_sisisi_acc <"asl", int_hexagon_S2_asl_r_r_acc>; -def HEXAGON_S2_asr_r_r_acc: - si_SInst_sisisi_acc <"asr", int_hexagon_S2_asr_r_r_acc>; -def HEXAGON_S2_lsl_r_r_acc: - si_SInst_sisisi_acc <"lsl", int_hexagon_S2_lsl_r_r_acc>; -def HEXAGON_S2_lsr_r_r_acc: - si_SInst_sisisi_acc <"lsr", int_hexagon_S2_lsr_r_r_acc>; -def HEXAGON_S2_asl_r_p_acc: - di_SInst_didisi_acc <"asl", int_hexagon_S2_asl_r_p_acc>; -def HEXAGON_S2_asr_r_p_acc: - di_SInst_didisi_acc <"asr", int_hexagon_S2_asr_r_p_acc>; -def HEXAGON_S2_lsl_r_p_acc: - di_SInst_didisi_acc <"lsl", int_hexagon_S2_lsl_r_p_acc>; -def HEXAGON_S2_lsr_r_p_acc: - di_SInst_didisi_acc <"lsr", int_hexagon_S2_lsr_r_p_acc>; - -def HEXAGON_S2_asl_r_r_nac: - si_SInst_sisisi_nac <"asl", int_hexagon_S2_asl_r_r_nac>; -def HEXAGON_S2_asr_r_r_nac: - si_SInst_sisisi_nac <"asr", int_hexagon_S2_asr_r_r_nac>; -def HEXAGON_S2_lsl_r_r_nac: - si_SInst_sisisi_nac <"lsl", int_hexagon_S2_lsl_r_r_nac>; -def HEXAGON_S2_lsr_r_r_nac: - si_SInst_sisisi_nac <"lsr", int_hexagon_S2_lsr_r_r_nac>; -def HEXAGON_S2_asl_r_p_nac: - di_SInst_didisi_nac <"asl", int_hexagon_S2_asl_r_p_nac>; -def HEXAGON_S2_asr_r_p_nac: - di_SInst_didisi_nac <"asr", int_hexagon_S2_asr_r_p_nac>; -def HEXAGON_S2_lsl_r_p_nac: - di_SInst_didisi_nac <"lsl", int_hexagon_S2_lsl_r_p_nac>; -def HEXAGON_S2_lsr_r_p_nac: - di_SInst_didisi_nac <"lsr", int_hexagon_S2_lsr_r_p_nac>; - -// STYPE / SHIFT / Shift by register and logical. 
-def HEXAGON_S2_asl_r_r_and:
-  si_SInst_sisisi_and <"asl", int_hexagon_S2_asl_r_r_and>;
-def HEXAGON_S2_asr_r_r_and:
-  si_SInst_sisisi_and <"asr", int_hexagon_S2_asr_r_r_and>;
-def HEXAGON_S2_lsl_r_r_and:
-  si_SInst_sisisi_and <"lsl", int_hexagon_S2_lsl_r_r_and>;
-def HEXAGON_S2_lsr_r_r_and:
-  si_SInst_sisisi_and <"lsr", int_hexagon_S2_lsr_r_r_and>;
-
-def HEXAGON_S2_asl_r_r_or:
-  si_SInst_sisisi_or <"asl", int_hexagon_S2_asl_r_r_or>;
-def HEXAGON_S2_asr_r_r_or:
-  si_SInst_sisisi_or <"asr", int_hexagon_S2_asr_r_r_or>;
-def HEXAGON_S2_lsl_r_r_or:
-  si_SInst_sisisi_or <"lsl", int_hexagon_S2_lsl_r_r_or>;
-def HEXAGON_S2_lsr_r_r_or:
-  si_SInst_sisisi_or <"lsr", int_hexagon_S2_lsr_r_r_or>;
-
-def HEXAGON_S2_asl_r_p_and:
-  di_SInst_didisi_and <"asl", int_hexagon_S2_asl_r_p_and>;
-def HEXAGON_S2_asr_r_p_and:
-  di_SInst_didisi_and <"asr", int_hexagon_S2_asr_r_p_and>;
-def HEXAGON_S2_lsl_r_p_and:
-  di_SInst_didisi_and <"lsl", int_hexagon_S2_lsl_r_p_and>;
-def HEXAGON_S2_lsr_r_p_and:
-  di_SInst_didisi_and <"lsr", int_hexagon_S2_lsr_r_p_and>;
-
-def HEXAGON_S2_asl_r_p_or:
-  di_SInst_didisi_or <"asl", int_hexagon_S2_asl_r_p_or>;
-def HEXAGON_S2_asr_r_p_or:
-  di_SInst_didisi_or <"asr", int_hexagon_S2_asr_r_p_or>;
-def HEXAGON_S2_lsl_r_p_or:
-  di_SInst_didisi_or <"lsl", int_hexagon_S2_lsl_r_p_or>;
-def HEXAGON_S2_lsr_r_p_or:
-  di_SInst_didisi_or <"lsr", int_hexagon_S2_lsr_r_p_or>;
-
-// STYPE / SHIFT / Shift by register with saturation.
-def HEXAGON_S2_asl_r_r_sat:
-  si_SInst_sisi_sat <"asl", int_hexagon_S2_asl_r_r_sat>;
-def HEXAGON_S2_asr_r_r_sat:
-  si_SInst_sisi_sat <"asr", int_hexagon_S2_asr_r_r_sat>;
-
 // STYPE / SHIFT / Table Index.
 def Hexagon_S2_tableidxb_goodsyntax:
   si_MInst_sisiu4u5 <"tableidxb",int_hexagon_S2_tableidxb_goodsyntax>;
diff --git a/llvm/lib/Target/Hexagon/HexagonIntrinsicsV4.td b/llvm/lib/Target/Hexagon/HexagonIntrinsicsV4.td
index 77b148b..51ddb1b 100644
--- a/llvm/lib/Target/Hexagon/HexagonIntrinsicsV4.td
+++ b/llvm/lib/Target/Hexagon/HexagonIntrinsicsV4.td
@@ -12,6 +12,18 @@
 // 80-V9418-12 Rev. A
 // June 15, 2010
 
+// Shift an immediate left by register amount
+def : T_IR_pat<S4_lsli, int_hexagon_S4_lsli>;
+
+// Shift and add/sub/and/or
+def : T_IRI_pat <S4_addi_asl_ri, int_hexagon_S4_addi_asl_ri>;
+def : T_IRI_pat <S4_subi_asl_ri, int_hexagon_S4_subi_asl_ri>;
+def : T_IRI_pat <S4_addi_lsr_ri, int_hexagon_S4_addi_lsr_ri>;
+def : T_IRI_pat <S4_subi_lsr_ri, int_hexagon_S4_subi_lsr_ri>;
+def : T_IRI_pat <S4_andi_asl_ri, int_hexagon_S4_andi_asl_ri>;
+def : T_IRI_pat <S4_ori_asl_ri,  int_hexagon_S4_ori_asl_ri>;
+def : T_IRI_pat <S4_andi_lsr_ri, int_hexagon_S4_andi_lsr_ri>;
+def : T_IRI_pat <S4_ori_lsr_ri,  int_hexagon_S4_ori_lsr_ri>;
 
 //
 // ALU 32 types.
 //
diff --git a/llvm/lib/Target/Hexagon/HexagonIntrinsicsV5.td b/llvm/lib/Target/Hexagon/HexagonIntrinsicsV5.td
index 1d44b52..3724a58 100644
--- a/llvm/lib/Target/Hexagon/HexagonIntrinsicsV5.td
+++ b/llvm/lib/Target/Hexagon/HexagonIntrinsicsV5.td
@@ -1,3 +1,16 @@
+//===- HexagonIntrinsicsV5.td - V5 Instruction intrinsics --*- tablegen -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+def : T_PI_pat <S2_asr_i_p_rnd, int_hexagon_S2_asr_i_p_rnd>;
+def : T_PI_pat <S2_asr_i_p_rnd_goodsyntax,
+                int_hexagon_S2_asr_i_p_rnd_goodsyntax>;
+
 class sf_SInst_sf <string opc, Intrinsic IntID>
   : SInst<(outs IntRegs:$dst), (ins IntRegs:$src1),
     !strconcat("$dst = ", !strconcat(opc , "($src1)")),
diff --git a/llvm/test/CodeGen/Hexagon/intrinsics/xtype_shift.ll b/llvm/test/CodeGen/Hexagon/intrinsics/xtype_shift.ll
new file mode 100644
index 0000000..9917d77
--- /dev/null
+++ b/llvm/test/CodeGen/Hexagon/intrinsics/xtype_shift.ll
@@ -0,0 +1,635 @@
+; RUN: llc -march=hexagon -O0 < %s | FileCheck %s
+; Hexagon Programmer's Reference Manual 11.10.8 XTYPE/SHIFT
+
+; Shift by immediate
+declare i64 @llvm.hexagon.S2.asr.i.p(i64, i32)
+define i64 @S2_asr_i_p(i64 %a) {
+  %z = call i64 @llvm.hexagon.S2.asr.i.p(i64 %a, i32 0)
+  ret i64 %z
+}
+; CHECK: r1:0 = asr(r1:0, #0)
+
+declare i64 @llvm.hexagon.S2.lsr.i.p(i64, i32)
+define i64 @S2_lsr_i_p(i64 %a) {
+  %z = call i64 @llvm.hexagon.S2.lsr.i.p(i64 %a, i32 0)
+  ret i64 %z
+}
+; CHECK: r1:0 = lsr(r1:0, #0)
+
+declare i64 @llvm.hexagon.S2.asl.i.p(i64, i32)
+define i64 @S2_asl_i_p(i64 %a) {
+  %z = call i64 @llvm.hexagon.S2.asl.i.p(i64 %a, i32 0)
+  ret i64 %z
+}
+; CHECK: r1:0 = asl(r1:0, #0)
+
+declare i32 @llvm.hexagon.S2.asr.i.r(i32, i32)
+define i32 @S2_asr_i_r(i32 %a) {
+  %z = call i32 @llvm.hexagon.S2.asr.i.r(i32 %a, i32 0)
+  ret i32 %z
+}
+; CHECK: r0 = asr(r0, #0)
+
+declare i32 @llvm.hexagon.S2.lsr.i.r(i32, i32)
+define i32 @S2_lsr_i_r(i32 %a) {
+  %z = call i32 @llvm.hexagon.S2.lsr.i.r(i32 %a, i32 0)
+  ret i32 %z
+}
+; CHECK: r0 = lsr(r0, #0)
+
+declare i32 @llvm.hexagon.S2.asl.i.r(i32, i32)
+define i32 @S2_asl_i_r(i32 %a) {
+  %z = call i32 @llvm.hexagon.S2.asl.i.r(i32 %a, i32 0)
+  ret i32 %z
+}
+; CHECK: r0 = asl(r0, #0)
+
+; Shift by immediate and accumulate
+declare i64 @llvm.hexagon.S2.asr.i.p.nac(i64, i64, i32)
+define i64 @S2_asr_i_p_nac(i64 %a, i64 %b) {
+  %z = call i64 @llvm.hexagon.S2.asr.i.p.nac(i64 %a, i64 %b, i32 0)
+  ret i64 %z
+}
+; CHECK: r1:0 -= asr(r3:2, #0)
+
+declare i64 @llvm.hexagon.S2.lsr.i.p.nac(i64, i64, i32)
+define i64 @S2_lsr_i_p_nac(i64 %a, i64 %b) {
+  %z = call i64 @llvm.hexagon.S2.lsr.i.p.nac(i64 %a, i64 %b, i32 0)
+  ret i64 %z
+}
+; CHECK: r1:0 -= lsr(r3:2, #0)
+
+declare i64 @llvm.hexagon.S2.asl.i.p.nac(i64, i64, i32)
+define i64 @S2_asl_i_p_nac(i64 %a, i64 %b) {
+  %z = call i64 @llvm.hexagon.S2.asl.i.p.nac(i64 %a, i64 %b, i32 0)
+  ret i64 %z
+}
+; CHECK: r1:0 -= asl(r3:2, #0)
+
+declare i64 @llvm.hexagon.S2.asr.i.p.acc(i64, i64, i32)
+define i64 @S2_asr_i_p_acc(i64 %a, i64 %b) {
+  %z = call i64 @llvm.hexagon.S2.asr.i.p.acc(i64 %a, i64 %b, i32 0)
+  ret i64 %z
+}
+; CHECK: r1:0 += asr(r3:2, #0)
+
+declare i64 @llvm.hexagon.S2.lsr.i.p.acc(i64, i64, i32)
+define i64 @S2_lsr_i_p_acc(i64 %a, i64 %b) {
+  %z = call i64 @llvm.hexagon.S2.lsr.i.p.acc(i64 %a, i64 %b, i32 0)
+  ret i64 %z
+}
+; CHECK: r1:0 += lsr(r3:2, #0)
+
+declare i64 @llvm.hexagon.S2.asl.i.p.acc(i64, i64, i32)
+define i64 @S2_asl_i_p_acc(i64 %a, i64 %b) {
+  %z = call i64 @llvm.hexagon.S2.asl.i.p.acc(i64 %a, i64 %b, i32 0)
+  ret i64 %z
+}
+; CHECK: r1:0 += asl(r3:2, #0)
+
+declare i32 @llvm.hexagon.S2.asr.i.r.nac(i32, i32, i32)
+define i32 @S2_asr_i_r_nac(i32 %a, i32 %b) {
+  %z = call i32 @llvm.hexagon.S2.asr.i.r.nac(i32 %a, i32 %b, i32 0)
+  ret i32 %z
+}
+; CHECK: r0 -= asr(r1, #0)
+
+declare i32 @llvm.hexagon.S2.lsr.i.r.nac(i32, i32, i32)
+define i32 @S2_lsr_i_r_nac(i32 %a, i32 %b) {
+  %z = call i32 @llvm.hexagon.S2.lsr.i.r.nac(i32 %a, i32 %b, i32 0)
+  ret i32 %z
+}
+; CHECK: r0 -= lsr(r1, #0)
+
+declare i32
@llvm.hexagon.S2.asl.i.r.nac(i32, i32, i32) +define i32 @S2_asl_i_r_nac(i32 %a, i32 %b) { + %z = call i32 @llvm.hexagon.S2.asl.i.r.nac(i32 %a, i32 %b, i32 0) + ret i32 %z +} +; CHECK: r0 -= asl(r1, #0) + +declare i32 @llvm.hexagon.S2.asr.i.r.acc(i32, i32, i32) +define i32 @S2_asr_i_r_acc(i32 %a, i32 %b) { + %z = call i32 @llvm.hexagon.S2.asr.i.r.acc(i32 %a, i32 %b, i32 0) + ret i32 %z +} +; CHECK: r0 += asr(r1, #0) + +declare i32 @llvm.hexagon.S2.lsr.i.r.acc(i32, i32, i32) +define i32 @S2_lsr_i_r_acc(i32 %a, i32 %b) { + %z = call i32 @llvm.hexagon.S2.lsr.i.r.acc(i32 %a, i32 %b, i32 0) + ret i32 %z +} +; CHECK: r0 += lsr(r1, #0) + +declare i32 @llvm.hexagon.S2.asl.i.r.acc(i32, i32, i32) +define i32 @S2_asl_i_r_acc(i32 %a, i32 %b) { + %z = call i32 @llvm.hexagon.S2.asl.i.r.acc(i32 %a, i32 %b, i32 0) + ret i32 %z +} +; CHECK: r0 += asl(r1, #0) + +; Shift by immediate and add +declare i32 @llvm.hexagon.S4.addi.asl.ri(i32, i32, i32) +define i32 @S4_addi_asl_ri(i32 %a) { + %z = call i32 @llvm.hexagon.S4.addi.asl.ri(i32 0, i32 %a, i32 0) + ret i32 %z +} +; CHECK: r0 = add(#0, asl(r0, #0)) + +declare i32 @llvm.hexagon.S4.subi.asl.ri(i32, i32, i32) +define i32 @S4_subi_asl_ri(i32 %a) { + %z = call i32 @llvm.hexagon.S4.subi.asl.ri(i32 0, i32 %a, i32 0) + ret i32 %z +} +; CHECK: r0 = sub(#0, asl(r0, #0)) + +declare i32 @llvm.hexagon.S4.addi.lsr.ri(i32, i32, i32) +define i32 @S4_addi_lsr_ri(i32 %a) { + %z = call i32 @llvm.hexagon.S4.addi.lsr.ri(i32 0, i32 %a, i32 0) + ret i32 %z +} +; CHECK: r0 = add(#0, lsr(r0, #0)) + +declare i32 @llvm.hexagon.S4.subi.lsr.ri(i32, i32, i32) +define i32 @S4_subi_lsr_ri(i32 %a) { + %z = call i32 @llvm.hexagon.S4.subi.lsr.ri(i32 0, i32 %a, i32 0) + ret i32 %z +} +; CHECK: r0 = sub(#0, lsr(r0, #0)) + +declare i32 @llvm.hexagon.S2.addasl.rrri(i32, i32, i32) +define i32 @S2_addasl_rrri(i32 %a, i32 %b) { + %z = call i32 @llvm.hexagon.S2.addasl.rrri(i32 %a, i32 %b, i32 0) + ret i32 %z +} +; CHECK: r0 = addasl(r0, r1, #0) + +; Shift by immediate and logical +declare i64 @llvm.hexagon.S2.asr.i.p.and(i64, i64, i32) +define i64 @S2_asr_i_p_and(i64 %a, i64 %b) { + %z = call i64 @llvm.hexagon.S2.asr.i.p.and(i64 %a, i64 %b, i32 0) + ret i64 %z +} +; CHECK: r1:0 &= asr(r3:2, #0) + +declare i64 @llvm.hexagon.S2.lsr.i.p.and(i64, i64, i32) +define i64 @S2_lsr_i_p_and(i64 %a, i64 %b) { + %z = call i64 @llvm.hexagon.S2.lsr.i.p.and(i64 %a, i64 %b, i32 0) + ret i64 %z +} +; CHECK: r1:0 &= lsr(r3:2, #0) + +declare i64 @llvm.hexagon.S2.asl.i.p.and(i64, i64, i32) +define i64 @S2_asl_i_p_and(i64 %a, i64 %b) { + %z = call i64 @llvm.hexagon.S2.asl.i.p.and(i64 %a, i64 %b, i32 0) + ret i64 %z +} +; CHECK: r1:0 &= asl(r3:2, #0) + +declare i64 @llvm.hexagon.S2.asr.i.p.or(i64, i64, i32) +define i64 @S2_asr_i_p_or(i64 %a, i64 %b) { + %z = call i64 @llvm.hexagon.S2.asr.i.p.or(i64 %a, i64 %b, i32 0) + ret i64 %z +} +; CHECK: r1:0 |= asr(r3:2, #0) + +declare i64 @llvm.hexagon.S2.lsr.i.p.or(i64, i64, i32) +define i64 @S2_lsr_i_p_or(i64 %a, i64 %b) { + %z = call i64 @llvm.hexagon.S2.lsr.i.p.or(i64 %a, i64 %b, i32 0) + ret i64 %z +} +; CHECK: r1:0 |= lsr(r3:2, #0) + +declare i64 @llvm.hexagon.S2.asl.i.p.or(i64, i64, i32) +define i64 @S2_asl_i_p_or(i64 %a, i64 %b) { + %z = call i64 @llvm.hexagon.S2.asl.i.p.or(i64 %a, i64 %b, i32 0) + ret i64 %z +} +; CHECK: r1:0 |= asl(r3:2, #0) + +declare i64 @llvm.hexagon.S2.lsr.i.p.xacc(i64, i64, i32) +define i64 @S2_lsr_i_p_xacc(i64 %a, i64 %b) { + %z = call i64 @llvm.hexagon.S2.lsr.i.p.xacc(i64 %a, i64 %b, i32 0) + ret i64 %z +} +; CHECK: r1:0 ^= lsr(r3:2, #0) + 
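+; The "xacc" forms checked in this group are XOR-accumulate: the shifted
+; value is XORed into the destination pair. A minimal sketch of the same
+; computation in plain IR (illustration only; a non-zero count of 4 is
+; assumed for readability, while the tests themselves pass #0):
+;   %s = lshr i64 %b, 4   ; what lsr(r3:2, #4) produces
+;   %z = xor i64 %a, %s   ; what "r1:0 ^= lsr(r3:2, #4)" leaves in r1:0
+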
+declare i64 @llvm.hexagon.S2.asl.i.p.xacc(i64, i64, i32) +define i64 @S2_asl_i_p_xacc(i64 %a, i64 %b) { + %z = call i64 @llvm.hexagon.S2.asl.i.p.xacc(i64 %a, i64 %b, i32 0) + ret i64 %z +} +; CHECK: r1:0 ^= asl(r3:2, #0) + +declare i32 @llvm.hexagon.S2.asr.i.r.and(i32, i32, i32) +define i32 @S2_asr_i_r_and(i32 %a, i32 %b) { + %z = call i32 @llvm.hexagon.S2.asr.i.r.and(i32 %a, i32 %b, i32 0) + ret i32 %z +} +; CHECK: r0 &= asr(r1, #0) + +declare i32 @llvm.hexagon.S2.lsr.i.r.and(i32, i32, i32) +define i32 @S2_lsr_i_r_and(i32 %a, i32 %b) { + %z = call i32 @llvm.hexagon.S2.lsr.i.r.and(i32 %a, i32 %b, i32 0) + ret i32 %z +} +; CHECK: r0 &= lsr(r1, #0) + +declare i32 @llvm.hexagon.S2.asl.i.r.and(i32, i32, i32) +define i32 @S2_asl_i_r_and(i32 %a, i32 %b) { + %z = call i32 @llvm.hexagon.S2.asl.i.r.and(i32 %a, i32 %b, i32 0) + ret i32 %z +} +; CHECK: r0 &= asl(r1, #0) + +declare i32 @llvm.hexagon.S2.asr.i.r.or(i32, i32, i32) +define i32 @S2_asr_i_r_or(i32 %a, i32 %b) { + %z = call i32 @llvm.hexagon.S2.asr.i.r.or(i32 %a, i32 %b, i32 0) + ret i32 %z +} +; CHECK: r0 |= asr(r1, #0) + +declare i32 @llvm.hexagon.S2.lsr.i.r.or(i32, i32, i32) +define i32 @S2_lsr_i_r_or(i32 %a, i32 %b) { + %z = call i32 @llvm.hexagon.S2.lsr.i.r.or(i32 %a, i32 %b, i32 0) + ret i32 %z +} +; CHECK: r0 |= lsr(r1, #0) + +declare i32 @llvm.hexagon.S2.asl.i.r.or(i32, i32, i32) +define i32 @S2_asl_i_r_or(i32%a, i32 %b) { + %z = call i32 @llvm.hexagon.S2.asl.i.r.or(i32 %a, i32 %b, i32 0) + ret i32 %z +} +; CHECK: r0 |= asl(r1, #0) + +declare i32 @llvm.hexagon.S2.lsr.i.r.xacc(i32, i32, i32) +define i32 @S2_lsr_i_r_xacc(i32 %a, i32 %b) { + %z = call i32 @llvm.hexagon.S2.lsr.i.r.xacc(i32%a, i32 %b, i32 0) + ret i32 %z +} +; CHECK: r0 ^= lsr(r1, #0) + +declare i32 @llvm.hexagon.S2.asl.i.r.xacc(i32, i32, i32) +define i32 @S2_asl_i_r_xacc(i32 %a, i32 %b) { + %z = call i32 @llvm.hexagon.S2.asl.i.r.xacc(i32 %a, i32 %b, i32 0) + ret i32 %z +} +; CHECK: r0 ^= asl(r1, #0) + +declare i32 @llvm.hexagon.S4.andi.asl.ri(i32, i32, i32) +define i32 @S4_andi_asl_ri(i32 %a) { + %z = call i32 @llvm.hexagon.S4.andi.asl.ri(i32 0, i32 %a, i32 0) + ret i32 %z +} +; CHECK: r0 = and(#0, asl(r0, #0)) + +declare i32 @llvm.hexagon.S4.ori.asl.ri(i32, i32, i32) +define i32 @S4_ori_asl_ri(i32 %a) { + %z = call i32 @llvm.hexagon.S4.ori.asl.ri(i32 0, i32 %a, i32 0) + ret i32 %z +} +; CHECK: r0 = or(#0, asl(r0, #0)) + +declare i32 @llvm.hexagon.S4.andi.lsr.ri(i32, i32, i32) +define i32 @S4_andi_lsr_ri(i32 %a) { + %z = call i32 @llvm.hexagon.S4.andi.lsr.ri(i32 0, i32 %a, i32 0) + ret i32 %z +} +; CHECK: r0 = and(#0, lsr(r0, #0)) + +declare i32 @llvm.hexagon.S4.ori.lsr.ri(i32, i32, i32) +define i32 @S4_ori_lsr_ri(i32 %a) { + %z = call i32 @llvm.hexagon.S4.ori.lsr.ri(i32 0, i32 %a, i32 0) + ret i32 %z +} +; CHECK: r0 = or(#0, lsr(r0, #0)) + +; Shift right by immediate with rounding +declare i64 @llvm.hexagon.S2.asr.i.p.rnd(i64, i32) +define i64 @S2_asr_i_p_rnd(i64 %a) { + %z = call i64 @llvm.hexagon.S2.asr.i.p.rnd(i64 %a, i32 0) + ret i64 %z +} +; CHECK: r1:0 = asr(r1:0, #0):rnd + +declare i32 @llvm.hexagon.S2.asr.i.r.rnd(i32, i32) +define i32 @S2_asr_i_r_rnd(i32 %a) { + %z = call i32 @llvm.hexagon.S2.asr.i.r.rnd(i32 %a, i32 0) + ret i32 %z +} +; CHECK: r0 = asr(r0, #0):rnd + +; Shift left by immediate with saturation +declare i32 @llvm.hexagon.S2.asl.i.r.sat(i32, i32) +define i32 @S2_asl_i_r_sat(i32 %a) { + %z = call i32 @llvm.hexagon.S2.asl.i.r.sat(i32 %a, i32 0) + ret i32 %z +} +; CHECK: r0 = asl(r0, #0):sat + +; Shift by register +declare i64 
@llvm.hexagon.S2.asr.r.p(i64, i32) +define i64 @S2_asr_r_p(i64 %a, i32 %b) { + %z = call i64 @llvm.hexagon.S2.asr.r.p(i64 %a, i32 %b) + ret i64 %z +} +; CHECK: r1:0 = asr(r1:0, r2) + +declare i64 @llvm.hexagon.S2.lsr.r.p(i64, i32) +define i64 @S2_lsr_r_p(i64 %a, i32 %b) { + %z = call i64 @llvm.hexagon.S2.lsr.r.p(i64 %a, i32 %b) + ret i64 %z +} +; CHECK: r1:0 = lsr(r1:0, r2) + +declare i64 @llvm.hexagon.S2.asl.r.p(i64, i32) +define i64 @S2_asl_r_p(i64 %a, i32 %b) { + %z = call i64 @llvm.hexagon.S2.asl.r.p(i64 %a, i32 %b) + ret i64 %z +} +; CHECK: r1:0 = asl(r1:0, r2) + +declare i64 @llvm.hexagon.S2.lsl.r.p(i64, i32) +define i64 @S2_lsl_r_p(i64 %a, i32 %b) { + %z = call i64 @llvm.hexagon.S2.lsl.r.p(i64 %a, i32 %b) + ret i64 %z +} +; CHECK: r1:0 = lsl(r1:0, r2) + +declare i32 @llvm.hexagon.S2.asr.r.r(i32, i32) +define i32 @S2_asr_r_r(i32 %a, i32 %b) { + %z = call i32 @llvm.hexagon.S2.asr.r.r(i32 %a, i32 %b) + ret i32 %z +} +; CHECK: r0 = asr(r0, r1) + +declare i32 @llvm.hexagon.S2.lsr.r.r(i32, i32) +define i32 @S2_lsr_r_r(i32 %a, i32 %b) { + %z = call i32 @llvm.hexagon.S2.lsr.r.r(i32 %a, i32 %b) + ret i32 %z +} +; CHECK: r0 = lsr(r0, r1) + +declare i32 @llvm.hexagon.S2.asl.r.r(i32, i32) +define i32 @S2_asl_r_r(i32 %a, i32 %b) { + %z = call i32 @llvm.hexagon.S2.asl.r.r(i32 %a, i32 %b) + ret i32 %z +} +; CHECK: r0 = asl(r0, r1) + +declare i32 @llvm.hexagon.S2.lsl.r.r(i32, i32) +define i32 @S2_lsl_r_r(i32 %a, i32 %b) { + %z = call i32 @llvm.hexagon.S2.lsl.r.r(i32 %a, i32 %b) + ret i32 %z +} +; CHECK: r0 = lsl(r0, r1) + +declare i32 @llvm.hexagon.S4.lsli(i32, i32) +define i32 @S4_lsli(i32 %a) { + %z = call i32 @llvm.hexagon.S4.lsli(i32 0, i32 %a) + ret i32 %z +} +; CHECK: r0 = lsl(#0, r0) + +; Shift by register and accumulate +declare i64 @llvm.hexagon.S2.asr.r.p.nac(i64, i64, i32) +define i64 @S2_asr_r_p_nac(i64 %a, i64 %b, i32 %c) { + %z = call i64 @llvm.hexagon.S2.asr.r.p.nac(i64 %a, i64 %b, i32 %c) + ret i64 %z +} +; CHECK: r1:0 -= asr(r3:2, r4) + +declare i64 @llvm.hexagon.S2.lsr.r.p.nac(i64, i64, i32) +define i64 @S2_lsr_r_p_nac(i64 %a, i64 %b, i32 %c) { + %z = call i64 @llvm.hexagon.S2.lsr.r.p.nac(i64 %a, i64 %b, i32 %c) + ret i64 %z +} +; CHECK: r1:0 -= lsr(r3:2, r4) + +declare i64 @llvm.hexagon.S2.asl.r.p.nac(i64, i64, i32) +define i64 @S2_asl_r_p_nac(i64 %a, i64 %b, i32 %c) { + %z = call i64 @llvm.hexagon.S2.asl.r.p.nac(i64 %a, i64 %b, i32 %c) + ret i64 %z +} +; CHECK: r1:0 -= asl(r3:2, r4) + +declare i64 @llvm.hexagon.S2.lsl.r.p.nac(i64, i64, i32) +define i64 @S2_lsl_r_p_nac(i64 %a, i64 %b, i32 %c) { + %z = call i64 @llvm.hexagon.S2.lsl.r.p.nac(i64 %a, i64 %b, i32 %c) + ret i64 %z +} +; CHECK: r1:0 -= lsl(r3:2, r4) + +declare i64 @llvm.hexagon.S2.asr.r.p.acc(i64, i64, i32) +define i64 @S2_asr_r_p_acc(i64 %a, i64 %b, i32 %c) { + %z = call i64 @llvm.hexagon.S2.asr.r.p.acc(i64 %a, i64 %b, i32 %c) + ret i64 %z +} +; CHECK: r1:0 += asr(r3:2, r4) + +declare i64 @llvm.hexagon.S2.lsr.r.p.acc(i64, i64, i32) +define i64 @S2_lsr_r_p_acc(i64 %a, i64 %b, i32 %c) { + %z = call i64 @llvm.hexagon.S2.lsr.r.p.acc(i64 %a, i64 %b, i32 %c) + ret i64 %z +} +; CHECK: r1:0 += lsr(r3:2, r4) + +declare i64 @llvm.hexagon.S2.asl.r.p.acc(i64, i64, i32) +define i64 @S2_asl_r_p_acc(i64 %a, i64 %b, i32 %c) { + %z = call i64 @llvm.hexagon.S2.asl.r.p.acc(i64 %a, i64 %b, i32 %c) + ret i64 %z +} +; CHECK: r1:0 += asl(r3:2, r4) + +declare i64 @llvm.hexagon.S2.lsl.r.p.acc(i64, i64, i32) +define i64 @S2_lsl_r_p_acc(i64 %a, i64 %b, i32 %c) { + %z = call i64 @llvm.hexagon.S2.lsl.r.p.acc(i64 %a, i64 %b, i32 %c) + ret i64 %z +} 
+; CHECK: r1:0 += lsl(r3:2, r4) + +declare i32 @llvm.hexagon.S2.asr.r.r.nac(i32, i32, i32) +define i32 @S2_asr_r_r_nac(i32 %a, i32 %b, i32 %c) { + %z = call i32 @llvm.hexagon.S2.asr.r.r.nac(i32 %a, i32 %b, i32 %c) + ret i32 %z +} +; CHECK: r0 -= asr(r1, r2) + +declare i32 @llvm.hexagon.S2.lsr.r.r.nac(i32, i32, i32) +define i32 @S2_lsr_r_r_nac(i32 %a, i32 %b, i32 %c) { + %z = call i32 @llvm.hexagon.S2.lsr.r.r.nac(i32 %a, i32 %b, i32 %c) + ret i32 %z +} +; CHECK: r0 -= lsr(r1, r2) + +declare i32 @llvm.hexagon.S2.asl.r.r.nac(i32, i32, i32) +define i32 @S2_asl_r_r_nac(i32 %a, i32 %b, i32 %c) { + %z = call i32 @llvm.hexagon.S2.asl.r.r.nac(i32 %a, i32 %b, i32 %c) + ret i32 %z +} +; CHECK: r0 -= asl(r1, r2) + +declare i32 @llvm.hexagon.S2.lsl.r.r.nac(i32, i32, i32) +define i32 @S2_lsl_r_r_nac(i32 %a, i32 %b, i32 %c) { + %z = call i32 @llvm.hexagon.S2.lsl.r.r.nac(i32 %a, i32 %b, i32 %c) + ret i32 %z +} +; CHECK: r0 -= lsl(r1, r2) + +declare i32 @llvm.hexagon.S2.asr.r.r.acc(i32, i32, i32) +define i32 @S2_asr_r_r_acc(i32 %a, i32 %b, i32 %c) { + %z = call i32 @llvm.hexagon.S2.asr.r.r.acc(i32 %a, i32 %b, i32 %c) + ret i32 %z +} +; CHECK: r0 += asr(r1, r2) + +declare i32 @llvm.hexagon.S2.lsr.r.r.acc(i32, i32, i32) +define i32 @S2_lsr_r_r_acc(i32 %a, i32 %b, i32 %c) { + %z = call i32 @llvm.hexagon.S2.lsr.r.r.acc(i32 %a, i32 %b, i32 %c) + ret i32 %z +} +; CHECK: r0 += lsr(r1, r2) + +declare i32 @llvm.hexagon.S2.asl.r.r.acc(i32, i32, i32) +define i32 @S2_asl_r_r_acc(i32 %a, i32 %b, i32 %c) { + %z = call i32 @llvm.hexagon.S2.asl.r.r.acc(i32 %a, i32 %b, i32 %c) + ret i32 %z +} +; CHECK: r0 += asl(r1, r2) + +declare i32 @llvm.hexagon.S2.lsl.r.r.acc(i32, i32, i32) +define i32 @S2_lsl_r_r_acc(i32 %a, i32 %b, i32 %c) { + %z = call i32 @llvm.hexagon.S2.lsl.r.r.acc(i32 %a, i32 %b, i32 %c) + ret i32 %z +} +; CHECK: r0 += lsl(r1, r2) + +; Shift by register and logical +declare i64 @llvm.hexagon.S2.asr.r.p.or(i64, i64, i32) +define i64 @S2_asr_r_p_or(i64 %a, i64 %b, i32 %c) { + %z = call i64 @llvm.hexagon.S2.asr.r.p.or(i64 %a, i64 %b, i32 %c) + ret i64 %z +} +; CHECK: r1:0 |= asr(r3:2, r4) + +declare i64 @llvm.hexagon.S2.lsr.r.p.or(i64, i64, i32) +define i64 @S2_lsr_r_p_or(i64 %a, i64 %b, i32 %c) { + %z = call i64 @llvm.hexagon.S2.lsr.r.p.or(i64 %a, i64 %b, i32 %c) + ret i64 %z +} +; CHECK: r1:0 |= lsr(r3:2, r4) + +declare i64 @llvm.hexagon.S2.asl.r.p.or(i64, i64, i32) +define i64 @S2_asl_r_p_or(i64 %a, i64 %b, i32 %c) { + %z = call i64 @llvm.hexagon.S2.asl.r.p.or(i64 %a, i64 %b, i32 %c) + ret i64 %z +} +; CHECK: r1:0 |= asl(r3:2, r4) + +declare i64 @llvm.hexagon.S2.lsl.r.p.or(i64, i64, i32) +define i64 @S2_lsl_r_p_or(i64 %a, i64 %b, i32 %c) { + %z = call i64 @llvm.hexagon.S2.lsl.r.p.or(i64 %a, i64 %b, i32 %c) + ret i64 %z +} +; CHECK: r1:0 |= lsl(r3:2, r4) + +declare i64 @llvm.hexagon.S2.asr.r.p.and(i64, i64, i32) +define i64 @S2_asr_r_p_and(i64 %a, i64 %b, i32 %c) { + %z = call i64 @llvm.hexagon.S2.asr.r.p.and(i64 %a, i64 %b, i32 %c) + ret i64 %z +} +; CHECK: r1:0 &= asr(r3:2, r4) + +declare i64 @llvm.hexagon.S2.lsr.r.p.and(i64, i64, i32) +define i64 @S2_lsr_r_p_and(i64 %a, i64 %b, i32 %c) { + %z = call i64 @llvm.hexagon.S2.lsr.r.p.and(i64 %a, i64 %b, i32 %c) + ret i64 %z +} +; CHECK: r1:0 &= lsr(r3:2, r4) + +declare i64 @llvm.hexagon.S2.asl.r.p.and(i64, i64, i32) +define i64 @S2_asl_r_p_and(i64 %a, i64 %b, i32 %c) { + %z = call i64 @llvm.hexagon.S2.asl.r.p.and(i64 %a, i64 %b, i32 %c) + ret i64 %z +} +; CHECK: r1:0 &= asl(r3:2, r4) + +declare i64 @llvm.hexagon.S2.lsl.r.p.and(i64, i64, i32) +define i64 
@S2_lsl_r_p_and(i64 %a, i64 %b, i32 %c) { + %z = call i64 @llvm.hexagon.S2.lsl.r.p.and(i64 %a, i64 %b, i32 %c) + ret i64 %z +} +; CHECK: r1:0 &= lsl(r3:2, r4) + +declare i32 @llvm.hexagon.S2.asr.r.r.or(i32, i32, i32) +define i32 @S2_asr_r_r_or(i32 %a, i32 %b, i32 %c) { + %z = call i32 @llvm.hexagon.S2.asr.r.r.or(i32 %a, i32 %b, i32 %c) + ret i32 %z +} +; CHECK: r0 |= asr(r1, r2) + +declare i32 @llvm.hexagon.S2.lsr.r.r.or(i32, i32, i32) +define i32 @S2_lsr_r_r_or(i32 %a, i32 %b, i32 %c) { + %z = call i32 @llvm.hexagon.S2.lsr.r.r.or(i32 %a, i32 %b, i32 %c) + ret i32 %z +} +; CHECK: r0 |= lsr(r1, r2) + +declare i32 @llvm.hexagon.S2.asl.r.r.or(i32, i32, i32) +define i32 @S2_asl_r_r_or(i32%a, i32 %b, i32 %c) { + %z = call i32 @llvm.hexagon.S2.asl.r.r.or(i32 %a, i32 %b, i32 %c) + ret i32 %z +} +; CHECK: r0 |= asl(r1, r2) + +declare i32 @llvm.hexagon.S2.lsl.r.r.or(i32, i32, i32) +define i32 @S2_lsl_r_r_or(i32%a, i32 %b, i32 %c) { + %z = call i32 @llvm.hexagon.S2.lsl.r.r.or(i32 %a, i32 %b, i32 %c) + ret i32 %z +} +; CHECK: r0 |= lsl(r1, r2) + +declare i32 @llvm.hexagon.S2.asr.r.r.and(i32, i32, i32) +define i32 @S2_asr_r_r_and(i32 %a, i32 %b, i32 %c) { + %z = call i32 @llvm.hexagon.S2.asr.r.r.and(i32 %a, i32 %b, i32 %c) + ret i32 %z +} +; CHECK: r0 &= asr(r1, r2) + +declare i32 @llvm.hexagon.S2.lsr.r.r.and(i32, i32, i32) +define i32 @S2_lsr_r_r_and(i32 %a, i32 %b, i32 %c) { + %z = call i32 @llvm.hexagon.S2.lsr.r.r.and(i32 %a, i32 %b, i32 %c) + ret i32 %z +} +; CHECK: r0 &= lsr(r1, r2) + +declare i32 @llvm.hexagon.S2.asl.r.r.and(i32, i32, i32) +define i32 @S2_asl_r_r_and(i32 %a, i32 %b, i32 %c) { + %z = call i32 @llvm.hexagon.S2.asl.r.r.and(i32 %a, i32 %b, i32 %c) + ret i32 %z +} +; CHECK: r0 &= asl(r1, r2) + +declare i32 @llvm.hexagon.S2.lsl.r.r.and(i32, i32, i32) +define i32 @S2_lsl_r_r_and(i32 %a, i32 %b, i32 %c) { + %z = call i32 @llvm.hexagon.S2.lsl.r.r.and(i32 %a, i32 %b, i32 %c) + ret i32 %z +} +; CHECK: r0 &= lsl(r1, r2) + +; Shift by register with saturation +declare i32 @llvm.hexagon.S2.asr.r.r.sat(i32, i32) +define i32 @S2_asr_r_r_sat(i32 %a, i32 %b) { + %z = call i32 @llvm.hexagon.S2.asr.r.r.sat(i32 %a, i32 %b) + ret i32 %z +} +; CHECK: r0 = asr(r0, r1):sat + +declare i32 @llvm.hexagon.S2.asl.r.r.sat(i32, i32) +define i32 @S2_asl_r_r_sat(i32 %a, i32 %b) { + %z = call i32 @llvm.hexagon.S2.asl.r.r.sat(i32 %a, i32 %b) + ret i32 %z +} +; CHECK: r0 = asl(r0, r1):sat diff --git a/llvm/test/MC/Disassembler/Hexagon/xtype_shift.txt b/llvm/test/MC/Disassembler/Hexagon/xtype_shift.txt index 9912fd3..a057738 100644 --- a/llvm/test/MC/Disassembler/Hexagon/xtype_shift.txt +++ b/llvm/test/MC/Disassembler/Hexagon/xtype_shift.txt @@ -70,6 +70,10 @@ # CHECK: r17 = and(#21, lsr(r17, #31)) 0x5a 0xff 0x11 0xde # CHECK: r17 = or(#21, lsr(r17, #31)) +0xf0 0xdf 0xd4 0x80 +# CHECK: r17:16 = asr(r21:20, #31):rnd +0x11 0xdf 0x55 0x8c +# CHECK: r17 = asr(r21, #31):rnd 0x11 0xdf 0x55 0x8e # CHECK: r17 &= asr(r21, #31) 0x31 0xdf 0x55 0x8e -- 2.7.4
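
Note on the mechanics of the replacement: each deleted HEXAGON_* wrapper
(si_SInst_sisiu5_acc and friends) declared a separate instruction per
intrinsic, whereas the T_*_pat classes map an intrinsic directly onto the
canonical instruction definition. A minimal sketch of one such mapping,
using the T_RRI_pat class from HexagonIntrinsics.td (the instantiation
below is illustrative; S2_asl_i_r_acc and its intrinsic are existing defs):

    class T_RRI_pat <InstHexagon MI, Intrinsic IntID>
      : Pat <(IntID I32:$Rs, I32:$Rt, imm:$Iu),
             (MI I32:$Rs, I32:$Rt, imm:$Iu)>;

    // Selects int_hexagon_S2_asl_i_r_acc(rx, rs, #u) to "rx += asl(rs, #u)".
    def : T_RRI_pat <S2_asl_i_r_acc, int_hexagon_S2_asl_i_r_acc>;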