From: Colin LeMahieu Date: Wed, 28 Jan 2015 18:06:23 +0000 (+0000) Subject: [Hexagon] Converting XTYPE/BIT intrinsic patterns and adding tests. X-Git-Tag: llvmorg-3.7.0-rc1~13834 X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=39b846ce0f72a9d460725385c8774dba4f707852;p=platform%2Fupstream%2Fllvm.git [Hexagon] Converting XTYPE/BIT intrinsic patterns and adding tests. llvm-svn: 227335 --- diff --git a/llvm/lib/Target/Hexagon/HexagonIntrinsics.td b/llvm/lib/Target/Hexagon/HexagonIntrinsics.td index f11fc7d..ba4cfae 100644 --- a/llvm/lib/Target/Hexagon/HexagonIntrinsics.td +++ b/llvm/lib/Target/Hexagon/HexagonIntrinsics.td @@ -21,6 +21,10 @@ class T_R_pat : Pat <(IntID I32:$Rs), (MI I32:$Rs)>; +class T_P_pat + : Pat <(IntID I64:$Rs), + (MI DoubleRegs:$Rs)>; + class T_II_pat : Pat<(IntID Imm1:$Is, Imm2:$It), (MI Imm1:$Is, Imm2:$It)>; @@ -37,6 +41,10 @@ class T_PI_pat : Pat<(IntID I64:$Rs, imm:$It), (MI DoubleRegs:$Rs, imm:$It)>; +class T_RP_pat + : Pat<(IntID I32:$Rs, I64:$Rt), + (MI I32:$Rs, DoubleRegs:$Rt)>; + class T_RR_pat : Pat <(IntID I32:$Rs, I32:$Rt), (MI I32:$Rs, I32:$Rt)>; @@ -61,6 +69,10 @@ class T_RRI_pat : Pat <(IntID I32:$Rs, I32:$Rt, imm:$Iu), (MI I32:$Rs, I32:$Rt, imm:$Iu)>; +class T_RII_pat + : Pat <(IntID I32:$Rs, imm:$It, imm:$Iu), + (MI I32:$Rs, imm:$It, imm:$Iu)>; + class T_IRI_pat : Pat <(IntID imm:$It, I32:$Rs, imm:$Iu), (MI imm:$It, I32:$Rs, imm:$Iu)>; @@ -73,6 +85,10 @@ class T_PPI_pat : Pat <(IntID I64:$Rs, I64:$Rt, imm:$Iu), (MI DoubleRegs:$Rs, DoubleRegs:$Rt, imm:$Iu)>; +class T_PII_pat + : Pat <(IntID I64:$Rs, imm:$It, imm:$Iu), + (MI DoubleRegs:$Rs, imm:$It, imm:$Iu)>; + class T_PPR_pat : Pat <(IntID I64:$Rs, I64:$Rt, I32:$Ru), (MI DoubleRegs:$Rs, DoubleRegs:$Rt, I32:$Ru)>; @@ -549,6 +565,17 @@ def: T_PP_pat; def: T_PP_pat; def: T_RR_pat; +// MPY - Multiply and use full result +// Rdd = mpy[u](Rs, Rt) +def : T_RR_pat ; +def : T_RR_pat ; + +// Rxx[+-]= mpy[u](Rs,Rt) +def : T_PRR_pat ; +def : T_PRR_pat ; +def : T_PRR_pat ; +def : T_PRR_pat ; + // Multiply 32x32 and use lower result def : T_RRI_pat ; def : T_RRI_pat ; @@ -566,9 +593,98 @@ def : T_RRI_pat ; // XOR and XOR with destination def : T_RRR_pat ; +class MType_R32_pat : + Pat <(IntID IntRegs:$src1, IntRegs:$src2), + (OutputInst IntRegs:$src1, IntRegs:$src2)>; + +// Multiply and use lower result +def : MType_R32_pat ; +def : T_RI_pat; + +// Assembler mapped from Rd32=mpyui(Rs32,Rt32) to Rd32=mpyi(Rs32,Rt32) +def : MType_R32_pat ; + +// Multiply and use upper result +def : MType_R32_pat ; +def : MType_R32_pat ; +def : MType_R32_pat ; +def : MType_R32_pat ; +def : MType_R32_pat ; + +/******************************************************************** +* STYPE/ALU * +*********************************************************************/ +def : T_P_pat ; +def : T_P_pat ; +def : T_P_pat ; + +/******************************************************************** +* STYPE/BIT * +*********************************************************************/ + +// Count leading/trailing +def: T_R_pat; +def: T_P_pat; +def: T_R_pat; +def: T_P_pat; +def: T_R_pat; +def: T_P_pat; +def: T_R_pat; +def: T_R_pat; +def: T_R_pat; + +// Compare bit mask +def: T_RR_pat; +def: T_RI_pat; +def: T_RR_pat; + // Shift by immediate and add def : T_RRI_pat; +// Extract bitfield +def : T_PII_pat; +def : T_RII_pat; +def : T_RP_pat ; +def : T_PP_pat ; + +// Insert bitfield +def : Pat <(int_hexagon_S2_insert_rp IntRegs:$src1, IntRegs:$src2, + DoubleRegs:$src3), + (S2_insert_rp IntRegs:$src1, IntRegs:$src2, DoubleRegs:$src3)>; + +def : 
Pat<(i64 (int_hexagon_S2_insertp_rp (I64:$src1), + (I64:$src2), (I64:$src3))), + (i64 (S2_insertp_rp (I64:$src1), (I64:$src2), + (I64:$src3)))>; + +def : Pat<(int_hexagon_S2_insert IntRegs:$src1, IntRegs:$src2, + u5ImmPred:$src3, u5ImmPred:$src4), + (S2_insert IntRegs:$src1, IntRegs:$src2, + u5ImmPred:$src3, u5ImmPred:$src4)>; + +def : Pat<(i64 (int_hexagon_S2_insertp (I64:$src1), + (I64:$src2), u6ImmPred:$src3, u6ImmPred:$src4)), + (i64 (S2_insertp (I64:$src1), (I64:$src2), + u6ImmPred:$src3, u6ImmPred:$src4))>; + + +// Innterleave/deinterleave +def : T_P_pat ; +def : T_P_pat ; + +// Set/Clear/Toggle Bit +def: T_RI_pat; +def: T_RI_pat; +def: T_RI_pat; + +def: T_RR_pat; +def: T_RR_pat; +def: T_RR_pat; + +// Test Bit +def: T_RI_pat; +def: T_RR_pat; + /******************************************************************** * STYPE/SHIFT * *********************************************************************/ @@ -2908,8 +3024,6 @@ def Hexagon_M2_mpysmi: si_MInst_sis9 <"mpyi", int_hexagon_M2_mpysmi>; def HEXAGON_M2_mpyi: si_MInst_sisi <"mpyi", int_hexagon_M2_mpyi>; -def HEXAGON_M2_mpyui: - si_MInst_sisi <"mpyui", int_hexagon_M2_mpyui>; def HEXAGON_M2_macsip: si_MInst_sisiu8_acc <"mpyi", int_hexagon_M2_macsip>; def HEXAGON_M2_maci: @@ -2989,32 +3103,6 @@ def HEXAGON_M2_mmacuhs_rs0: def HEXAGON_M2_mmacuhs_s0: di_MInst_dididi_acc_sat <"vmpywouh", int_hexagon_M2_mmacuhs_s0>; -// MTYPE / MPYH / Multiply and use upper result. -def HEXAGON_M2_hmmpyh_rs1: - si_MInst_sisi_h_s1_rnd_sat <"mpy", int_hexagon_M2_hmmpyh_rs1>; -def HEXAGON_M2_hmmpyl_rs1: - si_MInst_sisi_l_s1_rnd_sat <"mpy", int_hexagon_M2_hmmpyl_rs1>; -def HEXAGON_M2_mpy_up: - si_MInst_sisi <"mpy", int_hexagon_M2_mpy_up>; -def HEXAGON_M2_dpmpyss_rnd_s0: - si_MInst_sisi_rnd <"mpy", int_hexagon_M2_dpmpyss_rnd_s0>; -def HEXAGON_M2_mpyu_up: - si_MInst_sisi <"mpyu", int_hexagon_M2_mpyu_up>; - -// MTYPE / MPYH / Multiply and use full result. -def HEXAGON_M2_dpmpyuu_s0: - di_MInst_sisi <"mpyu", int_hexagon_M2_dpmpyuu_s0>; -def HEXAGON_M2_dpmpyuu_acc_s0: - di_MInst_disisi_acc <"mpyu", int_hexagon_M2_dpmpyuu_acc_s0>; -def HEXAGON_M2_dpmpyuu_nac_s0: - di_MInst_disisi_nac <"mpyu", int_hexagon_M2_dpmpyuu_nac_s0>; -def HEXAGON_M2_dpmpyss_s0: - di_MInst_sisi <"mpy", int_hexagon_M2_dpmpyss_s0>; -def HEXAGON_M2_dpmpyss_acc_s0: - di_MInst_disisi_acc <"mpy", int_hexagon_M2_dpmpyss_acc_s0>; -def HEXAGON_M2_dpmpyss_nac_s0: - di_MInst_disisi_nac <"mpy", int_hexagon_M2_dpmpyss_nac_s0>; - /******************************************************************** * MTYPE/VB * *********************************************************************/ @@ -3096,15 +3184,9 @@ def HEXAGON_M2_vrmac_s0: // STYPE / ALU / Absolute value. def HEXAGON_A2_abs: si_SInst_si <"abs", int_hexagon_A2_abs>; -def HEXAGON_A2_absp: - di_SInst_di <"abs", int_hexagon_A2_absp>; def HEXAGON_A2_abssat: si_SInst_si_sat <"abs", int_hexagon_A2_abssat>; -// STYPE / ALU / Logical Not. -def HEXAGON_A2_notp: - di_SInst_di <"not", int_hexagon_A2_notp>; - // STYPE / ALU / Sign extend word to doubleword. def HEXAGON_A2_sxtw: di_SInst_si <"sxtw", int_hexagon_A2_sxtw>; diff --git a/llvm/lib/Target/Hexagon/HexagonIntrinsicsV4.td b/llvm/lib/Target/Hexagon/HexagonIntrinsicsV4.td index 51ddb1b..894ead69 100644 --- a/llvm/lib/Target/Hexagon/HexagonIntrinsicsV4.td +++ b/llvm/lib/Target/Hexagon/HexagonIntrinsicsV4.td @@ -12,6 +12,20 @@ // 80-V9418-12 Rev. 
A // June 15, 2010 +def : T_P_pat ; + +def: T_P_pat ; +def: T_P_pat ; +def: T_RR_pat; +def: T_RR_pat; +def: T_RI_pat; + +// Extract bitfield +def : T_PP_pat ; +def : T_RP_pat ; +def : T_PII_pat ; +def : T_RII_pat ; + // Shift an immediate left by register amount def : T_IR_pat; @@ -25,6 +39,19 @@ def : T_IRI_pat ; def : T_IRI_pat ; def : T_IRI_pat ; +// Split bitfield +def : T_RI_pat ; +def : T_RR_pat ; + +def: T_RR_pat; + +def: T_RI_pat; +def: T_RR_pat; + +def: T_RI_pat; +def: T_PI_pat; +def: T_P_pat ; + // // ALU 32 types. // diff --git a/llvm/test/CodeGen/Hexagon/intrinsics/xtype_bit.ll b/llvm/test/CodeGen/Hexagon/intrinsics/xtype_bit.ll new file mode 100644 index 0000000..a7fc754 --- /dev/null +++ b/llvm/test/CodeGen/Hexagon/intrinsics/xtype_bit.ll @@ -0,0 +1,329 @@ +; RUN: llc -march=hexagon -O0 < %s | FileCheck %s +; Hexagon Programmer's Reference Manual 11.10.2 XTYPE/BIT + +; Count leading +declare i32 @llvm.hexagon.S2.clbp(i64) +define i32 @S2_clbp(i64 %a) { + %z = call i32 @llvm.hexagon.S2.clbp(i64 %a) + ret i32 %z +} +; CHECK: r0 = clb(r1:0) + +declare i32 @llvm.hexagon.S2.cl0p(i64) +define i32 @S2_cl0p(i64 %a) { + %z = call i32 @llvm.hexagon.S2.cl0p(i64 %a) + ret i32 %z +} +; CHECK: r0 = cl0(r1:0) + +declare i32 @llvm.hexagon.S2.cl1p(i64) +define i32 @S2_cl1p(i64 %a) { + %z = call i32 @llvm.hexagon.S2.cl1p(i64 %a) + ret i32 %z +} +; CHECK: r0 = cl1(r1:0) + +declare i32 @llvm.hexagon.S4.clbpnorm(i64) +define i32 @S4_clbpnorm(i64 %a) { + %z = call i32 @llvm.hexagon.S4.clbpnorm(i64 %a) + ret i32 %z +} +; CHECK: r0 = normamt(r1:0) + +declare i32 @llvm.hexagon.S4.clbpaddi(i64, i32) +define i32 @S4_clbpaddi(i64 %a) { + %z = call i32 @llvm.hexagon.S4.clbpaddi(i64 %a, i32 0) + ret i32 %z +} +; CHECK: r0 = add(clb(r1:0), #0) + +declare i32 @llvm.hexagon.S4.clbaddi(i32, i32) +define i32 @S4_clbaddi(i32 %a) { + %z = call i32 @llvm.hexagon.S4.clbaddi(i32 %a, i32 0) + ret i32 %z +} +; CHECK: r0 = add(clb(r0), #0) + +declare i32 @llvm.hexagon.S2.cl0(i32) +define i32 @S2_cl0(i32 %a) { + %z = call i32 @llvm.hexagon.S2.cl0(i32 %a) + ret i32 %z +} +; CHECK: r0 = cl0(r0) + +declare i32 @llvm.hexagon.S2.cl1(i32) +define i32 @S2_cl1(i32 %a) { + %z = call i32 @llvm.hexagon.S2.cl1(i32 %a) + ret i32 %z +} +; CHECK: r0 = cl1(r0) + +declare i32 @llvm.hexagon.S2.clbnorm(i32) +define i32 @S4_clbnorm(i32 %a) { + %z = call i32 @llvm.hexagon.S2.clbnorm(i32 %a) + ret i32 %z +} +; CHECK: r0 = normamt(r0) + +; Count population +declare i32 @llvm.hexagon.S5.popcountp(i64) +define i32 @S5_popcountp(i64 %a) { + %z = call i32 @llvm.hexagon.S5.popcountp(i64 %a) + ret i32 %z +} +; CHECK: r0 = popcount(r1:0) + +; Count trailing +declare i32 @llvm.hexagon.S2.ct0p(i64) +define i32 @S2_ct0p(i64 %a) { + %z = call i32 @llvm.hexagon.S2.ct0p(i64 %a) + ret i32 %z +} +; CHECK: r0 = ct0(r1:0) + +declare i32 @llvm.hexagon.S2.ct1p(i64) +define i32 @S2_ct1p(i64 %a) { + %z = call i32 @llvm.hexagon.S2.ct1p(i64 %a) + ret i32 %z +} +; CHECK: r0 = ct1(r1:0) + +declare i32 @llvm.hexagon.S2.ct0(i32) +define i32 @S2_ct0(i32 %a) { + %z = call i32 @llvm.hexagon.S2.ct0(i32 %a) + ret i32 %z +} +; CHECK: r0 = ct0(r0) + +declare i32 @llvm.hexagon.S2.ct1(i32) +define i32 @S2_ct1(i32 %a) { + %z = call i32 @llvm.hexagon.S2.ct1(i32 %a) + ret i32 %z +} +; CHECK: r0 = ct1(r0) + +; Extract bitfield +declare i64 @llvm.hexagon.S2.extractup(i64, i32, i32) +define i64 @S2_extractup(i64 %a) { + %z = call i64 @llvm.hexagon.S2.extractup(i64 %a, i32 0, i32 0) + ret i64 %z +} +; CHECK: r1:0 = extractu(r1:0, #0, #0) + +declare i64 @llvm.hexagon.S4.extractp(i64, 
i32, i32) +define i64 @S2_extractp(i64 %a) { + %z = call i64 @llvm.hexagon.S4.extractp(i64 %a, i32 0, i32 0) + ret i64 %z +} +; CHECK: r1:0 = extract(r1:0, #0, #0) + +declare i32 @llvm.hexagon.S2.extractu(i32, i32, i32) +define i32 @S2_extractu(i32 %a) { + %z = call i32 @llvm.hexagon.S2.extractu(i32 %a, i32 0, i32 0) + ret i32 %z +} +; CHECK: r0 = extractu(r0, #0, #0) + +declare i32 @llvm.hexagon.S4.extract(i32, i32, i32) +define i32 @S2_extract(i32 %a) { + %z = call i32 @llvm.hexagon.S4.extract(i32 %a, i32 0, i32 0) + ret i32 %z +} +; CHECK: r0 = extract(r0, #0, #0) + +declare i64 @llvm.hexagon.S2.extractup.rp(i64, i64) +define i64 @S2_extractup_rp(i64 %a, i64 %b) { + %z = call i64 @llvm.hexagon.S2.extractup.rp(i64 %a, i64 %b) + ret i64 %z +} +; CHECK: r1:0 = extractu(r1:0, r3:2) + +declare i64 @llvm.hexagon.S4.extractp.rp(i64, i64) +define i64 @S4_extractp_rp(i64 %a, i64 %b) { + %z = call i64 @llvm.hexagon.S4.extractp.rp(i64 %a, i64 %b) + ret i64 %z +} +; CHECK: r1:0 = extract(r1:0, r3:2) + +declare i32 @llvm.hexagon.S2.extractu.rp(i32, i64) +define i32 @S2_extractu_rp(i32 %a, i64 %b) { + %z = call i32 @llvm.hexagon.S2.extractu.rp(i32 %a, i64 %b) + ret i32 %z +} +; CHECK: r0 = extractu(r0, r3:2) + +declare i32 @llvm.hexagon.S4.extract.rp(i32, i64) +define i32 @S4_extract_rp(i32 %a, i64 %b) { + %z = call i32 @llvm.hexagon.S4.extract.rp(i32 %a, i64 %b) + ret i32 %z +} +; CHECK: r0 = extract(r0, r3:2) + +; Insert bitfield +declare i64 @llvm.hexagon.S2.insertp(i64, i64, i32, i32) +define i64 @S2_insertp(i64 %a, i64 %b) { + %z = call i64 @llvm.hexagon.S2.insertp(i64 %a, i64 %b, i32 0, i32 0) + ret i64 %z +} +; CHECK: r1:0 = insert(r3:2, #0, #0) + +declare i32 @llvm.hexagon.S2.insert(i32, i32, i32, i32) +define i32 @S2_insert(i32 %a, i32 %b) { + %z = call i32 @llvm.hexagon.S2.insert(i32 %a, i32 %b, i32 0, i32 0) + ret i32 %z +} +; CHECK: r0 = insert(r1, #0, #0) + +declare i32 @llvm.hexagon.S2.insert.rp(i32, i32, i64) +define i32 @S2_insert_rp(i32 %a, i32 %b, i64 %c) { + %z = call i32 @llvm.hexagon.S2.insert.rp(i32 %a, i32 %b, i64 %c) + ret i32 %z +} +; CHECK: r0 = insert(r1, r3:2) + +declare i64 @llvm.hexagon.S2.insertp.rp(i64, i64, i64) +define i64 @S2_insertp_rp(i64 %a, i64 %b, i64 %c) { + %z = call i64 @llvm.hexagon.S2.insertp.rp(i64 %a, i64 %b, i64 %c) + ret i64 %z +} +; CHECK: r1:0 = insert(r3:2, r5:4) + +; Interleave/deinterleave +declare i64 @llvm.hexagon.S2.deinterleave(i64) +define i64 @S2_deinterleave(i64 %a) { + %z = call i64 @llvm.hexagon.S2.deinterleave(i64 %a) + ret i64 %z +} +; CHECK: r1:0 = deinterleave(r1:0) + +declare i64 @llvm.hexagon.S2.interleave(i64) +define i64 @S2_interleave(i64 %a) { + %z = call i64 @llvm.hexagon.S2.interleave(i64 %a) + ret i64 %z +} +; CHECK: r1:0 = interleave(r1:0) + +; Linear feedback-shift operation +declare i64 @llvm.hexagon.S2.lfsp(i64, i64) +define i64 @S2_lfsp(i64 %a, i64 %b) { + %z = call i64 @llvm.hexagon.S2.lfsp(i64 %a, i64 %b) + ret i64 %z +} +; CHECK: r1:0 = lfs(r1:0, r3:2) + +; Masked parity +declare i32 @llvm.hexagon.S2.parityp(i64, i64) +define i32 @S2_parityp(i64 %a, i64 %b) { + %z = call i32 @llvm.hexagon.S2.parityp(i64 %a, i64 %b) + ret i32 %z +} +; CHECK: r0 = parity(r1:0, r3:2) + +declare i32 @llvm.hexagon.S4.parity(i32, i32) +define i32 @S4_parity(i32 %a, i32 %b) { + %z = call i32 @llvm.hexagon.S4.parity(i32 %a, i32 %b) + ret i32 %z +} +; CHECK: r0 = parity(r0, r1) + +; Bit reverse +declare i64 @llvm.hexagon.S2.brevp(i64) +define i64 @S2_brevp(i64 %a) { + %z = call i64 @llvm.hexagon.S2.brevp(i64 %a) + ret i64 %z +} +; CHECK: r1:0 
= brev(r1:0) + +declare i32 @llvm.hexagon.S2.brev(i32) +define i32 @S2_brev(i32 %a) { + %z = call i32 @llvm.hexagon.S2.brev(i32 %a) + ret i32 %z +} +; CHECK: r0 = brev(r0) + +; Set/clear/toggle bit +declare i32 @llvm.hexagon.S2.setbit.i(i32, i32) +define i32 @S2_setbit_i(i32 %a) { + %z = call i32 @llvm.hexagon.S2.setbit.i(i32 %a, i32 0) + ret i32 %z +} +; CHECK: r0 = setbit(r0, #0) + +declare i32 @llvm.hexagon.S2.clrbit.i(i32, i32) +define i32 @S2_clrbit_i(i32 %a) { + %z = call i32 @llvm.hexagon.S2.clrbit.i(i32 %a, i32 0) + ret i32 %z +} +; CHECK: r0 = clrbit(r0, #0) + +declare i32 @llvm.hexagon.S2.togglebit.i(i32, i32) +define i32 @S2_togglebit_i(i32 %a) { + %z = call i32 @llvm.hexagon.S2.togglebit.i(i32 %a, i32 0) + ret i32 %z +} +; CHECK: r0 = togglebit(r0, #0) + +declare i32 @llvm.hexagon.S2.setbit.r(i32, i32) +define i32 @S2_setbit_r(i32 %a, i32 %b) { + %z = call i32 @llvm.hexagon.S2.setbit.r(i32 %a, i32 %b) + ret i32 %z +} +; CHECK: r0 = setbit(r0, r1) + +declare i32 @llvm.hexagon.S2.clrbit.r(i32, i32) +define i32 @S2_clrbit_r(i32 %a, i32 %b) { + %z = call i32 @llvm.hexagon.S2.clrbit.r(i32 %a, i32 %b) + ret i32 %z +} +; CHECK: r0 = clrbit(r0, r1) + +declare i32 @llvm.hexagon.S2.togglebit.r(i32, i32) +define i32 @S2_togglebit_r(i32 %a, i32 %b) { + %z = call i32 @llvm.hexagon.S2.togglebit.r(i32 %a, i32 %b) + ret i32 %z +} +; CHECK: r0 = togglebit(r0, r1) + +; Split bitfield +declare i64 @llvm.hexagon.A4.bitspliti(i32, i32) +define i64 @A4_bitspliti(i32 %a) { + %z = call i64 @llvm.hexagon.A4.bitspliti(i32 %a, i32 0) + ret i64 %z +} +; CHECK: = bitsplit(r0, #0) + +declare i64 @llvm.hexagon.A4.bitsplit(i32, i32) +define i64 @A4_bitsplit(i32 %a, i32 %b) { + %z = call i64 @llvm.hexagon.A4.bitsplit(i32 %a, i32 %b) + ret i64 %z +} +; CHECK: r1:0 = bitsplit(r0, r1) + +; Table index +declare i32 @llvm.hexagon.S2.tableidxb.goodsyntax(i32, i32, i32, i32) +define i32 @S2_tableidxb_goodsyntax(i32 %a, i32 %b) { + %z = call i32 @llvm.hexagon.S2.tableidxb.goodsyntax(i32 %a, i32 %b, i32 0, i32 0) + ret i32 %z +} +; CHECK: r0 = tableidxb(r1, #0, #0) + +declare i32 @llvm.hexagon.S2.tableidxh.goodsyntax(i32, i32, i32, i32) +define i32 @S2_tableidxh_goodsyntax(i32 %a, i32 %b) { + %z = call i32 @llvm.hexagon.S2.tableidxh.goodsyntax(i32 %a, i32 %b, i32 0, i32 0) + ret i32 %z +} +; CHECK: r0 = tableidxh(r1, #0, #0) + +declare i32 @llvm.hexagon.S2.tableidxw.goodsyntax(i32, i32, i32, i32) +define i32 @S2_tableidxw_goodsyntax(i32 %a, i32 %b) { + %z = call i32 @llvm.hexagon.S2.tableidxw.goodsyntax(i32 %a, i32 %b, i32 0, i32 0) + ret i32 %z +} +; CHECK: r0 = tableidxw(r1, #0, #0) + +declare i32 @llvm.hexagon.S2.tableidxd.goodsyntax(i32, i32, i32, i32) +define i32 @S2_tableidxd_goodsyntax(i32 %a, i32 %b) { + %z = call i32 @llvm.hexagon.S2.tableidxd.goodsyntax(i32 %a, i32 %b, i32 0, i32 0) + ret i32 %z +} +; CHECK: r0 = tableidxd(r1, #0, #0)
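
The MPY full-result and accumulate patterns added above in HexagonIntrinsics.td (T_RR_pat and T_PRR_pat for int_hexagon_M2_dpmpyss_s0, int_hexagon_M2_dpmpyuu_s0 and their acc/nac variants) are not exercised by xtype_bit.ll, which only covers XTYPE/BIT. A minimal sketch of a companion test in the same style is shown below; the file it would live in (e.g. an xtype_mpy.ll) and the exact register/operand text in the CHECK lines are assumptions based on the conventions of the test above, not part of this patch, and would need to be confirmed against actual llc output.

; RUN: llc -march=hexagon -O0 < %s | FileCheck %s

; Multiply and use full result
; Rdd = mpy(Rs, Rt)
declare i64 @llvm.hexagon.M2.dpmpyss.s0(i32, i32)
define i64 @M2_dpmpyss_s0(i32 %a, i32 %b) {
  %z = call i64 @llvm.hexagon.M2.dpmpyss.s0(i32 %a, i32 %b)
  ret i64 %z
}
; CHECK: r1:0 = mpy(r0, r1)

; Rdd = mpyu(Rs, Rt)
declare i64 @llvm.hexagon.M2.dpmpyuu.s0(i32, i32)
define i64 @M2_dpmpyuu_s0(i32 %a, i32 %b) {
  %z = call i64 @llvm.hexagon.M2.dpmpyuu.s0(i32 %a, i32 %b)
  ret i64 %z
}
; CHECK: r1:0 = mpyu(r0, r1)

; Rxx += mpy(Rs, Rt)
declare i64 @llvm.hexagon.M2.dpmpyss.acc.s0(i64, i32, i32)
define i64 @M2_dpmpyss_acc_s0(i64 %a, i32 %b, i32 %c) {
  %z = call i64 @llvm.hexagon.M2.dpmpyss.acc.s0(i64 %a, i32 %b, i32 %c)
  ret i64 %z
}
; CHECK: r1:0 += mpy(r2, r3)

The register assignments assumed in the CHECK lines follow the pattern visible in xtype_bit.ll (an i64 first argument lands in r1:0, subsequent i32 arguments in the next available registers), matching how the new T_RR_pat and T_PRR_pat classes select the DoubleRegs-producing M2_dpmpy* instructions.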