From: Colin LeMahieu
Date: Mon, 19 Jan 2015 18:56:19 +0000 (+0000)
Subject: [Hexagon] Converting intrinsics combine imm/imm, simple shifts and extends.
X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=fcd4569af67b19e04aec436dbd31ad158d6d1e72;p=platform%2Fupstream%2Fllvm.git

[Hexagon] Converting intrinsics combine imm/imm, simple shifts and extends.

llvm-svn: 226483
---

diff --git a/llvm/lib/Target/Hexagon/HexagonIntrinsics.td b/llvm/lib/Target/Hexagon/HexagonIntrinsics.td
index 2b1a812..787e743 100644
--- a/llvm/lib/Target/Hexagon/HexagonIntrinsics.td
+++ b/llvm/lib/Target/Hexagon/HexagonIntrinsics.td
@@ -21,6 +21,10 @@ class T_R_pat <InstHexagon MI, Intrinsic IntID>
   : Pat <(IntID I32:$Rs),
          (MI I32:$Rs)>;
 
+class T_II_pat <InstHexagon MI, Intrinsic IntID, PatLeaf Imm1, PatLeaf Imm2>
+  : Pat<(IntID Imm1:$Is, Imm2:$It),
+        (MI Imm1:$Is, Imm2:$It)>;
+
 class T_RI_pat <InstHexagon MI, Intrinsic IntID, PatLeaf ImmPred = PatLeaf<(i32 imm)>>
   : Pat<(IntID I32:$Rs, ImmPred:$It),
         (MI I32:$Rs, ImmPred:$It)>;
@@ -33,6 +37,18 @@ class T_RR_pat <InstHexagon MI, Intrinsic IntID>
   : Pat <(IntID I32:$Rs, I32:$Rt),
          (MI I32:$Rs, I32:$Rt)>;
 
+class T_QII_pat <InstHexagon MI, Intrinsic IntID, PatLeaf Imm1, PatLeaf Imm2>
+  : Pat <(IntID (i32 PredRegs:$Ps), Imm1:$Is, Imm2:$It),
+         (MI PredRegs:$Ps, Imm1:$Is, Imm2:$It)>;
+
+class T_QRI_pat <InstHexagon MI, Intrinsic IntID, PatLeaf ImmPred>
+  : Pat <(IntID (i32 PredRegs:$Ps), I32:$Rs, ImmPred:$Is),
+         (MI PredRegs:$Ps, I32:$Rs, ImmPred:$Is)>;
+
+class T_QIR_pat <InstHexagon MI, Intrinsic IntID, PatLeaf ImmPred>
+  : Pat <(IntID (i32 PredRegs:$Ps), ImmPred:$Is, I32:$Rs),
+         (MI PredRegs:$Ps, ImmPred:$Is, I32:$Rs)>;
+
 class T_RRR_pat <InstHexagon MI, Intrinsic IntID>
   : Pat <(IntID I32:$Rs, I32:$Rt, I32:$Ru),
          (MI I32:$Rs, I32:$Rt, I32:$Ru)>;
@@ -267,6 +283,32 @@ def : T_I_pat <A2_tfrsi, int_hexagon_A2_tfrsi>;
 def : Pat<(int_hexagon_A2_tfrp DoubleRegs:$src),
           (A2_combinew (HiReg DoubleRegs:$src), (LoReg DoubleRegs:$src))>;
 
+/********************************************************************
+*            ALU32/PERM                                             *
+*********************************************************************/
+// Combine
+def: T_RR_pat<A2_combine_hh, int_hexagon_A2_combine_hh>;
+def: T_RR_pat<A2_combine_hl, int_hexagon_A2_combine_hl>;
+def: T_RR_pat<A2_combine_lh, int_hexagon_A2_combine_lh>;
+def: T_RR_pat<A2_combine_ll, int_hexagon_A2_combine_ll>;
+
+def: T_II_pat<A2_combineii, int_hexagon_A2_combineii, s8ExtPred, s8ImmPred>;
+
+def: Pat<(i32 (int_hexagon_C2_mux (I32:$Rp), (I32:$Rs),
+                                  (I32:$Rt))),
+         (i32 (C2_mux (C2_tfrrp IntRegs:$Rp), IntRegs:$Rs, IntRegs:$Rt))>;
+
+// Shift halfword
+def : T_R_pat<A2_aslh, int_hexagon_A2_aslh>;
+def : T_R_pat<A2_asrh, int_hexagon_A2_asrh>;
+def : T_R_pat<A2_asrh, int_hexagon_SI_to_SXTHI_asrh>;
+
+// Sign/zero extend
+def : T_R_pat<A2_sxtb, int_hexagon_A2_sxtb>;
+def : T_R_pat<A2_sxth, int_hexagon_A2_sxth>;
+def : T_R_pat<A2_zxtb, int_hexagon_A2_zxtb>;
+def : T_R_pat<A2_zxth, int_hexagon_A2_zxth>;
+
 //
 // ALU 32 types.
 //
diff --git a/llvm/test/CodeGen/Hexagon/intrinsics-alu32-2.ll b/llvm/test/CodeGen/Hexagon/intrinsics-alu32-2.ll
index 8da27ee..2d90031 100644
--- a/llvm/test/CodeGen/Hexagon/intrinsics-alu32-2.ll
+++ b/llvm/test/CodeGen/Hexagon/intrinsics-alu32-2.ll
@@ -127,6 +127,15 @@ entry:
   ret void
 }
 
+; CHECK: r{{[0-9]+}}:{{[0-9]+}}{{ *}}={{ *}}combine(##-1280{{ *}},{{ *}}#120)
+
+define void @test25(i32 %a) #0 {
+entry:
+  %0 = tail call i64 @llvm.hexagon.A2.combineii(i32 -1280, i32 120)
+  store i64 %0, i64* @c, align 4
+  ret void
+}
+
 declare i32 @llvm.hexagon.A2.add(i32, i32) #1
 declare i32 @llvm.hexagon.A2.sub(i32, i32) #1
 declare i32 @llvm.hexagon.A2.and(i32, i32) #1
@@ -139,3 +148,4 @@ declare i32 @llvm.hexagon.A2.orir(i32, i32) #1
 declare i32 @llvm.hexagon.A2.subri(i32, i32)
 declare i32 @llvm.hexagon.A2.tfril(i32, i32) #1
 declare i32 @llvm.hexagon.A2.tfrih(i32, i32) #1
+declare i64 @llvm.hexagon.A2.combineii(i32, i32) #1
diff --git a/llvm/test/CodeGen/Hexagon/intrinsics-alu32.ll b/llvm/test/CodeGen/Hexagon/intrinsics-alu32.ll
new file mode 100644
index 0000000..2218e26
--- /dev/null
+++ b/llvm/test/CodeGen/Hexagon/intrinsics-alu32.ll
@@ -0,0 +1,83 @@
+; RUN: llc -march=hexagon < %s | FileCheck %s
+
+; Verify that ALU32 - aslh, asrh, sxth, sxtb, zxth, zxtb intrinsics
+; are lowered to the right instructions.
+
+@c = external global i64
+
+; CHECK: r{{[0-9]+}}{{ *}}={{ *}}aslh({{ *}}r{{[0-9]+}}{{ *}})
+define void @test1(i32 %a) #0 {
+entry:
+  %0 = tail call i32 @llvm.hexagon.A2.aslh(i32 %a)
+  %conv = sext i32 %0 to i64
+  store i64 %conv, i64* @c, align 8
+  ret void
+}
+
+declare i32 @llvm.hexagon.A2.aslh(i32) #1
+
+; CHECK: r{{[0-9]+}}{{ *}}={{ *}}asrh({{ *}}r{{[0-9]+}}{{ *}})
+define void @test2(i32 %a) #0 {
+entry:
+  %0 = tail call i32 @llvm.hexagon.A2.asrh(i32 %a)
+  %conv = sext i32 %0 to i64
+  store i64 %conv, i64* @c, align 8
+  ret void
+}
+
+declare i32 @llvm.hexagon.A2.asrh(i32) #1
+
+; CHECK: r{{[0-9]+}}{{ *}}={{ *}}sxtb({{ *}}r{{[0-9]+}}{{ *}})
+define void @test3(i32 %a) #0 {
+entry:
+  %0 = tail call i32 @llvm.hexagon.A2.sxtb(i32 %a)
+  %conv = sext i32 %0 to i64
+  store i64 %conv, i64* @c, align 8
+  ret void
+}
+
+declare i32 @llvm.hexagon.A2.sxtb(i32) #1
+
+; CHECK: r{{[0-9]+}}{{ *}}={{ *}}sxth({{ *}}r{{[0-9]+}}{{ *}})
+define void @test4(i32 %a) #0 {
+entry:
+  %0 = tail call i32 @llvm.hexagon.A2.sxth(i32 %a)
+  %conv = sext i32 %0 to i64
+  store i64 %conv, i64* @c, align 8
+  ret void
+}
+
+declare i32 @llvm.hexagon.A2.sxth(i32) #1
+
+; CHECK: r{{[0-9]+}}{{ *}}={{ *}}zxtb({{ *}}r{{[0-9]+}}{{ *}})
+define void @test6(i32 %a) #0 {
+entry:
+  %0 = tail call i32 @llvm.hexagon.A2.zxtb(i32 %a)
+  %conv = sext i32 %0 to i64
+  store i64 %conv, i64* @c, align 8
+  ret void
+}
+
+declare i32 @llvm.hexagon.A2.zxtb(i32) #1
+
+; CHECK: r{{[0-9]+}}{{ *}}={{ *}}zxth({{ *}}r{{[0-9]+}}{{ *}})
+define void @test7(i32 %a) #0 {
+entry:
+  %0 = tail call i32 @llvm.hexagon.A2.zxth(i32 %a)
+  %conv = sext i32 %0 to i64
+  store i64 %conv, i64* @c, align 8
+  ret void
+}
+
+declare i32 @llvm.hexagon.A2.zxth(i32) #1
+
+; CHECK: r{{[0-9]+}}{{ *}}={{ *}}asrh({{ *}}r{{[0-9]+}}{{ *}})
+define void @test8(i32 %a) #0 {
+entry:
+  %0 = tail call i32 @llvm.hexagon.SI.to.SXTHI.asrh(i32 %a)
+  %conv = sext i32 %0 to i64
+  store i64 %conv, i64* @c, align 8
+  ret void
+}
+
+declare i32 @llvm.hexagon.SI.to.SXTHI.asrh(i32) #1
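
Note: the register forms of the combine patterns added above (the T_RR_pat defs for
int_hexagon_A2_combine_hh/hl/lh/ll) are not exercised by the tests in this patch. Below
is a minimal sketch, in the style of intrinsics-alu32.ll, of how one of them could be
covered. It is not part of the commit; the function name @test_combine_hh and the exact
CHECK spelling of the combine-halfword mnemonic are assumptions.

; RUN: llc -march=hexagon < %s | FileCheck %s

@c = external global i64

; Assumed mnemonic form: rX = combine(rY.h, rZ.h)
; CHECK: r{{[0-9]+}}{{ *}}={{ *}}combine(r{{[0-9]+}}.h{{ *}},{{ *}}r{{[0-9]+}}.h)
define void @test_combine_hh(i32 %a, i32 %b) #0 {
entry:
  ; Combine the high halfwords of %a and %b into one 32-bit result.
  %0 = tail call i32 @llvm.hexagon.A2.combine.hh(i32 %a, i32 %b)
  %conv = sext i32 %0 to i64
  store i64 %conv, i64* @c, align 8
  ret void
}

declare i32 @llvm.hexagon.A2.combine.hh(i32, i32) #1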