From 79ec06525ec063373c519a58976c6ecbb4461ecb Mon Sep 17 00:00:00 2001
From: Colin LeMahieu
Date: Fri, 12 Jun 2015 19:57:32 +0000
Subject: [PATCH] [Hexagon] Making intrinsic tests agnostic to register
 allocation. Narrowing intrinsic parameters to appropriate width.

llvm-svn: 239634
---
 llvm/lib/Target/Hexagon/HexagonISelDAGToDAG.cpp    |  15 +
 llvm/lib/Target/Hexagon/HexagonIntrinsics.td       |  27 +-
 llvm/lib/Target/Hexagon/HexagonOperands.td         |  40 ++
 llvm/test/CodeGen/Hexagon/intrinsics/alu32_alu.ll  |  63 ++-
 llvm/test/CodeGen/Hexagon/intrinsics/alu32_perm.ll |  31 +-
 llvm/test/CodeGen/Hexagon/intrinsics/cr.ll         |  39 +-
 llvm/test/CodeGen/Hexagon/intrinsics/xtype_alu.ll  | 282 ++++++-------
 llvm/test/CodeGen/Hexagon/intrinsics/xtype_bit.ll  |  93 ++---
 .../CodeGen/Hexagon/intrinsics/xtype_complex.ll    |  99 ++---
 llvm/test/CodeGen/Hexagon/intrinsics/xtype_fp.ll   | 110 +++---
 llvm/test/CodeGen/Hexagon/intrinsics/xtype_mpy.ll  | 434 +++++++++++----------
 llvm/test/CodeGen/Hexagon/intrinsics/xtype_perm.ll |  71 ++--
 llvm/test/CodeGen/Hexagon/intrinsics/xtype_pred.ll |  99 ++---
 .../test/CodeGen/Hexagon/intrinsics/xtype_shift.ll | 205 +++++-----
 14 files changed, 848 insertions(+), 760 deletions(-)

diff --git a/llvm/lib/Target/Hexagon/HexagonISelDAGToDAG.cpp b/llvm/lib/Target/Hexagon/HexagonISelDAGToDAG.cpp
index 7a213aa..9123057e6 100644
--- a/llvm/lib/Target/Hexagon/HexagonISelDAGToDAG.cpp
+++ b/llvm/lib/Target/Hexagon/HexagonISelDAGToDAG.cpp
@@ -104,6 +104,7 @@ public:
   SDNode *SelectConstantFP(SDNode *N);
   SDNode *SelectAdd(SDNode *N);
   SDNode *SelectBitOp(SDNode *N);
+  bool isConstExtProfitable(SDNode *N) const;
 
   // XformMskToBitPosU5Imm - Returns the bit position which
   // the single bit 32 bit mask represents.
@@ -1327,6 +1328,20 @@ SelectInlineAsmMemoryOperand(const SDValue &Op, unsigned ConstraintID,
   return false;
 }
 
+bool HexagonDAGToDAGISel::isConstExtProfitable(SDNode *N) const {
+  unsigned UseCount = 0;
+  unsigned CallCount = 0;
+  for (SDNode::use_iterator I = N->use_begin(), E = N->use_end(); I != E; ++I) {
+    // Count uses that copy the value into a register (typically outgoing
+    // call arguments) separately from the total use count.
+    if (I->getOpcode() == ISD::CopyToReg)
+      ++CallCount;
+    UseCount++;
+  }
+
+  return (UseCount <= 1) || (CallCount > 1);
+}
+
 void HexagonDAGToDAGISel::PreprocessISelDAG() {
   SelectionDAG &DAG = *CurDAG;
   std::vector<SDNode*> Nodes;
diff --git a/llvm/lib/Target/Hexagon/HexagonIntrinsics.td b/llvm/lib/Target/Hexagon/HexagonIntrinsics.td
index 4275230..1d0d015 100644
--- a/llvm/lib/Target/Hexagon/HexagonIntrinsics.td
+++ b/llvm/lib/Target/Hexagon/HexagonIntrinsics.td
@@ -676,6 +676,7 @@ def : Pat <(int_hexagon_A2_tfrih (I32:$Rs), u16_0ImmPred:$Is),
 
 // Transfer Register/immediate.
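// A note for orientation: T_R_pat and T_I_pat below are small helper pattern
// classes that map an intrinsic call straight onto one instruction. Their
// definitions live earlier in this file; a rough sketch of their shape
// (signatures assumed for illustration, not quoted from the patch):
//
//   class T_R_pat <InstHexagon MI, Intrinsic IntID>
//     : Pat <(IntID I32:$Rs), (MI I32:$Rs)>;
//   class T_I_pat <InstHexagon MI, Intrinsic IntID>
//     : Pat <(IntID imm:$Is), (MI imm:$Is)>;
//
// The substantive change in this file is in the operand predicates: blanket
// 32-bit immediate predicates (s32ImmPred, u32ImmPred) are narrowed to the
// width the instruction encoding actually provides (s8ExtPred, u8ExtPred,
// u9ExtPred, s10ExtPred), accepting a wider value only when
// isConstExtProfitable() says a constant extender is worthwhile.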
 def : T_R_pat ;
 def : T_I_pat ;
+def : T_I_pat ;
 
 // Assembler mapped from Rdd32=Rss32 to Rdd32=combine(Rss.H32,Rss.L32)
 def : Pat<(int_hexagon_A2_tfrp DoubleRegs:$src),
@@ -690,15 +691,15 @@ def: T_RR_pat;
 def: T_RR_pat;
 def: T_RR_pat;
 
-def: T_II_pat;
+def: T_II_pat;
 
 def: Pat<(i32 (int_hexagon_C2_mux (I32:$Rp), (I32:$Rs), (I32:$Rt))),
          (i32 (C2_mux (C2_tfrrp IntRegs:$Rp), IntRegs:$Rs, IntRegs:$Rt))>;
 
 // Mux
-def : T_QRI_pat;
-def : T_QIR_pat;
-def : T_QII_pat;
+def : T_QRI_pat;
+def : T_QIR_pat;
+def : T_QII_pat;
 
 // Shift halfword
 def : T_R_pat;
@@ -719,17 +720,17 @@ def : T_RR_pat;
 def : T_RR_pat;
 def : T_RR_pat;
 
-def : T_RI_pat;
-def : T_RI_pat;
-def : T_RI_pat;
+def : T_RI_pat;
+def : T_RI_pat;
+def : T_RI_pat;
 
-def : Pat <(i32 (int_hexagon_C2_cmpgei (I32:$src1), s32ImmPred:$src2)),
+def : Pat <(i32 (int_hexagon_C2_cmpgei (I32:$src1), s8ExtPred:$src2)),
            (i32 (C2_cmpgti (I32:$src1),
-                           (DEC_CONST_SIGNED s32ImmPred:$src2)))>;
+                           (DEC_CONST_SIGNED s8ExtPred:$src2)))>;
 
-def : Pat <(i32 (int_hexagon_C2_cmpgeui (I32:$src1), u32ImmPred:$src2)),
+def : Pat <(i32 (int_hexagon_C2_cmpgeui (I32:$src1), u8ExtPred:$src2)),
            (i32 (C2_cmpgtui (I32:$src1),
-                            (DEC_CONST_UNSIGNED u32ImmPred:$src2)))>;
+                            (DEC_CONST_UNSIGNED u8ExtPred:$src2)))>;
 
 // The instruction, Pd=cmp.geu(Rs, #u8) -> Pd=cmp.eq(Rs,Rs) when #u8 == 0.
 def : Pat <(i32 (int_hexagon_C2_cmpgeui (I32:$src1), 0)),
@@ -923,6 +924,10 @@ def: qi_CRInst_qiqi_pat;
 def: qi_CRInst_qiqi_pat;
 def: qi_CRInst_qiqi_pat;
 
+// Assembler mapped from Pd4=Ps4 to Pd4=or(Ps4,Ps4)
+def : Pat<(int_hexagon_C2_pxfer_map PredRegs:$src),
+          (C2_pxfer_map PredRegs:$src)>;
+
 // Multiply 32x32 and use lower result
 def : T_RRI_pat ;
 def : T_RRI_pat ;
diff --git a/llvm/lib/Target/Hexagon/HexagonOperands.td b/llvm/lib/Target/Hexagon/HexagonOperands.td
index d5191dc..2bece8f 100644
--- a/llvm/lib/Target/Hexagon/HexagonOperands.td
+++ b/llvm/lib/Target/Hexagon/HexagonOperands.td
@@ -461,6 +461,46 @@ let PrintMethod = "printExtOperand" in {
   def u6_3Ext : Operand<i32>;
 }
 
+def s10ExtPred : PatLeaf<(i32 imm), [{
+  int64_t v = (int64_t)N->getSExtValue();
+  if (isInt<10>(v))
+    return true;
+
+  // Return true if extending this immediate is profitable and the value
+  // can fit in a 32-bit signed field.
+  return isConstExtProfitable(Node) && isInt<32>(v);
+}]>;
+
+def s8ExtPred : PatLeaf<(i32 imm), [{
+  int64_t v = (int64_t)N->getSExtValue();
+  if (isInt<8>(v))
+    return true;
+
+  // Return true if extending this immediate is profitable and the value
+  // can fit in a 32-bit signed field.
+  return isConstExtProfitable(Node) && isInt<32>(v);
+}]>;
+
+def u8ExtPred : PatLeaf<(i32 imm), [{
+  int64_t v = (int64_t)N->getSExtValue();
+  if (isUInt<8>(v))
+    return true;
+
+  // Return true if extending this immediate is profitable and the value
+  // can fit in a 32-bit unsigned field.
+  return isConstExtProfitable(Node) && isUInt<32>(v);
+}]>;
+
+def u9ExtPred : PatLeaf<(i32 imm), [{
+  int64_t v = (int64_t)N->getSExtValue();
+  if (isUInt<9>(v))
+    return true;
+
+  // Return true if extending this immediate is profitable and the value
+  // can fit in a 32-bit unsigned field.
+  return isConstExtProfitable(Node) && isUInt<32>(v);
+}]>;
+
 // This complex pattern exists only to create a machine instruction operand
 // of type "frame index".
 // There doesn't seem to be a way to do that directly
diff --git a/llvm/test/CodeGen/Hexagon/intrinsics/alu32_alu.ll b/llvm/test/CodeGen/Hexagon/intrinsics/alu32_alu.ll
index 37f9f40..fcf80b0 100644
--- a/llvm/test/CodeGen/Hexagon/intrinsics/alu32_alu.ll
+++ b/llvm/test/CodeGen/Hexagon/intrinsics/alu32_alu.ll
@@ -1,27 +1,30 @@
 ; RUN: llc -march=hexagon -O0 < %s | FileCheck %s
+; RUN: llc -march=hexagon -O0 < %s | FileCheck -check-prefix=CHECK-CALL %s
 ; Hexagon Programmer's Reference Manual 11.1.1 ALU32/ALU
 
+; CHECK-CALL-NOT: call
+
 ; Add
 declare i32 @llvm.hexagon.A2.addi(i32, i32)
 define i32 @A2_addi(i32 %a) {
   %z = call i32 @llvm.hexagon.A2.addi(i32 %a, i32 0)
   ret i32 %z
 }
-; CHECK: r0 = add(r0, #0)
+; CHECK: = add({{.*}}, #0)
 
 declare i32 @llvm.hexagon.A2.add(i32, i32)
 define i32 @A2_add(i32 %a, i32 %b) {
   %z = call i32 @llvm.hexagon.A2.add(i32 %a, i32 %b)
   ret i32 %z
 }
-; CHECK: r0 = add(r0, r1)
+; CHECK: = add({{.*}}, {{.*}})
 
 declare i32 @llvm.hexagon.A2.addsat(i32, i32)
 define i32 @A2_addsat(i32 %a, i32 %b) {
   %z = call i32 @llvm.hexagon.A2.addsat(i32 %a, i32 %b)
   ret i32 %z
 }
-; CHECK: r0 = add(r0, r1):sat
+; CHECK: = add({{.*}}, {{.*}}):sat
 
 ; Logical operations
 declare i32 @llvm.hexagon.A2.and(i32, i32)
@@ -29,43 +32,35 @@ define i32 @A2_and(i32 %a, i32 %b) {
   %z = call i32 @llvm.hexagon.A2.and(i32 %a, i32 %b)
   ret i32 %z
 }
-; CHECK: r0 = and(r0, r1)
+; CHECK: = and({{.*}}, {{.*}})
 
 declare i32 @llvm.hexagon.A2.or(i32, i32)
 define i32 @A2_or(i32 %a, i32 %b) {
   %z = call i32 @llvm.hexagon.A2.or(i32 %a, i32 %b)
   ret i32 %z
 }
-; CHECK: r0 = or(r0, r1)
+; CHECK: = or({{.*}}, {{.*}})
 
 declare i32 @llvm.hexagon.A2.xor(i32, i32)
 define i32 @A2_xor(i32 %a, i32 %b) {
   %z = call i32 @llvm.hexagon.A2.xor(i32 %a, i32 %b)
   ret i32 %z
 }
-; CHECK: r0 = xor(r0, r1)
+; CHECK: = xor({{.*}}, {{.*}})
 
 declare i32 @llvm.hexagon.A4.andn(i32, i32)
 define i32 @A4_andn(i32 %a, i32 %b) {
   %z = call i32 @llvm.hexagon.A4.andn(i32 %a, i32 %b)
   ret i32 %z
 }
-; CHECK: r0 = and(r0, ~r1)
+; CHECK: = and({{.*}}, ~{{.*}})
 
 declare i32 @llvm.hexagon.A4.orn(i32, i32)
 define i32 @A4_orn(i32 %a, i32 %b) {
   %z = call i32 @llvm.hexagon.A4.orn(i32 %a, i32 %b)
   ret i32 %z
 }
-; CHECK: r0 = or(r0, ~r1)
-
-; Nop
-declare void @llvm.hexagon.A2.nop()
-define void @A2_nop(i32 %a, i32 %b) {
-  call void @llvm.hexagon.A2.nop()
-  ret void
-}
-; CHECK: nop
+; CHECK: = or({{.*}}, ~{{.*}})
 
 ; Subtract
 declare i32 @llvm.hexagon.A2.sub(i32, i32)
@@ -73,14 +68,14 @@ define i32 @A2_sub(i32 %a, i32 %b) {
   %z = call i32 @llvm.hexagon.A2.sub(i32 %a, i32 %b)
   ret i32 %z
 }
-; CHECK: r0 = sub(r0, r1)
+; CHECK: = sub({{.*}}, {{.*}})
 
 declare i32 @llvm.hexagon.A2.subsat(i32, i32)
 define i32 @A2_subsat(i32 %a, i32 %b) {
   %z = call i32 @llvm.hexagon.A2.subsat(i32 %a, i32 %b)
   ret i32 %z
 }
-; CHECK: r0 = sub(r0, r1):sat
+; CHECK: = sub({{.*}}, {{.*}}):sat
 
 ; Sign extend
 declare i32 @llvm.hexagon.A2.sxtb(i32)
@@ -88,14 +83,14 @@ define i32 @A2_sxtb(i32 %a) {
   %z = call i32 @llvm.hexagon.A2.sxtb(i32 %a)
   ret i32 %z
 }
-; CHECK: r0 = sxtb(r0)
+; CHECK: = sxtb({{.*}})
 
 declare i32 @llvm.hexagon.A2.sxth(i32)
 define i32 @A2_sxth(i32 %a) {
   %z = call i32 @llvm.hexagon.A2.sxth(i32 %a)
   ret i32 %z
 }
-; CHECK: r0 = sxth(r0)
+; CHECK: = sxth({{.*}})
 
 ; Transfer immediate
 declare i32 @llvm.hexagon.A2.tfril(i32, i32)
@@ -103,21 +98,21 @@ define i32 @A2_tfril(i32 %a) {
   %z = call i32 @llvm.hexagon.A2.tfril(i32 %a, i32 0)
   ret i32 %z
 }
-; CHECK: r0.l = #0
+; CHECK: = #0
 
 declare i32 @llvm.hexagon.A2.tfrih(i32, i32)
 define i32 @A2_tfrih(i32 %a) {
   %z = call i32 @llvm.hexagon.A2.tfrih(i32 %a, i32 0)
   ret i32
%z } -; CHECK: r0.h = #0 +; CHECK: = #0 declare i32 @llvm.hexagon.A2.tfrsi(i32) define i32 @A2_tfrsi() { %z = call i32 @llvm.hexagon.A2.tfrsi(i32 0) ret i32 %z } -; CHECK: r0 = #0 +; CHECK: = #0 ; Transfer register declare i32 @llvm.hexagon.A2.tfr(i32) @@ -125,7 +120,7 @@ define i32 @A2_tfr(i32 %a) { %z = call i32 @llvm.hexagon.A2.tfr(i32 %a) ret i32 %z } -; CHECK: r0 = r0 +; CHECK: = ; Vector add halfwords declare i32 @llvm.hexagon.A2.svaddh(i32, i32) @@ -133,21 +128,21 @@ define i32 @A2_svaddh(i32 %a, i32 %b) { %z = call i32 @llvm.hexagon.A2.svaddh(i32 %a, i32 %b) ret i32 %z } -; CHECK: r0 = vaddh(r0, r1) +; CHECK: = vaddh({{.*}}, {{.*}}) declare i32 @llvm.hexagon.A2.svaddhs(i32, i32) define i32 @A2_svaddhs(i32 %a, i32 %b) { %z = call i32 @llvm.hexagon.A2.svaddhs(i32 %a, i32 %b) ret i32 %z } -; CHECK: r0 = vaddh(r0, r1):sat +; CHECK: = vaddh({{.*}}, {{.*}}):sat declare i32 @llvm.hexagon.A2.svadduhs(i32, i32) define i32 @A2_svadduhs(i32 %a, i32 %b) { %z = call i32 @llvm.hexagon.A2.svadduhs(i32 %a, i32 %b) ret i32 %z } -; CHECK: r0 = vadduh(r0, r1):sat +; CHECK: = vadduh({{.*}}, {{.*}}):sat ; Vector average halfwords declare i32 @llvm.hexagon.A2.svavgh(i32, i32) @@ -155,21 +150,21 @@ define i32 @A2_svavgh(i32 %a, i32 %b) { %z = call i32 @llvm.hexagon.A2.svavgh(i32 %a, i32 %b) ret i32 %z } -; CHECK: r0 = vavgh(r0, r1) +; CHECK: = vavgh({{.*}}, {{.*}}) declare i32 @llvm.hexagon.A2.svavghs(i32, i32) define i32 @A2_svavghs(i32 %a, i32 %b) { %z = call i32 @llvm.hexagon.A2.svavghs(i32 %a, i32 %b) ret i32 %z } -; CHECK: r0 = vavgh(r0, r1):rnd +; CHECK: = vavgh({{.*}}, {{.*}}):rnd declare i32 @llvm.hexagon.A2.svnavgh(i32, i32) define i32 @A2_svnavgh(i32 %a, i32 %b) { %z = call i32 @llvm.hexagon.A2.svnavgh(i32 %a, i32 %b) ret i32 %z } -; CHECK: r0 = vnavgh(r0, r1) +; CHECK: = vnavgh({{.*}}, {{.*}}) ; Vector subtract halfwords declare i32 @llvm.hexagon.A2.svsubh(i32, i32) @@ -177,21 +172,21 @@ define i32 @A2_svsubh(i32 %a, i32 %b) { %z = call i32 @llvm.hexagon.A2.svsubh(i32 %a, i32 %b) ret i32 %z } -; CHECK: r0 = vsubh(r0, r1) +; CHECK: = vsubh({{.*}}, {{.*}}) declare i32 @llvm.hexagon.A2.svsubhs(i32, i32) define i32 @A2_svsubhs(i32 %a, i32 %b) { %z = call i32 @llvm.hexagon.A2.svsubhs(i32 %a, i32 %b) ret i32 %z } -; CHECK: r0 = vsubh(r0, r1):sat +; CHECK: = vsubh({{.*}}, {{.*}}):sat declare i32 @llvm.hexagon.A2.svsubuhs(i32, i32) define i32 @A2_svsubuhs(i32 %a, i32 %b) { %z = call i32 @llvm.hexagon.A2.svsubuhs(i32 %a, i32 %b) ret i32 %z } -; CHECK: r0 = vsubuh(r0, r1):sat +; CHECK: = vsubuh({{.*}}, {{.*}}):sat ; Zero extend declare i32 @llvm.hexagon.A2.zxth(i32) @@ -199,4 +194,4 @@ define i32 @A2_zxth(i32 %a) { %z = call i32 @llvm.hexagon.A2.zxth(i32 %a) ret i32 %z } -; CHECK: r0 = zxth(r0) +; CHECK: = zxth({{.*}}) diff --git a/llvm/test/CodeGen/Hexagon/intrinsics/alu32_perm.ll b/llvm/test/CodeGen/Hexagon/intrinsics/alu32_perm.ll index a9cc01c..c9fb0af 100644 --- a/llvm/test/CodeGen/Hexagon/intrinsics/alu32_perm.ll +++ b/llvm/test/CodeGen/Hexagon/intrinsics/alu32_perm.ll @@ -1,62 +1,65 @@ ; RUN: llc -march=hexagon -O0 < %s | FileCheck %s +; RUN: llc -march=hexagon -O0 < %s | FileCheck -check-prefix=CHECK-CALL %s ; Hexagon Programmer's Reference Manual 11.1.2 ALU32/PERM +; CHECK-CALL-NOT: call + ; Combine words into doubleword declare i64 @llvm.hexagon.A4.combineri(i32, i32) define i64 @A4_combineri(i32 %a) { %z = call i64 @llvm.hexagon.A4.combineri(i32 %a, i32 0) ret i64 %z } -; CHECK: = combine(r0, #0) +; CHECK: = combine({{.*}}, #0) declare i64 @llvm.hexagon.A4.combineir(i32, i32) define i64 
@A4_combineir(i32 %a) { %z = call i64 @llvm.hexagon.A4.combineir(i32 0, i32 %a) ret i64 %z } -; CHECK: = combine(#0, r0) +; CHECK: = combine(#0, {{.*}}) declare i64 @llvm.hexagon.A2.combineii(i32, i32) define i64 @A2_combineii() { %z = call i64 @llvm.hexagon.A2.combineii(i32 0, i32 0) ret i64 %z } -; CHECK: r1:0 = combine(#0, #0) +; CHECK: = combine(#0, #0) declare i32 @llvm.hexagon.A2.combine.hh(i32, i32) define i32 @A2_combine_hh(i32 %a, i32 %b) { %z = call i32 @llvm.hexagon.A2.combine.hh(i32 %a, i32 %b) ret i32 %z } -; CHECK: r0 = combine(r0.h, r1.h) +; CHECK: = combine({{.*}}, {{.*}}) declare i32 @llvm.hexagon.A2.combine.hl(i32, i32) define i32 @A2_combine_hl(i32 %a, i32 %b) { %z = call i32 @llvm.hexagon.A2.combine.hl(i32 %a, i32 %b) ret i32 %z } -; CHECK: r0 = combine(r0.h, r1.l) +; CHECK: = combine({{.*}}, {{.*}}) declare i32 @llvm.hexagon.A2.combine.lh(i32, i32) define i32 @A2_combine_lh(i32 %a, i32 %b) { %z = call i32 @llvm.hexagon.A2.combine.lh(i32 %a, i32 %b) ret i32 %z } -; CHECK: r0 = combine(r0.l, r1.h) +; CHECK: = combine({{.*}}, {{.*}}) declare i32 @llvm.hexagon.A2.combine.ll(i32, i32) define i32 @A2_combine_ll(i32 %a, i32 %b) { %z = call i32 @llvm.hexagon.A2.combine.ll(i32 %a, i32 %b) ret i32 %z } -; CHECK: r0 = combine(r0.l, r1.l) +; CHECK: = combine({{.*}}, {{.*}}) declare i64 @llvm.hexagon.A2.combinew(i32, i32) define i64 @A2_combinew(i32 %a, i32 %b) { %z = call i64 @llvm.hexagon.A2.combinew(i32 %a, i32 %b) ret i64 %z } -; CHECK: r1:0 = combine(r0, r1) +; CHECK: = combine({{.*}}, {{.*}}) ; Mux declare i32 @llvm.hexagon.C2.muxri(i32, i32, i32) @@ -64,21 +67,21 @@ define i32 @C2_muxri(i32 %a, i32 %b) { %z = call i32 @llvm.hexagon.C2.muxri(i32 %a, i32 0, i32 %b) ret i32 %z } -; CHECK: r0 = mux(p0, #0, r1) +; CHECK: = mux({{.*}}, #0, {{.*}}) declare i32 @llvm.hexagon.C2.muxir(i32, i32, i32) define i32 @C2_muxir(i32 %a, i32 %b) { %z = call i32 @llvm.hexagon.C2.muxir(i32 %a, i32 %b, i32 0) ret i32 %z } -; CHECK: r0 = mux(p0, r1, #0) +; CHECK: = mux({{.*}}, {{.*}}, #0) declare i32 @llvm.hexagon.C2.mux(i32, i32, i32) define i32 @C2_mux(i32 %a, i32 %b, i32 %c) { %z = call i32 @llvm.hexagon.C2.mux(i32 %a, i32 %b, i32 %c) ret i32 %z } -; CHECK: r0 = mux(p0, r1, r2) +; CHECK: = mux({{.*}}, {{.*}}, {{.*}}) ; Shift word by 16 declare i32 @llvm.hexagon.A2.aslh(i32) @@ -86,14 +89,14 @@ define i32 @A2_aslh(i32 %a) { %z = call i32 @llvm.hexagon.A2.aslh(i32 %a) ret i32 %z } -; CHECK: r0 = aslh(r0) +; CHECK: = aslh({{.*}}) declare i32 @llvm.hexagon.A2.asrh(i32) define i32 @A2_asrh(i32 %a) { %z = call i32 @llvm.hexagon.A2.asrh(i32 %a) ret i32 %z } -; CHECK: r0 = asrh(r0) +; CHECK: = asrh({{.*}}) ; Pack high and low halfwords declare i64 @llvm.hexagon.S2.packhl(i32, i32) @@ -101,4 +104,4 @@ define i64 @S2_packhl(i32 %a, i32 %b) { %z = call i64 @llvm.hexagon.S2.packhl(i32 %a, i32 %b) ret i64 %z } -; CHECK: r1:0 = packhl(r0, r1) +; CHECK: = packhl({{.*}}, {{.*}}) diff --git a/llvm/test/CodeGen/Hexagon/intrinsics/cr.ll b/llvm/test/CodeGen/Hexagon/intrinsics/cr.ll index 9bdcb25..f308ef8 100644 --- a/llvm/test/CodeGen/Hexagon/intrinsics/cr.ll +++ b/llvm/test/CodeGen/Hexagon/intrinsics/cr.ll @@ -1,20 +1,23 @@ ; RUN: llc -march=hexagon < %s | FileCheck %s +; RUN: llc -march=hexagon -O0 < %s | FileCheck -check-prefix=CHECK-CALL %s ; Hexagon Programmer's Reference Manual 11.2 CR +; CHECK-CALL-NOT: call + ; Corner detection acceleration declare i32 @llvm.hexagon.C4.fastcorner9(i32, i32) define i32 @C4_fastcorner9(i32 %a, i32 %b) { %z = call i32@llvm.hexagon.C4.fastcorner9(i32 %a, i32 %b) ret i32 %z } 
-; CHECK: p0 = fastcorner9(p0, p1) +; CHECK: = fastcorner9({{.*}}, {{.*}}) declare i32 @llvm.hexagon.C4.fastcorner9.not(i32, i32) define i32 @C4_fastcorner9_not(i32 %a, i32 %b) { %z = call i32@llvm.hexagon.C4.fastcorner9.not(i32 %a, i32 %b) ret i32 %z } -; CHECK: p0 = !fastcorner9(p0, p1) +; CHECK: = !fastcorner9({{.*}}, {{.*}}) ; Logical reductions on predicates declare i32 @llvm.hexagon.C2.any8(i32) @@ -22,7 +25,7 @@ define i32 @C2_any8(i32 %a) { %z = call i32@llvm.hexagon.C2.any8(i32 %a) ret i32 %z } -; CHECK: p0 = any8(p0) +; CHECK: = any8({{.*}}) declare i32 @llvm.hexagon.C2.all8(i32) define i32 @C2_all8(i32 %a) { @@ -30,7 +33,7 @@ define i32 @C2_all8(i32 %a) { ret i32 %z } -; CHECK: p0 = all8(p0) +; CHECK: = all8({{.*}}) ; Logical operations on predicates declare i32 @llvm.hexagon.C2.and(i32, i32) @@ -38,95 +41,95 @@ define i32 @C2_and(i32 %a, i32 %b) { %z = call i32@llvm.hexagon.C2.and(i32 %a, i32 %b) ret i32 %z } -; CHECK: p0 = and(p0, p1) +; CHECK: = and({{.*}}, {{.*}}) declare i32 @llvm.hexagon.C4.and.and(i32, i32, i32) define i32 @C4_and_and(i32 %a, i32 %b, i32 %c) { %z = call i32@llvm.hexagon.C4.and.and(i32 %a, i32 %b, i32 %c) ret i32 %z } -; CHECK: p0 = and(p0, and(p1, p2)) +; CHECK: = and({{.*}}, and({{.*}}, {{.*}})) declare i32 @llvm.hexagon.C2.or(i32, i32) define i32 @C2_or(i32 %a, i32 %b) { %z = call i32@llvm.hexagon.C2.or(i32 %a, i32 %b) ret i32 %z } -; CHECK: p0 = or(p0, p1) +; CHECK: = or({{.*}}, {{.*}}) declare i32 @llvm.hexagon.C4.and.or(i32, i32, i32) define i32 @C4_and_or(i32 %a, i32 %b, i32 %c) { %z = call i32@llvm.hexagon.C4.and.or(i32 %a, i32 %b, i32 %c) ret i32 %z } -; CHECK: p0 = and(p0, or(p1, p2)) +; CHECK: = and({{.*}}, or({{.*}}, {{.*}})) declare i32 @llvm.hexagon.C2.xor(i32, i32) define i32 @C2_xor(i32 %a, i32 %b) { %z = call i32@llvm.hexagon.C2.xor(i32 %a, i32 %b) ret i32 %z } -; CHECK: p0 = xor(p0, p1) +; CHECK: = xor({{.*}}, {{.*}}) declare i32 @llvm.hexagon.C4.or.and(i32, i32, i32) define i32 @C4_or_and(i32 %a, i32 %b, i32 %c) { %z = call i32@llvm.hexagon.C4.or.and(i32 %a, i32 %b, i32 %c) ret i32 %z } -; CHECK: p0 = or(p0, and(p1, p2)) +; CHECK: = or({{.*}}, and({{.*}}, {{.*}})) declare i32 @llvm.hexagon.C2.andn(i32, i32) define i32 @C2_andn(i32 %a, i32 %b) { %z = call i32@llvm.hexagon.C2.andn(i32 %a, i32 %b) ret i32 %z } -; CHECK: p0 = and(p0, !p1) +; CHECK: = and({{.*}}, !{{.*}}) declare i32 @llvm.hexagon.C4.or.or(i32, i32, i32) define i32 @C4_or_or(i32 %a, i32 %b, i32 %c) { %z = call i32@llvm.hexagon.C4.or.or(i32 %a, i32 %b, i32 %c) ret i32 %z } -; CHECK: p0 = or(p0, or(p1, p2)) +; CHECK: = or({{.*}}, or({{.*}}, {{.*}})) declare i32 @llvm.hexagon.C4.and.andn(i32, i32, i32) define i32 @C4_and_andn(i32 %a, i32 %b, i32 %c) { %z = call i32@llvm.hexagon.C4.and.andn(i32 %a, i32 %b, i32 %c) ret i32 %z } -; CHECK: p0 = and(p0, and(p1, !p2)) +; CHECK: = and({{.*}}, and({{.*}}, !{{.*}})) declare i32 @llvm.hexagon.C4.and.orn(i32, i32, i32) define i32 @C4_and_orn(i32 %a, i32 %b, i32 %c) { %z = call i32@llvm.hexagon.C4.and.orn(i32 %a, i32 %b, i32 %c) ret i32 %z } -; CHECK: p0 = and(p0, or(p1, !p2)) +; CHECK: = and({{.*}}, or({{.*}}, !{{.*}})) declare i32 @llvm.hexagon.C2.not(i32) define i32 @C2_not(i32 %a) { %z = call i32@llvm.hexagon.C2.not(i32 %a) ret i32 %z } -; CHECK: p0 = not(p0) +; CHECK: = not({{.*}}) declare i32 @llvm.hexagon.C4.or.andn(i32, i32, i32) define i32 @C4_or_andn(i32 %a, i32 %b, i32 %c) { %z = call i32@llvm.hexagon.C4.or.andn(i32 %a, i32 %b, i32 %c) ret i32 %z } -; CHECK: p0 = or(p0, and(p1, !p2)) +; CHECK: = or({{.*}}, and({{.*}}, !{{.*}})) 
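; An illustration of the check idiom these tests now share (a sketch, not a
; live FileCheck directive): the old checks pinned exact registers, so a line
; such as
;   p0 = or(p0, !p1)
; stopped matching whenever the register allocator's choices changed. The new
; wildcard form fixes only the instruction shape,
;   = or({{.*}}, !{{.*}})
; and therefore matches the same instruction for any predicate registers. The
; second RUN line reuses the output with the CHECK-CALL prefix, whose single
; NOT pattern verifies that no intrinsic is expanded into a runtime call.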
declare i32 @llvm.hexagon.C2.orn(i32, i32) define i32 @C2_orn(i32 %a, i32 %b) { %z = call i32@llvm.hexagon.C2.orn(i32 %a, i32 %b) ret i32 %z } -; CHECK: p0 = or(p0, !p1) +; CHECK: = or({{.*}}, !{{.*}}) declare i32 @llvm.hexagon.C4.or.orn(i32, i32, i32) define i32 @C4_or_orn(i32 %a, i32 %b, i32 %c) { %z = call i32@llvm.hexagon.C4.or.orn(i32 %a, i32 %b, i32 %c) ret i32 %z } -; CHECK: p0 = or(p0, or(p1, !p2)) +; CHECK: = or({{.*}}, or({{.*}}, !{{.*}})) diff --git a/llvm/test/CodeGen/Hexagon/intrinsics/xtype_alu.ll b/llvm/test/CodeGen/Hexagon/intrinsics/xtype_alu.ll index 4a11112..c5c23c2 100644 --- a/llvm/test/CodeGen/Hexagon/intrinsics/xtype_alu.ll +++ b/llvm/test/CodeGen/Hexagon/intrinsics/xtype_alu.ll @@ -1,13 +1,17 @@ ; RUN: llc -march=hexagon -mcpu=hexagonv5 -O0 < %s | FileCheck %s +; RUN: llc -march=hexagon -mcpu=hexagonv5 -O0 < %s | \ +; RUN: FileCheck -check-prefix=CHECK-CALL %s ; Hexagon Programmer's Reference Manual 11.10.1 XTYPE/ALU +; CHECK-CALL-NOT: call + ; Absolute value doubleword declare i64 @llvm.hexagon.A2.absp(i64) define i64 @A2_absp(i64 %a) { %z = call i64 @llvm.hexagon.A2.absp(i64 %a) ret i64 %z } -; CHECK: r1:0 = abs(r1:0) +; CHECK: = abs({{.*}}) ; Absolute value word declare i32 @llvm.hexagon.A2.abs(i32) @@ -15,14 +19,14 @@ define i32 @A2_abs(i32 %a) { %z = call i32 @llvm.hexagon.A2.abs(i32 %a) ret i32 %z } -; CHECK: r0 = abs(r0) +; CHECK: = abs({{.*}}) declare i32 @llvm.hexagon.A2.abssat(i32) define i32 @A2_abssat(i32 %a) { %z = call i32 @llvm.hexagon.A2.abssat(i32 %a) ret i32 %z } -; CHECK: r0 = abs(r0):sat +; CHECK: = abs({{.*}}):sat ; Add and accumulate declare i32 @llvm.hexagon.S4.addaddi(i32, i32, i32) @@ -30,42 +34,42 @@ define i32 @S4_addaddi(i32 %a, i32 %b) { %z = call i32 @llvm.hexagon.S4.addaddi(i32 %a, i32 %b, i32 0) ret i32 %z } -; CHECK: r0 = add(r0, add(r1, #0)) +; CHECK: = add({{.*}}, add({{.*}}, #0)) declare i32 @llvm.hexagon.S4.subaddi(i32, i32, i32) define i32 @S4_subaddi(i32 %a, i32 %b) { %z = call i32 @llvm.hexagon.S4.subaddi(i32 %a, i32 0, i32 %b) ret i32 %z } -; CHECK: r0 = add(r0, sub(#0, r1)) +; CHECK: = add({{.*}}, sub(#0, {{.*}})) declare i32 @llvm.hexagon.M2.accii(i32, i32, i32) define i32 @M2_accii(i32 %a, i32 %b) { %z = call i32 @llvm.hexagon.M2.accii(i32 %a, i32 %b, i32 0) ret i32 %z } -; CHECK: r0 += add(r1, #0) +; CHECK: += add({{.*}}, #0) declare i32 @llvm.hexagon.M2.naccii(i32, i32, i32) define i32 @M2_naccii(i32 %a, i32 %b) { %z = call i32 @llvm.hexagon.M2.naccii(i32 %a, i32 %b, i32 0) ret i32 %z } -; CHECK: r0 -= add(r1, #0) +; CHECK: -= add({{.*}}, #0) declare i32 @llvm.hexagon.M2.acci(i32, i32, i32) define i32 @M2_acci(i32 %a, i32 %b, i32 %c) { %z = call i32 @llvm.hexagon.M2.acci(i32 %a, i32 %b, i32 %c) ret i32 %z } -; CHECK: r0 += add(r1, r2) +; CHECK: += add({{.*}}, {{.*}}) declare i32 @llvm.hexagon.M2.nacci(i32, i32, i32) define i32 @M2_nacci(i32 %a, i32 %b, i32 %c) { %z = call i32 @llvm.hexagon.M2.nacci(i32 %a, i32 %b, i32 %c) ret i32 %z } -; CHECK: r0 -= add(r1, r2) +; CHECK: -= add({{.*}}, {{.*}}) ; Add doublewords declare i64 @llvm.hexagon.A2.addp(i64, i64) @@ -73,14 +77,14 @@ define i64 @A2_addp(i64 %a, i64 %b) { %z = call i64 @llvm.hexagon.A2.addp(i64 %a, i64 %b) ret i64 %z } -; CHECK: r1:0 = add(r1:0, r3:2) +; CHECK: = add({{.*}}, {{.*}}) declare i64 @llvm.hexagon.A2.addpsat(i64, i64) define i64 @A2_addpsat(i64 %a, i64 %b) { %z = call i64 @llvm.hexagon.A2.addpsat(i64 %a, i64 %b) ret i64 %z } -; CHECK: r1:0 = add(r1:0, r3:2):sat +; CHECK: = add({{.*}}, {{.*}}):sat ; Add halfword declare i32 
@llvm.hexagon.A2.addh.l16.ll(i32, i32) @@ -88,84 +92,84 @@ define i32 @A2_addh_l16_ll(i32 %a, i32 %b) { %z = call i32 @llvm.hexagon.A2.addh.l16.ll(i32 %a, i32 %b) ret i32 %z } -; CHECK: r0 = add(r0.l, r1.l) +; CHECK: = add({{.*}}.l, {{.*}}.l) declare i32 @llvm.hexagon.A2.addh.l16.hl(i32, i32) define i32 @A2_addh_l16_hl(i32 %a, i32 %b) { %z = call i32 @llvm.hexagon.A2.addh.l16.hl(i32 %a, i32 %b) ret i32 %z } -; CHECK: r0 = add(r0.l, r1.h) +; CHECK: = add({{.*}}.l, {{.*}}.h) declare i32 @llvm.hexagon.A2.addh.l16.sat.ll(i32, i32) define i32 @A2_addh_l16_sat.ll(i32 %a, i32 %b) { %z = call i32 @llvm.hexagon.A2.addh.l16.sat.ll(i32 %a, i32 %b) ret i32 %z } -; CHECK: r0 = add(r0.l, r1.l):sat +; CHECK: = add({{.*}}.l, {{.*}}.l):sat declare i32 @llvm.hexagon.A2.addh.l16.sat.hl(i32, i32) define i32 @A2_addh_l16_sat.hl(i32 %a, i32 %b) { %z = call i32 @llvm.hexagon.A2.addh.l16.sat.hl(i32 %a, i32 %b) ret i32 %z } -; CHECK: r0 = add(r0.l, r1.h):sat +; CHECK: = add({{.*}}.l, {{.*}}.h):sat declare i32 @llvm.hexagon.A2.addh.h16.ll(i32, i32) define i32 @A2_addh_h16_ll(i32 %a, i32 %b) { %z = call i32 @llvm.hexagon.A2.addh.h16.ll(i32 %a, i32 %b) ret i32 %z } -; CHECK: r0 = add(r0.l, r1.l):<<16 +; CHECK: = add({{.*}}.l, {{.*}}.l):<<16 declare i32 @llvm.hexagon.A2.addh.h16.lh(i32, i32) define i32 @A2_addh_h16_lh(i32 %a, i32 %b) { %z = call i32 @llvm.hexagon.A2.addh.h16.lh(i32 %a, i32 %b) ret i32 %z } -; CHECK: r0 = add(r0.l, r1.h):<<16 +; CHECK: = add({{.*}}.l, {{.*}}.h):<<16 declare i32 @llvm.hexagon.A2.addh.h16.hl(i32, i32) define i32 @A2_addh_h16_hl(i32 %a, i32 %b) { %z = call i32 @llvm.hexagon.A2.addh.h16.hl(i32 %a, i32 %b) ret i32 %z } -; CHECK: r0 = add(r0.h, r1.l):<<16 +; CHECK: = add({{.*}}.h, {{.*}}.l):<<16 declare i32 @llvm.hexagon.A2.addh.h16.hh(i32, i32) define i32 @A2_addh_h16_hh(i32 %a, i32 %b) { %z = call i32 @llvm.hexagon.A2.addh.h16.hh(i32 %a, i32 %b) ret i32 %z } -; CHECK: r0 = add(r0.h, r1.h):<<16 +; CHECK: = add({{.*}}.h, {{.*}}.h):<<16 declare i32 @llvm.hexagon.A2.addh.h16.sat.ll(i32, i32) define i32 @A2_addh_h16_sat_ll(i32 %a, i32 %b) { %z = call i32 @llvm.hexagon.A2.addh.h16.sat.ll(i32 %a, i32 %b) ret i32 %z } -; CHECK: r0 = add(r0.l, r1.l):sat:<<16 +; CHECK: = add({{.*}}.l, {{.*}}.l):sat:<<16 declare i32 @llvm.hexagon.A2.addh.h16.sat.lh(i32, i32) define i32 @A2_addh_h16_sat_lh(i32 %a, i32 %b) { %z = call i32 @llvm.hexagon.A2.addh.h16.sat.lh(i32 %a, i32 %b) ret i32 %z } -; CHECK: r0 = add(r0.l, r1.h):sat:<<16 +; CHECK: = add({{.*}}.l, {{.*}}.h):sat:<<16 declare i32 @llvm.hexagon.A2.addh.h16.sat.hl(i32, i32) define i32 @A2_addh_h16_sat_hl(i32 %a, i32 %b) { %z = call i32 @llvm.hexagon.A2.addh.h16.sat.hl(i32 %a, i32 %b) ret i32 %z } -; CHECK: r0 = add(r0.h, r1.l):sat:<<16 +; CHECK: = add({{.*}}.h, {{.*}}.l):sat:<<16 declare i32 @llvm.hexagon.A2.addh.h16.sat.hh(i32, i32) define i32 @A2_addh_h16_sat_hh(i32 %a, i32 %b) { %z = call i32 @llvm.hexagon.A2.addh.h16.sat.hh(i32 %a, i32 %b) ret i32 %z } -; CHECK: r0 = add(r0.h, r1.h):sat:<<16 +; CHECK: = add({{.*}}.h, {{.*}}.h):sat:<<16 ; Logical doublewords declare i64 @llvm.hexagon.A2.notp(i64) @@ -173,42 +177,42 @@ define i64 @A2_notp(i64 %a) { %z = call i64 @llvm.hexagon.A2.notp(i64 %a) ret i64 %z } -; CHECK: r1:0 = not(r1:0) +; CHECK: = not({{.*}}) declare i64 @llvm.hexagon.A2.andp(i64, i64) define i64 @A2_andp(i64 %a, i64 %b) { %z = call i64 @llvm.hexagon.A2.andp(i64 %a, i64 %b) ret i64 %z } -; CHECK: r1:0 = and(r1:0, r3:2) +; CHECK: = and({{.*}}, {{.*}}) declare i64 @llvm.hexagon.A4.andnp(i64, i64) define i64 @A2_andnp(i64 %a, i64 %b) { %z = call 
i64 @llvm.hexagon.A4.andnp(i64 %a, i64 %b) ret i64 %z } -; CHECK: r1:0 = and(r1:0, ~r3:2) +; CHECK: = and({{.*}}, ~{{.*}}) declare i64 @llvm.hexagon.A2.orp(i64, i64) define i64 @A2_orp(i64 %a, i64 %b) { %z = call i64 @llvm.hexagon.A2.orp(i64 %a, i64 %b) ret i64 %z } -; CHECK: r1:0 = or(r1:0, r3:2) +; CHECK: = or({{.*}}, {{.*}}) declare i64 @llvm.hexagon.A4.ornp(i64, i64) define i64 @A2_ornp(i64 %a, i64 %b) { %z = call i64 @llvm.hexagon.A4.ornp(i64 %a, i64 %b) ret i64 %z } -; CHECK: r1:0 = or(r1:0, ~r3:2) +; CHECK: = or({{.*}}, ~{{.*}}) declare i64 @llvm.hexagon.A2.xorp(i64, i64) define i64 @A2_xorp(i64 %a, i64 %b) { %z = call i64 @llvm.hexagon.A2.xorp(i64 %a, i64 %b) ret i64 %z } -; CHECK: r1:0 = xor(r1:0, r3:2) +; CHECK: = xor({{.*}}, {{.*}}) ; Logical-logical doublewords declare i64 @llvm.hexagon.M4.xor.xacc(i64, i64, i64) @@ -216,7 +220,7 @@ define i64 @M4_xor_xacc(i64 %a, i64 %b, i64 %c) { %z = call i64 @llvm.hexagon.M4.xor.xacc(i64 %a, i64 %b, i64 %c) ret i64 %z } -; CHECK: r1:0 ^= xor(r3:2, r5:4) +; CHECK: ^= xor({{.*}}, {{.*}}) ; Logical-logical words declare i32 @llvm.hexagon.S4.or.andi(i32, i32, i32) @@ -224,91 +228,91 @@ define i32 @S4_or_andi(i32 %a, i32 %b) { %z = call i32 @llvm.hexagon.S4.or.andi(i32 %a, i32 %b, i32 0) ret i32 %z } -; CHECK: r0 |= and(r1, #0) +; CHECK: |= and({{.*}}, #0) declare i32 @llvm.hexagon.S4.or.andix(i32, i32, i32) define i32 @S4_or_andix(i32 %a, i32 %b) { %z = call i32 @llvm.hexagon.S4.or.andix(i32 %a, i32 %b, i32 0) ret i32 %z } -; CHECK: r1 = or(r0, and(r1, #0)) +; CHECK: = or({{.*}}, and({{.*}}, #0)) declare i32 @llvm.hexagon.M4.or.andn(i32, i32, i32) define i32 @M4_or_andn(i32 %a, i32 %b, i32 %c) { %z = call i32 @llvm.hexagon.M4.or.andn(i32 %a, i32 %b, i32 %c) ret i32 %z } -; CHECK: r0 |= and(r1, ~r2) +; CHECK: |= and({{.*}}, ~{{.*}}) declare i32 @llvm.hexagon.M4.and.andn(i32, i32, i32) define i32 @M4_and_andn(i32 %a, i32 %b, i32 %c) { %z = call i32 @llvm.hexagon.M4.and.andn(i32 %a, i32 %b, i32 %c) ret i32 %z } -; CHECK: r0 &= and(r1, ~r2) +; CHECK: &= and({{.*}}, ~{{.*}}) declare i32 @llvm.hexagon.M4.xor.andn(i32, i32, i32) define i32 @M4_xor_andn(i32 %a, i32 %b, i32 %c) { %z = call i32 @llvm.hexagon.M4.xor.andn(i32 %a, i32 %b, i32 %c) ret i32 %z } -; CHECK: r0 ^= and(r1, ~r2) +; CHECK: ^= and({{.*}}, ~{{.*}}) declare i32 @llvm.hexagon.M4.and.and(i32, i32, i32) define i32 @M4_and_and(i32 %a, i32 %b, i32 %c) { %z = call i32 @llvm.hexagon.M4.and.and(i32 %a, i32 %b, i32 %c) ret i32 %z } -; CHECK: r0 &= and(r1, r2) +; CHECK: &= and({{.*}}, {{.*}}) declare i32 @llvm.hexagon.M4.and.or(i32, i32, i32) define i32 @M4_and_or(i32 %a, i32 %b, i32 %c) { %z = call i32 @llvm.hexagon.M4.and.or(i32 %a, i32 %b, i32 %c) ret i32 %z } -; CHECK: r0 &= or(r1, r2) +; CHECK: &= or({{.*}}, {{.*}}) declare i32 @llvm.hexagon.M4.and.xor(i32, i32, i32) define i32 @M4_and_xor(i32 %a, i32 %b, i32 %c) { %z = call i32 @llvm.hexagon.M4.and.xor(i32 %a, i32 %b, i32 %c) ret i32 %z } -; CHECK: r0 &= xor(r1, r2) +; CHECK: &= xor({{.*}}, {{.*}}) declare i32 @llvm.hexagon.M4.or.and(i32, i32, i32) define i32 @M4_or_and(i32 %a, i32 %b, i32 %c) { %z = call i32 @llvm.hexagon.M4.or.and(i32 %a, i32 %b, i32 %c) ret i32 %z } -; CHECK: r0 |= and(r1, r2) +; CHECK: |= and({{.*}}, {{.*}}) declare i32 @llvm.hexagon.M4.or.or(i32, i32, i32) define i32 @M4_or_or(i32 %a, i32 %b, i32 %c) { %z = call i32 @llvm.hexagon.M4.or.or(i32 %a, i32 %b, i32 %c) ret i32 %z } -; CHECK: r0 |= or(r1, r2) +; CHECK: |= or({{.*}}, {{.*}}) declare i32 @llvm.hexagon.M4.or.xor(i32, i32, i32) define i32 @M4_or_xor(i32 %a, i32 
%b, i32 %c) { %z = call i32 @llvm.hexagon.M4.or.xor(i32 %a, i32 %b, i32 %c) ret i32 %z } -; CHECK: r0 |= xor(r1, r2) +; CHECK: |= xor({{.*}}, {{.*}}) declare i32 @llvm.hexagon.M4.xor.and(i32, i32, i32) define i32 @M4_xor_and(i32 %a, i32 %b, i32 %c) { %z = call i32 @llvm.hexagon.M4.xor.and(i32 %a, i32 %b, i32 %c) ret i32 %z } -; CHECK: r0 ^= and(r1, r2) +; CHECK: ^= and({{.*}}, {{.*}}) declare i32 @llvm.hexagon.M4.xor.or(i32, i32, i32) define i32 @M4_xor_or(i32 %a, i32 %b, i32 %c) { %z = call i32 @llvm.hexagon.M4.xor.or(i32 %a, i32 %b, i32 %c) ret i32 %z } -; CHECK: r0 ^= or(r1, r2) +; CHECK: ^= or({{.*}}, {{.*}}) ; Maximum words declare i32 @llvm.hexagon.A2.max(i32, i32) @@ -316,14 +320,14 @@ define i32 @A2_max(i32 %a, i32 %b) { %z = call i32 @llvm.hexagon.A2.max(i32 %a, i32 %b) ret i32 %z } -; CHECK: r0 = max(r0, r1) +; CHECK: = max({{.*}}, {{.*}}) declare i32 @llvm.hexagon.A2.maxu(i32, i32) define i32 @A2_maxu(i32 %a, i32 %b) { %z = call i32 @llvm.hexagon.A2.maxu(i32 %a, i32 %b) ret i32 %z } -; CHECK: r0 = maxu(r0, r1) +; CHECK: = maxu({{.*}}, {{.*}}) ; Maximum doublewords declare i64 @llvm.hexagon.A2.maxp(i64, i64) @@ -331,14 +335,14 @@ define i64 @A2_maxp(i64 %a, i64 %b) { %z = call i64 @llvm.hexagon.A2.maxp(i64 %a, i64 %b) ret i64 %z } -; CHECK: r1:0 = max(r1:0, r3:2) +; CHECK: = max({{.*}}, {{.*}}) declare i64 @llvm.hexagon.A2.maxup(i64, i64) define i64 @A2_maxup(i64 %a, i64 %b) { %z = call i64 @llvm.hexagon.A2.maxup(i64 %a, i64 %b) ret i64 %z } -; CHECK: r1:0 = maxu(r1:0, r3:2) +; CHECK: = maxu({{.*}}, {{.*}}) ; Minimum words declare i32 @llvm.hexagon.A2.min(i32, i32) @@ -346,14 +350,14 @@ define i32 @A2_min(i32 %a, i32 %b) { %z = call i32 @llvm.hexagon.A2.min(i32 %a, i32 %b) ret i32 %z } -; CHECK: r0 = min(r0, r1) +; CHECK: = min({{.*}}, {{.*}}) declare i32 @llvm.hexagon.A2.minu(i32, i32) define i32 @A2_minu(i32 %a, i32 %b) { %z = call i32 @llvm.hexagon.A2.minu(i32 %a, i32 %b) ret i32 %z } -; CHECK: r0 = minu(r0, r1) +; CHECK: = minu({{.*}}, {{.*}}) ; Minimum doublewords declare i64 @llvm.hexagon.A2.minp(i64, i64) @@ -361,14 +365,14 @@ define i64 @A2_minp(i64 %a, i64 %b) { %z = call i64 @llvm.hexagon.A2.minp(i64 %a, i64 %b) ret i64 %z } -; CHECK: r1:0 = min(r1:0, r3:2) +; CHECK: = min({{.*}}, {{.*}}) declare i64 @llvm.hexagon.A2.minup(i64, i64) define i64 @A2_minup(i64 %a, i64 %b) { %z = call i64 @llvm.hexagon.A2.minup(i64 %a, i64 %b) ret i64 %z } -; CHECK: r1:0 = minu(r1:0, r3:2) +; CHECK: = minu({{.*}}, {{.*}}) ; Module wrap declare i32 @llvm.hexagon.A4.modwrapu(i32, i32) @@ -376,7 +380,7 @@ define i32 @A4_modwrapu(i32 %a, i32 %b) { %z = call i32 @llvm.hexagon.A4.modwrapu(i32 %a, i32 %b) ret i32 %z } -; CHECK: r0 = modwrap(r0, r1) +; CHECK: = modwrap({{.*}}, {{.*}}) ; Negate declare i64 @llvm.hexagon.A2.negp(i64) @@ -384,14 +388,14 @@ define i64 @A2_negp(i64 %a) { %z = call i64 @llvm.hexagon.A2.negp(i64 %a) ret i64 %z } -; CHECK: r1:0 = neg(r1:0) +; CHECK: = neg({{.*}}) declare i32 @llvm.hexagon.A2.negsat(i32) define i32 @A2_negsat(i32 %a) { %z = call i32 @llvm.hexagon.A2.negsat(i32 %a) ret i32 %z } -; CHECK: r0 = neg(r0):sat +; CHECK: = neg({{.*}}):sat ; Round declare i32 @llvm.hexagon.A2.roundsat(i64) @@ -399,49 +403,49 @@ define i32 @A2_roundsat(i64 %a) { %z = call i32 @llvm.hexagon.A2.roundsat(i64 %a) ret i32 %z } -; CHECK: r0 = round(r1:0):sat +; CHECK: = round({{.*}}):sat declare i32 @llvm.hexagon.A4.cround.ri(i32, i32) define i32 @A4_cround_ri(i32 %a) { %z = call i32 @llvm.hexagon.A4.cround.ri(i32 %a, i32 0) ret i32 %z } -; CHECK: r0 = cround(r0, #0) +; CHECK: = 
cround({{.*}}, #0) declare i32 @llvm.hexagon.A4.round.ri(i32, i32) define i32 @A4_round_ri(i32 %a) { %z = call i32 @llvm.hexagon.A4.round.ri(i32 %a, i32 0) ret i32 %z } -; CHECK: r0 = round(r0, #0) +; CHECK: = round({{.*}}, #0) declare i32 @llvm.hexagon.A4.round.ri.sat(i32, i32) define i32 @A4_round_ri_sat(i32 %a) { %z = call i32 @llvm.hexagon.A4.round.ri.sat(i32 %a, i32 0) ret i32 %z } -; CHECK: r0 = round(r0, #0):sat +; CHECK: = round({{.*}}, #0):sat declare i32 @llvm.hexagon.A4.cround.rr(i32, i32) define i32 @A4_cround_rr(i32 %a, i32 %b) { %z = call i32 @llvm.hexagon.A4.cround.rr(i32 %a, i32 %b) ret i32 %z } -; CHECK: r0 = cround(r0, r1) +; CHECK: = cround({{.*}}, {{.*}}) declare i32 @llvm.hexagon.A4.round.rr(i32, i32) define i32 @A4_round_rr(i32 %a, i32 %b) { %z = call i32 @llvm.hexagon.A4.round.rr(i32 %a, i32 %b) ret i32 %z } -; CHECK: r0 = round(r0, r1) +; CHECK: = round({{.*}}, {{.*}}) declare i32 @llvm.hexagon.A4.round.rr.sat(i32, i32) define i32 @A4_round_rr_sat(i32 %a, i32 %b) { %z = call i32 @llvm.hexagon.A4.round.rr.sat(i32 %a, i32 %b) ret i32 %z } -; CHECK: r0 = round(r0, r1):sat +; CHECK: = round({{.*}}, {{.*}}):sat ; Subtract doublewords declare i64 @llvm.hexagon.A2.subp(i64, i64) @@ -449,7 +453,7 @@ define i64 @A2_subp(i64 %a, i64 %b) { %z = call i64 @llvm.hexagon.A2.subp(i64 %a, i64 %b) ret i64 %z } -; CHECK: r1:0 = sub(r1:0, r3:2) +; CHECK: = sub({{.*}}, {{.*}}) ; Subtract and accumulate declare i32 @llvm.hexagon.M2.subacc(i32, i32, i32) @@ -457,7 +461,7 @@ define i32 @M2_subacc(i32 %a, i32 %b, i32 %c) { %z = call i32 @llvm.hexagon.M2.subacc(i32 %a, i32 %b, i32 %c) ret i32 %z } -; CHECK: r0 += sub(r1, r2) +; CHECK: += sub({{.*}}, {{.*}}) ; Subtract halfwords declare i32 @llvm.hexagon.A2.subh.l16.ll(i32, i32) @@ -465,84 +469,84 @@ define i32 @A2_subh_l16_ll(i32 %a, i32 %b) { %z = call i32 @llvm.hexagon.A2.subh.l16.ll(i32 %a, i32 %b) ret i32 %z } -; CHECK: r0 = sub(r0.l, r1.l) +; CHECK: = sub({{.*}}.l, {{.*}}.l) declare i32 @llvm.hexagon.A2.subh.l16.hl(i32, i32) define i32 @A2_subh_l16_hl(i32 %a, i32 %b) { %z = call i32 @llvm.hexagon.A2.subh.l16.hl(i32 %a, i32 %b) ret i32 %z } -; CHECK: r0 = sub(r0.l, r1.h) +; CHECK: = sub({{.*}}.l, {{.*}}.h) declare i32 @llvm.hexagon.A2.subh.l16.sat.ll(i32, i32) define i32 @A2_subh_l16_sat.ll(i32 %a, i32 %b) { %z = call i32 @llvm.hexagon.A2.subh.l16.sat.ll(i32 %a, i32 %b) ret i32 %z } -; CHECK: r0 = sub(r0.l, r1.l):sat +; CHECK: = sub({{.*}}.l, {{.*}}.l):sat declare i32 @llvm.hexagon.A2.subh.l16.sat.hl(i32, i32) define i32 @A2_subh_l16_sat.hl(i32 %a, i32 %b) { %z = call i32 @llvm.hexagon.A2.subh.l16.sat.hl(i32 %a, i32 %b) ret i32 %z } -; CHECK: r0 = sub(r0.l, r1.h):sat +; CHECK: = sub({{.*}}.l, {{.*}}.h):sat declare i32 @llvm.hexagon.A2.subh.h16.ll(i32, i32) define i32 @A2_subh_h16_ll(i32 %a, i32 %b) { %z = call i32 @llvm.hexagon.A2.subh.h16.ll(i32 %a, i32 %b) ret i32 %z } -; CHECK: r0 = sub(r0.l, r1.l):<<16 +; CHECK: = sub({{.*}}.l, {{.*}}.l):<<16 declare i32 @llvm.hexagon.A2.subh.h16.lh(i32, i32) define i32 @A2_subh_h16_lh(i32 %a, i32 %b) { %z = call i32 @llvm.hexagon.A2.subh.h16.lh(i32 %a, i32 %b) ret i32 %z } -; CHECK: r0 = sub(r0.l, r1.h):<<16 +; CHECK: = sub({{.*}}.l, {{.*}}.h):<<16 declare i32 @llvm.hexagon.A2.subh.h16.hl(i32, i32) define i32 @A2_subh_h16_hl(i32 %a, i32 %b) { %z = call i32 @llvm.hexagon.A2.subh.h16.hl(i32 %a, i32 %b) ret i32 %z } -; CHECK: r0 = sub(r0.h, r1.l):<<16 +; CHECK: = sub({{.*}}.h, {{.*}}.l):<<16 declare i32 @llvm.hexagon.A2.subh.h16.hh(i32, i32) define i32 @A2_subh_h16_hh(i32 %a, i32 %b) { %z = call i32 
@llvm.hexagon.A2.subh.h16.hh(i32 %a, i32 %b) ret i32 %z } -; CHECK: r0 = sub(r0.h, r1.h):<<16 +; CHECK: = sub({{.*}}.h, {{.*}}.h):<<16 declare i32 @llvm.hexagon.A2.subh.h16.sat.ll(i32, i32) define i32 @A2_subh_h16_sat_ll(i32 %a, i32 %b) { %z = call i32 @llvm.hexagon.A2.subh.h16.sat.ll(i32 %a, i32 %b) ret i32 %z } -; CHECK: r0 = sub(r0.l, r1.l):sat:<<16 +; CHECK: = sub({{.*}}.l, {{.*}}.l):sat:<<16 declare i32 @llvm.hexagon.A2.subh.h16.sat.lh(i32, i32) define i32 @A2_subh_h16_sat_lh(i32 %a, i32 %b) { %z = call i32 @llvm.hexagon.A2.subh.h16.sat.lh(i32 %a, i32 %b) ret i32 %z } -; CHECK: r0 = sub(r0.l, r1.h):sat:<<16 +; CHECK: = sub({{.*}}.l, {{.*}}.h):sat:<<16 declare i32 @llvm.hexagon.A2.subh.h16.sat.hl(i32, i32) define i32 @A2_subh_h16_sat_hl(i32 %a, i32 %b) { %z = call i32 @llvm.hexagon.A2.subh.h16.sat.hl(i32 %a, i32 %b) ret i32 %z } -; CHECK: r0 = sub(r0.h, r1.l):sat:<<16 +; CHECK: = sub({{.*}}.h, {{.*}}.l):sat:<<16 declare i32 @llvm.hexagon.A2.subh.h16.sat.hh(i32, i32) define i32 @A2_subh_h16_sat_hh(i32 %a, i32 %b) { %z = call i32 @llvm.hexagon.A2.subh.h16.sat.hh(i32 %a, i32 %b) ret i32 %z } -; CHECK: r0 = sub(r0.h, r1.h):sat:<<16 +; CHECK: = sub({{.*}}.h, {{.*}}.h):sat:<<16 ; Sign extend word to doubleword declare i64 @llvm.hexagon.A2.sxtw(i32) @@ -550,7 +554,7 @@ define i64 @A2_sxtw(i32 %a) { %z = call i64 @llvm.hexagon.A2.sxtw(i32 %a) ret i64 %z } -; CHECK: = sxtw(r0) +; CHECK: = sxtw({{.*}}) ; Vector absolute value halfwords declare i64 @llvm.hexagon.A2.vabsh(i64) @@ -558,14 +562,14 @@ define i64 @A2_vabsh(i64 %a) { %z = call i64 @llvm.hexagon.A2.vabsh(i64 %a) ret i64 %z } -; CHECK: r1:0 = vabsh(r1:0) +; CHECK: = vabsh({{.*}}) declare i64 @llvm.hexagon.A2.vabshsat(i64) define i64 @A2_vabshsat(i64 %a) { %z = call i64 @llvm.hexagon.A2.vabshsat(i64 %a) ret i64 %z } -; CHECK: r1:0 = vabsh(r1:0):sat +; CHECK: = vabsh({{.*}}):sat ; Vector absolute value words declare i64 @llvm.hexagon.A2.vabsw(i64) @@ -573,14 +577,14 @@ define i64 @A2_vabsw(i64 %a) { %z = call i64 @llvm.hexagon.A2.vabsw(i64 %a) ret i64 %z } -; CHECK: r1:0 = vabsw(r1:0) +; CHECK: = vabsw({{.*}}) declare i64 @llvm.hexagon.A2.vabswsat(i64) define i64 @A2_vabswsat(i64 %a) { %z = call i64 @llvm.hexagon.A2.vabswsat(i64 %a) ret i64 %z } -; CHECK: r1:0 = vabsw(r1:0):sat +; CHECK: = vabsw({{.*}}):sat ; Vector absolute difference halfwords declare i64 @llvm.hexagon.M2.vabsdiffh(i64, i64) @@ -588,7 +592,7 @@ define i64 @M2_vabsdiffh(i64 %a, i64 %b) { %z = call i64 @llvm.hexagon.M2.vabsdiffh(i64 %a, i64 %b) ret i64 %z } -; CHECK: r1:0 = vabsdiffh(r1:0, r3:2) +; CHECK: = vabsdiffh({{.*}}, {{.*}}) ; Vector absolute difference words declare i64 @llvm.hexagon.M2.vabsdiffw(i64, i64) @@ -596,7 +600,7 @@ define i64 @M2_vabsdiffw(i64 %a, i64 %b) { %z = call i64 @llvm.hexagon.M2.vabsdiffw(i64 %a, i64 %b) ret i64 %z } -; CHECK: r1:0 = vabsdiffw(r1:0, r3:2) +; CHECK: = vabsdiffw({{.*}}, {{.*}}) ; Vector add halfwords declare i64 @llvm.hexagon.A2.vaddh(i64, i64) @@ -604,21 +608,21 @@ define i64 @A2_vaddh(i64 %a, i64 %b) { %z = call i64 @llvm.hexagon.A2.vaddh(i64 %a, i64 %b) ret i64 %z } -; CHECK: r1:0 = vaddh(r1:0, r3:2) +; CHECK: = vaddh({{.*}}, {{.*}}) declare i64 @llvm.hexagon.A2.vaddhs(i64, i64) define i64 @A2_vaddhs(i64 %a, i64 %b) { %z = call i64 @llvm.hexagon.A2.vaddhs(i64 %a, i64 %b) ret i64 %z } -; CHECK: r1:0 = vaddh(r1:0, r3:2):sat +; CHECK: = vaddh({{.*}}, {{.*}}):sat declare i64 @llvm.hexagon.A2.vadduhs(i64, i64) define i64 @A2_vadduhs(i64 %a, i64 %b) { %z = call i64 @llvm.hexagon.A2.vadduhs(i64 %a, i64 %b) ret i64 %z } -; CHECK: r1:0 
= vadduh(r1:0, r3:2):sat +; CHECK: = vadduh({{.*}}, {{.*}}):sat ; Vector add halfwords with saturate and pack to unsigned bytes declare i32 @llvm.hexagon.A5.vaddhubs(i64, i64) @@ -626,7 +630,7 @@ define i32 @A5_vaddhubs(i64 %a, i64 %b) { %z = call i32 @llvm.hexagon.A5.vaddhubs(i64 %a, i64 %b) ret i32 %z } -; CHECK: r0 = vaddhub(r1:0, r3:2):sat +; CHECK: = vaddhub({{.*}}, {{.*}}):sat ; Vector reduce add unsigned bytes declare i64 @llvm.hexagon.A2.vraddub(i64, i64) @@ -634,14 +638,14 @@ define i64 @A2_vraddub(i64 %a, i64 %b) { %z = call i64 @llvm.hexagon.A2.vraddub(i64 %a, i64 %b) ret i64 %z } -; CHECK: r1:0 = vraddub(r1:0, r3:2) +; CHECK: = vraddub({{.*}}, {{.*}}) declare i64 @llvm.hexagon.A2.vraddub.acc(i64, i64, i64) define i64 @A2_vraddub_acc(i64 %a, i64 %b, i64 %c) { %z = call i64 @llvm.hexagon.A2.vraddub.acc(i64 %a, i64 %b, i64 %c) ret i64 %z } -; CHECK: r1:0 += vraddub(r3:2, r5:4) +; CHECK: += vraddub({{.*}}, {{.*}}) ; Vector reduce add halfwords declare i32 @llvm.hexagon.M2.vradduh(i64, i64) @@ -649,14 +653,14 @@ define i32 @M2_vradduh(i64 %a, i64 %b) { %z = call i32 @llvm.hexagon.M2.vradduh(i64 %a, i64 %b) ret i32 %z } -; CHECK: r0 = vradduh(r1:0, r3:2) +; CHECK: = vradduh({{.*}}, {{.*}}) declare i32 @llvm.hexagon.M2.vraddh(i64, i64) define i32 @M2_vraddh(i64 %a, i64 %b) { %z = call i32 @llvm.hexagon.M2.vraddh(i64 %a, i64 %b) ret i32 %z } -; CHECK: r0 = vraddh(r1:0, r3:2) +; CHECK: = vraddh({{.*}}, {{.*}}) ; Vector add bytes declare i64 @llvm.hexagon.A2.vaddub(i64, i64) @@ -664,14 +668,14 @@ define i64 @A2_vaddub(i64 %a, i64 %b) { %z = call i64 @llvm.hexagon.A2.vaddub(i64 %a, i64 %b) ret i64 %z } -; CHECK: r1:0 = vaddub(r1:0, r3:2) +; CHECK: = vaddub({{.*}}, {{.*}}) declare i64 @llvm.hexagon.A2.vaddubs(i64, i64) define i64 @A2_vaddubs(i64 %a, i64 %b) { %z = call i64 @llvm.hexagon.A2.vaddubs(i64 %a, i64 %b) ret i64 %z } -; CHECK: r1:0 = vaddub(r1:0, r3:2):sat +; CHECK: = vaddub({{.*}}, {{.*}}):sat ; Vector add words declare i64 @llvm.hexagon.A2.vaddw(i64, i64) @@ -679,14 +683,14 @@ define i64 @A2_vaddw(i64 %a, i64 %b) { %z = call i64 @llvm.hexagon.A2.vaddw(i64 %a, i64 %b) ret i64 %z } -; CHECK: r1:0 = vaddw(r1:0, r3:2) +; CHECK: = vaddw({{.*}}, {{.*}}) declare i64 @llvm.hexagon.A2.vaddws(i64, i64) define i64 @A2_vaddws(i64 %a, i64 %b) { %z = call i64 @llvm.hexagon.A2.vaddws(i64 %a, i64 %b) ret i64 %z } -; CHECK: r1:0 = vaddw(r1:0, r3:2):sat +; CHECK: = vaddw({{.*}}, {{.*}}):sat ; Vector average halfwords declare i64 @llvm.hexagon.A2.vavgh(i64, i64) @@ -694,56 +698,56 @@ define i64 @A2_vavgh(i64 %a, i64 %b) { %z = call i64 @llvm.hexagon.A2.vavgh(i64 %a, i64 %b) ret i64 %z } -; CHECK: r1:0 = vavgh(r1:0, r3:2) +; CHECK: = vavgh({{.*}}, {{.*}}) declare i64 @llvm.hexagon.A2.vavghr(i64, i64) define i64 @A2_vavghr(i64 %a, i64 %b) { %z = call i64 @llvm.hexagon.A2.vavghr(i64 %a, i64 %b) ret i64 %z } -; CHECK: r1:0 = vavgh(r1:0, r3:2):rnd +; CHECK: = vavgh({{.*}}, {{.*}}):rnd declare i64 @llvm.hexagon.A2.vavghcr(i64, i64) define i64 @A2_vavghcr(i64 %a, i64 %b) { %z = call i64 @llvm.hexagon.A2.vavghcr(i64 %a, i64 %b) ret i64 %z } -; CHECK: r1:0 = vavgh(r1:0, r3:2):crnd +; CHECK: = vavgh({{.*}}, {{.*}}):crnd declare i64 @llvm.hexagon.A2.vavguh(i64, i64) define i64 @A2_vavguh(i64 %a, i64 %b) { %z = call i64 @llvm.hexagon.A2.vavguh(i64 %a, i64 %b) ret i64 %z } -; CHECK: r1:0 = vavguh(r1:0, r3:2) +; CHECK: = vavguh({{.*}}, {{.*}}) declare i64 @llvm.hexagon.A2.vavguhr(i64, i64) define i64 @A2_vavguhr(i64 %a, i64 %b) { %z = call i64 @llvm.hexagon.A2.vavguhr(i64 %a, i64 %b) ret i64 %z } -; CHECK: r1:0 
= vavguh(r1:0, r3:2):rnd +; CHECK: = vavguh({{.*}}, {{.*}}):rnd declare i64 @llvm.hexagon.A2.vnavgh(i64, i64) define i64 @A2_vnavgh(i64 %a, i64 %b) { %z = call i64 @llvm.hexagon.A2.vnavgh(i64 %a, i64 %b) ret i64 %z } -; CHECK: r1:0 = vnavgh(r1:0, r3:2) +; CHECK: = vnavgh({{.*}}, {{.*}}) declare i64 @llvm.hexagon.A2.vnavghr(i64, i64) define i64 @A2_vnavghr(i64 %a, i64 %b) { %z = call i64 @llvm.hexagon.A2.vnavghr(i64 %a, i64 %b) ret i64 %z } -; CHECK: r1:0 = vnavgh(r1:0, r3:2):rnd +; CHECK: = vnavgh({{.*}}, {{.*}}):rnd declare i64 @llvm.hexagon.A2.vnavghcr(i64, i64) define i64 @A2_vnavghcr(i64 %a, i64 %b) { %z = call i64 @llvm.hexagon.A2.vnavghcr(i64 %a, i64 %b) ret i64 %z } -; CHECK: r1:0 = vnavgh(r1:0, r3:2):crnd +; CHECK: = vnavgh({{.*}}, {{.*}}):crnd ; Vector average unsigned bytes declare i64 @llvm.hexagon.A2.vavgub(i64, i64) @@ -751,14 +755,14 @@ define i64 @A2_vavgub(i64 %a, i64 %b) { %z = call i64 @llvm.hexagon.A2.vavgub(i64 %a, i64 %b) ret i64 %z } -; CHECK: r1:0 = vavgub(r1:0, r3:2) +; CHECK: vavgub({{.*}}, {{.*}}) declare i64 @llvm.hexagon.A2.vavgubr(i64, i64) define i64 @A2_vavgubr(i64 %a, i64 %b) { %z = call i64 @llvm.hexagon.A2.vavgubr(i64 %a, i64 %b) ret i64 %z } -; CHECK: r1:0 = vavgub(r1:0, r3:2):rnd +; CHECK: = vavgub({{.*}}, {{.*}}):rnd ; Vector average words declare i64 @llvm.hexagon.A2.vavgw(i64, i64) @@ -766,56 +770,56 @@ define i64 @A2_vavgw(i64 %a, i64 %b) { %z = call i64 @llvm.hexagon.A2.vavgw(i64 %a, i64 %b) ret i64 %z } -; CHECK: r1:0 = vavgw(r1:0, r3:2) +; CHECK: = vavgw({{.*}}, {{.*}}) declare i64 @llvm.hexagon.A2.vavgwr(i64, i64) define i64 @A2_vavgwr(i64 %a, i64 %b) { %z = call i64 @llvm.hexagon.A2.vavgwr(i64 %a, i64 %b) ret i64 %z } -; CHECK: r1:0 = vavgw(r1:0, r3:2):rnd +; CHECK: = vavgw({{.*}}, {{.*}}):rnd declare i64 @llvm.hexagon.A2.vavgwcr(i64, i64) define i64 @A2_vavgwcr(i64 %a, i64 %b) { %z = call i64 @llvm.hexagon.A2.vavgwcr(i64 %a, i64 %b) ret i64 %z } -; CHECK: r1:0 = vavgw(r1:0, r3:2):crnd +; CHECK: = vavgw({{.*}}, {{.*}}):crnd declare i64 @llvm.hexagon.A2.vavguw(i64, i64) define i64 @A2_vavguw(i64 %a, i64 %b) { %z = call i64 @llvm.hexagon.A2.vavguw(i64 %a, i64 %b) ret i64 %z } -; CHECK: r1:0 = vavguw(r1:0, r3:2) +; CHECK: = vavguw({{.*}}, {{.*}}) declare i64 @llvm.hexagon.A2.vavguwr(i64, i64) define i64 @A2_vavguwr(i64 %a, i64 %b) { %z = call i64 @llvm.hexagon.A2.vavguwr(i64 %a, i64 %b) ret i64 %z } -; CHECK: r1:0 = vavguw(r1:0, r3:2):rnd +; CHECK: = vavguw({{.*}}, {{.*}}):rnd declare i64 @llvm.hexagon.A2.vnavgw(i64, i64) define i64 @A2_vnavgw(i64 %a, i64 %b) { %z = call i64 @llvm.hexagon.A2.vnavgw(i64 %a, i64 %b) ret i64 %z } -; CHECK: r1:0 = vnavgw(r1:0, r3:2) +; CHECK: = vnavgw({{.*}}, {{.*}}) declare i64 @llvm.hexagon.A2.vnavgwr(i64, i64) define i64 @A2_vnavgwr(i64 %a, i64 %b) { %z = call i64 @llvm.hexagon.A2.vnavgwr(i64 %a, i64 %b) ret i64 %z } -; CHECK: r1:0 = vnavgw(r1:0, r3:2):rnd +; CHECK: = vnavgw({{.*}}, {{.*}}):rnd declare i64 @llvm.hexagon.A2.vnavgwcr(i64, i64) define i64 @A2_vnavgwcr(i64 %a, i64 %b) { %z = call i64 @llvm.hexagon.A2.vnavgwcr(i64 %a, i64 %b) ret i64 %z } -; CHECK: r1:0 = vnavgw(r1:0, r3:2):crnd +; CHECK: = vnavgw({{.*}}, {{.*}}):crnd ; Vector conditional negate declare i64 @llvm.hexagon.S2.vcnegh(i64, i32) @@ -823,14 +827,14 @@ define i64 @S2_vcnegh(i64 %a, i32 %b) { %z = call i64 @llvm.hexagon.S2.vcnegh(i64 %a, i32 %b) ret i64 %z } -; CHECK: r1:0 = vcnegh(r1:0, r2) +; CHECK: = vcnegh({{.*}}, {{.*}}) declare i64 @llvm.hexagon.S2.vrcnegh(i64, i64, i32) define i64 @S2_vrcnegh(i64 %a, i64 %b, i32 %c) { %z = call i64 
@llvm.hexagon.S2.vrcnegh(i64 %a, i64 %b, i32 %c) ret i64 %z } -; CHECK: r1:0 += vrcnegh(r3:2, r4) +; CHECK: += vrcnegh({{.*}}, {{.*}}) ; Vector maximum bytes declare i64 @llvm.hexagon.A2.vmaxub(i64, i64) @@ -838,14 +842,14 @@ define i64 @A2_vmaxub(i64 %a, i64 %b) { %z = call i64 @llvm.hexagon.A2.vmaxub(i64 %a, i64 %b) ret i64 %z } -; CHECK: r1:0 = vmaxub(r1:0, r3:2) +; CHECK: = vmaxub({{.*}}, {{.*}}) declare i64 @llvm.hexagon.A2.vmaxb(i64, i64) define i64 @A2_vmaxb(i64 %a, i64 %b) { %z = call i64 @llvm.hexagon.A2.vmaxb(i64 %a, i64 %b) ret i64 %z } -; CHECK: r1:0 = vmaxb(r1:0, r3:2) +; CHECK: = vmaxb({{.*}}, {{.*}}) ; Vector maximum halfwords declare i64 @llvm.hexagon.A2.vmaxh(i64, i64) @@ -853,14 +857,14 @@ define i64 @A2_vmaxh(i64 %a, i64 %b) { %z = call i64 @llvm.hexagon.A2.vmaxh(i64 %a, i64 %b) ret i64 %z } -; CHECK: r1:0 = vmaxh(r1:0, r3:2) +; CHECK: = vmaxh({{.*}}, {{.*}}) declare i64 @llvm.hexagon.A2.vmaxuh(i64, i64) define i64 @A2_vmaxuh(i64 %a, i64 %b) { %z = call i64 @llvm.hexagon.A2.vmaxuh(i64 %a, i64 %b) ret i64 %z } -; CHECK: r1:0 = vmaxuh(r1:0, r3:2) +; CHECK: = vmaxuh({{.*}}, {{.*}}) ; Vector reduce maximum halfwords declare i64 @llvm.hexagon.A4.vrmaxh(i64, i64, i32) @@ -868,14 +872,14 @@ define i64 @A4_vrmaxh(i64 %a, i64 %b, i32 %c) { %z = call i64 @llvm.hexagon.A4.vrmaxh(i64 %a, i64 %b, i32 %c) ret i64 %z } -; CHECK: r1:0 = vrmaxh(r3:2, r4) +; CHECK: = vrmaxh({{.*}}, {{.*}}) declare i64 @llvm.hexagon.A4.vrmaxuh(i64, i64, i32) define i64 @A4_vrmaxuh(i64 %a, i64 %b, i32 %c) { %z = call i64 @llvm.hexagon.A4.vrmaxuh(i64 %a, i64 %b, i32 %c) ret i64 %z } -; CHECK: r1:0 = vrmaxuh(r3:2, r4) +; CHECK: = vrmaxuh({{.*}}, {{.*}}) ; Vector reduce maximum words declare i64 @llvm.hexagon.A4.vrmaxw(i64, i64, i32) @@ -883,14 +887,14 @@ define i64 @A4_vrmaxw(i64 %a, i64 %b, i32 %c) { %z = call i64 @llvm.hexagon.A4.vrmaxw(i64 %a, i64 %b, i32 %c) ret i64 %z } -; CHECK: r1:0 = vrmaxw(r3:2, r4) +; CHECK: = vrmaxw({{.*}}, {{.*}}) declare i64 @llvm.hexagon.A4.vrmaxuw(i64, i64, i32) define i64 @A4_vrmaxuw(i64 %a, i64 %b, i32 %c) { %z = call i64 @llvm.hexagon.A4.vrmaxuw(i64 %a, i64 %b, i32 %c) ret i64 %z } -; CHECK: r1:0 = vrmaxuw(r3:2, r4) +; CHECK: vrmaxuw({{.*}}, {{.*}}) ; Vector minimum bytes declare i64 @llvm.hexagon.A2.vminub(i64, i64) @@ -898,14 +902,14 @@ define i64 @A2_vminub(i64 %a, i64 %b) { %z = call i64 @llvm.hexagon.A2.vminub(i64 %a, i64 %b) ret i64 %z } -; CHECK: r1:0 = vminub(r1:0, r3:2) +; CHECK: = vminub({{.*}}, {{.*}}) declare i64 @llvm.hexagon.A2.vminb(i64, i64) define i64 @A2_vminb(i64 %a, i64 %b) { %z = call i64 @llvm.hexagon.A2.vminb(i64 %a, i64 %b) ret i64 %z } -; CHECK: r1:0 = vminb(r1:0, r3:2) +; CHECK: = vminb({{.*}}, {{.*}}) ; Vector minimum halfwords declare i64 @llvm.hexagon.A2.vminh(i64, i64) @@ -913,14 +917,14 @@ define i64 @A2_vminh(i64 %a, i64 %b) { %z = call i64 @llvm.hexagon.A2.vminh(i64 %a, i64 %b) ret i64 %z } -; CHECK: r1:0 = vminh(r1:0, r3:2) +; CHECK: = vminh({{.*}}, {{.*}}) declare i64 @llvm.hexagon.A2.vminuh(i64, i64) define i64 @A2_vminuh(i64 %a, i64 %b) { %z = call i64 @llvm.hexagon.A2.vminuh(i64 %a, i64 %b) ret i64 %z } -; CHECK: r1:0 = vminuh(r1:0, r3:2) +; CHECK: = vminuh({{.*}}, {{.*}}) ; Vector reduce minimum halfwords declare i64 @llvm.hexagon.A4.vrminh(i64, i64, i32) @@ -928,14 +932,14 @@ define i64 @A4_vrminh(i64 %a, i64 %b, i32 %c) { %z = call i64 @llvm.hexagon.A4.vrminh(i64 %a, i64 %b, i32 %c) ret i64 %z } -; CHECK: r1:0 = vrminh(r3:2, r4) +; CHECK: = vrminh({{.*}}, {{.*}}) declare i64 @llvm.hexagon.A4.vrminuh(i64, i64, i32) define i64 
@A4_vrminuh(i64 %a, i64 %b, i32 %c) { %z = call i64 @llvm.hexagon.A4.vrminuh(i64 %a, i64 %b, i32 %c) ret i64 %z } -; CHECK: r1:0 = vrminuh(r3:2, r4) +; CHECK: = vrminuh({{.*}}, {{.*}}) ; Vector reduce minimum words declare i64 @llvm.hexagon.A4.vrminw(i64, i64, i32) @@ -943,14 +947,14 @@ define i64 @A4_vrminw(i64 %a, i64 %b, i32 %c) { %z = call i64 @llvm.hexagon.A4.vrminw(i64 %a, i64 %b, i32 %c) ret i64 %z } -; CHECK: r1:0 = vrminw(r3:2, r4) +; CHECK: = vrminw({{.*}}, {{.*}}) declare i64 @llvm.hexagon.A4.vrminuw(i64, i64, i32) define i64 @A4_vrminuw(i64 %a, i64 %b, i32 %c) { %z = call i64 @llvm.hexagon.A4.vrminuw(i64 %a, i64 %b, i32 %c) ret i64 %z } -; CHECK: r1:0 = vrminuw(r3:2, r4) +; CHECK: = vrminuw({{.*}}, {{.*}}) ; Vector sum of absolute differences unsigned bytes declare i64 @llvm.hexagon.A2.vrsadub(i64, i64) @@ -958,14 +962,14 @@ define i64 @A2_vrsadub(i64 %a, i64 %b) { %z = call i64 @llvm.hexagon.A2.vrsadub(i64 %a, i64 %b) ret i64 %z } -; CHECK: r1:0 = vrsadub(r1:0, r3:2) +; CHECK: = vrsadub({{.*}}, {{.*}}) declare i64 @llvm.hexagon.A2.vrsadub.acc(i64, i64, i64) define i64 @A2_vrsadub_acc(i64 %a, i64 %b, i64 %c) { %z = call i64 @llvm.hexagon.A2.vrsadub.acc(i64 %a, i64 %b, i64 %c) ret i64 %z } -; CHECK: r1:0 += vrsadub(r3:2, r5:4) +; CHECK: += vrsadub({{.*}}, {{.*}}) ; Vector subtract halfwords declare i64 @llvm.hexagon.A2.vsubh(i64, i64) @@ -973,21 +977,21 @@ define i64 @A2_vsubh(i64 %a, i64 %b) { %z = call i64 @llvm.hexagon.A2.vsubh(i64 %a, i64 %b) ret i64 %z } -; CHECK: r1:0 = vsubh(r1:0, r3:2) +; CHECK: = vsubh({{.*}}, {{.*}}) declare i64 @llvm.hexagon.A2.vsubhs(i64, i64) define i64 @A2_vsubhs(i64 %a, i64 %b) { %z = call i64 @llvm.hexagon.A2.vsubhs(i64 %a, i64 %b) ret i64 %z } -; CHECK: r1:0 = vsubh(r1:0, r3:2):sat +; CHECK: = vsubh({{.*}}, {{.*}}):sat declare i64 @llvm.hexagon.A2.vsubuhs(i64, i64) define i64 @A2_vsubuhs(i64 %a, i64 %b) { %z = call i64 @llvm.hexagon.A2.vsubuhs(i64 %a, i64 %b) ret i64 %z } -; CHECK: r1:0 = vsubuh(r1:0, r3:2):sat +; CHECK: = vsubuh({{.*}}, {{.*}}):sat ; Vector subtract bytes declare i64 @llvm.hexagon.A2.vsubub(i64, i64) @@ -995,14 +999,14 @@ define i64 @A2_vsubub(i64 %a, i64 %b) { %z = call i64 @llvm.hexagon.A2.vsubub(i64 %a, i64 %b) ret i64 %z } -; CHECK: r1:0 = vsubub(r1:0, r3:2) +; CHECK: = vsubub({{.*}}, {{.*}}) declare i64 @llvm.hexagon.A2.vsububs(i64, i64) define i64 @A2_vsububs(i64 %a, i64 %b) { %z = call i64 @llvm.hexagon.A2.vsububs(i64 %a, i64 %b) ret i64 %z } -; CHECK: r1:0 = vsubub(r1:0, r3:2):sat +; CHECK: = vsubub({{.*}}, {{.*}}):sat ; Vector subtract words declare i64 @llvm.hexagon.A2.vsubw(i64, i64) @@ -1010,11 +1014,11 @@ define i64 @A2_vsubw(i64 %a, i64 %b) { %z = call i64 @llvm.hexagon.A2.vsubw(i64 %a, i64 %b) ret i64 %z } -; CHECK: r1:0 = vsubw(r1:0, r3:2) +; CHECK: = vsubw({{.*}}, {{.*}}) declare i64 @llvm.hexagon.A2.vsubws(i64, i64) define i64 @A2_vsubws(i64 %a, i64 %b) { %z = call i64 @llvm.hexagon.A2.vsubws(i64 %a, i64 %b) ret i64 %z } -; CHECK: r1:0 = vsubw(r1:0, r3:2):sat +; CHECK: = vsubw({{.*}}, {{.*}}):sat diff --git a/llvm/test/CodeGen/Hexagon/intrinsics/xtype_bit.ll b/llvm/test/CodeGen/Hexagon/intrinsics/xtype_bit.ll index 8531b2f..e8f83d0 100644 --- a/llvm/test/CodeGen/Hexagon/intrinsics/xtype_bit.ll +++ b/llvm/test/CodeGen/Hexagon/intrinsics/xtype_bit.ll @@ -1,69 +1,72 @@ ; RUN: llc -march=hexagon -O0 < %s | FileCheck %s +; RUN: llc -march=hexagon -O0 < %s | FileCheck -check-prefix=CHECK-CALL %s ; Hexagon Programmer's Reference Manual 11.10.2 XTYPE/BIT +; CHECK-CALL-NOT: call + ; Count leading declare i32 
@llvm.hexagon.S2.clbp(i64) define i32 @S2_clbp(i64 %a) { %z = call i32 @llvm.hexagon.S2.clbp(i64 %a) ret i32 %z } -; CHECK: r0 = clb(r1:0) +; CHECK: = clb({{.*}}) declare i32 @llvm.hexagon.S2.cl0p(i64) define i32 @S2_cl0p(i64 %a) { %z = call i32 @llvm.hexagon.S2.cl0p(i64 %a) ret i32 %z } -; CHECK: r0 = cl0(r1:0) +; CHECK: = cl0({{.*}}) declare i32 @llvm.hexagon.S2.cl1p(i64) define i32 @S2_cl1p(i64 %a) { %z = call i32 @llvm.hexagon.S2.cl1p(i64 %a) ret i32 %z } -; CHECK: r0 = cl1(r1:0) +; CHECK: = cl1({{.*}}) declare i32 @llvm.hexagon.S4.clbpnorm(i64) define i32 @S4_clbpnorm(i64 %a) { %z = call i32 @llvm.hexagon.S4.clbpnorm(i64 %a) ret i32 %z } -; CHECK: r0 = normamt(r1:0) +; CHECK: = normamt({{.*}}) declare i32 @llvm.hexagon.S4.clbpaddi(i64, i32) define i32 @S4_clbpaddi(i64 %a) { %z = call i32 @llvm.hexagon.S4.clbpaddi(i64 %a, i32 0) ret i32 %z } -; CHECK: r0 = add(clb(r1:0), #0) +; CHECK: = add(clb({{.*}}), #0) declare i32 @llvm.hexagon.S4.clbaddi(i32, i32) define i32 @S4_clbaddi(i32 %a) { %z = call i32 @llvm.hexagon.S4.clbaddi(i32 %a, i32 0) ret i32 %z } -; CHECK: r0 = add(clb(r0), #0) +; CHECK: = add(clb({{.*}}), #0) declare i32 @llvm.hexagon.S2.cl0(i32) define i32 @S2_cl0(i32 %a) { %z = call i32 @llvm.hexagon.S2.cl0(i32 %a) ret i32 %z } -; CHECK: r0 = cl0(r0) +; CHECK: = cl0({{.*}}) declare i32 @llvm.hexagon.S2.cl1(i32) define i32 @S2_cl1(i32 %a) { %z = call i32 @llvm.hexagon.S2.cl1(i32 %a) ret i32 %z } -; CHECK: r0 = cl1(r0) +; CHECK: = cl1({{.*}}) declare i32 @llvm.hexagon.S2.clbnorm(i32) define i32 @S4_clbnorm(i32 %a) { %z = call i32 @llvm.hexagon.S2.clbnorm(i32 %a) ret i32 %z } -; CHECK: r0 = normamt(r0) +; CHECK: = normamt({{.*}}) ; Count population declare i32 @llvm.hexagon.S5.popcountp(i64) @@ -71,7 +74,7 @@ define i32 @S5_popcountp(i64 %a) { %z = call i32 @llvm.hexagon.S5.popcountp(i64 %a) ret i32 %z } -; CHECK: r0 = popcount(r1:0) +; CHECK: = popcount({{.*}}) ; Count trailing declare i32 @llvm.hexagon.S2.ct0p(i64) @@ -79,28 +82,28 @@ define i32 @S2_ct0p(i64 %a) { %z = call i32 @llvm.hexagon.S2.ct0p(i64 %a) ret i32 %z } -; CHECK: r0 = ct0(r1:0) +; CHECK: = ct0({{.*}}) declare i32 @llvm.hexagon.S2.ct1p(i64) define i32 @S2_ct1p(i64 %a) { %z = call i32 @llvm.hexagon.S2.ct1p(i64 %a) ret i32 %z } -; CHECK: r0 = ct1(r1:0) +; CHECK: = ct1({{.*}}) declare i32 @llvm.hexagon.S2.ct0(i32) define i32 @S2_ct0(i32 %a) { %z = call i32 @llvm.hexagon.S2.ct0(i32 %a) ret i32 %z } -; CHECK: r0 = ct0(r0) +; CHECK: = ct0({{.*}}) declare i32 @llvm.hexagon.S2.ct1(i32) define i32 @S2_ct1(i32 %a) { %z = call i32 @llvm.hexagon.S2.ct1(i32 %a) ret i32 %z } -; CHECK: r0 = ct1(r0) +; CHECK: = ct1({{.*}}) ; Extract bitfield declare i64 @llvm.hexagon.S2.extractup(i64, i32, i32) @@ -108,56 +111,56 @@ define i64 @S2_extractup(i64 %a) { %z = call i64 @llvm.hexagon.S2.extractup(i64 %a, i32 0, i32 0) ret i64 %z } -; CHECK: r1:0 = extractu(r1:0, #0, #0) +; CHECK: = extractu({{.*}}, #0, #0) declare i64 @llvm.hexagon.S4.extractp(i64, i32, i32) define i64 @S2_extractp(i64 %a) { %z = call i64 @llvm.hexagon.S4.extractp(i64 %a, i32 0, i32 0) ret i64 %z } -; CHECK: r1:0 = extract(r1:0, #0, #0) +; CHECK: = extract({{.*}}, #0, #0) declare i32 @llvm.hexagon.S2.extractu(i32, i32, i32) define i32 @S2_extractu(i32 %a) { %z = call i32 @llvm.hexagon.S2.extractu(i32 %a, i32 0, i32 0) ret i32 %z } -; CHECK: r0 = extractu(r0, #0, #0) +; CHECK: = extractu({{.*}}, #0, #0) declare i32 @llvm.hexagon.S4.extract(i32, i32, i32) define i32 @S2_extract(i32 %a) { %z = call i32 @llvm.hexagon.S4.extract(i32 %a, i32 0, i32 0) ret i32 %z } -; CHECK: 
r0 = extract(r0, #0, #0) +; CHECK: = extract({{.*}}, #0, #0) declare i64 @llvm.hexagon.S2.extractup.rp(i64, i64) define i64 @S2_extractup_rp(i64 %a, i64 %b) { %z = call i64 @llvm.hexagon.S2.extractup.rp(i64 %a, i64 %b) ret i64 %z } -; CHECK: r1:0 = extractu(r1:0, r3:2) +; CHECK: = extractu({{.*}}, {{.*}}) declare i64 @llvm.hexagon.S4.extractp.rp(i64, i64) define i64 @S4_extractp_rp(i64 %a, i64 %b) { %z = call i64 @llvm.hexagon.S4.extractp.rp(i64 %a, i64 %b) ret i64 %z } -; CHECK: r1:0 = extract(r1:0, r3:2) +; CHECK: = extract({{.*}}, {{.*}}) declare i32 @llvm.hexagon.S2.extractu.rp(i32, i64) define i32 @S2_extractu_rp(i32 %a, i64 %b) { %z = call i32 @llvm.hexagon.S2.extractu.rp(i32 %a, i64 %b) ret i32 %z } -; CHECK: r0 = extractu(r0, r3:2) +; CHECK: = extractu({{.*}}, {{.*}}) declare i32 @llvm.hexagon.S4.extract.rp(i32, i64) define i32 @S4_extract_rp(i32 %a, i64 %b) { %z = call i32 @llvm.hexagon.S4.extract.rp(i32 %a, i64 %b) ret i32 %z } -; CHECK: r0 = extract(r0, r3:2) +; CHECK: = extract({{.*}}, {{.*}}) ; Insert bitfield declare i64 @llvm.hexagon.S2.insertp(i64, i64, i32, i32) @@ -165,28 +168,28 @@ define i64 @S2_insertp(i64 %a, i64 %b) { %z = call i64 @llvm.hexagon.S2.insertp(i64 %a, i64 %b, i32 0, i32 0) ret i64 %z } -; CHECK: r1:0 = insert(r3:2, #0, #0) +; CHECK: = insert({{.*}}, #0, #0) declare i32 @llvm.hexagon.S2.insert(i32, i32, i32, i32) define i32 @S2_insert(i32 %a, i32 %b) { %z = call i32 @llvm.hexagon.S2.insert(i32 %a, i32 %b, i32 0, i32 0) ret i32 %z } -; CHECK: r0 = insert(r1, #0, #0) +; CHECK: = insert({{.*}}, #0, #0) declare i32 @llvm.hexagon.S2.insert.rp(i32, i32, i64) define i32 @S2_insert_rp(i32 %a, i32 %b, i64 %c) { %z = call i32 @llvm.hexagon.S2.insert.rp(i32 %a, i32 %b, i64 %c) ret i32 %z } -; CHECK: r0 = insert(r1, r3:2) +; CHECK: = insert({{.*}}, {{.*}}) declare i64 @llvm.hexagon.S2.insertp.rp(i64, i64, i64) define i64 @S2_insertp_rp(i64 %a, i64 %b, i64 %c) { %z = call i64 @llvm.hexagon.S2.insertp.rp(i64 %a, i64 %b, i64 %c) ret i64 %z } -; CHECK: r1:0 = insert(r3:2, r5:4) +; CHECK: = insert({{.*}}, {{.*}}) ; Interleave/deinterleave declare i64 @llvm.hexagon.S2.deinterleave(i64) @@ -194,14 +197,14 @@ define i64 @S2_deinterleave(i64 %a) { %z = call i64 @llvm.hexagon.S2.deinterleave(i64 %a) ret i64 %z } -; CHECK: r1:0 = deinterleave(r1:0) +; CHECK: = deinterleave({{.*}}) declare i64 @llvm.hexagon.S2.interleave(i64) define i64 @S2_interleave(i64 %a) { %z = call i64 @llvm.hexagon.S2.interleave(i64 %a) ret i64 %z } -; CHECK: r1:0 = interleave(r1:0) +; CHECK: = interleave({{.*}}) ; Linear feedback-shift operation declare i64 @llvm.hexagon.S2.lfsp(i64, i64) @@ -209,7 +212,7 @@ define i64 @S2_lfsp(i64 %a, i64 %b) { %z = call i64 @llvm.hexagon.S2.lfsp(i64 %a, i64 %b) ret i64 %z } -; CHECK: r1:0 = lfs(r1:0, r3:2) +; CHECK: = lfs({{.*}}, {{.*}}) ; Masked parity declare i32 @llvm.hexagon.S2.parityp(i64, i64) @@ -217,14 +220,14 @@ define i32 @S2_parityp(i64 %a, i64 %b) { %z = call i32 @llvm.hexagon.S2.parityp(i64 %a, i64 %b) ret i32 %z } -; CHECK: r0 = parity(r1:0, r3:2) +; CHECK: = parity({{.*}}, {{.*}}) declare i32 @llvm.hexagon.S4.parity(i32, i32) define i32 @S4_parity(i32 %a, i32 %b) { %z = call i32 @llvm.hexagon.S4.parity(i32 %a, i32 %b) ret i32 %z } -; CHECK: r0 = parity(r0, r1) +; CHECK: = parity({{.*}}, {{.*}}) ; Bit reverse declare i64 @llvm.hexagon.S2.brevp(i64) @@ -232,14 +235,14 @@ define i64 @S2_brevp(i64 %a) { %z = call i64 @llvm.hexagon.S2.brevp(i64 %a) ret i64 %z } -; CHECK: r1:0 = brev(r1:0) +; CHECK: = brev({{.*}}) declare i32 @llvm.hexagon.S2.brev(i32) define
i32 @S2_brev(i32 %a) { %z = call i32 @llvm.hexagon.S2.brev(i32 %a) ret i32 %z } -; CHECK: r0 = brev(r0) +; CHECK: = brev({{.*}}) ; Set/clear/toggle bit declare i32 @llvm.hexagon.S2.setbit.i(i32, i32) @@ -247,42 +250,42 @@ define i32 @S2_setbit_i(i32 %a) { %z = call i32 @llvm.hexagon.S2.setbit.i(i32 %a, i32 0) ret i32 %z } -; CHECK: r0 = setbit(r0, #0) +; CHECK: = setbit({{.*}}, #0) declare i32 @llvm.hexagon.S2.clrbit.i(i32, i32) define i32 @S2_clrbit_i(i32 %a) { %z = call i32 @llvm.hexagon.S2.clrbit.i(i32 %a, i32 0) ret i32 %z } -; CHECK: r0 = clrbit(r0, #0) +; CHECK: = clrbit({{.*}}, #0) declare i32 @llvm.hexagon.S2.togglebit.i(i32, i32) define i32 @S2_togglebit_i(i32 %a) { %z = call i32 @llvm.hexagon.S2.togglebit.i(i32 %a, i32 0) ret i32 %z } -; CHECK: r0 = togglebit(r0, #0) +; CHECK: = togglebit({{.*}}, #0) declare i32 @llvm.hexagon.S2.setbit.r(i32, i32) define i32 @S2_setbit_r(i32 %a, i32 %b) { %z = call i32 @llvm.hexagon.S2.setbit.r(i32 %a, i32 %b) ret i32 %z } -; CHECK: r0 = setbit(r0, r1) +; CHECK: = setbit({{.*}}, {{.*}}) declare i32 @llvm.hexagon.S2.clrbit.r(i32, i32) define i32 @S2_clrbit_r(i32 %a, i32 %b) { %z = call i32 @llvm.hexagon.S2.clrbit.r(i32 %a, i32 %b) ret i32 %z } -; CHECK: r0 = clrbit(r0, r1) +; CHECK: = clrbit({{.*}}, {{.*}}) declare i32 @llvm.hexagon.S2.togglebit.r(i32, i32) define i32 @S2_togglebit_r(i32 %a, i32 %b) { %z = call i32 @llvm.hexagon.S2.togglebit.r(i32 %a, i32 %b) ret i32 %z } -; CHECK: r0 = togglebit(r0, r1) +; CHECK: = togglebit({{.*}}, {{.*}}) ; Split bitfield declare i64 @llvm.hexagon.A4.bitspliti(i32, i32) @@ -290,14 +293,14 @@ define i64 @A4_bitspliti(i32 %a) { %z = call i64 @llvm.hexagon.A4.bitspliti(i32 %a, i32 0) ret i64 %z } -; CHECK: = bitsplit(r0, #0) +; CHECK: = bitsplit({{.*}}, #0) declare i64 @llvm.hexagon.A4.bitsplit(i32, i32) define i64 @A4_bitsplit(i32 %a, i32 %b) { %z = call i64 @llvm.hexagon.A4.bitsplit(i32 %a, i32 %b) ret i64 %z } -; CHECK: r1:0 = bitsplit(r0, r1) +; CHECK: = bitsplit({{.*}}, {{.*}}) ; Table index declare i32 @llvm.hexagon.S2.tableidxb.goodsyntax(i32, i32, i32, i32) @@ -305,25 +308,25 @@ define i32 @S2_tableidxb_goodsyntax(i32 %a, i32 %b) { %z = call i32 @llvm.hexagon.S2.tableidxb.goodsyntax(i32 %a, i32 %b, i32 0, i32 0) ret i32 %z } -; CHECK: r0 = tableidxb(r1, #0, #0) +; CHECK: = tableidxb({{.*}}, #0, #0) declare i32 @llvm.hexagon.S2.tableidxh.goodsyntax(i32, i32, i32, i32) define i32 @S2_tableidxh_goodsyntax(i32 %a, i32 %b) { %z = call i32 @llvm.hexagon.S2.tableidxh.goodsyntax(i32 %a, i32 %b, i32 0, i32 0) ret i32 %z } -; CHECK: r0 = tableidxh(r1, #0, #-1) +; CHECK: = tableidxh({{.*}}, #0, #-1) declare i32 @llvm.hexagon.S2.tableidxw.goodsyntax(i32, i32, i32, i32) define i32 @S2_tableidxw_goodsyntax(i32 %a, i32 %b) { %z = call i32 @llvm.hexagon.S2.tableidxw.goodsyntax(i32 %a, i32 %b, i32 0, i32 0) ret i32 %z } -; CHECK: r0 = tableidxw(r1, #0, #-2) +; CHECK: = tableidxw({{.*}}, #0, #-2) declare i32 @llvm.hexagon.S2.tableidxd.goodsyntax(i32, i32, i32, i32) define i32 @S2_tableidxd_goodsyntax(i32 %a, i32 %b) { %z = call i32 @llvm.hexagon.S2.tableidxd.goodsyntax(i32 %a, i32 %b, i32 0, i32 0) ret i32 %z } -; CHECK: r0 = tableidxd(r1, #0, #-3) +; CHECK: = tableidxd({{.*}}, #0, #-3) diff --git a/llvm/test/CodeGen/Hexagon/intrinsics/xtype_complex.ll b/llvm/test/CodeGen/Hexagon/intrinsics/xtype_complex.ll index 57b0c5b..0087883 100644 --- a/llvm/test/CodeGen/Hexagon/intrinsics/xtype_complex.ll +++ b/llvm/test/CodeGen/Hexagon/intrinsics/xtype_complex.ll @@ -1,34 +1,37 @@ ; RUN: llc -march=hexagon -O0 < %s | FileCheck %s +; 
RUN: llc -march=hexagon -O0 < %s | FileCheck -check-prefix=CHECK-CALL %s ; Hexagon Programmer's Reference Manual 11.10.3 XTYPE/COMPLEX +; CHECK-CALL-NOT: call + ; Complex add/sub halfwords declare i64 @llvm.hexagon.S4.vxaddsubh(i64, i64) define i64 @S4_vxaddsubh(i64 %a, i64 %b) { %z = call i64 @llvm.hexagon.S4.vxaddsubh(i64 %a, i64 %b) ret i64 %z } -; CHECK: r1:0 = vxaddsubh(r1:0, r3:2):sat +; CHECK: = vxaddsubh({{.*}}, {{.*}}):sat declare i64 @llvm.hexagon.S4.vxsubaddh(i64, i64) define i64 @S4_vxsubaddh(i64 %a, i64 %b) { %z = call i64 @llvm.hexagon.S4.vxsubaddh(i64 %a, i64 %b) ret i64 %z } -; CHECK: r1:0 = vxsubaddh(r1:0, r3:2):sat +; CHECK: = vxsubaddh({{.*}}, {{.*}}):sat declare i64 @llvm.hexagon.S4.vxaddsubhr(i64, i64) define i64 @S4_vxaddsubhr(i64 %a, i64 %b) { %z = call i64 @llvm.hexagon.S4.vxaddsubhr(i64 %a, i64 %b) ret i64 %z } -; CHECK: r1:0 = vxaddsubh(r1:0, r3:2):rnd:>>1:sat +; CHECK: = vxaddsubh({{.*}}, {{.*}}):rnd:>>1:sat declare i64 @llvm.hexagon.S4.vxsubaddhr(i64, i64) define i64 @S4_vxsubaddhr(i64 %a, i64 %b) { %z = call i64 @llvm.hexagon.S4.vxsubaddhr(i64 %a, i64 %b) ret i64 %z } -; CHECK: r1:0 = vxsubaddh(r1:0, r3:2):rnd:>>1:sat +; CHECK: = vxsubaddh({{.*}}, {{.*}}):rnd:>>1:sat ; Complex add/sub words declare i64 @llvm.hexagon.S4.vxaddsubw(i64, i64) @@ -36,14 +39,14 @@ define i64 @S4_vxaddsubw(i64 %a, i64 %b) { %z = call i64 @llvm.hexagon.S4.vxaddsubw(i64 %a, i64 %b) ret i64 %z } -; CHECK: r1:0 = vxaddsubw(r1:0, r3:2):sat +; CHECK: = vxaddsubw({{.*}}, {{.*}}):sat declare i64 @llvm.hexagon.S4.vxsubaddw(i64, i64) define i64 @S4_vxsubaddw(i64 %a, i64 %b) { %z = call i64 @llvm.hexagon.S4.vxsubaddw(i64 %a, i64 %b) ret i64 %z } -; CHECK: r1:0 = vxsubaddw(r1:0, r3:2):sat +; CHECK: = vxsubaddw({{.*}}, {{.*}}):sat ; Complex multiply declare i64 @llvm.hexagon.M2.cmpys.s0(i32, i32) @@ -51,84 +54,84 @@ define i64 @M2_cmpys_s0(i32 %a, i32 %b) { %z = call i64 @llvm.hexagon.M2.cmpys.s0(i32 %a, i32 %b) ret i64 %z } -; CHECK: r1:0 = cmpy(r0, r1):sat +; CHECK: = cmpy({{.*}}, {{.*}}):sat declare i64 @llvm.hexagon.M2.cmpys.s1(i32, i32) define i64 @M2_cmpys_s1(i32 %a, i32 %b) { %z = call i64 @llvm.hexagon.M2.cmpys.s1(i32 %a, i32 %b) ret i64 %z } -; CHECK: r1:0 = cmpy(r0, r1):<<1:sat +; CHECK: = cmpy({{.*}}, {{.*}}):<<1:sat declare i64 @llvm.hexagon.M2.cmpysc.s0(i32, i32) define i64 @M2_cmpysc_s0(i32 %a, i32 %b) { %z = call i64 @llvm.hexagon.M2.cmpysc.s0(i32 %a, i32 %b) ret i64 %z } -; CHECK: r1:0 = cmpy(r0, r1*):sat +; CHECK: = cmpy({{.*}}, {{.*}}*):sat declare i64 @llvm.hexagon.M2.cmpysc.s1(i32, i32) define i64 @M2_cmpysc_s1(i32 %a, i32 %b) { %z = call i64 @llvm.hexagon.M2.cmpysc.s1(i32 %a, i32 %b) ret i64 %z } -; CHECK: r1:0 = cmpy(r0, r1*):<<1:sat +; CHECK: = cmpy({{.*}}, {{.*}}*):<<1:sat declare i64 @llvm.hexagon.M2.cmacs.s0(i64, i32, i32) define i64 @M2_cmacs_s0(i64 %a, i32 %b, i32 %c) { %z = call i64 @llvm.hexagon.M2.cmacs.s0(i64 %a, i32 %b, i32 %c) ret i64 %z } -; CHECK: r1:0 += cmpy(r2, r3):sat +; CHECK: += cmpy({{.*}}, {{.*}}):sat declare i64 @llvm.hexagon.M2.cmacs.s1(i64, i32, i32) define i64 @M2_cmacs_s1(i64 %a, i32 %b, i32 %c) { %z = call i64 @llvm.hexagon.M2.cmacs.s1(i64 %a, i32 %b, i32 %c) ret i64 %z } -; CHECK: r1:0 += cmpy(r2, r3):<<1:sat +; CHECK: += cmpy({{.*}}, {{.*}}):<<1:sat declare i64 @llvm.hexagon.M2.cnacs.s0(i64, i32, i32) define i64 @M2_cnacs_s0(i64 %a, i32 %b, i32 %c) { %z = call i64 @llvm.hexagon.M2.cnacs.s0(i64 %a, i32 %b, i32 %c) ret i64 %z } -; CHECK: r1:0 -= cmpy(r2, r3):sat +; CHECK: -= cmpy({{.*}}, {{.*}}):sat declare i64 @llvm.hexagon.M2.cnacs.s1(i64, i32, 
i32) define i64 @M2_cnacs_s1(i64 %a, i32 %b, i32 %c) { %z = call i64 @llvm.hexagon.M2.cnacs.s1(i64 %a, i32 %b, i32 %c) ret i64 %z } -; CHECK: r1:0 -= cmpy(r2, r3):<<1:sat +; CHECK: -= cmpy({{.*}}, {{.*}}):<<1:sat declare i64 @llvm.hexagon.M2.cmacsc.s0(i64, i32, i32) define i64 @M2_cmacsc_s0(i64 %a, i32 %b, i32 %c) { %z = call i64 @llvm.hexagon.M2.cmacsc.s0(i64 %a, i32 %b, i32 %c) ret i64 %z } -; CHECK: r1:0 += cmpy(r2, r3*):sat +; CHECK: += cmpy({{.*}}, {{.*}}*):sat declare i64 @llvm.hexagon.M2.cmacsc.s1(i64, i32, i32) define i64 @M2_cmacsc_s1(i64 %a, i32 %b, i32 %c) { %z = call i64 @llvm.hexagon.M2.cmacsc.s1(i64 %a, i32 %b, i32 %c) ret i64 %z } -; CHECK: r1:0 += cmpy(r2, r3*):<<1:sat +; CHECK: += cmpy({{.*}}, {{.*}}*):<<1:sat declare i64 @llvm.hexagon.M2.cnacsc.s0(i64, i32, i32) define i64 @M2_cnacsc_s0(i64 %a, i32 %b, i32 %c) { %z = call i64 @llvm.hexagon.M2.cnacsc.s0(i64 %a, i32 %b, i32 %c) ret i64 %z } -; CHECK: r1:0 -= cmpy(r2, r3*):sat +; CHECK: -= cmpy({{.*}}, {{.*}}*):sat declare i64 @llvm.hexagon.M2.cnacsc.s1(i64, i32, i32) define i64 @M2_cnacsc_s1(i64 %a, i32 %b, i32 %c) { %z = call i64 @llvm.hexagon.M2.cnacsc.s1(i64 %a, i32 %b, i32 %c) ret i64 %z } -; CHECK: r1:0 -= cmpy(r2, r3*):<<1:sat +; CHECK: -= cmpy({{.*}}, {{.*}}*):<<1:sat ; Complex multiply real or imaginary declare i64 @llvm.hexagon.M2.cmpyi.s0(i32, i32) @@ -136,28 +139,28 @@ define i64 @M2_cmpyi_s0(i32 %a, i32 %b) { %z = call i64 @llvm.hexagon.M2.cmpyi.s0(i32 %a, i32 %b) ret i64 %z } -; CHECK: r1:0 = cmpyi(r0, r1) +; CHECK: = cmpyi({{.*}}, {{.*}}) declare i64 @llvm.hexagon.M2.cmpyr.s0(i32, i32) define i64 @M2_cmpyr_s0(i32 %a, i32 %b) { %z = call i64 @llvm.hexagon.M2.cmpyr.s0(i32 %a, i32 %b) ret i64 %z } -; CHECK: r1:0 = cmpyr(r0, r1) +; CHECK: = cmpyr({{.*}}, {{.*}}) declare i64 @llvm.hexagon.M2.cmaci.s0(i64, i32, i32) define i64 @M2_cmaci_s0(i64 %a, i32 %b, i32 %c) { %z = call i64 @llvm.hexagon.M2.cmaci.s0(i64 %a, i32 %b, i32 %c) ret i64 %z } -; CHECK: r1:0 += cmpyi(r2, r3) +; CHECK: += cmpyi({{.*}}, {{.*}}) declare i64 @llvm.hexagon.M2.cmacr.s0(i64, i32, i32) define i64 @M2_cmacr_s0(i64 %a, i32 %b, i32 %c) { %z = call i64 @llvm.hexagon.M2.cmacr.s0(i64 %a, i32 %b, i32 %c) ret i64 %z } -; CHECK: r1:0 += cmpyr(r2, r3) +; CHECK: += cmpyr({{.*}}, {{.*}}) ; Complex multiply with round and pack declare i32 @llvm.hexagon.M2.cmpyrs.s0(i32, i32) @@ -165,28 +168,28 @@ define i32 @M2_cmpyrs_s0(i32 %a, i32 %b) { %z = call i32 @llvm.hexagon.M2.cmpyrs.s0(i32 %a, i32 %b) ret i32 %z } -; CHECK: r0 = cmpy(r0, r1):rnd:sat +; CHECK: = cmpy({{.*}}, {{.*}}):rnd:sat declare i32 @llvm.hexagon.M2.cmpyrs.s1(i32, i32) define i32 @M2_cmpyrs_s1(i32 %a, i32 %b) { %z = call i32 @llvm.hexagon.M2.cmpyrs.s1(i32 %a, i32 %b) ret i32 %z } -; CHECK: r0 = cmpy(r0, r1):<<1:rnd:sat +; CHECK: = cmpy({{.*}}, {{.*}}):<<1:rnd:sat declare i32 @llvm.hexagon.M2.cmpyrsc.s0(i32, i32) define i32 @M2_cmpyrsc_s0(i32 %a, i32 %b) { %z = call i32 @llvm.hexagon.M2.cmpyrsc.s0(i32 %a, i32 %b) ret i32 %z } -; CHECK: r0 = cmpy(r0, r1*):rnd:sat +; CHECK: = cmpy({{.*}}, {{.*}}*):rnd:sat declare i32 @llvm.hexagon.M2.cmpyrsc.s1(i32, i32) define i32 @M2_cmpyrsc_s1(i32 %a, i32 %b) { %z = call i32 @llvm.hexagon.M2.cmpyrsc.s1(i32 %a, i32 %b) ret i32 %z } -; CHECK: r0 = cmpy(r0, r1*):<<1:rnd:sat +; CHECK: = cmpy({{.*}}, {{.*}}*):<<1:rnd:sat ; Complex multiply 32x16 declare i32 @llvm.hexagon.M4.cmpyi.wh(i64, i32) @@ -194,28 +197,28 @@ define i32 @M4_cmpyi_wh(i64 %a, i32 %b) { %z = call i32 @llvm.hexagon.M4.cmpyi.wh(i64 %a, i32 %b) ret i32 %z } -; CHECK: r0 = cmpyiwh(r1:0, 
r2):<<1:rnd:sat +; CHECK: = cmpyiwh({{.*}}, {{.*}}):<<1:rnd:sat declare i32 @llvm.hexagon.M4.cmpyi.whc(i64, i32) define i32 @M4_cmpyi_whc(i64 %a, i32 %b) { %z = call i32 @llvm.hexagon.M4.cmpyi.whc(i64 %a, i32 %b) ret i32 %z } -; CHECK: r0 = cmpyiwh(r1:0, r2*):<<1:rnd:sat +; CHECK: = cmpyiwh({{.*}}, {{.*}}*):<<1:rnd:sat declare i32 @llvm.hexagon.M4.cmpyr.wh(i64, i32) define i32 @M4_cmpyr_wh(i64 %a, i32 %b) { %z = call i32 @llvm.hexagon.M4.cmpyr.wh(i64 %a, i32 %b) ret i32 %z } -; CHECK: r0 = cmpyrwh(r1:0, r2):<<1:rnd:sat +; CHECK: = cmpyrwh({{.*}}, {{.*}}):<<1:rnd:sat declare i32 @llvm.hexagon.M4.cmpyr.whc(i64, i32) define i32 @M4_cmpyr_whc(i64 %a, i32 %b) { %z = call i32 @llvm.hexagon.M4.cmpyr.whc(i64 %a, i32 %b) ret i32 %z } -; CHECK: r0 = cmpyrwh(r1:0, r2*):<<1:rnd:sat +; CHECK: = cmpyrwh({{.*}}, {{.*}}*):<<1:rnd:sat ; Vector complex multiply real or imaginary declare i64 @llvm.hexagon.M2.vcmpy.s0.sat.r(i64, i64) @@ -223,42 +226,42 @@ define i64 @M2_vcmpy_s0_sat_r(i64 %a, i64 %b) { %z = call i64 @llvm.hexagon.M2.vcmpy.s0.sat.r(i64 %a, i64 %b) ret i64 %z } -; CHECK: r1:0 = vcmpyr(r1:0, r3:2):sat +; CHECK: = vcmpyr({{.*}}, {{.*}}):sat declare i64 @llvm.hexagon.M2.vcmpy.s1.sat.r(i64, i64) define i64 @M2_vcmpy_s1_sat_r(i64 %a, i64 %b) { %z = call i64 @llvm.hexagon.M2.vcmpy.s1.sat.r(i64 %a, i64 %b) ret i64 %z } -; CHECK: r1:0 = vcmpyr(r1:0, r3:2):<<1:sat +; CHECK: = vcmpyr({{.*}}, {{.*}}):<<1:sat declare i64 @llvm.hexagon.M2.vcmpy.s0.sat.i(i64, i64) define i64 @M2_vcmpy_s0_sat_i(i64 %a, i64 %b) { %z = call i64 @llvm.hexagon.M2.vcmpy.s0.sat.i(i64 %a, i64 %b) ret i64 %z } -; CHECK: r1:0 = vcmpyi(r1:0, r3:2):sat +; CHECK: = vcmpyi({{.*}}, {{.*}}):sat declare i64 @llvm.hexagon.M2.vcmpy.s1.sat.i(i64, i64) define i64 @M2_vcmpy_s1_sat_i(i64 %a, i64 %b) { %z = call i64 @llvm.hexagon.M2.vcmpy.s1.sat.i(i64 %a, i64 %b) ret i64 %z } -; CHECK: r1:0 = vcmpyi(r1:0, r3:2):<<1:sat +; CHECK: = vcmpyi({{.*}}, {{.*}}):<<1:sat declare i64 @llvm.hexagon.M2.vcmac.s0.sat.r(i64, i64, i64) define i64 @M2_vcmac_s0_sat_r(i64 %a, i64 %b, i64 %c) { %z = call i64 @llvm.hexagon.M2.vcmac.s0.sat.r(i64 %a, i64 %b, i64 %c) ret i64 %z } -; CHECK: r1:0 += vcmpyr(r3:2, r5:4):sat +; CHECK: += vcmpyr({{.*}}, {{.*}}):sat declare i64 @llvm.hexagon.M2.vcmac.s0.sat.i(i64, i64, i64) define i64 @M2_vcmac_s0_sat_i(i64 %a, i64 %b, i64 %c) { %z = call i64 @llvm.hexagon.M2.vcmac.s0.sat.i(i64 %a, i64 %b, i64 %c) ret i64 %z } -; CHECK: r1:0 += vcmpyi(r3:2, r5:4):sat +; CHECK: += vcmpyi({{.*}}, {{.*}}):sat ; Vector complex conjugate declare i64 @llvm.hexagon.A2.vconj(i64) @@ -266,7 +269,7 @@ define i64 @A2_vconj(i64 %a) { %z = call i64 @llvm.hexagon.A2.vconj(i64 %a) ret i64 %z } -; CHECK: r1:0 = vconj(r1:0):sat +; CHECK: = vconj({{.*}}):sat ; Vector complex rotate declare i64 @llvm.hexagon.S2.vcrotate(i64, i32) @@ -274,7 +277,7 @@ define i64 @S2_vcrotate(i64 %a, i32 %b) { %z = call i64 @llvm.hexagon.S2.vcrotate(i64 %a, i32 %b) ret i64 %z } -; CHECK: r1:0 = vcrotate(r1:0, r2) +; CHECK: = vcrotate({{.*}}, {{.*}}) ; Vector reduce complex multiply real or imaginary declare i64 @llvm.hexagon.M2.vrcmpyi.s0(i64, i64) @@ -282,56 +285,56 @@ define i64 @M2_vrcmpyi_s0(i64 %a, i64 %b) { %z = call i64 @llvm.hexagon.M2.vrcmpyi.s0(i64 %a, i64 %b) ret i64 %z } -; CHECK: r1:0 = vrcmpyi(r1:0, r3:2) +; CHECK: = vrcmpyi({{.*}}, {{.*}}) declare i64 @llvm.hexagon.M2.vrcmpyr.s0(i64, i64) define i64 @M2_vrcmpyr_s0(i64 %a, i64 %b) { %z = call i64 @llvm.hexagon.M2.vrcmpyr.s0(i64 %a, i64 %b) ret i64 %z } -; CHECK: r1:0 = vrcmpyr(r1:0, r3:2) +; CHECK: = vrcmpyr({{.*}}, {{.*}})
declare i64 @llvm.hexagon.M2.vrcmpyi.s0c(i64, i64) define i64 @M2_vrcmpyi_s0c(i64 %a, i64 %b) { %z = call i64 @llvm.hexagon.M2.vrcmpyi.s0c(i64 %a, i64 %b) ret i64 %z } -; CHECK: r1:0 = vrcmpyi(r1:0, r3:2*) +; CHECK: = vrcmpyi({{.*}}, {{.*}}*) declare i64 @llvm.hexagon.M2.vrcmpyr.s0c(i64, i64) define i64 @M2_vrcmpyr_s0c(i64 %a, i64 %b) { %z = call i64 @llvm.hexagon.M2.vrcmpyr.s0c(i64 %a, i64 %b) ret i64 %z } -; CHECK: r1:0 = vrcmpyr(r1:0, r3:2*) +; CHECK: = vrcmpyr({{.*}}, {{.*}}*) declare i64 @llvm.hexagon.M2.vrcmaci.s0(i64, i64, i64) define i64 @M2_vrcmaci_s0(i64 %a, i64 %b, i64 %c) { %z = call i64 @llvm.hexagon.M2.vrcmaci.s0(i64 %a, i64 %b, i64 %c) ret i64 %z } -; CHECK: r1:0 += vrcmpyi(r3:2, r5:4) +; CHECK: += vrcmpyi({{.*}}, {{.*}}) declare i64 @llvm.hexagon.M2.vrcmacr.s0(i64, i64, i64) define i64 @M2_vrcmacr_s0(i64 %a, i64 %b, i64 %c) { %z = call i64 @llvm.hexagon.M2.vrcmacr.s0(i64 %a, i64 %b, i64 %c) ret i64 %z } -; CHECK: r1:0 += vrcmpyr(r3:2, r5:4) +; CHECK: += vrcmpyr({{.*}}, {{.*}}) declare i64 @llvm.hexagon.M2.vrcmaci.s0c(i64, i64, i64) define i64 @M2_vrcmaci_s0c(i64 %a, i64 %b, i64 %c) { %z = call i64 @llvm.hexagon.M2.vrcmaci.s0c(i64 %a, i64 %b, i64 %c) ret i64 %z } -; CHECK: r1:0 += vrcmpyi(r3:2, r5:4*) +; CHECK: += vrcmpyi({{.*}}, {{.*}}*) declare i64 @llvm.hexagon.M2.vrcmacr.s0c(i64, i64, i64) define i64 @M2_vrcmacr_s0c(i64 %a, i64 %b, i64 %c) { %z = call i64 @llvm.hexagon.M2.vrcmacr.s0c(i64 %a, i64 %b, i64 %c) ret i64 %z } -; CHECK: r1:0 += vrcmpyr(r3:2, r5:4*) +; CHECK: += vrcmpyr({{.*}}, {{.*}}*) ; Vector reduce complex rotate declare i64 @llvm.hexagon.S4.vrcrotate(i64, i32, i32) @@ -339,11 +342,11 @@ define i64 @S4_vrcrotate(i64 %a, i32 %b) { %z = call i64 @llvm.hexagon.S4.vrcrotate(i64 %a, i32 %b, i32 0) ret i64 %z } -; CHECK: r1:0 = vrcrotate(r1:0, r2, #0) +; CHECK: = vrcrotate({{.*}}, {{.*}}, #0) declare i64 @llvm.hexagon.S4.vrcrotate.acc(i64, i64, i32, i32) define i64 @S4_vrcrotate_acc(i64 %a, i64 %b, i32 %c) { %z = call i64 @llvm.hexagon.S4.vrcrotate.acc(i64 %a, i64 %b, i32 %c, i32 0) ret i64 %z } -; CHECK: r1:0 += vrcrotate(r3:2, r4, #0) +; CHECK: += vrcrotate({{.*}}, {{.*}}, #0) diff --git a/llvm/test/CodeGen/Hexagon/intrinsics/xtype_fp.ll b/llvm/test/CodeGen/Hexagon/intrinsics/xtype_fp.ll index aef8127..598d0a8 100644 --- a/llvm/test/CodeGen/Hexagon/intrinsics/xtype_fp.ll +++ b/llvm/test/CodeGen/Hexagon/intrinsics/xtype_fp.ll @@ -1,13 +1,17 @@ ; RUN: llc -march=hexagon -mcpu=hexagonv5 -O0 < %s | FileCheck %s +; RUN: llc -march=hexagon -mcpu=hexagonv5 -O0 < %s | \ +; RUN: FileCheck -check-prefix=CHECK-CALL %s ; Hexagon Programmer's Reference Manual 11.10.4 XTYPE/FP +; CHECK-CALL-NOT: call + ; Floating point addition declare float @llvm.hexagon.F2.sfadd(float, float) define float @F2_sfadd(float %a, float %b) { %z = call float @llvm.hexagon.F2.sfadd(float %a, float %b) ret float %z } -; CHECK: r0 = sfadd(r0, r1) +; CHECK: = sfadd({{.*}}, {{.*}}) ; Classify floating-point value declare i32 @llvm.hexagon.F2.sfclass(float, i32) @@ -15,14 +19,14 @@ define i32 @F2_sfclass(float %a) { %z = call i32 @llvm.hexagon.F2.sfclass(float %a, i32 0) ret i32 %z } -; CHECK: p0 = sfclass(r0, #0) +; CHECK: = sfclass({{.*}}, #0) declare i32 @llvm.hexagon.F2.dfclass(double, i32) define i32 @F2_dfclass(double %a) { %z = call i32 @llvm.hexagon.F2.dfclass(double %a, i32 0) ret i32 %z } -; CHECK: p0 = dfclass(r1:0, #0) +; CHECK: = dfclass({{.*}}, #0) ; Compare floating-point value declare i32 @llvm.hexagon.F2.sfcmpge(float, float) @@ -30,56 +34,56 @@ define i32 @F2_sfcmpge(float %a, float %b) { %z
= call i32 @llvm.hexagon.F2.sfcmpge(float %a, float %b) ret i32 %z } -; CHECK: p0 = sfcmp.ge(r0, r1) +; CHECK: = sfcmp.ge({{.*}}, {{.*}}) declare i32 @llvm.hexagon.F2.sfcmpuo(float, float) define i32 @F2_sfcmpuo(float %a, float %b) { %z = call i32 @llvm.hexagon.F2.sfcmpuo(float %a, float %b) ret i32 %z } -; CHECK: p0 = sfcmp.uo(r0, r1) +; CHECK: = sfcmp.uo({{.*}}, {{.*}}) declare i32 @llvm.hexagon.F2.sfcmpeq(float, float) define i32 @F2_sfcmpeq(float %a, float %b) { %z = call i32 @llvm.hexagon.F2.sfcmpeq(float %a, float %b) ret i32 %z } -; CHECK: p0 = sfcmp.eq(r0, r1) +; CHECK: = sfcmp.eq({{.*}}, {{.*}}) declare i32 @llvm.hexagon.F2.sfcmpgt(float, float) define i32 @F2_sfcmpgt(float %a, float %b) { %z = call i32 @llvm.hexagon.F2.sfcmpgt(float %a, float %b) ret i32 %z } -; CHECK: p0 = sfcmp.gt(r0, r1) +; CHECK: = sfcmp.gt({{.*}}, {{.*}}) declare i32 @llvm.hexagon.F2.dfcmpge(double, double) define i32 @F2_dfcmpge(double %a, double %b) { %z = call i32 @llvm.hexagon.F2.dfcmpge(double %a, double %b) ret i32 %z } -; CHECK: p0 = dfcmp.ge(r1:0, r3:2) +; CHECK: = dfcmp.ge({{.*}}, {{.*}}) declare i32 @llvm.hexagon.F2.dfcmpuo(double, double) define i32 @F2_dfcmpuo(double %a, double %b) { %z = call i32 @llvm.hexagon.F2.dfcmpuo(double %a, double %b) ret i32 %z } -; CHECK: p0 = dfcmp.uo(r1:0, r3:2) +; CHECK: = dfcmp.uo({{.*}}, {{.*}}) declare i32 @llvm.hexagon.F2.dfcmpeq(double, double) define i32 @F2_dfcmpeq(double %a, double %b) { %z = call i32 @llvm.hexagon.F2.dfcmpeq(double %a, double %b) ret i32 %z } -; CHECK: p0 = dfcmp.eq(r1:0, r3:2) +; CHECK: = dfcmp.eq({{.*}}, {{.*}}) declare i32 @llvm.hexagon.F2.dfcmpgt(double, double) define i32 @F2_dfcmpgt(double %a, double %b) { %z = call i32 @llvm.hexagon.F2.dfcmpgt(double %a, double %b) ret i32 %z } -; CHECK: p0 = dfcmp.gt(r1:0, r3:2) +; CHECK: = dfcmp.gt({{.*}}, {{.*}}) ; Convert floating-point value to other format declare double @llvm.hexagon.F2.conv.sf2df(float) @@ -87,14 +91,14 @@ define double @F2_conv_sf2df(float %a) { %z = call double @llvm.hexagon.F2.conv.sf2df(float %a) ret double %z } -; CHECK: = convert_sf2df(r0) +; CHECK: = convert_sf2df({{.*}}) declare float @llvm.hexagon.F2.conv.df2sf(double) define float @F2_conv_df2sf(double %a) { %z = call float @llvm.hexagon.F2.conv.df2sf(double %a) ret float %z } -; CHECK: r0 = convert_df2sf(r1:0) +; CHECK: = convert_df2sf({{.*}}) ; Convert integer to floating-point value declare double @llvm.hexagon.F2.conv.ud2df(i64) @@ -102,56 +106,56 @@ define double @F2_conv_ud2df(i64 %a) { %z = call double @llvm.hexagon.F2.conv.ud2df(i64 %a) ret double %z } -; CHECK: r1:0 = convert_ud2df(r1:0) +; CHECK: = convert_ud2df({{.*}}) declare double @llvm.hexagon.F2.conv.d2df(i64) define double @F2_conv_d2df(i64 %a) { %z = call double @llvm.hexagon.F2.conv.d2df(i64 %a) ret double %z } -; CHECK: r1:0 = convert_d2df(r1:0) +; CHECK: = convert_d2df({{.*}}) declare double @llvm.hexagon.F2.conv.uw2df(i32) define double @F2_conv_uw2df(i32 %a) { %z = call double @llvm.hexagon.F2.conv.uw2df(i32 %a) ret double %z } -; CHECK: = convert_uw2df(r0) +; CHECK: = convert_uw2df({{.*}}) declare double @llvm.hexagon.F2.conv.w2df(i32) define double @F2_conv_w2df(i32 %a) { %z = call double @llvm.hexagon.F2.conv.w2df(i32 %a) ret double %z } -; CHECK: = convert_w2df(r0) +; CHECK: = convert_w2df({{.*}}) declare float @llvm.hexagon.F2.conv.ud2sf(i64) define float @F2_conv_ud2sf(i64 %a) { %z = call float @llvm.hexagon.F2.conv.ud2sf(i64 %a) ret float %z } -; CHECK: r0 = convert_ud2sf(r1:0) +; CHECK: = convert_ud2sf({{.*}}) declare float 
@llvm.hexagon.F2.conv.d2sf(i64) define float @F2_conv_d2sf(i64 %a) { %z = call float @llvm.hexagon.F2.conv.d2sf(i64 %a) ret float %z } -; CHECK: r0 = convert_d2sf(r1:0) +; CHECK: = convert_d2sf({{.*}}) declare float @llvm.hexagon.F2.conv.uw2sf(i32) define float @F2_conv_uw2sf(i32 %a) { %z = call float @llvm.hexagon.F2.conv.uw2sf(i32 %a) ret float %z } -; CHECK: r0 = convert_uw2sf(r0) +; CHECK: = convert_uw2sf({{.*}}) declare float @llvm.hexagon.F2.conv.w2sf(i32) define float @F2_conv_w2sf(i32 %a) { %z = call float @llvm.hexagon.F2.conv.w2sf(i32 %a) ret float %z } -; CHECK: r0 = convert_w2sf(r0) +; CHECK: = convert_w2sf({{.*}}) ; Convert floating-point value to integer declare i64 @llvm.hexagon.F2.conv.df2d(double) @@ -159,112 +163,112 @@ define i64 @F2_conv_df2d(double %a) { %z = call i64 @llvm.hexagon.F2.conv.df2d(double %a) ret i64 %z } -; CHECK: r1:0 = convert_df2d(r1:0) +; CHECK: = convert_df2d({{.*}}) declare i64 @llvm.hexagon.F2.conv.df2ud(double) define i64 @F2_conv_df2ud(double %a) { %z = call i64 @llvm.hexagon.F2.conv.df2ud(double %a) ret i64 %z } -; CHECK: r1:0 = convert_df2ud(r1:0) +; CHECK: {{.*}} = convert_df2ud({{.*}}) declare i64 @llvm.hexagon.F2.conv.df2d.chop(double) define i64 @F2_conv_df2d_chop(double %a) { %z = call i64 @llvm.hexagon.F2.conv.df2d.chop(double %a) ret i64 %z } -; CHECK: r1:0 = convert_df2d(r1:0):chop +; CHECK: = convert_df2d({{.*}}):chop declare i64 @llvm.hexagon.F2.conv.df2ud.chop(double) define i64 @F2_conv_df2ud_chop(double %a) { %z = call i64 @llvm.hexagon.F2.conv.df2ud.chop(double %a) ret i64 %z } -; CHECK: r1:0 = convert_df2ud(r1:0):chop +; CHECK: = convert_df2ud({{.*}}):chop declare i64 @llvm.hexagon.F2.conv.sf2ud(float) define i64 @F2_conv_sf2ud(float %a) { %z = call i64 @llvm.hexagon.F2.conv.sf2ud(float %a) ret i64 %z } -; CHECK: = convert_sf2ud(r0) +; CHECK: = convert_sf2ud({{.*}}) declare i64 @llvm.hexagon.F2.conv.sf2d(float) define i64 @F2_conv_sf2d(float %a) { %z = call i64 @llvm.hexagon.F2.conv.sf2d(float %a) ret i64 %z } -; CHECK: = convert_sf2d(r0) +; CHECK: = convert_sf2d({{.*}}) declare i64 @llvm.hexagon.F2.conv.sf2d.chop(float) define i64 @F2_conv_sf2d_chop(float %a) { %z = call i64 @llvm.hexagon.F2.conv.sf2d.chop(float %a) ret i64 %z } -; CHECK: = convert_sf2d(r0):chop +; CHECK: = convert_sf2d({{.*}}):chop declare i64 @llvm.hexagon.F2.conv.sf2ud.chop(float) define i64 @F2_conv_sf2ud_chop(float %a) { %z = call i64 @llvm.hexagon.F2.conv.sf2ud.chop(float %a) ret i64 %z } -; CHECK: = convert_sf2ud(r0):chop +; CHECK: = convert_sf2ud({{.*}}):chop declare i32 @llvm.hexagon.F2.conv.df2uw(double) define i32 @F2_conv_df2uw(double %a) { %z = call i32 @llvm.hexagon.F2.conv.df2uw(double %a) ret i32 %z } -; CHECK: r0 = convert_df2uw(r1:0) +; CHECK: = convert_df2uw({{.*}}) declare i32 @llvm.hexagon.F2.conv.df2w(double) define i32 @F2_conv_df2w(double %a) { %z = call i32 @llvm.hexagon.F2.conv.df2w(double %a) ret i32 %z } -; CHECK: r0 = convert_df2w(r1:0) +; CHECK: = convert_df2w({{.*}}) declare i32 @llvm.hexagon.F2.conv.df2w.chop(double) define i32 @F2_conv_df2w_chop(double %a) { %z = call i32 @llvm.hexagon.F2.conv.df2w.chop(double %a) ret i32 %z } -; CHECK: r0 = convert_df2w(r1:0):chop +; CHECK: = convert_df2w({{.*}}):chop declare i32 @llvm.hexagon.F2.conv.df2uw.chop(double) define i32 @F2_conv_df2uw_chop(double %a) { %z = call i32 @llvm.hexagon.F2.conv.df2uw.chop(double %a) ret i32 %z } -; CHECK: r0 = convert_df2uw(r1:0):chop +; CHECK: = convert_df2uw({{.*}}):chop declare i32 @llvm.hexagon.F2.conv.sf2uw(float) define i32 @F2_conv_sf2uw(float %a) { 
%z = call i32 @llvm.hexagon.F2.conv.sf2uw(float %a) ret i32 %z } -; CHECK: r0 = convert_sf2uw(r0) +; CHECK: = convert_sf2uw({{.*}}) declare i32 @llvm.hexagon.F2.conv.sf2uw.chop(float) define i32 @F2_conv_sf2uw_chop(float %a) { %z = call i32 @llvm.hexagon.F2.conv.sf2uw.chop(float %a) ret i32 %z } -; CHECK: r0 = convert_sf2uw(r0):chop +; CHECK: = convert_sf2uw({{.*}}):chop declare i32 @llvm.hexagon.F2.conv.sf2w(float) define i32 @F2_conv_sf2w(float %a) { %z = call i32 @llvm.hexagon.F2.conv.sf2w(float %a) ret i32 %z } -; CHECK: r0 = convert_sf2w(r0) +; CHECK: = convert_sf2w({{.*}}) declare i32 @llvm.hexagon.F2.conv.sf2w.chop(float) define i32 @F2_conv_sf2w_chop(float %a) { %z = call i32 @llvm.hexagon.F2.conv.sf2w.chop(float %a) ret i32 %z } -; CHECK: r0 = convert_sf2w(r0):chop +; CHECK: = convert_sf2w({{.*}}):chop ; Floating point extreme value assistance declare float @llvm.hexagon.F2.sffixupr(float) @@ -272,21 +276,21 @@ define float @F2_sffixupr(float %a) { %z = call float @llvm.hexagon.F2.sffixupr(float %a) ret float %z } -; CHECK: r0 = sffixupr(r0) +; CHECK: = sffixupr({{.*}}) declare float @llvm.hexagon.F2.sffixupn(float, float) define float @F2_sffixupn(float %a, float %b) { %z = call float @llvm.hexagon.F2.sffixupn(float %a, float %b) ret float %z } -; CHECK: r0 = sffixupn(r0, r1) +; CHECK: = sffixupn({{.*}}, {{.*}}) declare float @llvm.hexagon.F2.sffixupd(float, float) define float @F2_sffixupd(float %a, float %b) { %z = call float @llvm.hexagon.F2.sffixupd(float %a, float %b) ret float %z } -; CHECK: r0 = sffixupd(r0, r1) +; CHECK: = sffixupd({{.*}}, {{.*}}) ; Floating point fused multiply-add declare float @llvm.hexagon.F2.sffma(float, float, float) @@ -294,14 +298,14 @@ define float @F2_sffma(float %a, float %b, float %c) { %z = call float @llvm.hexagon.F2.sffma(float %a, float %b, float %c) ret float %z } -; CHECK: r0 += sfmpy(r1, r2) +; CHECK: += sfmpy({{.*}}, {{.*}}) declare float @llvm.hexagon.F2.sffms(float, float, float) define float @F2_sffms(float %a, float %b, float %c) { %z = call float @llvm.hexagon.F2.sffms(float %a, float %b, float %c) ret float %z } -; CHECK: r0 -= sfmpy(r1, r2) +; CHECK: -= sfmpy({{.*}}, {{.*}}) ; Floating point fused multiply-add with scaling declare float @llvm.hexagon.F2.sffma.sc(float, float, float, i32) @@ -309,7 +313,7 @@ define float @F2_sffma_sc(float %a, float %b, float %c, i32 %d) { %z = call float @llvm.hexagon.F2.sffma.sc(float %a, float %b, float %c, i32 %d) ret float %z } -; CHECK: r0 += sfmpy(r1, r2, p0):scale +; CHECK: += sfmpy({{.*}}, {{.*}}, {{.*}}):scale ; Floating point fused multiply-add for library routines declare float @llvm.hexagon.F2.sffma.lib(float, float, float) @@ -317,14 +321,14 @@ define float @F2_sffma_lib(float %a, float %b, float %c) { %z = call float @llvm.hexagon.F2.sffma.lib(float %a, float %b, float %c) ret float %z } -; CHECK: r0 += sfmpy(r1, r2):lib +; CHECK: += sfmpy({{.*}}, {{.*}}):lib declare float @llvm.hexagon.F2.sffms.lib(float, float, float) define float @F2_sffms_lib(float %a, float %b, float %c) { %z = call float @llvm.hexagon.F2.sffms.lib(float %a, float %b, float %c) ret float %z } -; CHECK: r0 -= sfmpy(r1, r2):lib +; CHECK: -= sfmpy({{.*}}, {{.*}}):lib ; Create floating-point constant declare float @llvm.hexagon.F2.sfimm.p(i32) @@ -332,28 +336,28 @@ define float @F2_sfimm_p() { %z = call float @llvm.hexagon.F2.sfimm.p(i32 0) ret float %z } -; CHECK: r0 = sfmake(#0):pos +; CHECK: = sfmake(#0):pos declare float @llvm.hexagon.F2.sfimm.n(i32) define float @F2_sfimm_n() { %z = call float 
@llvm.hexagon.F2.sfimm.n(i32 0) ret float %z } -; CHECK: r0 = sfmake(#0):neg +; CHECK: = sfmake(#0):neg declare double @llvm.hexagon.F2.dfimm.p(i32) define double @F2_dfimm_p() { %z = call double @llvm.hexagon.F2.dfimm.p(i32 0) ret double %z } -; CHECK: r1:0 = dfmake(#0):pos +; CHECK: = dfmake(#0):pos declare double @llvm.hexagon.F2.dfimm.n(i32) define double @F2_dfimm_n() { %z = call double @llvm.hexagon.F2.dfimm.n(i32 0) ret double %z } -; CHECK: r1:0 = dfmake(#0):neg +; CHECK: = dfmake(#0):neg ; Floating point maximum declare float @llvm.hexagon.F2.sfmax(float, float) @@ -361,7 +365,7 @@ define float @F2_sfmax(float %a, float %b) { %z = call float @llvm.hexagon.F2.sfmax(float %a, float %b) ret float %z } -; CHECK: r0 = sfmax(r0, r1) +; CHECK: = sfmax({{.*}}, {{.*}}) ; Floating point minimum declare float @llvm.hexagon.F2.sfmin(float, float) @@ -369,7 +373,7 @@ define float @F2_sfmin(float %a, float %b) { %z = call float @llvm.hexagon.F2.sfmin(float %a, float %b) ret float %z } -; CHECK: r0 = sfmin(r0, r1) +; CHECK: = sfmin({{.*}}, {{.*}}) ; Floating point multiply declare float @llvm.hexagon.F2.sfmpy(float, float) @@ -377,7 +381,7 @@ define float @F2_sfmpy(float %a, float %b) { %z = call float @llvm.hexagon.F2.sfmpy(float %a, float %b) ret float %z } -; CHECK: r0 = sfmpy(r0, r1) +; CHECK: = sfmpy({{.*}}, {{.*}}) ; Floating point subtraction declare float @llvm.hexagon.F2.sfsub(float, float) @@ -385,4 +389,4 @@ define float @F2_sfsub(float %a, float %b) { %z = call float @llvm.hexagon.F2.sfsub(float %a, float %b) ret float %z } -; CHECK: r0 = sfsub(r0, r1) +; CHECK: = sfsub({{.*}}, {{.*}}) diff --git a/llvm/test/CodeGen/Hexagon/intrinsics/xtype_mpy.ll b/llvm/test/CodeGen/Hexagon/intrinsics/xtype_mpy.ll index 6409e4e..a149049 100644 --- a/llvm/test/CodeGen/Hexagon/intrinsics/xtype_mpy.ll +++ b/llvm/test/CodeGen/Hexagon/intrinsics/xtype_mpy.ll @@ -1,41 +1,45 @@ ; RUN: llc -march=hexagon -mcpu=hexagonv5 -O0 < %s | FileCheck %s +; RUN: llc -march=hexagon -mcpu=hexagonv5 -O0 < %s | \ +; RUN: FileCheck -check-prefix=CHECK-CALL %s ; Hexagon Programmer's Reference Manual 11.10.5 XTYPE/MPY +; CHECK-CALL-NOT: call + ; Multiply and use lower result declare i32 @llvm.hexagon.M4.mpyrr.addi(i32, i32, i32) define i32 @M4_mpyrr_addi(i32 %a, i32 %b) { %z = call i32 @llvm.hexagon.M4.mpyrr.addi(i32 0, i32 %a, i32 %b) ret i32 %z } -; CHECK: r0 = add(#0, mpyi(r0, r1)) +; CHECK: = add(#0, mpyi({{.*}}, {{.*}})) declare i32 @llvm.hexagon.M4.mpyri.addi(i32, i32, i32) define i32 @M4_mpyri_addi(i32 %a) { %z = call i32 @llvm.hexagon.M4.mpyri.addi(i32 0, i32 %a, i32 0) ret i32 %z } -; CHECK: r0 = add(#0, mpyi(r0, #0)) +; CHECK: = add(#0, mpyi({{.*}}, #0)) declare i32 @llvm.hexagon.M4.mpyri.addr.u2(i32, i32, i32) define i32 @M4_mpyri_addr_u2(i32 %a, i32 %b) { %z = call i32 @llvm.hexagon.M4.mpyri.addr.u2(i32 %a, i32 0, i32 %b) ret i32 %z } -; CHECK: r0 = add(r0, mpyi(#0, r1)) +; CHECK: = add({{.*}}, mpyi(#0, {{.*}})) declare i32 @llvm.hexagon.M4.mpyri.addr(i32, i32, i32) define i32 @M4_mpyri_addr(i32 %a, i32 %b) { %z = call i32 @llvm.hexagon.M4.mpyri.addr(i32 %a, i32 %b, i32 0) ret i32 %z } -; CHECK: r0 = add(r0, mpyi(r1, #0)) +; CHECK: = add({{.*}}, mpyi({{.*}}, #0)) declare i32 @llvm.hexagon.M4.mpyrr.addr(i32, i32, i32) define i32 @M4_mpyrr_addr(i32 %a, i32 %b, i32 %c) { %z = call i32 @llvm.hexagon.M4.mpyrr.addr(i32 %a, i32 %b, i32 %c) ret i32 %z } -; CHECK: r1 = add(r0, mpyi(r1, r2)) +; CHECK: = add({{.*}}, mpyi({{.*}}, {{.*}})) ; Vector multiply word by signed half (32x16) declare i64 
@llvm.hexagon.M2.mmpyl.s0(i64, i64) @@ -43,56 +47,56 @@ define i64 @M2_mmpyl_s0(i64 %a, i64 %b) { %z = call i64 @llvm.hexagon.M2.mmpyl.s0(i64 %a, i64 %b) ret i64 %z } -; CHECK: r1:0 = vmpyweh(r1:0, r3:2):sat +; CHECK: = vmpyweh({{.*}}, {{.*}}):sat declare i64 @llvm.hexagon.M2.mmpyl.s1(i64, i64) define i64 @M2_mmpyl_s1(i64 %a, i64 %b) { %z = call i64 @llvm.hexagon.M2.mmpyl.s1(i64 %a, i64 %b) ret i64 %z } -; CHECK: r1:0 = vmpyweh(r1:0, r3:2):<<1:sat +; CHECK: = vmpyweh({{.*}}, {{.*}}):<<1:sat declare i64 @llvm.hexagon.M2.mmpyh.s0(i64, i64) define i64 @M2_mmpyh_s0(i64 %a, i64 %b) { %z = call i64 @llvm.hexagon.M2.mmpyh.s0(i64 %a, i64 %b) ret i64 %z } -; CHECK: r1:0 = vmpywoh(r1:0, r3:2):sat +; CHECK: = vmpywoh({{.*}}, {{.*}}):sat declare i64 @llvm.hexagon.M2.mmpyh.s1(i64, i64) define i64 @M2_mmpyh_s1(i64 %a, i64 %b) { %z = call i64 @llvm.hexagon.M2.mmpyh.s1(i64 %a, i64 %b) ret i64 %z } -; CHECK: r1:0 = vmpywoh(r1:0, r3:2):<<1:sat +; CHECK: = vmpywoh({{.*}}, {{.*}}):<<1:sat declare i64 @llvm.hexagon.M2.mmpyl.rs0(i64, i64) define i64 @M2_mmpyl_rs0(i64 %a, i64 %b) { %z = call i64 @llvm.hexagon.M2.mmpyl.rs0(i64 %a, i64 %b) ret i64 %z } -; CHECK: r1:0 = vmpyweh(r1:0, r3:2):rnd:sat +; CHECK: = vmpyweh({{.*}}, {{.*}}):rnd:sat declare i64 @llvm.hexagon.M2.mmpyl.rs1(i64, i64) define i64 @M2_mmpyl_rs1(i64 %a, i64 %b) { %z = call i64 @llvm.hexagon.M2.mmpyl.rs1(i64 %a, i64 %b) ret i64 %z } -; CHECK: r1:0 = vmpyweh(r1:0, r3:2):<<1:rnd:sat +; CHECK: = vmpyweh({{.*}}, {{.*}}):<<1:rnd:sat declare i64 @llvm.hexagon.M2.mmpyh.rs0(i64, i64) define i64 @M2_mmpyh_rs0(i64 %a, i64 %b) { %z = call i64 @llvm.hexagon.M2.mmpyh.rs0(i64 %a, i64 %b) ret i64 %z } -; CHECK: r1:0 = vmpywoh(r1:0, r3:2):rnd:sat +; CHECK: = vmpywoh({{.*}}, {{.*}}):rnd:sat declare i64 @llvm.hexagon.M2.mmpyh.rs1(i64, i64) define i64 @M2_mmpyh_rs1(i64 %a, i64 %b) { %z = call i64 @llvm.hexagon.M2.mmpyh.rs1(i64 %a, i64 %b) ret i64 %z } -; CHECK: r1:0 = vmpywoh(r1:0, r3:2):<<1:rnd:sat +; CHECK: = vmpywoh({{.*}}, {{.*}}):<<1:rnd:sat ; Vector multiply word by unsigned half (32x16) declare i64 @llvm.hexagon.M2.mmpyul.s0(i64, i64) @@ -100,56 +104,56 @@ define i64 @M2_mmpyul_s0(i64 %a, i64 %b) { %z = call i64 @llvm.hexagon.M2.mmpyul.s0(i64 %a, i64 %b) ret i64 %z } -; CHECK: r1:0 = vmpyweuh(r1:0, r3:2):sat +; CHECK: = vmpyweuh({{.*}}, {{.*}}):sat declare i64 @llvm.hexagon.M2.mmpyul.s1(i64, i64) define i64 @M2_mmpyul_s1(i64 %a, i64 %b) { %z = call i64 @llvm.hexagon.M2.mmpyul.s1(i64 %a, i64 %b) ret i64 %z } -; CHECK: r1:0 = vmpyweuh(r1:0, r3:2):<<1:sat +; CHECK: = vmpyweuh({{.*}}, {{.*}}):<<1:sat declare i64 @llvm.hexagon.M2.mmpyuh.s0(i64, i64) define i64 @M2_mmpyuh_s0(i64 %a, i64 %b) { %z = call i64 @llvm.hexagon.M2.mmpyuh.s0(i64 %a, i64 %b) ret i64 %z } -; CHECK: r1:0 = vmpywouh(r1:0, r3:2):sat +; CHECK: = vmpywouh({{.*}}, {{.*}}):sat declare i64 @llvm.hexagon.M2.mmpyuh.s1(i64, i64) define i64 @M2_mmpyuh_s1(i64 %a, i64 %b) { %z = call i64 @llvm.hexagon.M2.mmpyuh.s1(i64 %a, i64 %b) ret i64 %z } -; CHECK: r1:0 = vmpywouh(r1:0, r3:2):<<1:sat +; CHECK: = vmpywouh({{.*}}, {{.*}}):<<1:sat declare i64 @llvm.hexagon.M2.mmpyul.rs0(i64, i64) define i64 @M2_mmpyul_rs0(i64 %a, i64 %b) { %z = call i64 @llvm.hexagon.M2.mmpyul.rs0(i64 %a, i64 %b) ret i64 %z } -; CHECK: r1:0 = vmpyweuh(r1:0, r3:2):rnd:sat +; CHECK: = vmpyweuh({{.*}}, {{.*}}):rnd:sat declare i64 @llvm.hexagon.M2.mmpyul.rs1(i64, i64) define i64 @M2_mmpyul_rs1(i64 %a, i64 %b) { %z = call i64 @llvm.hexagon.M2.mmpyul.rs1(i64 %a, i64 %b) ret i64 %z } -; CHECK: r1:0 = vmpyweuh(r1:0, r3:2):<<1:rnd:sat +; CHECK: = 
vmpyweuh({{.*}}, {{.*}}):<<1:rnd:sat declare i64 @llvm.hexagon.M2.mmpyuh.rs0(i64, i64) define i64 @M2_mmpyuh_rs0(i64 %a, i64 %b) { %z = call i64 @llvm.hexagon.M2.mmpyuh.rs0(i64 %a, i64 %b) ret i64 %z } -; CHECK: r1:0 = vmpywouh(r1:0, r3:2):rnd:sat +; CHECK: = vmpywouh({{.*}}, {{.*}}):rnd:sat declare i64 @llvm.hexagon.M2.mmpyuh.rs1(i64, i64) define i64 @M2_mmpyuh_rs1(i64 %a, i64 %b) { %z = call i64 @llvm.hexagon.M2.mmpyuh.rs1(i64 %a, i64 %b) ret i64 %z } -; CHECK: r1:0 = vmpywouh(r1:0, r3:2):<<1:rnd:sat +; CHECK: = vmpywouh({{.*}}, {{.*}}):<<1:rnd:sat ; Multiply signed halfwords declare i64 @llvm.hexagon.M2.mpyd.ll.s0(i32, i32) @@ -157,616 +161,616 @@ define i64 @M2_mpyd_ll_s0(i32 %a, i32 %b) { %z = call i64 @llvm.hexagon.M2.mpyd.ll.s0(i32 %a, i32 %b) ret i64 %z } -; CHECK: r1:0 = mpy(r0.l, r1.l) +; CHECK: = mpy({{.*}}.l, {{.*}}.l) declare i64 @llvm.hexagon.M2.mpyd.ll.s1(i32, i32) define i64 @M2_mpyd_ll_s1(i32 %a, i32 %b) { %z = call i64 @llvm.hexagon.M2.mpyd.ll.s1(i32 %a, i32 %b) ret i64 %z } -; CHECK: r1:0 = mpy(r0.l, r1.l):<<1 +; CHECK: = mpy({{.*}}.l, {{.*}}.l):<<1 declare i64 @llvm.hexagon.M2.mpyd.lh.s0(i32, i32) define i64 @M2_mpyd_lh_s0(i32 %a, i32 %b) { %z = call i64 @llvm.hexagon.M2.mpyd.lh.s0(i32 %a, i32 %b) ret i64 %z } -; CHECK: r1:0 = mpy(r0.l, r1.h) +; CHECK: = mpy({{.*}}.l, {{.*}}.h) declare i64 @llvm.hexagon.M2.mpyd.lh.s1(i32, i32) define i64 @M2_mpyd_lh_s1(i32 %a, i32 %b) { %z = call i64 @llvm.hexagon.M2.mpyd.lh.s1(i32 %a, i32 %b) ret i64 %z } -; CHECK: r1:0 = mpy(r0.l, r1.h):<<1 +; CHECK: = mpy({{.*}}.l, {{.*}}.h):<<1 declare i64 @llvm.hexagon.M2.mpyd.hl.s0(i32, i32) define i64 @M2_mpyd_hl_s0(i32 %a, i32 %b) { %z = call i64 @llvm.hexagon.M2.mpyd.hl.s0(i32 %a, i32 %b) ret i64 %z } -; CHECK: r1:0 = mpy(r0.h, r1.l) +; CHECK: = mpy({{.*}}.h, {{.*}}.l) declare i64 @llvm.hexagon.M2.mpyd.hl.s1(i32, i32) define i64 @M2_mpyd_hl_s1(i32 %a, i32 %b) { %z = call i64 @llvm.hexagon.M2.mpyd.hl.s1(i32 %a, i32 %b) ret i64 %z } -; CHECK: r1:0 = mpy(r0.h, r1.l):<<1 +; CHECK: = mpy({{.*}}.h, {{.*}}.l):<<1 declare i64 @llvm.hexagon.M2.mpyd.hh.s0(i32, i32) define i64 @M2_mpyd_hh_s0(i32 %a, i32 %b) { %z = call i64 @llvm.hexagon.M2.mpyd.hh.s0(i32 %a, i32 %b) ret i64 %z } -; CHECK: r1:0 = mpy(r0.h, r1.h) +; CHECK: = mpy({{.*}}.h, {{.*}}.h) declare i64 @llvm.hexagon.M2.mpyd.hh.s1(i32, i32) define i64 @M2_mpyd_hh_s1(i32 %a, i32 %b) { %z = call i64 @llvm.hexagon.M2.mpyd.hh.s1(i32 %a, i32 %b) ret i64 %z } -; CHECK: r1:0 = mpy(r0.h, r1.h):<<1 +; CHECK: = mpy({{.*}}.h, {{.*}}.h):<<1 declare i64 @llvm.hexagon.M2.mpyd.rnd.ll.s0(i32, i32) define i64 @M2_mpyd_rnd_ll_s0(i32 %a, i32 %b) { %z = call i64 @llvm.hexagon.M2.mpyd.rnd.ll.s0(i32 %a, i32 %b) ret i64 %z } -; CHECK: r1:0 = mpy(r0.l, r1.l):rnd +; CHECK: = mpy({{.*}}.l, {{.*}}.l):rnd declare i64 @llvm.hexagon.M2.mpyd.rnd.ll.s1(i32, i32) define i64 @M2_mpyd_rnd_ll_s1(i32 %a, i32 %b) { %z = call i64 @llvm.hexagon.M2.mpyd.rnd.ll.s1(i32 %a, i32 %b) ret i64 %z } -; CHECK: r1:0 = mpy(r0.l, r1.l):<<1:rnd +; CHECK: = mpy({{.*}}.l, {{.*}}.l):<<1:rnd declare i64 @llvm.hexagon.M2.mpyd.rnd.lh.s0(i32, i32) define i64 @M2_mpyd_rnd_lh_s0(i32 %a, i32 %b) { %z = call i64 @llvm.hexagon.M2.mpyd.rnd.lh.s0(i32 %a, i32 %b) ret i64 %z } -; CHECK: r1:0 = mpy(r0.l, r1.h):rnd +; CHECK: = mpy({{.*}}.l, {{.*}}.h):rnd declare i64 @llvm.hexagon.M2.mpyd.rnd.lh.s1(i32, i32) define i64 @M2_mpyd_rnd_lh_s1(i32 %a, i32 %b) { %z = call i64 @llvm.hexagon.M2.mpyd.rnd.lh.s1(i32 %a, i32 %b) ret i64 %z } -; CHECK: r1:0 = mpy(r0.l, r1.h):<<1:rnd +; CHECK: = mpy({{.*}}.l, {{.*}}.h):<<1:rnd declare 
i64 @llvm.hexagon.M2.mpyd.rnd.hl.s0(i32, i32) define i64 @M2_mpyd_rnd_hl_s0(i32 %a, i32 %b) { %z = call i64 @llvm.hexagon.M2.mpyd.rnd.hl.s0(i32 %a, i32 %b) ret i64 %z } -; CHECK: r1:0 = mpy(r0.h, r1.l):rnd +; CHECK: = mpy({{.*}}.h, {{.*}}.l):rnd declare i64 @llvm.hexagon.M2.mpyd.rnd.hl.s1(i32, i32) define i64 @M2_mpyd_rnd_hl_s1(i32 %a, i32 %b) { %z = call i64 @llvm.hexagon.M2.mpyd.rnd.hl.s1(i32 %a, i32 %b) ret i64 %z } -; CHECK: r1:0 = mpy(r0.h, r1.l):<<1:rnd +; CHECK: = mpy({{.*}}.h, {{.*}}.l):<<1:rnd declare i64 @llvm.hexagon.M2.mpyd.rnd.hh.s0(i32, i32) define i64 @M2_mpyd_rnd_hh_s0(i32 %a, i32 %b) { %z = call i64 @llvm.hexagon.M2.mpyd.rnd.hh.s0(i32 %a, i32 %b) ret i64 %z } -; CHECK: r1:0 = mpy(r0.h, r1.h):rnd +; CHECK: = mpy({{.*}}.h, {{.*}}.h):rnd declare i64 @llvm.hexagon.M2.mpyd.rnd.hh.s1(i32, i32) define i64 @M2_mpyd_rnd_hh_s1(i32 %a, i32 %b) { %z = call i64 @llvm.hexagon.M2.mpyd.rnd.hh.s1(i32 %a, i32 %b) ret i64 %z } -; CHECK: r1:0 = mpy(r0.h, r1.h):<<1:rnd +; CHECK: = mpy({{.*}}.h, {{.*}}.h):<<1:rnd declare i64 @llvm.hexagon.M2.mpyd.acc.ll.s0(i64, i32, i32) define i64 @M2_mpyd_acc_ll_s0(i64 %a, i32 %b, i32 %c) { %z = call i64 @llvm.hexagon.M2.mpyd.acc.ll.s0(i64 %a, i32 %b, i32 %c) ret i64 %z } -; CHECK: r1:0 += mpy(r2.l, r3.l) +; CHECK: += mpy({{.*}}.l, {{.*}}.l) declare i64 @llvm.hexagon.M2.mpyd.acc.ll.s1(i64, i32, i32) define i64 @M2_mpyd_acc_ll_s1(i64 %a, i32 %b, i32 %c) { %z = call i64 @llvm.hexagon.M2.mpyd.acc.ll.s1(i64 %a, i32 %b, i32 %c) ret i64 %z } -; CHECK: r1:0 += mpy(r2.l, r3.l):<<1 +; CHECK: += mpy({{.*}}.l, {{.*}}.l):<<1 declare i64 @llvm.hexagon.M2.mpyd.acc.lh.s0(i64, i32, i32) define i64 @M2_mpyd_acc_lh_s0(i64 %a, i32 %b, i32 %c) { %z = call i64 @llvm.hexagon.M2.mpyd.acc.lh.s0(i64 %a, i32 %b, i32 %c) ret i64 %z } -; CHECK: r1:0 += mpy(r2.l, r3.h) +; CHECK: += mpy({{.*}}.l, {{.*}}.h) declare i64 @llvm.hexagon.M2.mpyd.acc.lh.s1(i64, i32, i32) define i64 @M2_mpyd_acc_lh_s1(i64 %a, i32 %b, i32 %c) { %z = call i64 @llvm.hexagon.M2.mpyd.acc.lh.s1(i64 %a, i32 %b, i32 %c) ret i64 %z } -; CHECK: r1:0 += mpy(r2.l, r3.h):<<1 +; CHECK: += mpy({{.*}}.l, {{.*}}.h):<<1 declare i64 @llvm.hexagon.M2.mpyd.acc.hl.s0(i64, i32, i32) define i64 @M2_mpyd_acc_hl_s0(i64 %a, i32 %b, i32 %c) { %z = call i64 @llvm.hexagon.M2.mpyd.acc.hl.s0(i64 %a, i32 %b, i32 %c) ret i64 %z } -; CHECK: r1:0 += mpy(r2.h, r3.l) +; CHECK: += mpy({{.*}}.h, {{.*}}.l) declare i64 @llvm.hexagon.M2.mpyd.acc.hl.s1(i64, i32, i32) define i64 @M2_mpyd_acc_hl_s1(i64 %a, i32 %b, i32 %c) { %z = call i64 @llvm.hexagon.M2.mpyd.acc.hl.s1(i64 %a, i32 %b, i32 %c) ret i64 %z } -; CHECK: r1:0 += mpy(r2.h, r3.l):<<1 +; CHECK: += mpy({{.*}}.h, {{.*}}.l):<<1 declare i64 @llvm.hexagon.M2.mpyd.acc.hh.s0(i64, i32, i32) define i64 @M2_mpyd_acc_hh_s0(i64 %a, i32 %b, i32 %c) { %z = call i64 @llvm.hexagon.M2.mpyd.acc.hh.s0(i64 %a, i32 %b, i32 %c) ret i64 %z } -; CHECK: r1:0 += mpy(r2.h, r3.h) +; CHECK: += mpy({{.*}}.h, {{.*}}.h) declare i64 @llvm.hexagon.M2.mpyd.acc.hh.s1(i64, i32, i32) define i64 @M2_mpyd_acc_hh_s1(i64 %a, i32 %b, i32 %c) { %z = call i64 @llvm.hexagon.M2.mpyd.acc.hh.s1(i64 %a, i32 %b, i32 %c) ret i64 %z } -; CHECK: r1:0 += mpy(r2.h, r3.h):<<1 +; CHECK: += mpy({{.*}}.h, {{.*}}.h):<<1 declare i64 @llvm.hexagon.M2.mpyd.nac.ll.s0(i64, i32, i32) define i64 @M2_mpyd_nac_ll_s0(i64 %a, i32 %b, i32 %c) { %z = call i64 @llvm.hexagon.M2.mpyd.nac.ll.s0(i64 %a, i32 %b, i32 %c) ret i64 %z } -; CHECK: r1:0 -= mpy(r2.l, r3.l) +; CHECK: -= mpy({{.*}}.l, {{.*}}.l) declare i64 @llvm.hexagon.M2.mpyd.nac.ll.s1(i64, i32, i32) define i64 
@M2_mpyd_nac_ll_s1(i64 %a, i32 %b, i32 %c) { %z = call i64 @llvm.hexagon.M2.mpyd.nac.ll.s1(i64 %a, i32 %b, i32 %c) ret i64 %z } -; CHECK: r1:0 -= mpy(r2.l, r3.l):<<1 +; CHECK: -= mpy({{.*}}.l, {{.*}}.l):<<1 declare i64 @llvm.hexagon.M2.mpyd.nac.lh.s0(i64, i32, i32) define i64 @M2_mpyd_nac_lh_s0(i64 %a, i32 %b, i32 %c) { %z = call i64 @llvm.hexagon.M2.mpyd.nac.lh.s0(i64 %a, i32 %b, i32 %c) ret i64 %z } -; CHECK: r1:0 -= mpy(r2.l, r3.h) +; CHECK: -= mpy({{.*}}.l, {{.*}}.h) declare i64 @llvm.hexagon.M2.mpyd.nac.lh.s1(i64, i32, i32) define i64 @M2_mpyd_nac_lh_s1(i64 %a, i32 %b, i32 %c) { %z = call i64 @llvm.hexagon.M2.mpyd.nac.lh.s1(i64 %a, i32 %b, i32 %c) ret i64 %z } -; CHECK: r1:0 -= mpy(r2.l, r3.h):<<1 +; CHECK: -= mpy({{.*}}.l, {{.*}}.h):<<1 declare i64 @llvm.hexagon.M2.mpyd.nac.hl.s0(i64, i32, i32) define i64 @M2_mpyd_nac_hl_s0(i64 %a, i32 %b, i32 %c) { %z = call i64 @llvm.hexagon.M2.mpyd.nac.hl.s0(i64 %a, i32 %b, i32 %c) ret i64 %z } -; CHECK: r1:0 -= mpy(r2.h, r3.l) +; CHECK: -= mpy({{.*}}.h, {{.*}}.l) declare i64 @llvm.hexagon.M2.mpyd.nac.hl.s1(i64, i32, i32) define i64 @M2_mpyd_nac_hl_s1(i64 %a, i32 %b, i32 %c) { %z = call i64 @llvm.hexagon.M2.mpyd.nac.hl.s1(i64 %a, i32 %b, i32 %c) ret i64 %z } -; CHECK: r1:0 -= mpy(r2.h, r3.l):<<1 +; CHECK: -= mpy({{.*}}.h, {{.*}}.l):<<1 declare i64 @llvm.hexagon.M2.mpyd.nac.hh.s0(i64, i32, i32) define i64 @M2_mpyd_nac_hh_s0(i64 %a, i32 %b, i32 %c) { %z = call i64 @llvm.hexagon.M2.mpyd.nac.hh.s0(i64 %a, i32 %b, i32 %c) ret i64 %z } -; CHECK: r1:0 -= mpy(r2.h, r3.h) +; CHECK: -= mpy({{.*}}.h, {{.*}}.h) declare i64 @llvm.hexagon.M2.mpyd.nac.hh.s1(i64, i32, i32) define i64 @M2_mpyd_nac_hh_s1(i64 %a, i32 %b, i32 %c) { %z = call i64 @llvm.hexagon.M2.mpyd.nac.hh.s1(i64 %a, i32 %b, i32 %c) ret i64 %z } -; CHECK: r1:0 -= mpy(r2.h, r3.h):<<1 +; CHECK: -= mpy({{.*}}.h, {{.*}}.h):<<1 declare i32 @llvm.hexagon.M2.mpy.ll.s0(i32, i32) define i32 @M2_mpy_ll_s0(i32 %a, i32 %b) { %z = call i32 @llvm.hexagon.M2.mpy.ll.s0(i32 %a, i32 %b) ret i32 %z } -; CHECK: r0 = mpy(r0.l, r1.l) +; CHECK: = mpy({{.*}}.l, {{.*}}.l) declare i32 @llvm.hexagon.M2.mpy.ll.s1(i32, i32) define i32 @M2_mpy_ll_s1(i32 %a, i32 %b) { %z = call i32 @llvm.hexagon.M2.mpy.ll.s1(i32 %a, i32 %b) ret i32 %z } -; CHECK: r0 = mpy(r0.l, r1.l):<<1 +; CHECK: = mpy({{.*}}.l, {{.*}}.l):<<1 declare i32 @llvm.hexagon.M2.mpy.lh.s0(i32, i32) define i32 @M2_mpy_lh_s0(i32 %a, i32 %b) { %z = call i32 @llvm.hexagon.M2.mpy.lh.s0(i32 %a, i32 %b) ret i32 %z } -; CHECK: r0 = mpy(r0.l, r1.h) +; CHECK: = mpy({{.*}}.l, {{.*}}.h) declare i32 @llvm.hexagon.M2.mpy.lh.s1(i32, i32) define i32 @M2_mpy_lh_s1(i32 %a, i32 %b) { %z = call i32 @llvm.hexagon.M2.mpy.lh.s1(i32 %a, i32 %b) ret i32 %z } -; CHECK: r0 = mpy(r0.l, r1.h):<<1 +; CHECK: = mpy({{.*}}.l, {{.*}}.h):<<1 declare i32 @llvm.hexagon.M2.mpy.hl.s0(i32, i32) define i32 @M2_mpy_hl_s0(i32 %a, i32 %b) { %z = call i32 @llvm.hexagon.M2.mpy.hl.s0(i32 %a, i32 %b) ret i32 %z } -; CHECK: r0 = mpy(r0.h, r1.l) +; CHECK: = mpy({{.*}}.h, {{.*}}.l) declare i32 @llvm.hexagon.M2.mpy.hl.s1(i32, i32) define i32 @M2_mpy_hl_s1(i32 %a, i32 %b) { %z = call i32 @llvm.hexagon.M2.mpy.hl.s1(i32 %a, i32 %b) ret i32 %z } -; CHECK: r0 = mpy(r0.h, r1.l):<<1 +; CHECK: = mpy({{.*}}.h, {{.*}}.l):<<1 declare i32 @llvm.hexagon.M2.mpy.hh.s0(i32, i32) define i32 @M2_mpy_hh_s0(i32 %a, i32 %b) { %z = call i32 @llvm.hexagon.M2.mpy.hh.s0(i32 %a, i32 %b) ret i32 %z } -; CHECK: r0 = mpy(r0.h, r1.h) +; CHECK: = mpy({{.*}}.h, {{.*}}.h) declare i32 @llvm.hexagon.M2.mpy.hh.s1(i32, i32) define i32 @M2_mpy_hh_s1(i32 %a, 
i32 %b) { %z = call i32 @llvm.hexagon.M2.mpy.hh.s1(i32 %a, i32 %b) ret i32 %z } -; CHECK: r0 = mpy(r0.h, r1.h):<<1 +; CHECK: = mpy({{.*}}.h, {{.*}}.h):<<1 declare i32 @llvm.hexagon.M2.mpy.sat.ll.s0(i32, i32) define i32 @M2_mpy_sat_ll_s0(i32 %a, i32 %b) { %z = call i32 @llvm.hexagon.M2.mpy.sat.ll.s0(i32 %a, i32 %b) ret i32 %z } -; CHECK: r0 = mpy(r0.l, r1.l):sat +; CHECK: = mpy({{.*}}.l, {{.*}}.l):sat declare i32 @llvm.hexagon.M2.mpy.sat.ll.s1(i32, i32) define i32 @M2_mpy_sat_ll_s1(i32 %a, i32 %b) { %z = call i32 @llvm.hexagon.M2.mpy.sat.ll.s1(i32 %a, i32 %b) ret i32 %z } -; CHECK: r0 = mpy(r0.l, r1.l):<<1:sat +; CHECK: = mpy({{.*}}.l, {{.*}}.l):<<1:sat declare i32 @llvm.hexagon.M2.mpy.sat.lh.s0(i32, i32) define i32 @M2_mpy_sat_lh_s0(i32 %a, i32 %b) { %z = call i32 @llvm.hexagon.M2.mpy.sat.lh.s0(i32 %a, i32 %b) ret i32 %z } -; CHECK: r0 = mpy(r0.l, r1.h):sat +; CHECK: = mpy({{.*}}.l, {{.*}}.h):sat declare i32 @llvm.hexagon.M2.mpy.sat.lh.s1(i32, i32) define i32 @M2_mpy_sat_lh_s1(i32 %a, i32 %b) { %z = call i32 @llvm.hexagon.M2.mpy.sat.lh.s1(i32 %a, i32 %b) ret i32 %z } -; CHECK: r0 = mpy(r0.l, r1.h):<<1:sat +; CHECK: = mpy({{.*}}.l, {{.*}}.h):<<1:sat declare i32 @llvm.hexagon.M2.mpy.sat.hl.s0(i32, i32) define i32 @M2_mpy_sat_hl_s0(i32 %a, i32 %b) { %z = call i32 @llvm.hexagon.M2.mpy.sat.hl.s0(i32 %a, i32 %b) ret i32 %z } -; CHECK: r0 = mpy(r0.h, r1.l):sat +; CHECK: = mpy({{.*}}.h, {{.*}}.l):sat declare i32 @llvm.hexagon.M2.mpy.sat.hl.s1(i32, i32) define i32 @M2_mpy_sat_hl_s1(i32 %a, i32 %b) { %z = call i32 @llvm.hexagon.M2.mpy.sat.hl.s1(i32 %a, i32 %b) ret i32 %z } -; CHECK: r0 = mpy(r0.h, r1.l):<<1:sat +; CHECK: = mpy({{.*}}.h, {{.*}}.l):<<1:sat declare i32 @llvm.hexagon.M2.mpy.sat.hh.s0(i32, i32) define i32 @M2_mpy_sat_hh_s0(i32 %a, i32 %b) { %z = call i32 @llvm.hexagon.M2.mpy.sat.hh.s0(i32 %a, i32 %b) ret i32 %z } -; CHECK: r0 = mpy(r0.h, r1.h):sat +; CHECK: = mpy({{.*}}.h, {{.*}}.h):sat declare i32 @llvm.hexagon.M2.mpy.sat.hh.s1(i32, i32) define i32 @M2_mpy_sat_hh_s1(i32 %a, i32 %b) { %z = call i32 @llvm.hexagon.M2.mpy.sat.hh.s1(i32 %a, i32 %b) ret i32 %z } -; CHECK: r0 = mpy(r0.h, r1.h):<<1:sat +; CHECK: = mpy({{.*}}.h, {{.*}}.h):<<1:sat declare i32 @llvm.hexagon.M2.mpy.sat.rnd.ll.s0(i32, i32) define i32 @M2_mpy_sat_rnd_ll_s0(i32 %a, i32 %b) { %z = call i32 @llvm.hexagon.M2.mpy.sat.rnd.ll.s0(i32 %a, i32 %b) ret i32 %z } -; CHECK: r0 = mpy(r0.l, r1.l):rnd:sat +; CHECK: = mpy({{.*}}.l, {{.*}}.l):rnd:sat declare i32 @llvm.hexagon.M2.mpy.sat.rnd.ll.s1(i32, i32) define i32 @M2_mpy_sat_rnd_ll_s1(i32 %a, i32 %b) { %z = call i32 @llvm.hexagon.M2.mpy.sat.rnd.ll.s1(i32 %a, i32 %b) ret i32 %z } -; CHECK: r0 = mpy(r0.l, r1.l):<<1:rnd:sat +; CHECK: = mpy({{.*}}.l, {{.*}}.l):<<1:rnd:sat declare i32 @llvm.hexagon.M2.mpy.sat.rnd.lh.s0(i32, i32) define i32 @M2_mpy_sat_rnd_lh_s0(i32 %a, i32 %b) { %z = call i32 @llvm.hexagon.M2.mpy.sat.rnd.lh.s0(i32 %a, i32 %b) ret i32 %z } -; CHECK: r0 = mpy(r0.l, r1.h):rnd:sat +; CHECK: = mpy({{.*}}.l, {{.*}}.h):rnd:sat declare i32 @llvm.hexagon.M2.mpy.sat.rnd.lh.s1(i32, i32) define i32 @M2_mpy_sat_rnd_lh_s1(i32 %a, i32 %b) { %z = call i32 @llvm.hexagon.M2.mpy.sat.rnd.lh.s1(i32 %a, i32 %b) ret i32 %z } -; CHECK: r0 = mpy(r0.l, r1.h):<<1:rnd:sat +; CHECK: = mpy({{.*}}.l, {{.*}}.h):<<1:rnd:sat declare i32 @llvm.hexagon.M2.mpy.sat.rnd.hl.s0(i32, i32) define i32 @M2_mpy_sat_rnd_hl_s0(i32 %a, i32 %b) { %z = call i32 @llvm.hexagon.M2.mpy.sat.rnd.hl.s0(i32 %a, i32 %b) ret i32 %z } -; CHECK: r0 = mpy(r0.h, r1.l):rnd:sat +; CHECK: = mpy({{.*}}.h, {{.*}}.l):rnd:sat declare i32 
@llvm.hexagon.M2.mpy.sat.rnd.hl.s1(i32, i32) define i32 @M2_mpy_sat_rnd_hl_s1(i32 %a, i32 %b) { %z = call i32 @llvm.hexagon.M2.mpy.sat.rnd.hl.s1(i32 %a, i32 %b) ret i32 %z } -; CHECK: r0 = mpy(r0.h, r1.l):<<1:rnd:sat +; CHECK: = mpy({{.*}}.h, {{.*}}.l):<<1:rnd:sat declare i32 @llvm.hexagon.M2.mpy.sat.rnd.hh.s0(i32, i32) define i32 @M2_mpy_sat_rnd_hh_s0(i32 %a, i32 %b) { %z = call i32 @llvm.hexagon.M2.mpy.sat.rnd.hh.s0(i32 %a, i32 %b) ret i32 %z } -; CHECK: r0 = mpy(r0.h, r1.h):rnd:sat +; CHECK: = mpy({{.*}}.h, {{.*}}.h):rnd:sat declare i32 @llvm.hexagon.M2.mpy.sat.rnd.hh.s1(i32, i32) define i32 @M2_mpy_sat_rnd_hh_s1(i32 %a, i32 %b) { %z = call i32 @llvm.hexagon.M2.mpy.sat.rnd.hh.s1(i32 %a, i32 %b) ret i32 %z } -; CHECK: r0 = mpy(r0.h, r1.h):<<1:rnd:sat +; CHECK: = mpy({{.*}}.h, {{.*}}.h):<<1:rnd:sat declare i32 @llvm.hexagon.M2.mpy.acc.ll.s0(i32, i32, i32) define i32 @M2_mpy_acc_ll_s0(i32 %a, i32 %b, i32 %c) { %z = call i32 @llvm.hexagon.M2.mpy.acc.ll.s0(i32 %a, i32 %b, i32 %c) ret i32 %z } -; CHECK: r0 += mpy(r1.l, r2.l) +; CHECK: += mpy({{.*}}.l, {{.*}}.l) declare i32 @llvm.hexagon.M2.mpy.acc.ll.s1(i32, i32, i32) define i32 @M2_mpy_acc_ll_s1(i32 %a, i32 %b, i32 %c) { %z = call i32 @llvm.hexagon.M2.mpy.acc.ll.s1(i32 %a, i32 %b, i32 %c) ret i32 %z } -; CHECK: r0 += mpy(r1.l, r2.l):<<1 +; CHECK: += mpy({{.*}}.l, {{.*}}.l):<<1 declare i32 @llvm.hexagon.M2.mpy.acc.lh.s0(i32, i32, i32) define i32 @M2_mpy_acc_lh_s0(i32 %a, i32 %b, i32 %c) { %z = call i32 @llvm.hexagon.M2.mpy.acc.lh.s0(i32 %a, i32 %b, i32 %c) ret i32 %z } -; CHECK: r0 += mpy(r1.l, r2.h) +; CHECK: += mpy({{.*}}.l, {{.*}}.h) declare i32 @llvm.hexagon.M2.mpy.acc.lh.s1(i32, i32, i32) define i32 @M2_mpy_acc_lh_s1(i32 %a, i32 %b, i32 %c) { %z = call i32 @llvm.hexagon.M2.mpy.acc.lh.s1(i32 %a, i32 %b, i32 %c) ret i32 %z } -; CHECK: r0 += mpy(r1.l, r2.h):<<1 +; CHECK: += mpy({{.*}}.l, {{.*}}.h):<<1 declare i32 @llvm.hexagon.M2.mpy.acc.hl.s0(i32, i32, i32) define i32 @M2_mpy_acc_hl_s0(i32 %a, i32 %b, i32 %c) { %z = call i32 @llvm.hexagon.M2.mpy.acc.hl.s0(i32 %a, i32 %b, i32 %c) ret i32 %z } -; CHECK: r0 += mpy(r1.h, r2.l) +; CHECK: += mpy({{.*}}.h, {{.*}}.l) declare i32 @llvm.hexagon.M2.mpy.acc.hl.s1(i32, i32, i32) define i32 @M2_mpy_acc_hl_s1(i32 %a, i32 %b, i32 %c) { %z = call i32 @llvm.hexagon.M2.mpy.acc.hl.s1(i32 %a, i32 %b, i32 %c) ret i32 %z } -; CHECK: r0 += mpy(r1.h, r2.l):<<1 +; CHECK: += mpy({{.*}}.h, {{.*}}.l):<<1 declare i32 @llvm.hexagon.M2.mpy.acc.hh.s0(i32, i32, i32) define i32 @M2_mpy_acc_hh_s0(i32 %a, i32 %b, i32 %c) { %z = call i32 @llvm.hexagon.M2.mpy.acc.hh.s0(i32 %a, i32 %b, i32 %c) ret i32 %z } -; CHECK: r0 += mpy(r1.h, r2.h) +; CHECK: += mpy({{.*}}.h, {{.*}}.h) declare i32 @llvm.hexagon.M2.mpy.acc.hh.s1(i32, i32, i32) define i32 @M2_mpy_acc_hh_s1(i32 %a, i32 %b, i32 %c) { %z = call i32 @llvm.hexagon.M2.mpy.acc.hh.s1(i32 %a, i32 %b, i32 %c) ret i32 %z } -; CHECK: r0 += mpy(r1.h, r2.h):<<1 +; CHECK: += mpy({{.*}}.h, {{.*}}.h):<<1 declare i32 @llvm.hexagon.M2.mpy.acc.sat.ll.s0(i32, i32, i32) define i32 @M2_mpy_acc_sat_ll_s0(i32 %a, i32 %b, i32 %c) { %z = call i32 @llvm.hexagon.M2.mpy.acc.sat.ll.s0(i32 %a, i32 %b, i32 %c) ret i32 %z } -; CHECK: r0 += mpy(r1.l, r2.l):sat +; CHECK: += mpy({{.*}}.l, {{.*}}.l):sat declare i32 @llvm.hexagon.M2.mpy.acc.sat.ll.s1(i32, i32, i32) define i32 @M2_mpy_acc_sat_ll_s1(i32 %a, i32 %b, i32 %c) { %z = call i32 @llvm.hexagon.M2.mpy.acc.sat.ll.s1(i32 %a, i32 %b, i32 %c) ret i32 %z } -; CHECK: r0 += mpy(r1.l, r2.l):<<1:sat +; CHECK: += mpy({{.*}}.l, {{.*}}.l):<<1:sat declare i32 
@llvm.hexagon.M2.mpy.acc.sat.lh.s0(i32, i32, i32)
define i32 @M2_mpy_acc_sat_lh_s0(i32 %a, i32 %b, i32 %c) {
  %z = call i32 @llvm.hexagon.M2.mpy.acc.sat.lh.s0(i32 %a, i32 %b, i32 %c)
  ret i32 %z
}
-; CHECK: r0 += mpy(r1.l, r2.h):sat
+; CHECK: += mpy({{.*}}.l, {{.*}}.h):sat

declare i32 @llvm.hexagon.M2.mpy.acc.sat.lh.s1(i32, i32, i32)
define i32 @M2_mpy_acc_sat_lh_s1(i32 %a, i32 %b, i32 %c) {
  %z = call i32 @llvm.hexagon.M2.mpy.acc.sat.lh.s1(i32 %a, i32 %b, i32 %c)
  ret i32 %z
}
-; CHECK: r0 += mpy(r1.l, r2.h):<<1:sat
+; CHECK: += mpy({{.*}}.l, {{.*}}.h):<<1:sat

declare i32 @llvm.hexagon.M2.mpy.acc.sat.hl.s0(i32, i32, i32)
define i32 @M2_mpy_acc_sat_hl_s0(i32 %a, i32 %b, i32 %c) {
  %z = call i32 @llvm.hexagon.M2.mpy.acc.sat.hl.s0(i32 %a, i32 %b, i32 %c)
  ret i32 %z
}
-; CHECK: r0 += mpy(r1.h, r2.l):sat
+; CHECK: += mpy({{.*}}.h, {{.*}}.l):sat

declare i32 @llvm.hexagon.M2.mpy.acc.sat.hl.s1(i32, i32, i32)
define i32 @M2_mpy_acc_sat_hl_s1(i32 %a, i32 %b, i32 %c) {
  %z = call i32 @llvm.hexagon.M2.mpy.acc.sat.hl.s1(i32 %a, i32 %b, i32 %c)
  ret i32 %z
}
-; CHECK: r0 += mpy(r1.h, r2.l):<<1:sat
+; CHECK: += mpy({{.*}}.h, {{.*}}.l):<<1:sat

declare i32 @llvm.hexagon.M2.mpy.acc.sat.hh.s0(i32, i32, i32)
define i32 @M2_mpy_acc_sat_hh_s0(i32 %a, i32 %b, i32 %c) {
  %z = call i32 @llvm.hexagon.M2.mpy.acc.sat.hh.s0(i32 %a, i32 %b, i32 %c)
  ret i32 %z
}
-; CHECK: r0 += mpy(r1.h, r2.h):sat
+; CHECK: += mpy({{.*}}.h, {{.*}}.h):sat

declare i32 @llvm.hexagon.M2.mpy.acc.sat.hh.s1(i32, i32, i32)
define i32 @M2_mpy_acc_sat_hh_s1(i32 %a, i32 %b, i32 %c) {
  %z = call i32 @llvm.hexagon.M2.mpy.acc.sat.hh.s1(i32 %a, i32 %b, i32 %c)
  ret i32 %z
}
-; CHECK: r0 += mpy(r1.h, r2.h):<<1:sat
+; CHECK: += mpy({{.*}}.h, {{.*}}.h):<<1:sat

declare i32 @llvm.hexagon.M2.mpy.nac.ll.s0(i32, i32, i32)
define i32 @M2_mpy_nac_ll_s0(i32 %a, i32 %b, i32 %c) {
  %z = call i32 @llvm.hexagon.M2.mpy.nac.ll.s0(i32 %a, i32 %b, i32 %c)
  ret i32 %z
}
-; CHECK: r0 -= mpy(r1.l, r2.l)
+; CHECK: -= mpy({{.*}}.l, {{.*}}.l)

declare i32 @llvm.hexagon.M2.mpy.nac.ll.s1(i32, i32, i32)
define i32 @M2_mpy_nac_ll_s1(i32 %a, i32 %b, i32 %c) {
  %z = call i32 @llvm.hexagon.M2.mpy.nac.ll.s1(i32 %a, i32 %b, i32 %c)
  ret i32 %z
}
-; CHECK: r0 -= mpy(r1.l, r2.l):<<1
+; CHECK: -= mpy({{.*}}.l, {{.*}}.l):<<1

declare i32 @llvm.hexagon.M2.mpy.nac.lh.s0(i32, i32, i32)
define i32 @M2_mpy_nac_lh_s0(i32 %a, i32 %b, i32 %c) {
  %z = call i32 @llvm.hexagon.M2.mpy.nac.lh.s0(i32 %a, i32 %b, i32 %c)
  ret i32 %z
}
-; CHECK: r0 -= mpy(r1.l, r2.h)
+; CHECK: -= mpy({{.*}}.l, {{.*}}.h)

declare i32 @llvm.hexagon.M2.mpy.nac.lh.s1(i32, i32, i32)
define i32 @M2_mpy_nac_lh_s1(i32 %a, i32 %b, i32 %c) {
  %z = call i32 @llvm.hexagon.M2.mpy.nac.lh.s1(i32 %a, i32 %b, i32 %c)
  ret i32 %z
}
-; CHECK: r0 -= mpy(r1.l, r2.h):<<1
+; CHECK: -= mpy({{.*}}.l, {{.*}}.h):<<1

declare i32 @llvm.hexagon.M2.mpy.nac.hl.s0(i32, i32, i32)
define i32 @M2_mpy_nac_hl_s0(i32 %a, i32 %b, i32 %c) {
  %z = call i32 @llvm.hexagon.M2.mpy.nac.hl.s0(i32 %a, i32 %b, i32 %c)
  ret i32 %z
}
-; CHECK: r0 -= mpy(r1.h, r2.l)
+; CHECK: -= mpy({{.*}}.h, {{.*}}.l)

declare i32 @llvm.hexagon.M2.mpy.nac.hl.s1(i32, i32, i32)
define i32 @M2_mpy_nac_hl_s1(i32 %a, i32 %b, i32 %c) {
  %z = call i32 @llvm.hexagon.M2.mpy.nac.hl.s1(i32 %a, i32 %b, i32 %c)
  ret i32 %z
}
-; CHECK: r0 -= mpy(r1.h, r2.l):<<1
+; CHECK: -= mpy({{.*}}.h, {{.*}}.l):<<1

declare i32 @llvm.hexagon.M2.mpy.nac.hh.s0(i32, i32, i32)
define i32 @M2_mpy_nac_hh_s0(i32 %a, i32 %b, i32 %c) {
  %z = call i32 @llvm.hexagon.M2.mpy.nac.hh.s0(i32 %a, i32 %b, i32 %c)
  ret i32 %z
}
-; CHECK: r0 -= mpy(r1.h, r2.h)
+; CHECK: -= mpy({{.*}}.h, {{.*}}.h)

declare i32 @llvm.hexagon.M2.mpy.nac.hh.s1(i32, i32, i32)
define i32 @M2_mpy_nac_hh_s1(i32 %a, i32 %b, i32 %c) {
  %z = call i32 @llvm.hexagon.M2.mpy.nac.hh.s1(i32 %a, i32 %b, i32 %c)
  ret i32 %z
}
-; CHECK: r0 -= mpy(r1.h, r2.h):<<1
+; CHECK: -= mpy({{.*}}.h, {{.*}}.h):<<1

declare i32 @llvm.hexagon.M2.mpy.nac.sat.ll.s0(i32, i32, i32)
define i32 @M2_mpy_nac_sat_ll_s0(i32 %a, i32 %b, i32 %c) {
  %z = call i32 @llvm.hexagon.M2.mpy.nac.sat.ll.s0(i32 %a, i32 %b, i32 %c)
  ret i32 %z
}
-; CHECK: r0 -= mpy(r1.l, r2.l):sat
+; CHECK: -= mpy({{.*}}.l, {{.*}}.l):sat

declare i32 @llvm.hexagon.M2.mpy.nac.sat.ll.s1(i32, i32, i32)
define i32 @M2_mpy_nac_sat_ll_s1(i32 %a, i32 %b, i32 %c) {
  %z = call i32 @llvm.hexagon.M2.mpy.nac.sat.ll.s1(i32 %a, i32 %b, i32 %c)
  ret i32 %z
}
-; CHECK: r0 -= mpy(r1.l, r2.l):<<1:sat
+; CHECK: -= mpy({{.*}}.l, {{.*}}.l):<<1:sat

declare i32 @llvm.hexagon.M2.mpy.nac.sat.lh.s0(i32, i32, i32)
define i32 @M2_mpy_nac_sat_lh_s0(i32 %a, i32 %b, i32 %c) {
  %z = call i32 @llvm.hexagon.M2.mpy.nac.sat.lh.s0(i32 %a, i32 %b, i32 %c)
  ret i32 %z
}
-; CHECK: r0 -= mpy(r1.l, r2.h):sat
+; CHECK: -= mpy({{.*}}.l, {{.*}}.h):sat

declare i32 @llvm.hexagon.M2.mpy.nac.sat.lh.s1(i32, i32, i32)
define i32 @M2_mpy_nac_sat_lh_s1(i32 %a, i32 %b, i32 %c) {
  %z = call i32 @llvm.hexagon.M2.mpy.nac.sat.lh.s1(i32 %a, i32 %b, i32 %c)
  ret i32 %z
}
-; CHECK: r0 -= mpy(r1.l, r2.h):<<1:sat
+; CHECK: -= mpy({{.*}}.l, {{.*}}.h):<<1:sat

declare i32 @llvm.hexagon.M2.mpy.nac.sat.hl.s0(i32, i32, i32)
define i32 @M2_mpy_nac_sat_hl_s0(i32 %a, i32 %b, i32 %c) {
  %z = call i32 @llvm.hexagon.M2.mpy.nac.sat.hl.s0(i32 %a, i32 %b, i32 %c)
  ret i32 %z
}
-; CHECK: r0 -= mpy(r1.h, r2.l):sat
+; CHECK: -= mpy({{.*}}.h, {{.*}}.l):sat

declare i32 @llvm.hexagon.M2.mpy.nac.sat.hl.s1(i32, i32, i32)
define i32 @M2_mpy_nac_sat_hl_s1(i32 %a, i32 %b, i32 %c) {
  %z = call i32 @llvm.hexagon.M2.mpy.nac.sat.hl.s1(i32 %a, i32 %b, i32 %c)
  ret i32 %z
}
-; CHECK: r0 -= mpy(r1.h, r2.l):<<1:sat
+; CHECK: -= mpy({{.*}}.h, {{.*}}.l):<<1:sat

declare i32 @llvm.hexagon.M2.mpy.nac.sat.hh.s0(i32, i32, i32)
define i32 @M2_mpy_nac_sat_hh_s0(i32 %a, i32 %b, i32 %c) {
  %z = call i32 @llvm.hexagon.M2.mpy.nac.sat.hh.s0(i32 %a, i32 %b, i32 %c)
  ret i32 %z
}
-; CHECK: r0 -= mpy(r1.h, r2.h):sat
+; CHECK: -= mpy({{.*}}.h, {{.*}}.h):sat

declare i32 @llvm.hexagon.M2.mpy.nac.sat.hh.s1(i32, i32, i32)
define i32 @M2_mpy_nac_sat_hh_s1(i32 %a, i32 %b, i32 %c) {
  %z = call i32 @llvm.hexagon.M2.mpy.nac.sat.hh.s1(i32 %a, i32 %b, i32 %c)
  ret i32 %z
}
-; CHECK: r0 -= mpy(r1.h, r2.h):<<1:sat
+; CHECK: -= mpy({{.*}}.h, {{.*}}.h):<<1:sat

; Multiply unsigned halfwords
declare i64 @llvm.hexagon.M2.mpyud.ll.s0(i32, i32)
@@ -774,336 +778,336 @@ define i64 @M2_mpyud_ll_s0(i32 %a, i32 %b) {
  %z = call i64 @llvm.hexagon.M2.mpyud.ll.s0(i32 %a, i32 %b)
  ret i64 %z
}
-; CHECK: r1:0 = mpyu(r0.l, r1.l)
+; CHECK: = mpyu({{.*}}.l, {{.*}}.l)

declare i64 @llvm.hexagon.M2.mpyud.ll.s1(i32, i32)
define i64 @M2_mpyud_ll_s1(i32 %a, i32 %b) {
  %z = call i64 @llvm.hexagon.M2.mpyud.ll.s1(i32 %a, i32 %b)
  ret i64 %z
}
-; CHECK: r1:0 = mpyu(r0.l, r1.l):<<1
+; CHECK: = mpyu({{.*}}.l, {{.*}}.l):<<1

declare i64 @llvm.hexagon.M2.mpyud.lh.s0(i32, i32)
define i64 @M2_mpyud_lh_s0(i32 %a, i32 %b) {
  %z = call i64 @llvm.hexagon.M2.mpyud.lh.s0(i32 %a, i32 %b)
  ret i64 %z
}
-; CHECK: r1:0 = mpyu(r0.l, r1.h)
+; CHECK: = mpyu({{.*}}.l, {{.*}}.h)

declare i64 @llvm.hexagon.M2.mpyud.lh.s1(i32, i32)
define i64 @M2_mpyud_lh_s1(i32 %a, i32 %b) {
  %z = call i64 @llvm.hexagon.M2.mpyud.lh.s1(i32 %a, i32 %b)
  ret i64 %z
}
-; CHECK: r1:0 = mpyu(r0.l, r1.h):<<1
+; CHECK: = mpyu({{.*}}.l, {{.*}}.h):<<1

declare i64 @llvm.hexagon.M2.mpyud.hl.s0(i32, i32)
define i64 @M2_mpyud_hl_s0(i32 %a, i32 %b) {
  %z = call i64 @llvm.hexagon.M2.mpyud.hl.s0(i32 %a, i32 %b)
  ret i64 %z
}
-; CHECK: r1:0 = mpyu(r0.h, r1.l)
+; CHECK: = mpyu({{.*}}.h, {{.*}}.l)

declare i64 @llvm.hexagon.M2.mpyud.hl.s1(i32, i32)
define i64 @M2_mpyud_hl_s1(i32 %a, i32 %b) {
  %z = call i64 @llvm.hexagon.M2.mpyud.hl.s1(i32 %a, i32 %b)
  ret i64 %z
}
-; CHECK: r1:0 = mpyu(r0.h, r1.l):<<1
+; CHECK: = mpyu({{.*}}.h, {{.*}}.l):<<1

declare i64 @llvm.hexagon.M2.mpyud.hh.s0(i32, i32)
define i64 @M2_mpyud_hh_s0(i32 %a, i32 %b) {
  %z = call i64 @llvm.hexagon.M2.mpyud.hh.s0(i32 %a, i32 %b)
  ret i64 %z
}
-; CHECK: r1:0 = mpyu(r0.h, r1.h)
+; CHECK: = mpyu({{.*}}.h, {{.*}}.h)

declare i64 @llvm.hexagon.M2.mpyud.hh.s1(i32, i32)
define i64 @M2_mpyud_hh_s1(i32 %a, i32 %b) {
  %z = call i64 @llvm.hexagon.M2.mpyud.hh.s1(i32 %a, i32 %b)
  ret i64 %z
}
-; CHECK: r1:0 = mpyu(r0.h, r1.h):<<1
+; CHECK: = mpyu({{.*}}.h, {{.*}}.h):<<1

declare i64 @llvm.hexagon.M2.mpyud.acc.ll.s0(i64, i32, i32)
define i64 @M2_mpyud_acc_ll_s0(i64 %a, i32 %b, i32 %c) {
  %z = call i64 @llvm.hexagon.M2.mpyud.acc.ll.s0(i64 %a, i32 %b, i32 %c)
  ret i64 %z
}
-; CHECK: r1:0 += mpyu(r2.l, r3.l)
+; CHECK: += mpyu({{.*}}.l, {{.*}}.l)

declare i64 @llvm.hexagon.M2.mpyud.acc.ll.s1(i64, i32, i32)
define i64 @M2_mpyud_acc_ll_s1(i64 %a, i32 %b, i32 %c) {
  %z = call i64 @llvm.hexagon.M2.mpyud.acc.ll.s1(i64 %a, i32 %b, i32 %c)
  ret i64 %z
}
-; CHECK: r1:0 += mpyu(r2.l, r3.l):<<1
+; CHECK: += mpyu({{.*}}.l, {{.*}}.l):<<1

declare i64 @llvm.hexagon.M2.mpyud.acc.lh.s0(i64, i32, i32)
define i64 @M2_mpyud_acc_lh_s0(i64 %a, i32 %b, i32 %c) {
  %z = call i64 @llvm.hexagon.M2.mpyud.acc.lh.s0(i64 %a, i32 %b, i32 %c)
  ret i64 %z
}
-; CHECK: r1:0 += mpyu(r2.l, r3.h)
+; CHECK: += mpyu({{.*}}.l, {{.*}}.h)

declare i64 @llvm.hexagon.M2.mpyud.acc.lh.s1(i64, i32, i32)
define i64 @M2_mpyud_acc_lh_s1(i64 %a, i32 %b, i32 %c) {
  %z = call i64 @llvm.hexagon.M2.mpyud.acc.lh.s1(i64 %a, i32 %b, i32 %c)
  ret i64 %z
}
-; CHECK: r1:0 += mpyu(r2.l, r3.h):<<1
+; CHECK: += mpyu({{.*}}.l, {{.*}}.h):<<1

declare i64 @llvm.hexagon.M2.mpyud.acc.hl.s0(i64, i32, i32)
define i64 @M2_mpyud_acc_hl_s0(i64 %a, i32 %b, i32 %c) {
  %z = call i64 @llvm.hexagon.M2.mpyud.acc.hl.s0(i64 %a, i32 %b, i32 %c)
  ret i64 %z
}
-; CHECK: r1:0 += mpyu(r2.h, r3.l)
+; CHECK: += mpyu({{.*}}.h, {{.*}}.l)

declare i64 @llvm.hexagon.M2.mpyud.acc.hl.s1(i64, i32, i32)
define i64 @M2_mpyud_acc_hl_s1(i64 %a, i32 %b, i32 %c) {
  %z = call i64 @llvm.hexagon.M2.mpyud.acc.hl.s1(i64 %a, i32 %b, i32 %c)
  ret i64 %z
}
-; CHECK: r1:0 += mpyu(r2.h, r3.l):<<1
+; CHECK: += mpyu({{.*}}.h, {{.*}}.l):<<1

declare i64 @llvm.hexagon.M2.mpyud.acc.hh.s0(i64, i32, i32)
define i64 @M2_mpyud_acc_hh_s0(i64 %a, i32 %b, i32 %c) {
  %z = call i64 @llvm.hexagon.M2.mpyud.acc.hh.s0(i64 %a, i32 %b, i32 %c)
  ret i64 %z
}
-; CHECK: r1:0 += mpyu(r2.h, r3.h)
+; CHECK: += mpyu({{.*}}.h, {{.*}}.h)

declare i64 @llvm.hexagon.M2.mpyud.acc.hh.s1(i64, i32, i32)
define i64 @M2_mpyud_acc_hh_s1(i64 %a, i32 %b, i32 %c) {
  %z = call i64 @llvm.hexagon.M2.mpyud.acc.hh.s1(i64 %a, i32 %b, i32 %c)
  ret i64 %z
}
-; CHECK: r1:0 += mpyu(r2.h, r3.h):<<1
+; CHECK: += mpyu({{.*}}.h, {{.*}}.h):<<1

declare i64 @llvm.hexagon.M2.mpyud.nac.ll.s0(i64, i32, i32)
define i64 @M2_mpyud_nac_ll_s0(i64 %a, i32 %b, i32 %c) {
  %z = call i64 @llvm.hexagon.M2.mpyud.nac.ll.s0(i64 %a, i32 %b, i32 %c)
  ret i64 %z
}
-; CHECK: r1:0 -= mpyu(r2.l, r3.l)
+; CHECK: -= mpyu({{.*}}.l, {{.*}}.l)

declare i64 @llvm.hexagon.M2.mpyud.nac.ll.s1(i64, i32, i32)
define i64 @M2_mpyud_nac_ll_s1(i64 %a, i32 %b, i32 %c) {
  %z = call i64 @llvm.hexagon.M2.mpyud.nac.ll.s1(i64 %a, i32 %b, i32 %c)
  ret i64 %z
}
-; CHECK: r1:0 -= mpyu(r2.l, r3.l):<<1
+; CHECK: -= mpyu({{.*}}.l, {{.*}}.l):<<1

declare i64 @llvm.hexagon.M2.mpyud.nac.lh.s0(i64, i32, i32)
define i64 @M2_mpyud_nac_lh_s0(i64 %a, i32 %b, i32 %c) {
  %z = call i64 @llvm.hexagon.M2.mpyud.nac.lh.s0(i64 %a, i32 %b, i32 %c)
  ret i64 %z
}
-; CHECK: r1:0 -= mpyu(r2.l, r3.h)
+; CHECK: -= mpyu({{.*}}.l, {{.*}}.h)

declare i64 @llvm.hexagon.M2.mpyud.nac.lh.s1(i64, i32, i32)
define i64 @M2_mpyud_nac_lh_s1(i64 %a, i32 %b, i32 %c) {
  %z = call i64 @llvm.hexagon.M2.mpyud.nac.lh.s1(i64 %a, i32 %b, i32 %c)
  ret i64 %z
}
-; CHECK: r1:0 -= mpyu(r2.l, r3.h):<<1
+; CHECK: -= mpyu({{.*}}.l, {{.*}}.h):<<1

declare i64 @llvm.hexagon.M2.mpyud.nac.hl.s0(i64, i32, i32)
define i64 @M2_mpyud_nac_hl_s0(i64 %a, i32 %b, i32 %c) {
  %z = call i64 @llvm.hexagon.M2.mpyud.nac.hl.s0(i64 %a, i32 %b, i32 %c)
  ret i64 %z
}
-; CHECK: r1:0 -= mpyu(r2.h, r3.l)
+; CHECK: -= mpyu({{.*}}.h, {{.*}}.l)

declare i64 @llvm.hexagon.M2.mpyud.nac.hl.s1(i64, i32, i32)
define i64 @M2_mpyud_nac_hl_s1(i64 %a, i32 %b, i32 %c) {
  %z = call i64 @llvm.hexagon.M2.mpyud.nac.hl.s1(i64 %a, i32 %b, i32 %c)
  ret i64 %z
}
-; CHECK: r1:0 -= mpyu(r2.h, r3.l):<<1
+; CHECK: -= mpyu({{.*}}.h, {{.*}}.l):<<1

declare i64 @llvm.hexagon.M2.mpyud.nac.hh.s0(i64, i32, i32)
define i64 @M2_mpyud_nac_hh_s0(i64 %a, i32 %b, i32 %c) {
  %z = call i64 @llvm.hexagon.M2.mpyud.nac.hh.s0(i64 %a, i32 %b, i32 %c)
  ret i64 %z
}
-; CHECK: r1:0 -= mpyu(r2.h, r3.h)
+; CHECK: -= mpyu({{.*}}.h, {{.*}}.h)

declare i64 @llvm.hexagon.M2.mpyud.nac.hh.s1(i64, i32, i32)
define i64 @M2_mpyud_nac_hh_s1(i64 %a, i32 %b, i32 %c) {
  %z = call i64 @llvm.hexagon.M2.mpyud.nac.hh.s1(i64 %a, i32 %b, i32 %c)
  ret i64 %z
}
-; CHECK: r1:0 -= mpyu(r2.h, r3.h):<<1
+; CHECK: -= mpyu({{.*}}.h, {{.*}}.h):<<1

declare i32 @llvm.hexagon.M2.mpyu.ll.s0(i32, i32)
define i32 @M2_mpyu_ll_s0(i32 %a, i32 %b) {
  %z = call i32 @llvm.hexagon.M2.mpyu.ll.s0(i32 %a, i32 %b)
  ret i32 %z
}
-; CHECK: r0 = mpyu(r0.l, r1.l)
+; CHECK: = mpyu({{.*}}.l, {{.*}}.l)

declare i32 @llvm.hexagon.M2.mpyu.ll.s1(i32, i32)
define i32 @M2_mpyu_ll_s1(i32 %a, i32 %b) {
  %z = call i32 @llvm.hexagon.M2.mpyu.ll.s1(i32 %a, i32 %b)
  ret i32 %z
}
-; CHECK: r0 = mpyu(r0.l, r1.l):<<1
+; CHECK: = mpyu({{.*}}.l, {{.*}}.l):<<1

declare i32 @llvm.hexagon.M2.mpyu.lh.s0(i32, i32)
define i32 @M2_mpyu_lh_s0(i32 %a, i32 %b) {
  %z = call i32 @llvm.hexagon.M2.mpyu.lh.s0(i32 %a, i32 %b)
  ret i32 %z
}
-; CHECK: r0 = mpyu(r0.l, r1.h)
+; CHECK: = mpyu({{.*}}.l, {{.*}}.h)

declare i32 @llvm.hexagon.M2.mpyu.lh.s1(i32, i32)
define i32 @M2_mpyu_lh_s1(i32 %a, i32 %b) {
  %z = call i32 @llvm.hexagon.M2.mpyu.lh.s1(i32 %a, i32 %b)
  ret i32 %z
}
-; CHECK: r0 = mpyu(r0.l, r1.h):<<1
+; CHECK: = mpyu({{.*}}.l, {{.*}}.h):<<1

declare i32 @llvm.hexagon.M2.mpyu.hl.s0(i32, i32)
define i32 @M2_mpyu_hl_s0(i32 %a, i32 %b) {
  %z = call i32 @llvm.hexagon.M2.mpyu.hl.s0(i32 %a, i32 %b)
  ret i32 %z
}
-; CHECK: r0 = mpyu(r0.h, r1.l)
+; CHECK: = mpyu({{.*}}.h, {{.*}}.l)

declare i32 @llvm.hexagon.M2.mpyu.hl.s1(i32, i32)
define i32 @M2_mpyu_hl_s1(i32 %a, i32 %b) {
  %z = call i32 @llvm.hexagon.M2.mpyu.hl.s1(i32 %a, i32 %b)
  ret i32 %z
}
-; CHECK: r0 = mpyu(r0.h, r1.l):<<1
+; CHECK: = mpyu({{.*}}.h, {{.*}}.l):<<1

declare i32 @llvm.hexagon.M2.mpyu.hh.s0(i32, i32)
define i32 @M2_mpyu_hh_s0(i32 %a, i32 %b) {
  %z = call i32 @llvm.hexagon.M2.mpyu.hh.s0(i32 %a, i32 %b)
  ret i32 %z
}
-; CHECK: r0 = mpyu(r0.h, r1.h)
+; CHECK: = mpyu({{.*}}.h, {{.*}}.h)

declare i32 @llvm.hexagon.M2.mpyu.hh.s1(i32, i32)
define i32 @M2_mpyu_hh_s1(i32 %a, i32 %b) {
  %z = call i32 @llvm.hexagon.M2.mpyu.hh.s1(i32 %a, i32 %b)
  ret i32 %z
}
-; CHECK: r0 = mpyu(r0.h, r1.h):<<1
+; CHECK: = mpyu({{.*}}.h, {{.*}}.h):<<1

declare i32 @llvm.hexagon.M2.mpyu.acc.ll.s0(i32, i32, i32)
define i32 @M2_mpyu_acc_ll_s0(i32 %a, i32 %b, i32 %c) {
  %z = call i32 @llvm.hexagon.M2.mpyu.acc.ll.s0(i32 %a, i32 %b, i32 %c)
  ret i32 %z
}
-; CHECK: r0 += mpyu(r1.l, r2.l)
+; CHECK: += mpyu({{.*}}.l, {{.*}}.l)

declare i32 @llvm.hexagon.M2.mpyu.acc.ll.s1(i32, i32, i32)
define i32 @M2_mpyu_acc_ll_s1(i32 %a, i32 %b, i32 %c) {
  %z = call i32 @llvm.hexagon.M2.mpyu.acc.ll.s1(i32 %a, i32 %b, i32 %c)
  ret i32 %z
}
-; CHECK: r0 += mpyu(r1.l, r2.l):<<1
+; CHECK: += mpyu({{.*}}.l, {{.*}}.l):<<1

declare i32 @llvm.hexagon.M2.mpyu.acc.lh.s0(i32, i32, i32)
define i32 @M2_mpyu_acc_lh_s0(i32 %a, i32 %b, i32 %c) {
  %z = call i32 @llvm.hexagon.M2.mpyu.acc.lh.s0(i32 %a, i32 %b, i32 %c)
  ret i32 %z
}
-; CHECK: r0 += mpyu(r1.l, r2.h)
+; CHECK: += mpyu({{.*}}.l, {{.*}}.h)

declare i32 @llvm.hexagon.M2.mpyu.acc.lh.s1(i32, i32, i32)
define i32 @M2_mpyu_acc_lh_s1(i32 %a, i32 %b, i32 %c) {
  %z = call i32 @llvm.hexagon.M2.mpyu.acc.lh.s1(i32 %a, i32 %b, i32 %c)
  ret i32 %z
}
-; CHECK: r0 += mpyu(r1.l, r2.h):<<1
+; CHECK: += mpyu({{.*}}.l, {{.*}}.h):<<1

declare i32 @llvm.hexagon.M2.mpyu.acc.hl.s0(i32, i32, i32)
define i32 @M2_mpyu_acc_hl_s0(i32 %a, i32 %b, i32 %c) {
  %z = call i32 @llvm.hexagon.M2.mpyu.acc.hl.s0(i32 %a, i32 %b, i32 %c)
  ret i32 %z
}
-; CHECK: r0 += mpyu(r1.h, r2.l)
+; CHECK: += mpyu({{.*}}.h, {{.*}}.l)

declare i32 @llvm.hexagon.M2.mpyu.acc.hl.s1(i32, i32, i32)
define i32 @M2_mpyu_acc_hl_s1(i32 %a, i32 %b, i32 %c) {
  %z = call i32 @llvm.hexagon.M2.mpyu.acc.hl.s1(i32 %a, i32 %b, i32 %c)
  ret i32 %z
}
-; CHECK: r0 += mpyu(r1.h, r2.l):<<1
+; CHECK: += mpyu({{.*}}.h, {{.*}}.l):<<1

declare i32 @llvm.hexagon.M2.mpyu.acc.hh.s0(i32, i32, i32)
define i32 @M2_mpyu_acc_hh_s0(i32 %a, i32 %b, i32 %c) {
  %z = call i32 @llvm.hexagon.M2.mpyu.acc.hh.s0(i32 %a, i32 %b, i32 %c)
  ret i32 %z
}
-; CHECK: r0 += mpyu(r1.h, r2.h)
+; CHECK: += mpyu({{.*}}.h, {{.*}}.h)

declare i32 @llvm.hexagon.M2.mpyu.acc.hh.s1(i32, i32, i32)
define i32 @M2_mpyu_acc_hh_s1(i32 %a, i32 %b, i32 %c) {
  %z = call i32 @llvm.hexagon.M2.mpyu.acc.hh.s1(i32 %a, i32 %b, i32 %c)
  ret i32 %z
}
-; CHECK: r0 += mpyu(r1.h, r2.h):<<1
+; CHECK: += mpyu({{.*}}.h, {{.*}}.h):<<1

declare i32 @llvm.hexagon.M2.mpyu.nac.ll.s0(i32, i32, i32)
define i32 @M2_mpyu_nac_ll_s0(i32 %a, i32 %b, i32 %c) {
  %z = call i32 @llvm.hexagon.M2.mpyu.nac.ll.s0(i32 %a, i32 %b, i32 %c)
  ret i32 %z
}
-; CHECK: r0 -= mpyu(r1.l, r2.l)
+; CHECK: -= mpyu({{.*}}.l, {{.*}}.l)

declare i32 @llvm.hexagon.M2.mpyu.nac.ll.s1(i32, i32, i32)
define i32 @M2_mpyu_nac_ll_s1(i32 %a, i32 %b, i32 %c) {
  %z = call i32 @llvm.hexagon.M2.mpyu.nac.ll.s1(i32 %a, i32 %b, i32 %c)
  ret i32 %z
}
-; CHECK: r0 -= mpyu(r1.l, r2.l):<<1
+; CHECK: -= mpyu({{.*}}.l, {{.*}}.l):<<1

declare i32 @llvm.hexagon.M2.mpyu.nac.lh.s0(i32, i32, i32)
define i32 @M2_mpyu_nac_lh_s0(i32 %a, i32 %b, i32 %c) {
  %z = call i32 @llvm.hexagon.M2.mpyu.nac.lh.s0(i32 %a, i32 %b, i32 %c)
  ret i32 %z
}
-; CHECK: r0 -= mpyu(r1.l, r2.h)
+; CHECK: -= mpyu({{.*}}.l, {{.*}}.h)

declare i32 @llvm.hexagon.M2.mpyu.nac.lh.s1(i32, i32, i32)
define i32 @M2_mpyu_nac_lh_s1(i32 %a, i32 %b, i32 %c) {
  %z = call i32 @llvm.hexagon.M2.mpyu.nac.lh.s1(i32 %a, i32 %b, i32 %c)
  ret i32 %z
}
-; CHECK: r0 -= mpyu(r1.l, r2.h):<<1
+; CHECK: -= mpyu({{.*}}.l, {{.*}}.h):<<1

declare i32 @llvm.hexagon.M2.mpyu.nac.hl.s0(i32, i32, i32)
define i32 @M2_mpyu_nac_hl_s0(i32 %a, i32 %b, i32 %c) {
  %z = call i32 @llvm.hexagon.M2.mpyu.nac.hl.s0(i32 %a, i32 %b, i32 %c)
  ret i32 %z
}
-; CHECK: r0 -= mpyu(r1.h, r2.l)
+; CHECK: -= mpyu({{.*}}.h, {{.*}}.l)

declare i32 @llvm.hexagon.M2.mpyu.nac.hl.s1(i32, i32, i32)
define i32 @M2_mpyu_nac_hl_s1(i32 %a, i32 %b, i32 %c) {
  %z = call i32 @llvm.hexagon.M2.mpyu.nac.hl.s1(i32 %a, i32 %b, i32 %c)
  ret i32 %z
}
-; CHECK: r0 -= mpyu(r1.h, r2.l):<<1
+; CHECK: -= mpyu({{.*}}.h, {{.*}}.l):<<1

declare i32 @llvm.hexagon.M2.mpyu.nac.hh.s0(i32, i32, i32)
define i32 @M2_mpyu_nac_hh_s0(i32 %a, i32 %b, i32 %c) {
  %z = call i32 @llvm.hexagon.M2.mpyu.nac.hh.s0(i32 %a, i32 %b, i32 %c)
  ret i32 %z
}
-; CHECK: r0 -= mpyu(r1.h, r2.h)
+; CHECK: -= mpyu({{.*}}.h, {{.*}}.h)

declare i32 @llvm.hexagon.M2.mpyu.nac.hh.s1(i32, i32, i32)
define i32 @M2_mpyu_nac_hh_s1(i32 %a, i32 %b, i32 %c) {
  %z = call i32 @llvm.hexagon.M2.mpyu.nac.hh.s1(i32 %a, i32 %b, i32 %c)
  ret i32 %z
}
-; CHECK: r0 -= mpyu(r1.h, r2.h):<<1
+; CHECK: -= mpyu({{.*}}.h, {{.*}}.h):<<1

; Polynomial multiply words
declare i64 @llvm.hexagon.M4.pmpyw(i32, i32)
@@ -1111,14 +1115,14 @@ define i64 @M4_pmpyw(i32 %a, i32 %b) {
  %z = call i64 @llvm.hexagon.M4.pmpyw(i32 %a, i32 %b)
  ret i64 %z
}
-; CHECK: r1:0 = pmpyw(r0, r1)
+; CHECK: = pmpyw({{.*}}, {{.*}})

declare i64 @llvm.hexagon.M4.pmpyw.acc(i64, i32, i32)
define i64 @M4_pmpyw_acc(i64 %a, i32 %b, i32 %c) {
  %z = call i64 @llvm.hexagon.M4.pmpyw.acc(i64 %a, i32 %b, i32 %c)
  ret i64 %z
}
-; CHECK: r1:0 ^= pmpyw(r2, r3)
+; CHECK: ^= pmpyw({{.*}}, {{.*}})

; Vector reduce multiply word by signed half
declare i64 @llvm.hexagon.M4.vrmpyoh.s0(i64, i64)
@@ -1126,56 +1130,56 @@ define i64 @M4_vrmpyoh_s0(i64 %a, i64 %b) {
  %z = call i64 @llvm.hexagon.M4.vrmpyoh.s0(i64 %a, i64 %b)
  ret i64 %z
}
-; CHECK: r1:0 = vrmpywoh(r1:0, r3:2)
+; CHECK: = vrmpywoh({{.*}}, {{.*}})

declare i64 @llvm.hexagon.M4.vrmpyoh.s1(i64, i64)
define i64 @M4_vrmpyoh_s1(i64 %a, i64 %b) {
  %z = call i64 @llvm.hexagon.M4.vrmpyoh.s1(i64 %a, i64 %b)
  ret i64 %z
}
-; CHECK: r1:0 = vrmpywoh(r1:0, r3:2):<<1
+; CHECK: = vrmpywoh({{.*}}, {{.*}}):<<1

declare i64 @llvm.hexagon.M4.vrmpyeh.s0(i64, i64)
define i64 @M4_vrmpyeh_s0(i64 %a, i64 %b) {
  %z = call i64 @llvm.hexagon.M4.vrmpyeh.s0(i64 %a, i64 %b)
  ret i64 %z
}
-; CHECK: r1:0 = vrmpyweh(r1:0, r3:2)
+; CHECK: = vrmpyweh({{.*}}, {{.*}})

declare i64 @llvm.hexagon.M4.vrmpyeh.s1(i64, i64)
define i64 @M4_vrmpyeh_s1(i64 %a, i64 %b) {
  %z = call i64 @llvm.hexagon.M4.vrmpyeh.s1(i64 %a, i64 %b)
  ret i64 %z
}
-; CHECK: r1:0 = vrmpyweh(r1:0, r3:2):<<1
+; CHECK: = vrmpyweh({{.*}}, {{.*}}):<<1

declare i64 @llvm.hexagon.M4.vrmpyoh.acc.s0(i64, i64, i64)
define i64 @M4_vrmpyoh_acc_s0(i64 %a, i64 %b, i64 %c) {
  %z = call i64 @llvm.hexagon.M4.vrmpyoh.acc.s0(i64 %a, i64 %b, i64 %c)
  ret i64 %z
}
-; CHECK: r1:0 += vrmpywoh(r3:2, r5:4)
+; CHECK: += vrmpywoh({{.*}}, r5:4)

declare i64 @llvm.hexagon.M4.vrmpyoh.acc.s1(i64, i64, i64)
define i64 @M4_vrmpyoh_acc_s1(i64 %a, i64 %b, i64 %c) {
  %z = call i64 @llvm.hexagon.M4.vrmpyoh.acc.s1(i64 %a, i64 %b, i64 %c)
  ret i64 %z
}
-; CHECK: r1:0 += vrmpywoh(r3:2, r5:4):<<1
+; CHECK: += vrmpywoh({{.*}}, r5:4):<<1

declare i64 @llvm.hexagon.M4.vrmpyeh.acc.s0(i64, i64, i64)
define i64 @M4_vrmpyeh_acc_s0(i64 %a, i64 %b, i64 %c) {
  %z = call i64 @llvm.hexagon.M4.vrmpyeh.acc.s0(i64 %a, i64 %b, i64 %c)
  ret i64 %z
}
-; CHECK: r1:0 += vrmpyweh(r3:2, r5:4)
+; CHECK: += vrmpyweh({{.*}}, r5:4)

declare i64 @llvm.hexagon.M4.vrmpyeh.acc.s1(i64, i64, i64)
define i64 @M4_vrmpyeh_acc_s1(i64 %a, i64 %b, i64 %c) {
  %z = call i64 @llvm.hexagon.M4.vrmpyeh.acc.s1(i64 %a, i64 %b, i64 %c)
  ret i64 %z
}
-; CHECK: r1:0 += vrmpyweh(r3:2, r5:4):<<1
+; CHECK: += vrmpyweh({{.*}}, r5:4):<<1

; Multiply and use upper result
declare i32 @llvm.hexagon.M2.dpmpyss.rnd.s0(i32, i32)
@@ -1183,84 +1187,84 @@ define i32 @M2_dpmpyss_rnd_s0(i32 %a, i32 %b) {
  %z = call i32 @llvm.hexagon.M2.dpmpyss.rnd.s0(i32 %a, i32 %b)
  ret i32 %z
}
-; CHECK: r0 = mpy(r0, r1):rnd
+; CHECK: = mpy({{.*}}, {{.*}}):rnd

declare i32 @llvm.hexagon.M2.mpyu.up(i32, i32)
define i32 @M2_mpyu_up(i32 %a, i32 %b) {
  %z = call i32 @llvm.hexagon.M2.mpyu.up(i32 %a, i32 %b)
  ret i32 %z
}
-; CHECK: r0 = mpyu(r0, r1)
+; CHECK: = mpyu({{.*}}, {{.*}})

declare i32 @llvm.hexagon.M2.mpysu.up(i32, i32)
define i32 @M2_mpysu_up(i32 %a, i32 %b) {
  %z = call i32 @llvm.hexagon.M2.mpysu.up(i32 %a, i32 %b)
  ret i32 %z
}
-; CHECK: r0 = mpysu(r0, r1)
+; CHECK: = mpysu({{.*}}, {{.*}})

declare i32 @llvm.hexagon.M2.hmmpyh.s1(i32, i32)
define i32 @M2_hmmpyh_s1(i32 %a, i32 %b) {
  %z = call i32 @llvm.hexagon.M2.hmmpyh.s1(i32 %a, i32 %b)
  ret i32 %z
}
-; CHECK: r0 = mpy(r0, r1.h):<<1:sat
+; CHECK: = mpy({{.*}}, {{.*}}.h):<<1:sat

declare i32 @llvm.hexagon.M2.hmmpyl.s1(i32, i32)
define i32 @M2_hmmpyl_s1(i32 %a, i32 %b) {
  %z = call i32 @llvm.hexagon.M2.hmmpyl.s1(i32 %a, i32 %b)
  ret i32 %z
}
-; CHECK: r0 = mpy(r0, r1.l):<<1:sat
+; CHECK: = mpy({{.*}}, {{.*}}.l):<<1:sat

declare i32 @llvm.hexagon.M2.hmmpyh.rs1(i32, i32)
define i32 @M2_hmmpyh_rs1(i32 %a, i32 %b) {
  %z = call i32 @llvm.hexagon.M2.hmmpyh.rs1(i32 %a, i32 %b)
  ret i32 %z
}
-; CHECK: r0 = mpy(r0, r1.h):<<1:rnd:sat
+; CHECK: = mpy({{.*}}, {{.*}}.h):<<1:rnd:sat

declare i32 @llvm.hexagon.M2.mpy.up.s1.sat(i32, i32)
define i32 @M2_mpy_up_s1_sat(i32 %a, i32 %b) {
  %z = call i32 @llvm.hexagon.M2.mpy.up.s1.sat(i32 %a, i32 %b)
  ret i32 %z
}
-; CHECK: r0 = mpy(r0, r1):<<1:sat
+; CHECK: = mpy({{.*}}, {{.*}}):<<1:sat

declare i32 @llvm.hexagon.M2.hmmpyl.rs1(i32, i32)
define i32 @M2_hmmpyl_rs1(i32 %a, i32 %b) {
  %z = call i32 @llvm.hexagon.M2.hmmpyl.rs1(i32 %a, i32 %b)
  ret i32 %z
}
-; CHECK: r0 = mpy(r0, r1.l):<<1:rnd:sat
+; CHECK: = mpy({{.*}}, {{.*}}.l):<<1:rnd:sat

declare i32 @llvm.hexagon.M2.mpy.up(i32, i32)
define i32 @M2_mpy_up(i32 %a, i32 %b) {
  %z = call i32 @llvm.hexagon.M2.mpy.up(i32 %a, i32 %b)
  ret i32 %z
}
-; CHECK: r0 = mpy(r0, r1)
+; CHECK: = mpy({{.*}}, {{.*}})

declare i32 @llvm.hexagon.M2.mpy.up.s1(i32, i32)
define i32 @M2_mpy_up_s1(i32 %a, i32 %b) {
  %z = call i32 @llvm.hexagon.M2.mpy.up.s1(i32 %a, i32 %b)
  ret i32 %z
}
-; CHECK: r0 = mpy(r0, r1):<<1
+; CHECK: = mpy({{.*}}, {{.*}}):<<1

declare i32 @llvm.hexagon.M4.mac.up.s1.sat(i32, i32, i32)
define i32 @M4_mac_up_s1_sat(i32 %a, i32 %b, i32 %c) {
  %z = call i32 @llvm.hexagon.M4.mac.up.s1.sat(i32 %a, i32 %b, i32 %c)
  ret i32 %z
}
-; CHECK: r0 += mpy(r1, r2):<<1:sat
+; CHECK: += mpy({{.*}}, {{.*}}):<<1:sat

declare i32 @llvm.hexagon.M4.nac.up.s1.sat(i32, i32, i32)
define i32 @M4_nac_up_s1_sat(i32 %a, i32 %b, i32 %c) {
  %z = call i32 @llvm.hexagon.M4.nac.up.s1.sat(i32 %a, i32 %b, i32 %c)
  ret i32 %z
}
-; CHECK: r0 -= mpy(r1, r2):<<1:sat
+; CHECK: -= mpy({{.*}}, {{.*}}):<<1:sat

; Multiply and use full result
declare i64 @llvm.hexagon.M2.dpmpyss.s0(i32, i32)
@@ -1268,42 +1272,42 @@ define i64 @M2_dpmpyss_s0(i32 %a, i32 %b) {
  %z = call i64 @llvm.hexagon.M2.dpmpyss.s0(i32 %a, i32 %b)
  ret i64 %z
}
-; CHECK: r1:0 = mpy(r0, r1)
+; CHECK: = mpy({{.*}}, {{.*}})

declare i64 @llvm.hexagon.M2.dpmpyuu.s0(i32, i32)
define i64 @M2_dpmpyuu_s0(i32 %a, i32 %b) {
  %z = call i64 @llvm.hexagon.M2.dpmpyuu.s0(i32 %a, i32 %b)
  ret i64 %z
}
-; CHECK: r1:0 = mpyu(r0, r1)
+; CHECK: = mpyu({{.*}}, {{.*}})

declare i64 @llvm.hexagon.M2.dpmpyss.acc.s0(i64, i32, i32)
define i64 @M2_dpmpyss_acc_s0(i64 %a, i32 %b, i32 %c) {
  %z = call i64 @llvm.hexagon.M2.dpmpyss.acc.s0(i64 %a, i32 %b, i32 %c)
  ret i64 %z
}
-; CHECK: r1:0 += mpy(r2, r3)
+; CHECK: += mpy({{.*}}, {{.*}})

declare i64 @llvm.hexagon.M2.dpmpyss.nac.s0(i64, i32, i32)
define i64 @M2_dpmpyss_nac_s0(i64 %a, i32 %b, i32 %c) {
  %z = call i64 @llvm.hexagon.M2.dpmpyss.nac.s0(i64 %a, i32 %b, i32 %c)
  ret i64 %z
}
-; CHECK: r1:0 -= mpy(r2, r3)
+; CHECK: -= mpy({{.*}}, {{.*}})

declare i64 @llvm.hexagon.M2.dpmpyuu.acc.s0(i64, i32, i32)
define i64 @M2_dpmpyuu_acc_s0(i64 %a, i32 %b, i32 %c) {
  %z = call i64 @llvm.hexagon.M2.dpmpyuu.acc.s0(i64 %a, i32 %b, i32 %c)
  ret i64 %z
}
-; CHECK: r1:0 += mpyu(r2, r3)
+; CHECK: += mpyu({{.*}}, {{.*}})

declare i64 @llvm.hexagon.M2.dpmpyuu.nac.s0(i64, i32, i32)
define i64 @M2_dpmpyuu_nac_s0(i64 %a, i32 %b, i32 %c) {
  %z = call i64 @llvm.hexagon.M2.dpmpyuu.nac.s0(i64 %a, i32 %b, i32 %c)
  ret i64 %z
}
-; CHECK: r1:0 -= mpyu(r2, r3)
+; CHECK: -= mpyu({{.*}}, {{.*}})

; Vector dual multiply
declare i64 @llvm.hexagon.M2.vdmpys.s0(i64, i64)
@@ -1311,14 +1315,14 @@ define i64 @M2_vdmpys_s0(i64 %a, i64 %b) {
  %z = call i64 @llvm.hexagon.M2.vdmpys.s0(i64 %a, i64 %b)
  ret i64 %z
}
-; CHECK: r1:0 = vdmpy(r1:0, r3:2):sat
+; CHECK: = vdmpy({{.*}}, {{.*}}):sat

declare i64 @llvm.hexagon.M2.vdmpys.s1(i64, i64)
define i64 @M2_vdmpys_s1(i64 %a, i64 %b) {
  %z = call i64 @llvm.hexagon.M2.vdmpys.s1(i64 %a, i64 %b)
  ret i64 %z
}
-; CHECK: r1:0 = vdmpy(r1:0, r3:2):<<1:sat
+; CHECK: = vdmpy({{.*}}, {{.*}}):<<1:sat

; Vector reduce multiply bytes
declare i64 @llvm.hexagon.M5.vrmpybuu(i64, i64)
@@ -1326,28 +1330,28 @@ define i64 @M5_vrmpybuu(i64 %a, i64 %b) {
  %z = call i64 @llvm.hexagon.M5.vrmpybuu(i64 %a, i64 %b)
  ret i64 %z
}
-; CHECK: r1:0 = vrmpybu(r1:0, r3:2)
+; CHECK: = vrmpybu({{.*}}, {{.*}})

declare i64 @llvm.hexagon.M5.vrmpybsu(i64, i64)
define i64 @M5_vrmpybsu(i64 %a, i64 %b) {
  %z = call i64 @llvm.hexagon.M5.vrmpybsu(i64 %a, i64 %b)
  ret i64 %z
}
-; CHECK: r1:0 = vrmpybsu(r1:0, r3:2)
+; CHECK: = vrmpybsu({{.*}}, {{.*}})

declare i64 @llvm.hexagon.M5.vrmacbuu(i64, i64, i64)
define i64 @M5_vrmacbuu(i64 %a, i64 %b, i64 %c) {
  %z = call i64 @llvm.hexagon.M5.vrmacbuu(i64 %a, i64 %b, i64 %c)
  ret i64 %z
}
-; CHECK: r1:0 += vrmpybu(r3:2, r5:4)
+; CHECK: += vrmpybu({{.*}}, r5:4)

declare i64 @llvm.hexagon.M5.vrmacbsu(i64, i64, i64)
define i64 @M5_vrmacbsu(i64 %a, i64 %b, i64 %c) {
  %z = call i64 @llvm.hexagon.M5.vrmacbsu(i64 %a, i64 %b, i64 %c)
  ret i64 %z
}
-; CHECK: r1:0 += vrmpybsu(r3:2, r5:4)
+; CHECK: += vrmpybsu({{.*}}, r5:4)

; Vector dual multiply signed by unsigned bytes
declare i64 @llvm.hexagon.M5.vdmpybsu(i64, i64)
@@ -1355,14 +1359,14 @@ define i64 @M5_vdmpybsu(i64 %a, i64 %b) {
  %z = call i64 @llvm.hexagon.M5.vdmpybsu(i64 %a, i64 %b)
  ret i64 %z
}
-; CHECK: r1:0 = vdmpybsu(r1:0, r3:2):sat
+; CHECK: = vdmpybsu({{.*}}, {{.*}}):sat

declare i64 @llvm.hexagon.M5.vdmacbsu(i64, i64, i64)
define i64 @M5_vdmacbsu(i64 %a, i64 %b, i64 %c) {
  %z = call i64 @llvm.hexagon.M5.vdmacbsu(i64 %a, i64 %b, i64 %c)
  ret i64 %z
}
-; CHECK: r1:0 += vdmpybsu(r3:2, r5:4):sat
+; CHECK: += vdmpybsu({{.*}}, r5:4):sat

; Vector multiply even halfwords
declare i64 @llvm.hexagon.M2.vmpy2es.s0(i64, i64)
@@ -1370,35 +1374,35 @@ define i64 @M2_vmpy2es_s0(i64 %a, i64 %b) {
  %z = call i64 @llvm.hexagon.M2.vmpy2es.s0(i64 %a, i64 %b)
  ret i64 %z
}
-; CHECK: r1:0 = vmpyeh(r1:0, r3:2):sat
+; CHECK: = vmpyeh({{.*}}, {{.*}}):sat

declare i64 @llvm.hexagon.M2.vmpy2es.s1(i64, i64)
define i64 @M2_vmpy2es_s1(i64 %a, i64 %b) {
  %z = call i64 @llvm.hexagon.M2.vmpy2es.s1(i64 %a, i64 %b)
  ret i64 %z
}
-; CHECK: r1:0 = vmpyeh(r1:0, r3:2):<<1:sat
+; CHECK: = vmpyeh({{.*}}, {{.*}}):<<1:sat

declare i64 @llvm.hexagon.M2.vmac2es(i64, i64, i64)
define i64 @M2_vmac2es(i64 %a, i64 %b, i64 %c) {
  %z = call i64 @llvm.hexagon.M2.vmac2es(i64 %a, i64 %b, i64 %c)
  ret i64 %z
}
-; CHECK: r1:0 += vmpyeh(r3:2, r5:4)
+; CHECK: += vmpyeh({{.*}}, r5:4)

declare i64 @llvm.hexagon.M2.vmac2es.s0(i64, i64, i64)
define i64 @M2_vmac2es_s0(i64 %a, i64 %b, i64 %c) {
  %z = call i64 @llvm.hexagon.M2.vmac2es.s0(i64 %a, i64 %b, i64 %c)
  ret i64 %z
}
-; CHECK: r1:0 += vmpyeh(r3:2, r5:4):sat
+; CHECK: += vmpyeh({{.*}}, r5:4):sat

declare i64 @llvm.hexagon.M2.vmac2es.s1(i64, i64, i64)
define i64 @M2_vmac2es_s1(i64 %a, i64 %b, i64 %c) {
  %z = call i64 @llvm.hexagon.M2.vmac2es.s1(i64 %a, i64 %b, i64 %c)
  ret i64 %z
}
-; CHECK: r1:0 += vmpyeh(r3:2, r5:4):<<1:sat
+; CHECK: += vmpyeh({{.*}}, r5:4):<<1:sat

; Vector multiply halfwords
declare i64 @llvm.hexagon.M2.vmpy2s.s0(i32, i32)
@@ -1406,35 +1410,35 @@ define i64 @M2_vmpy2s_s0(i32 %a, i32 %b) {
  %z = call i64 @llvm.hexagon.M2.vmpy2s.s0(i32 %a, i32 %b)
  ret i64 %z
}
-; CHECK: r1:0 = vmpyh(r0, r1):sat
+; CHECK: = vmpyh({{.*}}, {{.*}}):sat

declare i64 @llvm.hexagon.M2.vmpy2s.s1(i32, i32)
define i64 @M2_vmpy2s_s1(i32 %a, i32 %b) {
  %z = call i64 @llvm.hexagon.M2.vmpy2s.s1(i32 %a, i32 %b)
  ret i64 %z
}
-; CHECK: r1:0 = vmpyh(r0, r1):<<1:sat
+; CHECK: = vmpyh({{.*}}, {{.*}}):<<1:sat

declare i64 @llvm.hexagon.M2.vmac2(i64, i32, i32)
define i64 @M2_vmac2(i64 %a, i32 %b, i32 %c) {
  %z = call i64 @llvm.hexagon.M2.vmac2(i64 %a, i32 %b, i32 %c)
  ret i64 %z
}
-; CHECK: r1:0 += vmpyh(r2, r3)
+; CHECK: += vmpyh({{.*}}, {{.*}})

declare i64 @llvm.hexagon.M2.vmac2s.s0(i64, i32, i32)
define i64 @M2_vmac2s_s0(i64 %a, i32 %b, i32 %c) {
  %z = call i64 @llvm.hexagon.M2.vmac2s.s0(i64 %a, i32 %b, i32 %c)
  ret i64 %z
}
-; CHECK: r1:0 += vmpyh(r2, r3):sat
+; CHECK: += vmpyh({{.*}}, {{.*}}):sat

declare i64 @llvm.hexagon.M2.vmac2s.s1(i64, i32, i32)
define i64 @M2_vmac2s_s1(i64 %a, i32 %b, i32 %c) {
  %z = call i64 @llvm.hexagon.M2.vmac2s.s1(i64 %a, i32 %b, i32 %c)
  ret i64 %z
}
-; CHECK: r1:0 += vmpyh(r2, r3):<<1:sat
+; CHECK: += vmpyh({{.*}}, {{.*}}):<<1:sat

; Vector multiply halfwords signed by unsigned
declare i64 @llvm.hexagon.M2.vmpy2su.s0(i32, i32)
@@ -1442,28 +1446,28 @@ define i64 @M2_vmpy2su_s0(i32 %a, i32 %b) {
  %z = call i64 @llvm.hexagon.M2.vmpy2su.s0(i32 %a, i32 %b)
  ret i64 %z
}
-; CHECK: r1:0 = vmpyhsu(r0, r1):sat
+; CHECK: = vmpyhsu({{.*}}, {{.*}}):sat

declare i64 @llvm.hexagon.M2.vmpy2su.s1(i32, i32)
define i64 @M2_vmpy2su_s1(i32 %a, i32 %b) {
  %z = call i64 @llvm.hexagon.M2.vmpy2su.s1(i32 %a, i32 %b)
  ret i64 %z
}
-; CHECK: r1:0 = vmpyhsu(r0, r1):<<1:sat
+; CHECK: = vmpyhsu({{.*}}, {{.*}}):<<1:sat

declare i64 @llvm.hexagon.M2.vmac2su.s0(i64, i32, i32)
define i64 @M2_vmac2su_s0(i64 %a, i32 %b, i32 %c) {
  %z = call i64 @llvm.hexagon.M2.vmac2su.s0(i64 %a, i32 %b, i32 %c)
  ret i64 %z
}
-; CHECK: r1:0 += vmpyhsu(r2, r3):sat
+; CHECK: += vmpyhsu({{.*}}, {{.*}}):sat

declare i64 @llvm.hexagon.M2.vmac2su.s1(i64, i32, i32)
define i64 @M2_vmac2su_s1(i64 %a, i32 %b, i32 %c) {
  %z = call i64 @llvm.hexagon.M2.vmac2su.s1(i64 %a, i32 %b, i32 %c)
  ret i64 %z
}
-; CHECK: r1:0 += vmpyhsu(r2, r3):<<1:sat
+; CHECK: += vmpyhsu({{.*}}, {{.*}}):<<1:sat

; Vector reduce multiply halfwords
declare i64 @llvm.hexagon.M2.vrmpy.s0(i64, i64)
@@ -1471,14 +1475,14 @@ define i64 @M2_vrmpy_s0(i64 %a, i64 %b) {
  %z = call i64 @llvm.hexagon.M2.vrmpy.s0(i64 %a, i64 %b)
  ret i64 %z
}
-; CHECK: r1:0 = vrmpyh(r1:0, r3:2)
+; CHECK: = vrmpyh({{.*}}, {{.*}})

declare i64 @llvm.hexagon.M2.vrmac.s0(i64, i64, i64)
define i64 @M2_vrmac_s0(i64 %a, i64 %b, i64 %c) {
  %z = call i64 @llvm.hexagon.M2.vrmac.s0(i64 %a, i64 %b, i64 %c)
  ret i64 %z
}
-; CHECK: r1:0 += vrmpyh(r3:2, r5:4)
+; CHECK: += vrmpyh({{.*}}, r5:4)

; Vector multiply bytes
declare i64 @llvm.hexagon.M5.vmpybsu(i32, i32)
@@ -1486,28 +1490,28 @@ define i64 @M2_vmpybsu(i32 %a, i32 %b) {
  %z = call i64 @llvm.hexagon.M5.vmpybsu(i32 %a, i32 %b)
  ret i64 %z
}
-; CHECK: r1:0 = vmpybsu(r0, r1)
+; CHECK: = vmpybsu({{.*}}, {{.*}})

declare i64 @llvm.hexagon.M5.vmpybuu(i32, i32)
define i64 @M2_vmpybuu(i32 %a, i32 %b) {
  %z = call i64 @llvm.hexagon.M5.vmpybuu(i32 %a, i32 %b)
  ret i64 %z
}
-; CHECK: r1:0 = vmpybu(r0, r1)
+; CHECK: = vmpybu({{.*}}, {{.*}})

declare i64 @llvm.hexagon.M5.vmacbuu(i64, i32, i32)
define i64 @M2_vmacbuu(i64 %a, i32 %b, i32 %c) {
  %z = call i64 @llvm.hexagon.M5.vmacbuu(i64 %a, i32 %b, i32 %c)
  ret i64 %z
}
-; CHECK: r1:0 += vmpybu(r2, r3)
+; CHECK: += vmpybu({{.*}}, {{.*}})

declare i64 @llvm.hexagon.M5.vmacbsu(i64, i32, i32)
define i64 @M2_vmacbsu(i64 %a, i32 %b, i32 %c) {
  %z = call i64 @llvm.hexagon.M5.vmacbsu(i64 %a, i32 %b, i32 %c)
  ret i64 %z
}
-; CHECK: r1:0 += vmpybsu(r2, r3)
+; CHECK: += vmpybsu({{.*}}, {{.*}})

; Vector polynomial multiply halfwords
declare i64 @llvm.hexagon.M4.vpmpyh(i32, i32)
@@ -1515,11 +1519,11 @@ define i64 @M4_vpmpyh(i32 %a, i32 %b) {
  %z = call i64 @llvm.hexagon.M4.vpmpyh(i32 %a, i32 %b)
  ret i64 %z
}
-; CHECK: r1:0 = vpmpyh(r0, r1)
+; CHECK: = vpmpyh({{.*}}, {{.*}})

declare i64 @llvm.hexagon.M4.vpmpyh.acc(i64, i32, i32)
define i64 @M4_vpmpyh_acc(i64 %a, i32 %b, i32 %c) {
  %z = call i64 @llvm.hexagon.M4.vpmpyh.acc(i64 %a, i32 %b, i32 %c)
  ret i64 %z
}
-; CHECK: r1:0 ^= vpmpyh(r2, r3)
+; CHECK: ^= vpmpyh({{.*}}, {{.*}})
diff --git a/llvm/test/CodeGen/Hexagon/intrinsics/xtype_perm.ll b/llvm/test/CodeGen/Hexagon/intrinsics/xtype_perm.ll
index 0b76132..3e044e3 100644
--- a/llvm/test/CodeGen/Hexagon/intrinsics/xtype_perm.ll
+++ b/llvm/test/CodeGen/Hexagon/intrinsics/xtype_perm.ll
@@ -1,41 +1,44 @@
; RUN: llc -march=hexagon -O0 < %s | FileCheck %s
+; RUN: llc -march=hexagon -O0 < %s | FileCheck -check-prefix=CHECK-CALL %s
; Hexagon Programmer's Reference Manual 11.10.6 XTYPE/PERM

+; CHECK-CALL-NOT: call
+
; Saturate
declare i32 @llvm.hexagon.A2.sat(i64)
define i32 @A2_sat(i64 %a) {
  %z = call i32 @llvm.hexagon.A2.sat(i64 %a)
  ret i32 %z
}
-; CHECK: r0 = sat(r1:0)
+; CHECK: = sat({{.*}})

declare i32 @llvm.hexagon.A2.sath(i32)
define i32 @A2_sath(i32 %a) {
  %z = call i32 @llvm.hexagon.A2.sath(i32 %a)
  ret i32 %z
}
-; CHECK: r0 = sath(r0)
+; CHECK: = sath({{.*}})

declare i32 @llvm.hexagon.A2.satuh(i32)
define i32 @A2_satuh(i32 %a) {
  %z = call i32 @llvm.hexagon.A2.satuh(i32 %a)
  ret i32 %z
}
-; CHECK: r0 = satuh(r0)
+; CHECK: = satuh({{.*}})

declare i32 @llvm.hexagon.A2.satub(i32)
define i32 @A2_satub(i32 %a) {
  %z = call i32 @llvm.hexagon.A2.satub(i32 %a)
  ret i32 %z
}
-; CHECK: r0 = satub(r0)
+; CHECK: = satub({{.*}})

declare i32 @llvm.hexagon.A2.satb(i32)
define i32 @A2_satb(i32 %a) {
  %z = call i32 @llvm.hexagon.A2.satb(i32 %a)
  ret i32 %z
}
-; CHECK: r0 = satb(r0)
+; CHECK: = satb({{.*}})

; Swizzle bytes
declare i32 @llvm.hexagon.A2.swiz(i32)
@@ -43,7 +46,7 @@ define i32 @A2_swiz(i32 %a) {
  %z = call i32 @llvm.hexagon.A2.swiz(i32 %a)
  ret i32 %z
}
-; CHECK: r0 = swiz(r0)
+; CHECK: = swiz({{.*}})

; Vector round and pack
declare i32 @llvm.hexagon.S2.vrndpackwh(i64)
@@ -51,14 +54,14 @@ define i32 @S2_vrndpackwh(i64 %a) {
  %z = call i32 @llvm.hexagon.S2.vrndpackwh(i64 %a)
  ret i32 %z
}
-; CHECK: r0 = vrndwh(r1:0)
+; CHECK: = vrndwh({{.*}})

declare i32 @llvm.hexagon.S2.vrndpackwhs(i64)
define i32 @S2_vrndpackwhs(i64 %a) {
  %z = call i32 @llvm.hexagon.S2.vrndpackwhs(i64 %a)
  ret i32 %z
}
-; CHECK: r0 = vrndwh(r1:0):sat
+; CHECK: = vrndwh({{.*}}):sat

; Vector saturate and pack
declare i32 @llvm.hexagon.S2.vsathub(i64)
@@ -66,42 +69,42 @@ define i32 @S2_vsathub(i64 %a) {
  %z = call i32 @llvm.hexagon.S2.vsathub(i64 %a)
  ret i32 %z
}
-; CHECK: r0 = vsathub(r1:0)
+; CHECK: = vsathub({{.*}})

declare i32 @llvm.hexagon.S2.vsatwh(i64)
define i32 @S2_vsatwh(i64 %a) {
  %z = call i32 @llvm.hexagon.S2.vsatwh(i64 %a)
  ret i32 %z
}
-; CHECK: r0 = vsatwh(r1:0)
+; CHECK: = vsatwh({{.*}})

declare i32 @llvm.hexagon.S2.vsatwuh(i64)
define i32 @S2_vsatwuh(i64 %a) {
  %z = call i32 @llvm.hexagon.S2.vsatwuh(i64 %a)
  ret i32 %z
}
-; CHECK: r0 = vsatwuh(r1:0)
+; CHECK: = vsatwuh({{.*}})

declare i32 @llvm.hexagon.S2.vsathb(i64)
define i32 @S2_vsathb(i64 %a) {
  %z = call i32 @llvm.hexagon.S2.vsathb(i64 %a)
  ret i32 %z
}
-; CHECK: r0 = vsathb(r1:0)
+; CHECK: = vsathb({{.*}})

declare i32 @llvm.hexagon.S2.svsathb(i32)
define i32 @S2_svsathb(i32 %a) {
  %z = call i32 @llvm.hexagon.S2.svsathb(i32 %a)
  ret i32 %z
}
-; CHECK: r0 = vsathb(r0)
+; CHECK: = vsathb({{.*}})

declare i32 @llvm.hexagon.S2.svsathub(i32)
define i32 @S2_svsathub(i32 %a) {
  %z = call i32 @llvm.hexagon.S2.svsathub(i32 %a)
  ret i32 %z
}
-; CHECK: r0 = vsathub(r0)
+; CHECK: = vsathub({{.*}})

; Vector saturate without pack
declare i64 @llvm.hexagon.S2.vsathub.nopack(i64)
@@ -109,28 +112,28 @@ define i64 @S2_vsathub_nopack(i64 %a) {
  %z = call i64 @llvm.hexagon.S2.vsathub.nopack(i64 %a)
  ret i64 %z
}
-; CHECK: r1:0 = vsathub(r1:0)
+; CHECK: = vsathub({{.*}})

declare i64 @llvm.hexagon.S2.vsatwuh.nopack(i64)
define i64 @S2_vsatwuh_nopack(i64 %a) {
  %z = call i64 @llvm.hexagon.S2.vsatwuh.nopack(i64 %a)
  ret i64 %z
}
-; CHECK: r1:0 = vsatwuh(r1:0)
+; CHECK: = vsatwuh({{.*}})

declare i64 @llvm.hexagon.S2.vsatwh.nopack(i64)
define i64 @S2_vsatwh_nopack(i64 %a) {
  %z = call i64 @llvm.hexagon.S2.vsatwh.nopack(i64 %a)
  ret i64 %z
}
-; CHECK: r1:0 = vsatwh(r1:0)
+; CHECK: = vsatwh({{.*}})

declare i64 @llvm.hexagon.S2.vsathb.nopack(i64)
define i64 @S2_vsathb_nopack(i64 %a) {
  %z = call i64 @llvm.hexagon.S2.vsathb.nopack(i64 %a)
  ret i64 %z
}
-; CHECK: r1:0 = vsathb(r1:0)
+; CHECK: = vsathb({{.*}})

; Vector shuffle
declare i64 @llvm.hexagon.S2.shuffeb(i64, i64)
@@ -138,28 +141,28 @@ define i64 @S2_shuffeb(i64 %a, i64 %b) {
  %z = call i64 @llvm.hexagon.S2.shuffeb(i64 %a, i64 %b)
  ret i64 %z
}
-; CHECK: r1:0 = shuffeb(r1:0, r3:2)
+; CHECK: = shuffeb({{.*}}, {{.*}})

declare i64 @llvm.hexagon.S2.shuffob(i64, i64)
define i64 @S2_shuffob(i64 %a, i64 %b) {
  %z = call i64 @llvm.hexagon.S2.shuffob(i64 %a, i64 %b)
  ret i64 %z
}
-; CHECK: r1:0 = shuffob(r1:0, r3:2)
+; CHECK: = shuffob({{.*}}, {{.*}})

declare i64 @llvm.hexagon.S2.shuffeh(i64, i64)
define i64 @S2_shuffeh(i64 %a, i64 %b) {
  %z = call i64 @llvm.hexagon.S2.shuffeh(i64 %a, i64 %b)
  ret i64 %z
}
-; CHECK: r1:0 = shuffeh(r1:0, r3:2)
+; CHECK: = shuffeh({{.*}}, {{.*}})

declare i64 @llvm.hexagon.S2.shuffoh(i64, i64)
define i64 @S2_shuffoh(i64 %a, i64 %b) {
  %z = call i64 @llvm.hexagon.S2.shuffoh(i64 %a, i64 %b)
  ret i64 %z
}
-; CHECK: r1:0 = shuffoh(r1:0, r3:2)
+; CHECK: = shuffoh({{.*}}, {{.*}})

; Vector splat bytes
declare i32 @llvm.hexagon.S2.vsplatrb(i32)
@@ -167,7 +170,7 @@ define i32 @S2_vsplatrb(i32 %a) {
  %z = call i32 @llvm.hexagon.S2.vsplatrb(i32 %a)
  ret i32 %z
}
-; CHECK: r0 = vsplatb(r0)
+; CHECK: = vsplatb({{.*}})

; Vector splat halfwords
declare i64 @llvm.hexagon.S2.vsplatrh(i32)
@@ -175,7 +178,7 @@ define i64 @S2_vsplatrh(i32 %a) {
  %z = call i64 @llvm.hexagon.S2.vsplatrh(i32 %a)
  ret i64 %z
}
-; CHECK: = vsplath(r0)
+; CHECK: = vsplath({{.*}})

; Vector splice
declare i64 @llvm.hexagon.S2.vspliceib(i64, i64, i32)
@@ -183,14 +186,14 @@ define i64 @S2_vspliceib(i64 %a, i64 %b) {
  %z = call i64 @llvm.hexagon.S2.vspliceib(i64 %a, i64 %b, i32 0)
  ret i64 %z
}
-; CHECK: r1:0 = vspliceb(r1:0, r3:2, #0)
+; CHECK: = vspliceb({{.*}}, {{.*}}, #0)

declare i64 @llvm.hexagon.S2.vsplicerb(i64, i64, i32)
define i64 @S2_vsplicerb(i64 %a, i64 %b, i32 %c) {
  %z = call i64 @llvm.hexagon.S2.vsplicerb(i64 %a, i64 %b, i32 %c)
  ret i64 %z
}
-; CHECK: r1:0 = vspliceb(r1:0, r3:2, p0)
+; CHECK: = vspliceb({{.*}}, {{.*}}, {{.*}})

; Vector sign extend
declare i64 @llvm.hexagon.S2.vsxtbh(i32)
@@ -198,14 +201,14 @@ define i64 @S2_vsxtbh(i32 %a) {
  %z = call i64 @llvm.hexagon.S2.vsxtbh(i32 %a)
  ret i64 %z
}
-; CHECK: = vsxtbh(r0)
+; CHECK: = vsxtbh({{.*}})

declare i64 @llvm.hexagon.S2.vsxthw(i32)
define i64 @S2_vsxthw(i32 %a) {
  %z = call i64 @llvm.hexagon.S2.vsxthw(i32 %a)
  ret i64 %z
}
-; CHECK: = vsxthw(r0)
+; CHECK: = vsxthw({{.*}})

; Vector truncate
declare i32 @llvm.hexagon.S2.vtrunohb(i64)
@@ -213,28 +216,28 @@ define i32 @S2_vtrunohb(i64 %a) {
  %z = call i32 @llvm.hexagon.S2.vtrunohb(i64 %a)
  ret i32 %z
}
-; CHECK: r0 = vtrunohb(r1:0)
+; CHECK: = vtrunohb({{.*}})

declare i32 @llvm.hexagon.S2.vtrunehb(i64)
define i32 @S2_vtrunehb(i64 %a) {
  %z = call i32 @llvm.hexagon.S2.vtrunehb(i64 %a)
  ret i32 %z
}
-; CHECK: r0 = vtrunehb(r1:0)
+; CHECK: = vtrunehb({{.*}})

declare i64 @llvm.hexagon.S2.vtrunowh(i64, i64)
define i64 @S2_vtrunowh(i64 %a, i64 %b) {
  %z = call i64 @llvm.hexagon.S2.vtrunowh(i64 %a, i64 %b)
  ret i64 %z
}
-; CHECK: r1:0 = vtrunowh(r1:0, r3:2)
+; CHECK: = vtrunowh({{.*}}, {{.*}})

declare i64 @llvm.hexagon.S2.vtrunewh(i64, i64)
define i64 @S2_vtrunewh(i64 %a, i64 %b) {
  %z = call i64 @llvm.hexagon.S2.vtrunewh(i64 %a, i64 %b)
  ret i64 %z
}
-; CHECK: r1:0 = vtrunewh(r1:0, r3:2)
+; CHECK: = vtrunewh({{.*}}, {{.*}})

; Vector zero extend
declare i64 @llvm.hexagon.S2.vzxtbh(i32)
@@ -242,11 +245,11 @@ define i64 @S2_vzxtbh(i32 %a) {
  %z = call i64 @llvm.hexagon.S2.vzxtbh(i32 %a)
  ret i64 %z
}
-; CHECK: = vzxtbh(r0)
+; CHECK: = vzxtbh({{.*}})

declare i64 @llvm.hexagon.S2.vzxthw(i32)
define i64 @S2_vzxthw(i32 %a) {
  %z = call i64 @llvm.hexagon.S2.vzxthw(i32 %a)
  ret i64 %z
}
-; CHECK: = vzxthw(r0)
+; CHECK: = vzxthw({{.*}})
diff --git a/llvm/test/CodeGen/Hexagon/intrinsics/xtype_pred.ll b/llvm/test/CodeGen/Hexagon/intrinsics/xtype_pred.ll
index 96e63d8..f06339b 100644
--- a/llvm/test/CodeGen/Hexagon/intrinsics/xtype_pred.ll
+++ b/llvm/test/CodeGen/Hexagon/intrinsics/xtype_pred.ll
@@ -1,48 +1,51 @@
; RUN: llc -march=hexagon -O0 < %s | FileCheck %s
+; RUN: llc -march=hexagon -O0 < %s | FileCheck -check-prefix=CHECK-CALL %s
; Hexagon Programmer's Reference Manual 11.10.7 XTYPE/PRED

+; CHECK-CALL-NOT: call
+
; Compare byte
declare i32 @llvm.hexagon.A4.cmpbgt(i32, i32)
define i32 @A4_cmpbgt(i32 %a, i32 %b) {
  %z = call i32 @llvm.hexagon.A4.cmpbgt(i32 %a, i32 %b)
  ret i32 %z
}
-; CHECK: p0 = cmpb.gt(r0, r1)
+; CHECK: = cmpb.gt({{.*}}, {{.*}})

declare i32 @llvm.hexagon.A4.cmpbeq(i32, i32)
define i32 @A4_cmpbeq(i32 %a, i32 %b) {
  %z = call i32 @llvm.hexagon.A4.cmpbeq(i32 %a, i32 %b)
  ret i32 %z
}
-; CHECK: p0 = cmpb.eq(r0, r1)
+; CHECK: = cmpb.eq({{.*}}, {{.*}})

declare i32 @llvm.hexagon.A4.cmpbgtu(i32, i32)
define i32 @A4_cmpbgtu(i32 %a, i32 %b) {
  %z = call i32 @llvm.hexagon.A4.cmpbgtu(i32 %a, i32 %b)
  ret i32 %z
}
-; CHECK: p0 = cmpb.gtu(r0, r1)
+; CHECK: = cmpb.gtu({{.*}}, {{.*}})

declare i32 @llvm.hexagon.A4.cmpbgti(i32, i32)
define i32 @A4_cmpbgti(i32 %a) {
  %z = call i32 @llvm.hexagon.A4.cmpbgti(i32 %a, i32 0)
  ret i32 %z
}
-; CHECK: p0 = cmpb.gt(r0, #0)
+; CHECK: = cmpb.gt({{.*}}, #0)

declare i32 @llvm.hexagon.A4.cmpbeqi(i32, i32)
define i32 @A4_cmpbeqi(i32 %a) {
  %z = call i32 @llvm.hexagon.A4.cmpbeqi(i32 %a, i32 0)
  ret i32 %z
}
-; CHECK: p0 = cmpb.eq(r0, #0)
+; CHECK: = cmpb.eq({{.*}}, #0)

declare i32 @llvm.hexagon.A4.cmpbgtui(i32, i32)
define i32 @A4_cmpbgtui(i32 %a) {
  %z = call i32 @llvm.hexagon.A4.cmpbgtui(i32 %a, i32 0)
  ret i32 %z
}
-; CHECK: p0 = cmpb.gtu(r0, #0)
+; CHECK: = cmpb.gtu({{.*}}, #0)

; Compare half
declare i32 @llvm.hexagon.A4.cmphgt(i32, i32)
@@ -50,42 +53,42 @@ define i32 @A4_cmphgt(i32 %a, i32 %b) {
  %z = call i32 @llvm.hexagon.A4.cmphgt(i32 %a, i32 %b)
  ret i32 %z
}
-; CHECK: p0 = cmph.gt(r0, r1)
+; CHECK: = cmph.gt({{.*}}, {{.*}})

declare i32 @llvm.hexagon.A4.cmpheq(i32, i32)
define i32 @A4_cmpheq(i32 %a, i32 %b) {
  %z = call i32 @llvm.hexagon.A4.cmpheq(i32 %a, i32 %b)
  ret i32 %z
}
-; CHECK: p0 = cmph.eq(r0, r1)
+; CHECK: = cmph.eq({{.*}}, {{.*}})

declare i32 @llvm.hexagon.A4.cmphgtu(i32, i32)
define i32 @A4_cmphgtu(i32 %a, i32 %b) {
  %z = call i32 @llvm.hexagon.A4.cmphgtu(i32 %a, i32 %b)
  ret i32 %z
}
-; CHECK: p0 = cmph.gtu(r0, r1)
+; CHECK: = cmph.gtu({{.*}}, {{.*}})

declare i32 @llvm.hexagon.A4.cmphgti(i32, i32)
define i32 @A4_cmphgti(i32 %a) {
  %z = call i32 @llvm.hexagon.A4.cmphgti(i32 %a, i32 0)
  ret i32 %z
}
-; CHECK: p0 = cmph.gt(r0, #0)
+; CHECK: = cmph.gt({{.*}}, #0)

declare i32 @llvm.hexagon.A4.cmpheqi(i32, i32)
define i32 @A4_cmpheqi(i32 %a) {
  %z = call i32 @llvm.hexagon.A4.cmpheqi(i32 %a, i32 0)
  ret i32 %z
}
-; CHECK: p0 = cmph.eq(r0, #0)
+; CHECK: = cmph.eq({{.*}}, #0)

declare i32 @llvm.hexagon.A4.cmphgtui(i32, i32)
define i32 @A4_cmphgtui(i32 %a) {
  %z = call i32 @llvm.hexagon.A4.cmphgtui(i32 %a, i32 0)
  ret i32 %z
}
-; CHECK: p0 = cmph.gtu(r0, #0)
+; CHECK: = cmph.gtu({{.*}}, #0)

; Compare doublewords
declare i32 @llvm.hexagon.C2.cmpgtp(i64, i64)
@@ -93,21 +96,21 @@ define i32 @C2_cmpgtp(i64 %a, i64 %b) {
  %z = call i32 @llvm.hexagon.C2.cmpgtp(i64 %a, i64 %b)
  ret i32 %z
}
-; CHECK: p0 = cmp.gt(r1:0, r3:2)
+; CHECK: = cmp.gt({{.*}}, {{.*}})

declare i32 @llvm.hexagon.C2.cmpeqp(i64, i64)
define i32 @C2_cmpeqp(i64 %a, i64 %b) {
  %z = call i32 @llvm.hexagon.C2.cmpeqp(i64 %a, i64 %b)
  ret i32 %z
}
-; CHECK: p0 = cmp.eq(r1:0, r3:2)
+; CHECK: = cmp.eq({{.*}}, {{.*}})

declare i32 @llvm.hexagon.C2.cmpgtup(i64, i64)
define i32 @C2_cmpgtup(i64 %a, i64 %b) {
  %z = call i32 @llvm.hexagon.C2.cmpgtup(i64 %a, i64 %b)
  ret i32 %z
}
-; CHECK: p0 = cmp.gtu(r1:0, r3:2)
+; CHECK: = cmp.gtu({{.*}}, {{.*}})

; Compare bitmask
declare i32 @llvm.hexagon.C2.bitsclri(i32, i32)
@@ -115,42 +118,42 @@ define i32 @C2_bitsclri(i32 %a) {
  %z = call i32 @llvm.hexagon.C2.bitsclri(i32 %a, i32 0)
  ret i32 %z
}
-; CHECK: p0 = bitsclr(r0, #0)
+; CHECK: = bitsclr({{.*}}, #0)

declare i32 @llvm.hexagon.C4.nbitsclri(i32, i32)
define i32 @C4_nbitsclri(i32 %a) {
  %z = call i32 @llvm.hexagon.C4.nbitsclri(i32 %a, i32 0)
  ret i32 %z
}
-; CHECK: p0 = !bitsclr(r0, #0)
+; CHECK: = !bitsclr({{.*}}, #0)

declare i32 @llvm.hexagon.C2.bitsset(i32, i32)
define i32 @C2_bitsset(i32 %a, i32 %b) {
  %z = call i32 @llvm.hexagon.C2.bitsset(i32 %a, i32 %b)
  ret i32 %z
}
-; CHECK: p0 = bitsset(r0, r1)
+; CHECK: = bitsset({{.*}}, {{.*}})

declare i32 @llvm.hexagon.C4.nbitsset(i32, i32)
define i32 @C4_nbitsset(i32 %a, i32 %b) {
  %z = call i32 @llvm.hexagon.C4.nbitsset(i32 %a, i32 %b)
  ret i32 %z
}
-; CHECK: p0 = !bitsset(r0, r1)
+; CHECK: = !bitsset({{.*}}, {{.*}})

declare i32 @llvm.hexagon.C2.bitsclr(i32, i32)
define i32 @C2_bitsclr(i32 %a, i32 %b) {
  %z = call i32 @llvm.hexagon.C2.bitsclr(i32 %a, i32 %b)
  ret i32 %z
}
-; CHECK: p0 = bitsclr(r0, r1)
+; CHECK: = bitsclr({{.*}}, {{.*}})

declare i32 @llvm.hexagon.C4.nbitsclr(i32, i32)
define i32 @C4_nbitsclr(i32 %a, i32 %b) {
  %z = call i32 @llvm.hexagon.C4.nbitsclr(i32 %a, i32 %b)
  ret i32 %z
}
-; CHECK: p0 = !bitsclr(r0, r1)
+; CHECK: = !bitsclr({{.*}}, {{.*}})

; Mask generate from predicate
declare i64 @llvm.hexagon.C2.mask(i32)
@@ -158,7 +161,7 @@ define i64 @C2_mask(i32 %a) {
  %z = call i64 @llvm.hexagon.C2.mask(i32 %a)
  ret i64 %z
}
-; CHECK: = mask(p0)
+; CHECK: = mask({{.*}})

; Check for TLB match
declare i32 @llvm.hexagon.A4.tlbmatch(i64, i32)
@@ -166,7 +169,7 @@ define i32 @A4_tlbmatch(i64 %a, i32 %b) {
  %z = call i32 @llvm.hexagon.A4.tlbmatch(i64 %a, i32 %b)
  ret i32 %z
}
-; CHECK: p0 = tlbmatch(r1:0, r2)
+; CHECK: = tlbmatch({{.*}}, {{.*}})

; Test bit
declare i32 @llvm.hexagon.S2.tstbit.i(i32, i32)
@@ -174,28 +177,28 @@ define i32 @S2_tstbit_i(i32 %a) {
  %z = call i32 @llvm.hexagon.S2.tstbit.i(i32 %a, i32 0)
  ret i32 %z
}
-; CHECK: p0 = tstbit(r0, #0)
+; CHECK: = tstbit({{.*}}, #0)

declare i32 @llvm.hexagon.S4.ntstbit.i(i32, i32)
define i32 @S4_ntstbit_i(i32 %a) {
  %z = call i32 @llvm.hexagon.S4.ntstbit.i(i32 %a, i32 0)
  ret i32 %z
}
-; CHECK: p0 = !tstbit(r0, #0)
+; CHECK: = !tstbit({{.*}}, #0)

declare i32 @llvm.hexagon.S2.tstbit.r(i32, i32)
define i32 @S2_tstbit_r(i32 %a, i32 %b) {
  %z = call i32 @llvm.hexagon.S2.tstbit.r(i32 %a, i32 %b)
  ret i32 %z
}
-; CHECK: p0 = tstbit(r0, r1)
+; CHECK: = tstbit({{.*}}, {{.*}})

declare i32 @llvm.hexagon.S4.ntstbit.r(i32, i32)
define i32 @S4_ntstbit_r(i32 %a, i32 %b) {
  %z = call i32 @llvm.hexagon.S4.ntstbit.r(i32 %a, i32 %b)
  ret i32 %z
}
-; CHECK: p0 = !tstbit(r0, r1)
+; CHECK: = !tstbit({{.*}}, {{.*}})

; Vector compare halfwords
declare i32 @llvm.hexagon.A2.vcmpheq(i64, i64)
@@ -203,42 +206,42 @@ define i32 @A2_vcmpheq(i64 %a, i64 %b) {
  %z = call i32 @llvm.hexagon.A2.vcmpheq(i64 %a, i64 %b)
  ret i32 %z
}
-; CHECK: p0 = vcmph.eq(r1:0, r3:2)
+; CHECK: = vcmph.eq({{.*}}, {{.*}})

declare i32 @llvm.hexagon.A2.vcmphgt(i64, i64)
define i32 @A2_vcmphgt(i64 %a, i64 %b) {
  %z = call i32 @llvm.hexagon.A2.vcmphgt(i64 %a, i64 %b)
  ret i32 %z
}
-; CHECK: p0 = vcmph.gt(r1:0, r3:2)
+; CHECK: = vcmph.gt({{.*}}, {{.*}})

declare i32 @llvm.hexagon.A2.vcmphgtu(i64, i64)
define i32 @A2_vcmphgtu(i64 %a, i64 %b) {
  %z = call i32 @llvm.hexagon.A2.vcmphgtu(i64 %a, i64 %b)
  ret i32 %z
}
-; CHECK: p0 = vcmph.gtu(r1:0, r3:2)
+; CHECK: = vcmph.gtu({{.*}}, {{.*}})

declare i32 @llvm.hexagon.A4.vcmpheqi(i64, i32)
define i32 @A4_vcmpheqi(i64 %a) {
  %z = call i32 @llvm.hexagon.A4.vcmpheqi(i64 %a, i32 0)
  ret i32 %z
}
-; CHECK: p0 = vcmph.eq(r1:0, #0)
+; CHECK: = vcmph.eq({{.*}}, #0)

declare i32 @llvm.hexagon.A4.vcmphgti(i64, i32)
define i32 @A4_vcmphgti(i64 %a) {
  %z = call i32 @llvm.hexagon.A4.vcmphgti(i64 %a, i32 0)
  ret i32 %z
}
-; CHECK: p0 = vcmph.gt(r1:0, #0)
+; CHECK: = vcmph.gt({{.*}}, #0)

declare i32 @llvm.hexagon.A4.vcmphgtui(i64, i32)
define i32 @A4_vcmphgtui(i64 %a) {
  %z = call i32 @llvm.hexagon.A4.vcmphgtui(i64 %a, i32 0)
  ret i32 %z
}
-; CHECK: p0 = vcmph.gtu(r1:0, #0)
+; CHECK: = vcmph.gtu({{.*}}, #0)

; Vector compare bytes for any match
declare i32 @llvm.hexagon.A4.vcmpbeq.any(i64, i64)
@@ -246,7 +249,7 @@ define i32 @A4_vcmpbeq_any(i64 %a, i64 %b) {
  %z = call i32 @llvm.hexagon.A4.vcmpbeq.any(i64 %a, i64 %b)
  ret i32 %z
}
-; CHECK: p0 = any8(vcmpb.eq(r1:0, r3:2))
+; CHECK: = any8(vcmpb.eq({{.*}}, {{.*}}))

; Vector compare bytes
declare i32 @llvm.hexagon.A2.vcmpbeq(i64, i64)
@@ -254,42 +257,42 @@ define i32 @A2_vcmpbeq(i64 %a, i64 %b) {
  %z = call i32 @llvm.hexagon.A2.vcmpbeq(i64 %a, i64 %b)
  ret i32 %z
}
-; CHECK: p0 = vcmpb.eq(r1:0, r3:2)
+; CHECK: = vcmpb.eq({{.*}}, {{.*}})

declare i32 @llvm.hexagon.A2.vcmpbgtu(i64, i64)
define i32 @A2_vcmpbgtu(i64 %a, i64 %b) {
  %z = call i32 @llvm.hexagon.A2.vcmpbgtu(i64 %a, i64 %b)
  ret i32 %z
}
-; CHECK: p0 = vcmpb.gtu(r1:0, r3:2)
+; CHECK: = vcmpb.gtu({{.*}}, {{.*}})

declare i32 @llvm.hexagon.A4.vcmpbgt(i64, i64)
define i32 @A4_vcmpbgt(i64 %a, i64 %b) {
  %z = call i32 @llvm.hexagon.A4.vcmpbgt(i64 %a, i64 %b)
  ret i32 %z
}
-; CHECK: p0 = vcmpb.gt(r1:0, r3:2)
+; CHECK: = vcmpb.gt({{.*}}, {{.*}})

declare i32 @llvm.hexagon.A4.vcmpbeqi(i64, i32)
define i32 @A4_vcmpbeqi(i64 %a) {
  %z = call i32 @llvm.hexagon.A4.vcmpbeqi(i64 %a, i32 0)
  ret i32 %z
}
-; CHECK: p0 = vcmpb.eq(r1:0, #0)
+; CHECK: = vcmpb.eq({{.*}}, #0)

declare i32 @llvm.hexagon.A4.vcmpbgti(i64, i32)
define i32 @A4_vcmpbgti(i64 %a) {
  %z = call i32 @llvm.hexagon.A4.vcmpbgti(i64 %a, i32 0)
  ret i32 %z
}
-; CHECK: p0 = vcmpb.gt(r1:0, #0)
+; CHECK: = vcmpb.gt({{.*}}, #0)

declare i32 @llvm.hexagon.A4.vcmpbgtui(i64, i32)
define i32 @A4_vcmpbgtui(i64 %a) {
  %z = call i32 @llvm.hexagon.A4.vcmpbgtui(i64 %a, i32 0)
  ret i32 %z
}
-; CHECK: p0 = vcmpb.gtu(r1:0, #0)
+; CHECK: = vcmpb.gtu({{.*}}, #0)

; Vector compare words
declare i32 @llvm.hexagon.A2.vcmpweq(i64, i64)
@@ -297,42 +300,42 @@ define i32 @A2_vcmpweq(i64 %a, i64 %b) {
  %z = call i32 @llvm.hexagon.A2.vcmpweq(i64 %a, i64 %b)
  ret i32 %z
}
-; CHECK: p0 = vcmpw.eq(r1:0, r3:2)
+; CHECK: = vcmpw.eq({{.*}}, {{.*}})

declare i32 @llvm.hexagon.A2.vcmpwgt(i64, i64)
define i32 @A2_vcmpwgt(i64 %a, i64 %b) {
  %z = call i32 @llvm.hexagon.A2.vcmpwgt(i64 %a, i64 %b)
  ret i32 %z
}
-; CHECK: p0 = vcmpw.gt(r1:0, r3:2)
+; CHECK: = vcmpw.gt({{.*}}, {{.*}})

declare i32 @llvm.hexagon.A2.vcmpwgtu(i64, i64)
define i32 @A2_vcmpwgtu(i64 %a, i64 %b) {
  %z = call i32 @llvm.hexagon.A2.vcmpwgtu(i64 %a, i64 %b)
  ret i32 %z
}
-; CHECK: p0 = vcmpw.gtu(r1:0, r3:2)
+; CHECK: = vcmpw.gtu({{.*}}, {{.*}})

declare i32 @llvm.hexagon.A4.vcmpweqi(i64, i32)
define i32 @A4_vcmpweqi(i64 %a) {
  %z = call i32 @llvm.hexagon.A4.vcmpweqi(i64 %a, i32 0)
  ret i32 %z
}
-; CHECK: p0 = vcmpw.eq(r1:0, #0)
+; CHECK: = vcmpw.eq({{.*}}, #0)

declare i32 @llvm.hexagon.A4.vcmpwgti(i64, i32)
define i32 @A4_vcmpwgti(i64 %a) {
  %z = call i32 @llvm.hexagon.A4.vcmpwgti(i64 %a, i32 0)
  ret i32 %z
}
-; CHECK: p0 = vcmpw.gt(r1:0, #0)
+; CHECK: = vcmpw.gt({{.*}}, #0)

declare i32 @llvm.hexagon.A4.vcmpwgtui(i64, i32)
define i32 @A4_vcmpwgtui(i64 %a) {
  %z = call i32 @llvm.hexagon.A4.vcmpwgtui(i64 %a, i32 0)
  ret i32 %z
}
-; CHECK: p0 = vcmpw.gtu(r1:0, #0)
+; CHECK: = vcmpw.gtu({{.*}}, #0)

; Viterbi pack even and odd predicate bits
declare i32 @llvm.hexagon.C2.vitpack(i32, i32)
@@ -340,7 +343,7 @@ define i32 @C2_vitpack(i32 %a, i32 %b) {
  %z = call i32 @llvm.hexagon.C2.vitpack(i32 %a, i32 %b)
  ret i32 %z
}
-; CHECK: r0 = vitpack(p1, p0)
+; CHECK: = vitpack({{.*}}, {{.*}})

; Vector mux
declare i64 @llvm.hexagon.C2.vmux(i32, i64, i64)
@@ -348,4 +351,4 @@ define i64 @C2_vmux(i32 %a, i64 %b, i64 %c) {
  %z = call i64 @llvm.hexagon.C2.vmux(i32 %a, i64 %b, i64 %c)
  ret i64 %z
}
-; CHECK: = vmux(p0, r3:2, r5:4)
+; CHECK: = vmux({{.*}}, {{.*}}, {{.*}})
diff --git a/llvm/test/CodeGen/Hexagon/intrinsics/xtype_shift.ll b/llvm/test/CodeGen/Hexagon/intrinsics/xtype_shift.ll
index c84999b..1a65f44 100644
--- a/llvm/test/CodeGen/Hexagon/intrinsics/xtype_shift.ll
+++ b/llvm/test/CodeGen/Hexagon/intrinsics/xtype_shift.ll
@@ -1,48 +1,51 @@
; RUN: llc -march=hexagon -O0 < %s | FileCheck %s
+; RUN: llc -march=hexagon -O0 < %s | FileCheck -check-prefix=CHECK-CALL %s
; Hexagon Programmer's Reference Manual 11.10.8 XTYPE/SHIFT

+; CHECK-CALL-NOT: call
+
; Shift by immediate
declare i64 @llvm.hexagon.S2.asr.i.p(i64, i32)
define i64 @S2_asr_i_p(i64 %a) {
  %z = call i64 @llvm.hexagon.S2.asr.i.p(i64 %a, i32 0)
  ret i64 %z
}
-; CHECK: r1:0 = asr(r1:0, #0)
+; CHECK: = asr({{.*}}, #0)

declare i64 @llvm.hexagon.S2.lsr.i.p(i64, i32)
define i64 @S2_lsr_i_p(i64 %a) {
  %z = call i64 @llvm.hexagon.S2.lsr.i.p(i64 %a, i32 0)
  ret i64 %z
}
-; CHECK: r1:0 = lsr(r1:0, #0)
+; CHECK: = lsr({{.*}}, #0)

declare i64 @llvm.hexagon.S2.asl.i.p(i64, i32)
define i64 @S2_asl_i_p(i64 %a) {
  %z = call i64 @llvm.hexagon.S2.asl.i.p(i64 %a, i32 0)
  ret i64 %z
}
-; CHECK: r1:0 = asl(r1:0, #0)
+; CHECK: = asl({{.*}}, #0)

declare i32 @llvm.hexagon.S2.asr.i.r(i32, i32)
define i32 @S2_asr_i_r(i32 %a) {
  %z = call i32 @llvm.hexagon.S2.asr.i.r(i32 %a, i32 0)
  ret i32 %z
}
-; CHECK: r0 = asr(r0, #0)
+; CHECK: = asr({{.*}}, #0)

declare i32 @llvm.hexagon.S2.lsr.i.r(i32, i32)
define i32 @S2_lsr_i_r(i32 %a) {
  %z = call i32 @llvm.hexagon.S2.lsr.i.r(i32 %a, i32 0)
  ret i32 %z
}
-; CHECK: r0 = lsr(r0, #0)
+; CHECK: = lsr({{.*}}, #0)

declare i32 @llvm.hexagon.S2.asl.i.r(i32, i32)
define i32 @S2_asl_i_r(i32 %a) {
  %z = call i32 @llvm.hexagon.S2.asl.i.r(i32 %a, i32 0)
  ret i32 %z
}
-; CHECK: r0 = asl(r0, #0)
+; CHECK: = asl({{.*}}, #0)

; Shift by immediate and accumulate
declare i64 @llvm.hexagon.S2.asr.i.p.nac(i64, i64, i32)
@@ -50,84 +53,84 @@ define i64 @S2_asr_i_p_nac(i64 %a, i64 %b) {
  %z = call i64 @llvm.hexagon.S2.asr.i.p.nac(i64 %a, i64 %b, i32 0)
  ret i64 %z
}
-; CHECK: r1:0 -= asr(r3:2, #0)
+; CHECK: -= asr({{.*}}, #0)

declare i64 @llvm.hexagon.S2.lsr.i.p.nac(i64, i64, i32)
define i64 @S2_lsr_i_p_nac(i64 %a, i64 %b) {
  %z = call i64 @llvm.hexagon.S2.lsr.i.p.nac(i64 %a, i64 %b, i32 0)
  ret i64 %z
}
-; CHECK: r1:0 -= lsr(r3:2, #0)
+; CHECK: -= lsr({{.*}}, #0)

declare i64 @llvm.hexagon.S2.asl.i.p.nac(i64, i64, i32)
define i64 @S2_asl_i_p_nac(i64 %a, i64 %b) {
  %z = call i64 @llvm.hexagon.S2.asl.i.p.nac(i64 %a, i64 %b, i32 0)
  ret i64 %z
}
-; CHECK: r1:0 -= asl(r3:2, #0)
+; CHECK: -= asl({{.*}}, #0)

declare i64 @llvm.hexagon.S2.asr.i.p.acc(i64, i64, i32)
define i64 @S2_asr_i_p_acc(i64 %a, i64 %b) {
  %z = call i64 @llvm.hexagon.S2.asr.i.p.acc(i64 %a, i64 %b, i32 0)
  ret i64 %z
}
-; CHECK: r1:0 += asr(r3:2, #0)
+; CHECK: += asr({{.*}}, #0)

declare i64 @llvm.hexagon.S2.lsr.i.p.acc(i64, i64, i32)
define i64 @S2_lsr_i_p_acc(i64 %a, i64 %b) {
  %z = call i64 @llvm.hexagon.S2.lsr.i.p.acc(i64 %a, i64 %b, i32 0)
  ret i64 %z
}
-; CHECK: r1:0 += lsr(r3:2, #0)
+; CHECK: += lsr({{.*}}, #0)

declare i64 @llvm.hexagon.S2.asl.i.p.acc(i64, i64, i32)
define i64 @S2_asl_i_p_acc(i64 %a, i64 %b) {
  %z = call i64 @llvm.hexagon.S2.asl.i.p.acc(i64 %a, i64 %b, i32 0)
  ret i64 %z
}
-; CHECK: r1:0 += asl(r3:2, #0)
+; CHECK: += asl({{.*}}, #0)

declare i32 @llvm.hexagon.S2.asr.i.r.nac(i32, i32, i32)
define i32 @S2_asr_i_r_nac(i32 %a, i32 %b) {
  %z = call i32 @llvm.hexagon.S2.asr.i.r.nac(i32 %a, i32 %b, i32 0)
  ret i32 %z
}
-; CHECK: r0 -= asr(r1, #0)
+; CHECK: -= asr({{.*}}, #0)

declare i32 @llvm.hexagon.S2.lsr.i.r.nac(i32, i32, i32)
define i32 @S2_lsr_i_r_nac(i32 %a, i32 %b) {
  %z = call i32 @llvm.hexagon.S2.lsr.i.r.nac(i32 %a, i32 %b, i32 0)
  ret i32 %z
}
-; CHECK: r0 -= lsr(r1, #0)
+; CHECK: -= lsr({{.*}}, #0)

declare i32 @llvm.hexagon.S2.asl.i.r.nac(i32, i32, i32)
define i32 @S2_asl_i_r_nac(i32 %a, i32 %b) {
  %z = call i32 @llvm.hexagon.S2.asl.i.r.nac(i32 %a, i32 %b, i32 0)
  ret i32 %z
}
-; CHECK: r0 -= asl(r1, #0)
+; CHECK: -= asl({{.*}}, #0)

declare i32 @llvm.hexagon.S2.asr.i.r.acc(i32, i32, i32)
define i32 @S2_asr_i_r_acc(i32 %a, i32 %b) {
  %z = call i32 @llvm.hexagon.S2.asr.i.r.acc(i32 %a, i32 %b, i32 0)
  ret i32 %z
}
-; CHECK: r0 += asr(r1, #0)
+; CHECK: += asr({{.*}}, #0)

declare i32 @llvm.hexagon.S2.lsr.i.r.acc(i32, i32, i32)
define i32 @S2_lsr_i_r_acc(i32 %a, i32 %b) {
  %z = call i32 @llvm.hexagon.S2.lsr.i.r.acc(i32 %a, i32 %b, i32 0)
  ret i32 %z
}
-; CHECK: r0 += lsr(r1, #0)
+; CHECK: += lsr({{.*}}, #0)

declare i32 @llvm.hexagon.S2.asl.i.r.acc(i32, i32, i32)
define i32 @S2_asl_i_r_acc(i32 %a, i32 %b) {
  %z = call i32 @llvm.hexagon.S2.asl.i.r.acc(i32 %a, i32 %b, i32 0)
  ret i32 %z
}
-; CHECK: r0 += asl(r1, #0)
+; CHECK: += asl({{.*}}, #0)

; Shift by immediate and add
declare i32 @llvm.hexagon.S4.addi.asl.ri(i32, i32, i32)
@@ -135,35 +138,35 @@ define i32 @S4_addi_asl_ri(i32 %a) {
  %z = call i32 @llvm.hexagon.S4.addi.asl.ri(i32 0, i32 %a, i32 0)
  ret i32 %z
}
-; CHECK: r0 = add(#0, asl(r0, #0))
+; CHECK: = add(#0, asl({{.*}}, #0))

declare i32 @llvm.hexagon.S4.subi.asl.ri(i32, i32, i32)
define i32 @S4_subi_asl_ri(i32 %a) {
  %z = call i32 @llvm.hexagon.S4.subi.asl.ri(i32 0, i32 %a, i32 0)
  ret i32 %z
}
-; CHECK: r0 = sub(#0, asl(r0, #0))
+; CHECK: = sub(#0, asl({{.*}}, #0))

declare i32 @llvm.hexagon.S4.addi.lsr.ri(i32, i32, i32)
define i32 @S4_addi_lsr_ri(i32 %a) {
  %z = call i32 @llvm.hexagon.S4.addi.lsr.ri(i32 0, i32 %a, i32 0)
  ret i32 %z
}
-; CHECK: r0 = add(#0, lsr(r0, #0))
+; CHECK: = add(#0, lsr({{.*}}, #0))

declare i32 @llvm.hexagon.S4.subi.lsr.ri(i32, i32, i32)
define i32 @S4_subi_lsr_ri(i32 %a) {
  %z = call i32 @llvm.hexagon.S4.subi.lsr.ri(i32 0, i32 %a, i32 0)
  ret i32 %z
}
-; CHECK: r0 = sub(#0, lsr(r0, #0))
+; CHECK: = sub(#0, lsr({{.*}}, #0))

declare i32 @llvm.hexagon.S2.addasl.rrri(i32, i32, i32)
define i32 @S2_addasl_rrri(i32 %a, i32 %b) {
  %z = call i32 @llvm.hexagon.S2.addasl.rrri(i32 %a, i32 %b, i32 0)
  ret i32 %z
}
-; CHECK: r0 = addasl(r0, r1, #0)
+; CHECK: = addasl({{.*}}, {{.*}}, #0)

; Shift by immediate and logical
declare i64 @llvm.hexagon.S2.asr.i.p.and(i64, i64, i32)
@@ -171,140 +174,140 @@ define i64 @S2_asr_i_p_and(i64 %a, i64 %b) {
  %z = call i64 @llvm.hexagon.S2.asr.i.p.and(i64 %a, i64 %b, i32 0)
  ret i64 %z
}
-; CHECK: r1:0 &= asr(r3:2, #0)
+; CHECK: &= asr({{.*}}, #0)

declare i64 @llvm.hexagon.S2.lsr.i.p.and(i64, i64, i32)
define i64 @S2_lsr_i_p_and(i64 %a, i64 %b) {
  %z = call i64 @llvm.hexagon.S2.lsr.i.p.and(i64 %a, i64 %b, i32 0)
  ret i64 %z
}
-; CHECK: r1:0 &= lsr(r3:2, #0)
+; CHECK: {{.*}} &= lsr({{.*}}, #0)

declare i64 @llvm.hexagon.S2.asl.i.p.and(i64, i64, i32)
define i64 @S2_asl_i_p_and(i64 %a, i64 %b) {
  %z = call i64 @llvm.hexagon.S2.asl.i.p.and(i64 %a, i64 %b, i32 0)
  ret i64 %z
}
-; CHECK: r1:0 &= asl(r3:2, #0)
+; CHECK: &= asl({{.*}}, #0)

declare i64 @llvm.hexagon.S2.asr.i.p.or(i64, i64, i32)
define i64 @S2_asr_i_p_or(i64 %a, i64 %b) {
  %z = call i64 @llvm.hexagon.S2.asr.i.p.or(i64 %a, i64 %b, i32 0)
  ret i64 %z
}
-; CHECK: r1:0 |= asr(r3:2, #0)
+; CHECK: |= asr({{.*}}, #0)

declare i64 @llvm.hexagon.S2.lsr.i.p.or(i64, i64, i32)
define i64 @S2_lsr_i_p_or(i64 %a, i64 %b) {
  %z = call i64 @llvm.hexagon.S2.lsr.i.p.or(i64 %a, i64 %b, i32 0)
  ret i64 %z
}
-; CHECK: r1:0 |= lsr(r3:2, #0)
+; CHECK: |= lsr({{.*}}, #0)

declare i64 @llvm.hexagon.S2.asl.i.p.or(i64, i64, i32)
define i64 @S2_asl_i_p_or(i64 %a, i64 %b) {
  %z = call i64 @llvm.hexagon.S2.asl.i.p.or(i64 %a, i64 %b, i32 0)
  ret i64 %z
}
-; CHECK: r1:0 |= asl(r3:2, #0)
+; CHECK: |= asl({{.*}}, #0)

declare i64 @llvm.hexagon.S2.lsr.i.p.xacc(i64, i64, i32)
define i64 @S2_lsr_i_p_xacc(i64 %a, i64 %b) {
  %z = call i64 @llvm.hexagon.S2.lsr.i.p.xacc(i64 %a, i64 %b, i32 0)
  ret i64 %z
}
-; CHECK: r1:0 ^= lsr(r3:2, #0)
+; CHECK: ^= lsr({{.*}}, #0)

declare i64 @llvm.hexagon.S2.asl.i.p.xacc(i64, i64, i32)
define i64 @S2_asl_i_p_xacc(i64 %a, i64 %b) {
  %z = call i64 @llvm.hexagon.S2.asl.i.p.xacc(i64 %a, i64 %b, i32 0)
  ret i64 %z
}
-; CHECK: r1:0 ^= asl(r3:2, #0)
+; CHECK: ^= asl({{.*}}, #0)

declare i32 @llvm.hexagon.S2.asr.i.r.and(i32, i32, i32)
define i32 @S2_asr_i_r_and(i32 %a, i32 %b) {
  %z = call i32 @llvm.hexagon.S2.asr.i.r.and(i32 %a, i32 %b, i32 0)
  ret i32 %z
}
-; CHECK: r0 &= asr(r1, #0)
+; CHECK: &= asr({{.*}}, #0)

declare i32 @llvm.hexagon.S2.lsr.i.r.and(i32, i32, i32)
define i32 @S2_lsr_i_r_and(i32 %a, i32 %b) {
  %z = call i32 @llvm.hexagon.S2.lsr.i.r.and(i32 %a, i32 %b, i32 0)
  ret i32 %z
}
-; CHECK: r0 &= lsr(r1, #0)
+; CHECK: &= lsr({{.*}}, #0)

declare i32 @llvm.hexagon.S2.asl.i.r.and(i32, i32, i32)
define i32 @S2_asl_i_r_and(i32 %a, i32 %b) {
  %z = call i32 @llvm.hexagon.S2.asl.i.r.and(i32 %a, i32 %b, i32 0)
  ret i32 %z
}
-; CHECK: r0 &= asl(r1, #0)
+; CHECK: &= asl({{.*}}, #0)

declare i32 @llvm.hexagon.S2.asr.i.r.or(i32, i32, i32)
define i32 @S2_asr_i_r_or(i32 %a, i32 %b) {
  %z = call i32 @llvm.hexagon.S2.asr.i.r.or(i32 %a, i32 %b, i32 0)
  ret i32 %z
}
-; CHECK: r0 |= asr(r1, #0)
+; CHECK: |= asr({{.*}}, #0)

declare i32 @llvm.hexagon.S2.lsr.i.r.or(i32, i32, i32)
define i32 @S2_lsr_i_r_or(i32 %a, i32 %b) {
  %z = call i32 @llvm.hexagon.S2.lsr.i.r.or(i32 %a, i32 %b, i32 0)
  ret i32 %z
}
-; CHECK: r0 |= lsr(r1, #0)
+; CHECK: |= lsr({{.*}}, #0)

declare i32 @llvm.hexagon.S2.asl.i.r.or(i32, i32, i32)
define i32 @S2_asl_i_r_or(i32%a, i32 %b) {
  %z = call i32 @llvm.hexagon.S2.asl.i.r.or(i32 %a, i32 %b, i32 0)
  ret i32 %z
}
-; CHECK: r0 |= asl(r1, #0)
+; CHECK: |= asl({{.*}}, #0)

declare i32 @llvm.hexagon.S2.lsr.i.r.xacc(i32, i32, i32)
define i32 @S2_lsr_i_r_xacc(i32 %a, i32 %b) {
  %z = call i32 @llvm.hexagon.S2.lsr.i.r.xacc(i32%a, i32 %b, i32 0)
  ret i32 %z
}
-; CHECK: r0 ^= lsr(r1, #0)
+; CHECK: ^= lsr({{.*}}, #0)

declare i32 @llvm.hexagon.S2.asl.i.r.xacc(i32, i32, i32)
define i32 @S2_asl_i_r_xacc(i32 %a, i32 %b) {
  %z = call i32 @llvm.hexagon.S2.asl.i.r.xacc(i32 %a, i32 %b, i32 0)
  ret i32 %z
}
-; CHECK: r0 ^= asl(r1, #0)
+; CHECK: ^= asl({{.*}}, #0)

declare i32 @llvm.hexagon.S4.andi.asl.ri(i32, i32, i32)
define i32 @S4_andi_asl_ri(i32 %a) {
  %z = call i32 @llvm.hexagon.S4.andi.asl.ri(i32 0, i32 %a, i32 0)
  ret i32 %z
}
-; CHECK: r0 = and(#0, asl(r0, #0))
+; CHECK: = and(#0, asl({{.*}}, #0))

declare i32 @llvm.hexagon.S4.ori.asl.ri(i32, i32, i32)
define i32 @S4_ori_asl_ri(i32 %a) {
  %z = call i32 @llvm.hexagon.S4.ori.asl.ri(i32 0, i32 %a, i32 0)
  ret i32 %z
}
-; CHECK: r0 = or(#0, asl(r0, #0))
+; CHECK: = or(#0, asl({{.*}}, #0))

declare i32 @llvm.hexagon.S4.andi.lsr.ri(i32, i32, i32)
define i32 @S4_andi_lsr_ri(i32 %a) {
  %z = call i32 @llvm.hexagon.S4.andi.lsr.ri(i32 0, i32 %a, i32 0)
  ret i32 %z
}
-; CHECK: r0 = and(#0, lsr(r0, #0))
+; CHECK: = and(#0, lsr({{.*}}, #0))

declare i32 @llvm.hexagon.S4.ori.lsr.ri(i32, i32, i32)
define i32 @S4_ori_lsr_ri(i32 %a) {
  %z = call i32 @llvm.hexagon.S4.ori.lsr.ri(i32 0, i32 %a, i32 0)
  ret i32 %z
}
-; CHECK: r0 = or(#0, lsr(r0, #0))
+; CHECK: = or(#0, lsr({{.*}}, #0))

; Shift right by immediate with rounding
declare i64 @llvm.hexagon.S2.asr.i.p.rnd(i64, i32)
@@ -312,14 +315,14 @@ define i64 @S2_asr_i_p_rnd(i64 %a) {
  %z = call i64 @llvm.hexagon.S2.asr.i.p.rnd(i64 %a, i32 0)
  ret i64 %z
}
-; CHECK: r1:0 = asr(r1:0, #0):rnd
+; CHECK: = asr({{.*}}, #0):rnd

declare i32 @llvm.hexagon.S2.asr.i.r.rnd(i32, i32)
define i32 @S2_asr_i_r_rnd(i32 %a) {
  %z = call i32 @llvm.hexagon.S2.asr.i.r.rnd(i32 %a, i32 0)
  ret i32 %z
}
-; CHECK: r0 = asr(r0, #0):rnd
+; CHECK: = asr({{.*}}, #0):rnd

; Shift left by immediate with saturation
declare i32 @llvm.hexagon.S2.asl.i.r.sat(i32, i32)
@@ -327,7 +330,7 @@ define i32 @S2_asl_i_r_sat(i32 %a) {
  %z = call i32 @llvm.hexagon.S2.asl.i.r.sat(i32 %a, i32 0)
  ret i32 %z
}
-; CHECK: r0 = asl(r0, #0):sat
+; CHECK: = asl({{.*}}, #0):sat

; Shift by register
declare i64 @llvm.hexagon.S2.asr.r.p(i64, i32)
@@ -335,63 +338,63 @@ define i64 @S2_asr_r_p(i64 %a, i32 %b) {
  %z = call i64 @llvm.hexagon.S2.asr.r.p(i64 %a, i32 %b)
  ret i64 %z
}
-; CHECK: r1:0 = asr(r1:0, r2)
+; CHECK: = asr({{.*}}, {{.*}})

declare i64 @llvm.hexagon.S2.lsr.r.p(i64, i32)
define i64 @S2_lsr_r_p(i64 %a, i32 %b) {
  %z = call i64 @llvm.hexagon.S2.lsr.r.p(i64 %a, i32 %b)
  ret i64 %z
}
-; CHECK: r1:0 = lsr(r1:0, r2)
+; CHECK: = lsr({{.*}}, {{.*}})

declare i64 @llvm.hexagon.S2.asl.r.p(i64, i32)
define i64 @S2_asl_r_p(i64 %a, i32 %b) {
  %z = call i64 @llvm.hexagon.S2.asl.r.p(i64 %a, i32 %b)
  ret i64 %z
}
-; CHECK: r1:0 = asl(r1:0, r2)
+; CHECK: = asl({{.*}}, {{.*}})

declare i64 @llvm.hexagon.S2.lsl.r.p(i64, i32)
define i64 @S2_lsl_r_p(i64 %a, i32 %b) {
  %z = call i64 @llvm.hexagon.S2.lsl.r.p(i64 %a, i32 %b)
  ret i64 %z
}
-; CHECK: r1:0 = lsl(r1:0, r2)
+; CHECK: = lsl({{.*}}, {{.*}})

declare i32 @llvm.hexagon.S2.asr.r.r(i32, i32)
define i32 @S2_asr_r_r(i32 %a, i32 %b) {
  %z = call i32 @llvm.hexagon.S2.asr.r.r(i32 %a, i32 %b)
  ret i32 %z
}
-; CHECK: r0 = asr(r0, r1)
+; CHECK: = asr({{.*}}, {{.*}})

declare i32 @llvm.hexagon.S2.lsr.r.r(i32, i32)
define i32 @S2_lsr_r_r(i32 %a, i32 %b) {
  %z = call i32 @llvm.hexagon.S2.lsr.r.r(i32 %a, i32 %b)
  ret i32 %z
}
-; CHECK: r0 = lsr(r0, r1)
+; CHECK: = lsr({{.*}}, {{.*}})

declare i32 @llvm.hexagon.S2.asl.r.r(i32, i32)
define i32 @S2_asl_r_r(i32 %a, i32 %b) {
  %z = call i32 @llvm.hexagon.S2.asl.r.r(i32 %a, i32 %b)
  ret i32 %z
}
-; CHECK: r0 = asl(r0, r1)
+; CHECK: = asl({{.*}}, {{.*}})

declare i32 @llvm.hexagon.S2.lsl.r.r(i32, i32)
define i32 @S2_lsl_r_r(i32 %a, i32 %b) {
  %z = call i32 @llvm.hexagon.S2.lsl.r.r(i32 %a, i32 %b)
  ret i32 %z
}
-; CHECK: r0 = lsl(r0, r1)
+; CHECK: = lsl({{.*}}, {{.*}})

declare i32 @llvm.hexagon.S4.lsli(i32, i32)
define i32 @S4_lsli(i32 %a) {
  %z = call i32 @llvm.hexagon.S4.lsli(i32 0, i32 %a)
  ret i32 %z
}
-; CHECK: r0 = lsl(#0, r0)
+; CHECK: = lsl(#0, {{.*}})

; Shift by register and accumulate
declare i64 @llvm.hexagon.S2.asr.r.p.nac(i64, i64, i32)
@@ -399,112 +402,112 @@ define i64 @S2_asr_r_p_nac(i64 %a, i64 %b, i32 %c) {
  %z = call i64 @llvm.hexagon.S2.asr.r.p.nac(i64 %a, i64 %b, i32 %c)
  ret i64 %z
}
-; CHECK: r1:0 -= asr(r3:2, r4)
+; CHECK: -= asr({{.*}}, r4)

declare
i64 @llvm.hexagon.S2.lsr.r.p.nac(i64, i64, i32) define i64 @S2_lsr_r_p_nac(i64 %a, i64 %b, i32 %c) { %z = call i64 @llvm.hexagon.S2.lsr.r.p.nac(i64 %a, i64 %b, i32 %c) ret i64 %z } -; CHECK: r1:0 -= lsr(r3:2, r4) +; CHECK: -= lsr({{.*}}, r4) declare i64 @llvm.hexagon.S2.asl.r.p.nac(i64, i64, i32) define i64 @S2_asl_r_p_nac(i64 %a, i64 %b, i32 %c) { %z = call i64 @llvm.hexagon.S2.asl.r.p.nac(i64 %a, i64 %b, i32 %c) ret i64 %z } -; CHECK: r1:0 -= asl(r3:2, r4) +; CHECK: -= asl({{.*}}, r4) declare i64 @llvm.hexagon.S2.lsl.r.p.nac(i64, i64, i32) define i64 @S2_lsl_r_p_nac(i64 %a, i64 %b, i32 %c) { %z = call i64 @llvm.hexagon.S2.lsl.r.p.nac(i64 %a, i64 %b, i32 %c) ret i64 %z } -; CHECK: r1:0 -= lsl(r3:2, r4) +; CHECK: -= lsl({{.*}}, r4) declare i64 @llvm.hexagon.S2.asr.r.p.acc(i64, i64, i32) define i64 @S2_asr_r_p_acc(i64 %a, i64 %b, i32 %c) { %z = call i64 @llvm.hexagon.S2.asr.r.p.acc(i64 %a, i64 %b, i32 %c) ret i64 %z } -; CHECK: r1:0 += asr(r3:2, r4) +; CHECK: += asr({{.*}}, r4) declare i64 @llvm.hexagon.S2.lsr.r.p.acc(i64, i64, i32) define i64 @S2_lsr_r_p_acc(i64 %a, i64 %b, i32 %c) { %z = call i64 @llvm.hexagon.S2.lsr.r.p.acc(i64 %a, i64 %b, i32 %c) ret i64 %z } -; CHECK: r1:0 += lsr(r3:2, r4) +; CHECK: += lsr({{.*}}, r4) declare i64 @llvm.hexagon.S2.asl.r.p.acc(i64, i64, i32) define i64 @S2_asl_r_p_acc(i64 %a, i64 %b, i32 %c) { %z = call i64 @llvm.hexagon.S2.asl.r.p.acc(i64 %a, i64 %b, i32 %c) ret i64 %z } -; CHECK: r1:0 += asl(r3:2, r4) +; CHECK: += asl({{.*}}, r4) declare i64 @llvm.hexagon.S2.lsl.r.p.acc(i64, i64, i32) define i64 @S2_lsl_r_p_acc(i64 %a, i64 %b, i32 %c) { %z = call i64 @llvm.hexagon.S2.lsl.r.p.acc(i64 %a, i64 %b, i32 %c) ret i64 %z } -; CHECK: r1:0 += lsl(r3:2, r4) +; CHECK: += lsl({{.*}}, r4) declare i32 @llvm.hexagon.S2.asr.r.r.nac(i32, i32, i32) define i32 @S2_asr_r_r_nac(i32 %a, i32 %b, i32 %c) { %z = call i32 @llvm.hexagon.S2.asr.r.r.nac(i32 %a, i32 %b, i32 %c) ret i32 %z } -; CHECK: r0 -= asr(r1, r2) +; CHECK: -= asr({{.*}}, {{.*}}) declare i32 @llvm.hexagon.S2.lsr.r.r.nac(i32, i32, i32) define i32 @S2_lsr_r_r_nac(i32 %a, i32 %b, i32 %c) { %z = call i32 @llvm.hexagon.S2.lsr.r.r.nac(i32 %a, i32 %b, i32 %c) ret i32 %z } -; CHECK: r0 -= lsr(r1, r2) +; CHECK: -= lsr({{.*}}, {{.*}}) declare i32 @llvm.hexagon.S2.asl.r.r.nac(i32, i32, i32) define i32 @S2_asl_r_r_nac(i32 %a, i32 %b, i32 %c) { %z = call i32 @llvm.hexagon.S2.asl.r.r.nac(i32 %a, i32 %b, i32 %c) ret i32 %z } -; CHECK: r0 -= asl(r1, r2) +; CHECK: -= asl({{.*}}, {{.*}}) declare i32 @llvm.hexagon.S2.lsl.r.r.nac(i32, i32, i32) define i32 @S2_lsl_r_r_nac(i32 %a, i32 %b, i32 %c) { %z = call i32 @llvm.hexagon.S2.lsl.r.r.nac(i32 %a, i32 %b, i32 %c) ret i32 %z } -; CHECK: r0 -= lsl(r1, r2) +; CHECK: -= lsl({{.*}}, {{.*}}) declare i32 @llvm.hexagon.S2.asr.r.r.acc(i32, i32, i32) define i32 @S2_asr_r_r_acc(i32 %a, i32 %b, i32 %c) { %z = call i32 @llvm.hexagon.S2.asr.r.r.acc(i32 %a, i32 %b, i32 %c) ret i32 %z } -; CHECK: r0 += asr(r1, r2) +; CHECK: += asr({{.*}}, {{.*}}) declare i32 @llvm.hexagon.S2.lsr.r.r.acc(i32, i32, i32) define i32 @S2_lsr_r_r_acc(i32 %a, i32 %b, i32 %c) { %z = call i32 @llvm.hexagon.S2.lsr.r.r.acc(i32 %a, i32 %b, i32 %c) ret i32 %z } -; CHECK: r0 += lsr(r1, r2) +; CHECK: += lsr({{.*}}, {{.*}}) declare i32 @llvm.hexagon.S2.asl.r.r.acc(i32, i32, i32) define i32 @S2_asl_r_r_acc(i32 %a, i32 %b, i32 %c) { %z = call i32 @llvm.hexagon.S2.asl.r.r.acc(i32 %a, i32 %b, i32 %c) ret i32 %z } -; CHECK: r0 += asl(r1, r2) +; CHECK: += asl({{.*}}, {{.*}}) declare i32 @llvm.hexagon.S2.lsl.r.r.acc(i32, i32, i32) define 
i32 @S2_lsl_r_r_acc(i32 %a, i32 %b, i32 %c) { %z = call i32 @llvm.hexagon.S2.lsl.r.r.acc(i32 %a, i32 %b, i32 %c) ret i32 %z } -; CHECK: r0 += lsl(r1, r2) +; CHECK: += lsl({{.*}}, {{.*}}) ; Shift by register and logical declare i64 @llvm.hexagon.S2.asr.r.p.or(i64, i64, i32) @@ -512,112 +515,112 @@ define i64 @S2_asr_r_p_or(i64 %a, i64 %b, i32 %c) { %z = call i64 @llvm.hexagon.S2.asr.r.p.or(i64 %a, i64 %b, i32 %c) ret i64 %z } -; CHECK: r1:0 |= asr(r3:2, r4) +; CHECK: |= asr({{.*}}, r4) declare i64 @llvm.hexagon.S2.lsr.r.p.or(i64, i64, i32) define i64 @S2_lsr_r_p_or(i64 %a, i64 %b, i32 %c) { %z = call i64 @llvm.hexagon.S2.lsr.r.p.or(i64 %a, i64 %b, i32 %c) ret i64 %z } -; CHECK: r1:0 |= lsr(r3:2, r4) +; CHECK: |= lsr({{.*}}, r4) declare i64 @llvm.hexagon.S2.asl.r.p.or(i64, i64, i32) define i64 @S2_asl_r_p_or(i64 %a, i64 %b, i32 %c) { %z = call i64 @llvm.hexagon.S2.asl.r.p.or(i64 %a, i64 %b, i32 %c) ret i64 %z } -; CHECK: r1:0 |= asl(r3:2, r4) +; CHECK: |= asl({{.*}}, r4) declare i64 @llvm.hexagon.S2.lsl.r.p.or(i64, i64, i32) define i64 @S2_lsl_r_p_or(i64 %a, i64 %b, i32 %c) { %z = call i64 @llvm.hexagon.S2.lsl.r.p.or(i64 %a, i64 %b, i32 %c) ret i64 %z } -; CHECK: r1:0 |= lsl(r3:2, r4) +; CHECK: |= lsl({{.*}}, r4) declare i64 @llvm.hexagon.S2.asr.r.p.and(i64, i64, i32) define i64 @S2_asr_r_p_and(i64 %a, i64 %b, i32 %c) { %z = call i64 @llvm.hexagon.S2.asr.r.p.and(i64 %a, i64 %b, i32 %c) ret i64 %z } -; CHECK: r1:0 &= asr(r3:2, r4) +; CHECK: &= asr({{.*}}, r4) declare i64 @llvm.hexagon.S2.lsr.r.p.and(i64, i64, i32) define i64 @S2_lsr_r_p_and(i64 %a, i64 %b, i32 %c) { %z = call i64 @llvm.hexagon.S2.lsr.r.p.and(i64 %a, i64 %b, i32 %c) ret i64 %z } -; CHECK: r1:0 &= lsr(r3:2, r4) +; CHECK: &= lsr({{.*}}, r4) declare i64 @llvm.hexagon.S2.asl.r.p.and(i64, i64, i32) define i64 @S2_asl_r_p_and(i64 %a, i64 %b, i32 %c) { %z = call i64 @llvm.hexagon.S2.asl.r.p.and(i64 %a, i64 %b, i32 %c) ret i64 %z } -; CHECK: r1:0 &= asl(r3:2, r4) +; CHECK: &= asl({{.*}}, r4) declare i64 @llvm.hexagon.S2.lsl.r.p.and(i64, i64, i32) define i64 @S2_lsl_r_p_and(i64 %a, i64 %b, i32 %c) { %z = call i64 @llvm.hexagon.S2.lsl.r.p.and(i64 %a, i64 %b, i32 %c) ret i64 %z } -; CHECK: r1:0 &= lsl(r3:2, r4) +; CHECK: &= lsl({{.*}}, r4) declare i32 @llvm.hexagon.S2.asr.r.r.or(i32, i32, i32) define i32 @S2_asr_r_r_or(i32 %a, i32 %b, i32 %c) { %z = call i32 @llvm.hexagon.S2.asr.r.r.or(i32 %a, i32 %b, i32 %c) ret i32 %z } -; CHECK: r0 |= asr(r1, r2) +; CHECK: |= asr({{.*}}, {{.*}}) declare i32 @llvm.hexagon.S2.lsr.r.r.or(i32, i32, i32) define i32 @S2_lsr_r_r_or(i32 %a, i32 %b, i32 %c) { %z = call i32 @llvm.hexagon.S2.lsr.r.r.or(i32 %a, i32 %b, i32 %c) ret i32 %z } -; CHECK: r0 |= lsr(r1, r2) +; CHECK: |= lsr({{.*}}, {{.*}}) declare i32 @llvm.hexagon.S2.asl.r.r.or(i32, i32, i32) define i32 @S2_asl_r_r_or(i32%a, i32 %b, i32 %c) { %z = call i32 @llvm.hexagon.S2.asl.r.r.or(i32 %a, i32 %b, i32 %c) ret i32 %z } -; CHECK: r0 |= asl(r1, r2) +; CHECK: |= asl({{.*}}, {{.*}}) declare i32 @llvm.hexagon.S2.lsl.r.r.or(i32, i32, i32) define i32 @S2_lsl_r_r_or(i32%a, i32 %b, i32 %c) { %z = call i32 @llvm.hexagon.S2.lsl.r.r.or(i32 %a, i32 %b, i32 %c) ret i32 %z } -; CHECK: r0 |= lsl(r1, r2) +; CHECK: |= lsl({{.*}}, {{.*}}) declare i32 @llvm.hexagon.S2.asr.r.r.and(i32, i32, i32) define i32 @S2_asr_r_r_and(i32 %a, i32 %b, i32 %c) { %z = call i32 @llvm.hexagon.S2.asr.r.r.and(i32 %a, i32 %b, i32 %c) ret i32 %z } -; CHECK: r0 &= asr(r1, r2) +; CHECK: &= asr({{.*}}, {{.*}}) declare i32 @llvm.hexagon.S2.lsr.r.r.and(i32, i32, i32) define i32 @S2_lsr_r_r_and(i32 
%a, i32 %b, i32 %c) { %z = call i32 @llvm.hexagon.S2.lsr.r.r.and(i32 %a, i32 %b, i32 %c) ret i32 %z } -; CHECK: r0 &= lsr(r1, r2) +; CHECK: &= lsr({{.*}}, {{.*}}) declare i32 @llvm.hexagon.S2.asl.r.r.and(i32, i32, i32) define i32 @S2_asl_r_r_and(i32 %a, i32 %b, i32 %c) { %z = call i32 @llvm.hexagon.S2.asl.r.r.and(i32 %a, i32 %b, i32 %c) ret i32 %z } -; CHECK: r0 &= asl(r1, r2) +; CHECK: &= asl({{.*}}, {{.*}}) declare i32 @llvm.hexagon.S2.lsl.r.r.and(i32, i32, i32) define i32 @S2_lsl_r_r_and(i32 %a, i32 %b, i32 %c) { %z = call i32 @llvm.hexagon.S2.lsl.r.r.and(i32 %a, i32 %b, i32 %c) ret i32 %z } -; CHECK: r0 &= lsl(r1, r2) +; CHECK: &= lsl({{.*}}, {{.*}}) ; Shift by register with saturation declare i32 @llvm.hexagon.S2.asr.r.r.sat(i32, i32) @@ -625,14 +628,14 @@ define i32 @S2_asr_r_r_sat(i32 %a, i32 %b) { %z = call i32 @llvm.hexagon.S2.asr.r.r.sat(i32 %a, i32 %b) ret i32 %z } -; CHECK: r0 = asr(r0, r1):sat +; CHECK: = asr({{.*}}, {{.*}}):sat declare i32 @llvm.hexagon.S2.asl.r.r.sat(i32, i32) define i32 @S2_asl_r_r_sat(i32 %a, i32 %b) { %z = call i32 @llvm.hexagon.S2.asl.r.r.sat(i32 %a, i32 %b) ret i32 %z } -; CHECK: r0 = asl(r0, r1):sat +; CHECK: = asl({{.*}}, {{.*}}):sat ; Vector shift halfwords by immediate declare i64 @llvm.hexagon.S2.asr.i.vh(i64, i32) @@ -640,21 +643,21 @@ define i64 @S2_asr_i_vh(i64 %a) { %z = call i64 @llvm.hexagon.S2.asr.i.vh(i64 %a, i32 0) ret i64 %z } -; CHECK: r1:0 = vasrh(r1:0, #0) +; CHECK: = vasrh({{.*}}, #0) declare i64 @llvm.hexagon.S2.lsr.i.vh(i64, i32) define i64 @S2_lsr_i_vh(i64 %a) { %z = call i64 @llvm.hexagon.S2.lsr.i.vh(i64 %a, i32 0) ret i64 %z } -; CHECK: r1:0 = vlsrh(r1:0, #0) +; CHECK: = vlsrh({{.*}}, #0) declare i64 @llvm.hexagon.S2.asl.i.vh(i64, i32) define i64 @S2_asl_i_vh(i64 %a) { %z = call i64 @llvm.hexagon.S2.asl.i.vh(i64 %a, i32 0) ret i64 %z } -; CHECK: r1:0 = vaslh(r1:0, #0) +; CHECK: = vaslh({{.*}}, #0) ; Vector shift halfwords by register declare i64 @llvm.hexagon.S2.asr.r.vh(i64, i32) @@ -662,28 +665,28 @@ define i64 @S2_asr_r_vh(i64 %a, i32 %b) { %z = call i64 @llvm.hexagon.S2.asr.r.vh(i64 %a, i32 %b) ret i64 %z } -; CHECK: r1:0 = vasrh(r1:0, r2) +; CHECK: = vasrh({{.*}}, {{.*}}) declare i64 @llvm.hexagon.S2.lsr.r.vh(i64, i32) define i64 @S2_lsr_r_vh(i64 %a, i32 %b) { %z = call i64 @llvm.hexagon.S2.lsr.r.vh(i64 %a, i32 %b) ret i64 %z } -; CHECK: r1:0 = vlsrh(r1:0, r2) +; CHECK: = vlsrh({{.*}}, {{.*}}) declare i64 @llvm.hexagon.S2.asl.r.vh(i64, i32) define i64 @S2_asl_r_vh(i64 %a, i32 %b) { %z = call i64 @llvm.hexagon.S2.asl.r.vh(i64 %a, i32 %b) ret i64 %z } -; CHECK: r1:0 = vaslh(r1:0, r2) +; CHECK: = vaslh({{.*}}, {{.*}}) declare i64 @llvm.hexagon.S2.lsl.r.vh(i64, i32) define i64 @S2_lsl_r_vh(i64 %a, i32 %b) { %z = call i64 @llvm.hexagon.S2.lsl.r.vh(i64 %a, i32 %b) ret i64 %z } -; CHECK: r1:0 = vlslh(r1:0, r2) +; CHECK: = vlslh({{.*}}, {{.*}}) ; Vector shift words by immediate declare i64 @llvm.hexagon.S2.asr.i.vw(i64, i32) @@ -691,21 +694,21 @@ define i64 @S2_asr_i_vw(i64 %a) { %z = call i64 @llvm.hexagon.S2.asr.i.vw(i64 %a, i32 0) ret i64 %z } -; CHECK: r1:0 = vasrw(r1:0, #0) +; CHECK: = vasrw({{.*}}, #0) declare i64 @llvm.hexagon.S2.lsr.i.vw(i64, i32) define i64 @S2_lsr_i_vw(i64 %a) { %z = call i64 @llvm.hexagon.S2.lsr.i.vw(i64 %a, i32 0) ret i64 %z } -; CHECK: r1:0 = vlsrw(r1:0, #0) +; CHECK: = vlsrw({{.*}}, #0) declare i64 @llvm.hexagon.S2.asl.i.vw(i64, i32) define i64 @S2_asl_i_vw(i64 %a) { %z = call i64 @llvm.hexagon.S2.asl.i.vw(i64 %a, i32 0) ret i64 %z } -; CHECK: r1:0 = vaslw(r1:0, #0) +; CHECK: = vaslw({{.*}}, #0) ; 
Vector shift words by with truncate and pack declare i32 @llvm.hexagon.S2.asr.i.svw.trun(i64, i32) @@ -713,11 +716,11 @@ define i32 @S2_asr_i_svw_trun(i64 %a) { %z = call i32 @llvm.hexagon.S2.asr.i.svw.trun(i64 %a, i32 0) ret i32 %z } -; CHECK: r0 = vasrw(r1:0, #0) +; CHECK: = vasrw({{.*}}, #0) declare i32 @llvm.hexagon.S2.asr.r.svw.trun(i64, i32) define i32 @S2_asr_r_svw_trun(i64 %a, i32 %b) { %z = call i32 @llvm.hexagon.S2.asr.r.svw.trun(i64 %a, i32 %b) ret i32 %z } -; CHECK: r0 = vasrw(r1:0, r2) +; CHECK: = vasrw({{.*}}, {{.*}}) -- 2.7.4