Summary: Add, sub, left/right shift isel patterns and tests for i32/i64 and fp32/fp64.
Reviewed By: arsenm
Differential Revision: https://reviews.llvm.org/D73207
}
}
-// Multiclass for RR type instructions
-
multiclass RRmrr<string opcStr, bits<8>opc,
RegisterClass RCo, ValueType Tyo,
RegisterClass RCi, ValueType Tyi,
{ let cy = 0; let cz = 1; let hasSideEffects = 0; }
}
+// Multiclass for RR type instructions
+// Immediate-register form: $sy is an immediate and $sz is a register.
+// The pattern keeps the operand order, so this form is instantiated only
+// for non-commutative operations (see RRNCm, used by sub).
+multiclass RRmir<string opcStr, bits<8>opc,
+                 RegisterClass RCo, ValueType Tyo,
+                 RegisterClass RCi, ValueType Tyi, Operand immOp,
+                 SDPatternOperator OpNode=null_frag> {
+  // cy = 0 encodes $sy as a 7-bit signed immediate; cz = 1 keeps $sz a register.
+  def ri : RR<opc, (outs RCo:$sx), (ins immOp:$sy, RCi:$sz),
+              !strconcat(opcStr, " $sx, $sy, $sz"),
+              [(set Tyo:$sx, (OpNode (Tyi simm7:$sy), Tyi:$sz))]>
+              { let cy = 0; let cz = 1; let hasSideEffects = 0; }
+}
+
multiclass RRmiz<string opcStr, bits<8>opc,
RegisterClass RCo, ValueType Tyo,
RegisterClass RCi, ValueType Tyi, Operand immOp,
// Used by add, mul, div, and similar commutative instructions
// The order of operands are "$sx, $sy, $sz"
-multiclass RRm<string opcStr, bits<8>opc, RegisterClass RC, ValueType Ty,
+multiclass RRm<string opcStr, bits<8>opc,
+ RegisterClass RC, ValueType Ty,
Operand immOp, Operand immOp2,
SDPatternOperator OpNode=null_frag> :
RRmrr<opcStr, opc, RC, Ty, RC, Ty, OpNode>,
RRNDmrm<opcStr, opc, RC, Ty, RC, Ty, immOp2>,
RRNDmim<opcStr, opc, RC, Ty, RC, Ty, immOp, immOp2>;
+// Multiclass for RR type instructions
+// Used by sub, and similar non-commutative instructions
+// The order of operands is "$sx, $sy, $sz"
+
+// Unlike RRm, this additionally instantiates the RRmir (immediate $sy,
+// register $sz) and RRmiz variants with the pattern attached, since the
+// operands of OpNode cannot be swapped.
+multiclass RRNCm<string opcStr, bits<8>opc,
+                 RegisterClass RC, ValueType Ty, Operand immOp, Operand immOp2,
+                 SDPatternOperator OpNode=null_frag> :
+    RRmrr<opcStr, opc, RC, Ty, RC, Ty, OpNode>,
+    RRmir<opcStr, opc, RC, Ty, RC, Ty, immOp, OpNode>,
+    RRmiz<opcStr, opc, RC, Ty, RC, Ty, immOp, OpNode>,
+    RRNDmrm<opcStr, opc, RC, Ty, RC, Ty, immOp2>,
+    RRNDmim<opcStr, opc, RC, Ty, RC, Ty, immOp, immOp2>;
// Multiclass for RR type instructions
// Used by sra, sla, sll, and similar instructions
// The order of operands are "$sx, $sz, $sy"
-multiclass RRIm<string opcStr, bits<8>opc, SDNode OpNode,
- RegisterClass RC, ValueType Ty, Operand immOp, Operand immOp2> {
+multiclass RRIm<string opcStr, bits<8>opc,
+ RegisterClass RC, ValueType Ty,
+ Operand immOp, Operand immOp2,
+ SDPatternOperator OpNode=null_frag> {
+ def rr : RR<
+ opc, (outs RC:$sx), (ins RC:$sz, I32:$sy),
+ !strconcat(opcStr, " $sx, $sz, $sy"),
+ [(set Ty:$sx, (OpNode Ty:$sz, i32:$sy))]> {
+ let cy = 1;
+ let cz = 1;
+ let hasSideEffects = 0;
+ }
def ri : RR<
opc, (outs RC:$sx), (ins RC:$sz, immOp:$sy),
!strconcat(opcStr, " $sx, $sz, $sy"),
let cz = 1;
let hasSideEffects = 0;
}
+ def rm0 : RR<
+ opc, (outs RC:$sx), (ins immOp2:$sz, I32:$sy),
+ !strconcat(opcStr, " $sx, (${sz})0, $sy")> {
+ let cy = 1;
+ let cz = 0;
+ let sz{6} = 1;
+ let hasSideEffects = 0;
+ }
+ def rm1 : RR<
+ opc, (outs RC:$sx), (ins immOp2:$sz, I32:$sy),
+ !strconcat(opcStr, " $sx, (${sz})1, $sy")> {
+ let cy = 1;
+ let cz = 0;
+ let hasSideEffects = 0;
+ }
+ def im0 : RR<
+ opc, (outs RC:$sx), (ins immOp2:$sz, immOp:$sy),
+ !strconcat(opcStr, " $sx, (${sz})0, $sy")> {
+ let cy = 0;
+ let cz = 0;
+ let sz{6} = 1;
+ let hasSideEffects = 0;
+ }
+ def im1 : RR<
+ opc, (outs RC:$sx), (ins immOp2:$sz, immOp:$sy),
+ !strconcat(opcStr, " $sx, (${sz})1, $sy")> {
+ let cy = 0;
+ let cz = 0;
+ let hasSideEffects = 0;
+ }
+ def zi : RR<
+ opc, (outs RC:$sx), (ins immOp:$sy),
+ !strconcat(opcStr, " $sx, $sy"),
+ [(set Ty:$sx, (OpNode 0, (i32 simm7:$sy)))]> {
+ let cy = 0;
+ let cz = 0;
+ let sz = 0;
+ let hasSideEffects = 0;
+ }
+}
+
+// Multiclass for RR type instructions without dag pattern
+// Used by sra.w.zx, sla.w.zx, and others
+
+// Same instruction variants as RRIm (rr/ri/rm0/rm1/im0/im1/zi) but with no
+// ISel patterns attached.
+multiclass RRINDm<string opcStr, bits<8>opc,
+                  RegisterClass RC, ValueType Ty, Operand immOp, Operand immOp2> {
+  // Register $sz shifted by register $sy (cy = 1, cz = 1).
+  def rr : RR<
+      opc, (outs RC:$sx), (ins RC:$sz, I32:$sy),
+      !strconcat(opcStr, " $sx, $sz, $sy")> {
+    let cy = 1;
+    let cz = 1;
+    let hasSideEffects = 0;
+  }
+  // Register $sz shifted by an immediate $sy (cy = 0 selects immediate $sy).
+  def ri : RR<
+      opc, (outs RC:$sx), (ins RC:$sz, immOp:$sy),
+      !strconcat(opcStr, " $sx, $sz, $sy")> {
+    let cy = 0;
+    let cz = 1;
+    let hasSideEffects = 0;
+  }
+  // "(N)0" mask operand as $sz, register shift amount; sz{6} = 1 encodes
+  // the (N)0 form of the mask immediate.
+  def rm0 : RR<
+      opc, (outs RC:$sx), (ins immOp2:$sz, I32:$sy),
+      !strconcat(opcStr, " $sx, (${sz})0, $sy")> {
+    let cy = 1;
+    let cz = 0;
+    let sz{6} = 1;
+    let hasSideEffects = 0;
+  }
+  // "(N)1" mask operand as $sz, register shift amount.
+  def rm1 : RR<
+      opc, (outs RC:$sx), (ins immOp2:$sz, I32:$sy),
+      !strconcat(opcStr, " $sx, (${sz})1, $sy")> {
+    let cy = 1;
+    let cz = 0;
+    let hasSideEffects = 0;
+  }
+  // "(N)0" mask operand as $sz, immediate shift amount.
+  def im0 : RR<
+      opc, (outs RC:$sx), (ins immOp2:$sz, immOp:$sy),
+      !strconcat(opcStr, " $sx, (${sz})0, $sy")> {
+    let cy = 0;
+    let cz = 0;
+    let sz{6} = 1;
+    let hasSideEffects = 0;
+  }
+  // "(N)1" mask operand as $sz, immediate shift amount.
+  def im1 : RR<
+      opc, (outs RC:$sx), (ins immOp2:$sz, immOp:$sy),
+      !strconcat(opcStr, " $sx, (${sz})1, $sy")> {
+    let cy = 0;
+    let cz = 0;
+    let hasSideEffects = 0;
+  }
+  // $sz hard-wired to 0; only the immediate $sy operand remains.
+  def zi : RR<
+      opc, (outs RC:$sx), (ins immOp:$sy),
+      !strconcat(opcStr, " $sx, $sy"),
+      > {
+    let cy = 0;
+    let cz = 0;
+    let sz = 0;
+    let hasSideEffects = 0;
+  }
+}
// Multiclass for RR type instructions
// LEA and LEASL instruction (load 32 bit imm to low or high part)
let cx = 0 in
-defm LEA : RMm<"lea", 0x06, I64, i64, simm7Op64, simm32Op64>;
+defm LEA : RMm<"lea", 0x06, I64, i64, simm7Op64, simm32Op64, add>;
let cx = 1 in
defm LEASL : RMm<"lea.sl", 0x06, I64, i64, simm7Op64, simm32Op64>;
let isCodeGenOnly = 1 in {
let cx = 0 in
-defm LEA32 : RMm<"lea", 0x06, I32, i32, simm7Op32, simm32Op32>;
+defm LEA32 : RMm<"lea", 0x06, I32, i32, simm7Op32, simm32Op32, add>;
}
// 5.3.2.2. Fixed-Point Arithmetic Operation Instructions
+// ADD instruction
+let cx = 0 in
+defm ADD : RRm<"addu.l", 0x48, I64, i64, simm7Op64, uimm6Op64>;
+let cx = 1 in
+defm ADDUW : RRm<"addu.w", 0x48, I32, i32, simm7Op32, uimm6Op32>;
+
// ADS instruction
let cx = 0 in
defm ADS : RRm<"adds.w.sx", 0x4A, I32, i32, simm7Op32, uimm6Op32, add>;
let cx = 1 in
-defm ADSU : RRm<"adds.w.zx", 0x4A, I32, i32, simm7Op32, uimm6Op32, add>;
+defm ADSU : RRm<"adds.w.zx", 0x4A, I32, i32, simm7Op32, uimm6Op32>;
// ADX instruction
let cx = 0 in
defm ADX : RRm<"adds.l", 0x59, I64, i64, simm7Op64, uimm6Op64, add>;
+// SUB instruction
+let cx = 0 in
+defm SUB : RRm<"subu.l", 0x58, I64, i64, simm7Op64, uimm6Op64>;
+let cx = 1 in
+defm SUBUW : RRm<"subu.w", 0x58, I32, i32, simm7Op32, uimm6Op32>;
+
+// SBS instruction
+let cx = 0 in
+defm SBS : RRNCm<"subs.w.sx", 0x5A, I32, i32, simm7Op32, uimm6Op32, sub>;
+let cx = 1 in
+defm SBSU : RRm<"subs.w.zx", 0x5A, I32, i32, simm7Op32, uimm6Op32>;
+
+// SBX instruction
+let cx = 0 in
+defm SBX : RRNCm<"subs.l", 0x5B, I64, i64, simm7Op64, uimm6Op64, sub>;
+
// CMP instruction
let cx = 0 in
defm CMP : RRm<"cmpu.l", 0x55, I64, i64, simm7Op64, uimm6Op64>;
let cx = 0 in {
defm AND : RRm<"and", 0x44, I64, i64, simm7Op64, uimm6Op64, and>;
defm OR : RRm<"or", 0x45, I64, i64, simm7Op64, uimm6Op64, or>;
+ defm XOR : RRm<"xor", 0x46, I64, i64, simm7Op64, uimm6Op64, xor>;
let isCodeGenOnly = 1 in {
defm AND32 : RRm<"and", 0x44, I32, i32, simm7Op32, uimm6Op32, and>;
defm OR32 : RRm<"or", 0x45, I32, i32, simm7Op32, uimm6Op32, or>;
// 5.3.2.4 Shift Instructions
let cx = 0 in
-defm SRAX : RRIm<"sra.l", 0x77, sra, I64, i64, simm7Op32, uimm6Op64>;
+defm SRAX : RRIm<"sra.l", 0x77, I64, i64, simm7Op32, uimm6Op64, sra>;
let cx = 0 in
-defm SRA : RRIm<"sra.w.sx", 0x76, sra, I32, i32, simm7Op32, uimm6Op32>;
+defm SRA : RRIm<"sra.w.sx", 0x76, I32, i32, simm7Op32, uimm6Op32, sra>;
+let cx = 1 in
+defm SRAU : RRINDm<"sra.w.zx", 0x76, I32, i32, simm7Op32, uimm6Op32>;
let cx = 0 in
-defm SLL : RRIm<"sll", 0x65, shl, I64, i64, simm7Op32, uimm6Op64>;
+defm SLL : RRIm<"sll", 0x65, I64, i64, simm7Op32, uimm6Op64, shl>;
let cx = 0 in
-defm SLA : RRIm<"sla.w.sx", 0x66, shl, I32, i32, simm7Op32, uimm6Op32>;
+defm SLA : RRIm<"sla.w.sx", 0x66, I32, i32, simm7Op32, uimm6Op32, shl>;
+let cx = 1 in
+defm SLAU : RRINDm<"sla.w.zx", 0x66, I32, i32, simm7Op32, uimm6Op32>;
+let cx = 0 in
+defm SRL : RRIm<"srl", 0x75, I64, i64, simm7Op32, uimm6Op64, srl>;
+
+// There is no 32-bit logical right shift instruction: zero-extend the i32
+// source into a 64-bit register (mask the low 32 bits with the (32)0 mask
+// via ANDrm0), perform a 64-bit SRL, then take the low 32 bits back out.
+def : Pat<(i32 (srl i32:$src, (i32 simm7:$val))),
+          (EXTRACT_SUBREG (SRLri (ANDrm0 (INSERT_SUBREG (i64 (IMPLICIT_DEF)),
+            $src, sub_i32), 32), imm:$val), sub_i32)>;
+def : Pat<(i32 (srl i32:$src, i32:$val)),
+          (EXTRACT_SUBREG (SRLrr (ANDrm0 (INSERT_SUBREG (i64 (IMPLICIT_DEF)),
+            $src, sub_i32), 32), $val), sub_i32)>;
+// 5.3.2.5. Floating-point Arithmetic Operation Instructions
// FCP instruction
let cx = 0 in
defm FCP : RRm<"fcmp.d", 0x7E, I64, f64, simm7Op64, uimm6Op64>;
--- /dev/null
+; RUN: llc < %s -mtriple=ve-unknown-unknown | FileCheck %s
+
+; Integer add isel tests for VE: i8/i16 (sign- and zero-extended), i32 and
+; i64, with register operands, small immediates (selected as lea), a large
+; i64 immediate, and one i32 xor of the sign bit.
+
+define signext i8 @func1(i8 signext %0, i8 signext %1) {
+; CHECK-LABEL: func1:
+; CHECK:       .LBB{{[0-9]+}}_2:
+; CHECK-NEXT:    adds.w.sx %s0, %s1, %s0
+; CHECK-NEXT:    sla.w.sx %s0, %s0, 24
+; CHECK-NEXT:    sra.w.sx %s0, %s0, 24
+; CHECK-NEXT:    or %s11, 0, %s9
+  %3 = add i8 %1, %0
+  ret i8 %3
+}
+
+define signext i16 @func2(i16 signext %0, i16 signext %1) {
+; CHECK-LABEL: func2:
+; CHECK:       .LBB{{[0-9]+}}_2:
+; CHECK-NEXT:    adds.w.sx %s0, %s1, %s0
+; CHECK-NEXT:    sla.w.sx %s0, %s0, 16
+; CHECK-NEXT:    sra.w.sx %s0, %s0, 16
+; CHECK-NEXT:    or %s11, 0, %s9
+  %3 = add i16 %1, %0
+  ret i16 %3
+}
+
+define i32 @func3(i32 %0, i32 %1) {
+; CHECK-LABEL: func3:
+; CHECK:       .LBB{{[0-9]+}}_2:
+; CHECK-NEXT:    adds.w.sx %s0, %s1, %s0
+; CHECK-NEXT:    or %s11, 0, %s9
+  %3 = add nsw i32 %1, %0
+  ret i32 %3
+}
+
+define i64 @func4(i64 %0, i64 %1) {
+; CHECK-LABEL: func4:
+; CHECK:       .LBB{{[0-9]+}}_2:
+; CHECK-NEXT:    adds.l %s0, %s1, %s0
+; CHECK-NEXT:    or %s11, 0, %s9
+  %3 = add nsw i64 %1, %0
+  ret i64 %3
+}
+
+define zeroext i8 @func6(i8 zeroext %0, i8 zeroext %1) {
+; CHECK-LABEL: func6:
+; CHECK:       .LBB{{[0-9]+}}_2:
+; CHECK-NEXT:    adds.w.sx %s0, %s1, %s0
+; CHECK-NEXT:    and %s0, %s0, (56)0
+; CHECK-NEXT:    or %s11, 0, %s9
+  %3 = add i8 %1, %0
+  ret i8 %3
+}
+
+define zeroext i16 @func7(i16 zeroext %0, i16 zeroext %1) {
+; CHECK-LABEL: func7:
+; CHECK:       .LBB{{[0-9]+}}_2:
+; CHECK-NEXT:    adds.w.sx %s0, %s1, %s0
+; CHECK-NEXT:    and %s0, %s0, (48)0
+; CHECK-NEXT:    or %s11, 0, %s9
+  %3 = add i16 %1, %0
+  ret i16 %3
+}
+
+define i32 @func8(i32 %0, i32 %1) {
+; CHECK-LABEL: func8:
+; CHECK:       .LBB{{[0-9]+}}_2:
+; CHECK-NEXT:    adds.w.sx %s0, %s1, %s0
+; CHECK-NEXT:    or %s11, 0, %s9
+  %3 = add i32 %1, %0
+  ret i32 %3
+}
+
+define i64 @func9(i64 %0, i64 %1) {
+; CHECK-LABEL: func9:
+; CHECK:       .LBB{{[0-9]+}}_2:
+; CHECK-NEXT:    adds.l %s0, %s1, %s0
+; CHECK-NEXT:    or %s11, 0, %s9
+  %3 = add i64 %1, %0
+  ret i64 %3
+}
+
+define signext i8 @func13(i8 signext %0) {
+; CHECK-LABEL: func13:
+; CHECK:       .LBB{{[0-9]+}}_2:
+; CHECK-NEXT:    lea %s0, 5(%s0)
+; CHECK-NEXT:    sla.w.sx %s0, %s0, 24
+; CHECK-NEXT:    sra.w.sx %s0, %s0, 24
+; CHECK-NEXT:    or %s11, 0, %s9
+  %2 = add i8 %0, 5
+  ret i8 %2
+}
+
+define signext i16 @func14(i16 signext %0) {
+; CHECK-LABEL: func14:
+; CHECK:       .LBB{{[0-9]+}}_2:
+; CHECK-NEXT:    lea %s0, 5(%s0)
+; CHECK-NEXT:    sla.w.sx %s0, %s0, 16
+; CHECK-NEXT:    sra.w.sx %s0, %s0, 16
+; CHECK-NEXT:    or %s11, 0, %s9
+  %2 = add i16 %0, 5
+  ret i16 %2
+}
+
+define i32 @func15(i32 %0) {
+; CHECK-LABEL: func15:
+; CHECK:       .LBB{{[0-9]+}}_2:
+; CHECK-NEXT:    lea %s0, 5(%s0)
+; CHECK-NEXT:    or %s11, 0, %s9
+  %2 = add nsw i32 %0, 5
+  ret i32 %2
+}
+
+define i64 @func16(i64 %0) {
+; CHECK-LABEL: func16:
+; CHECK:       .LBB{{[0-9]+}}_2:
+; CHECK-NEXT:    lea %s0, 5(%s0)
+; CHECK-NEXT:    or %s11, 0, %s9
+  %2 = add nsw i64 %0, 5
+  ret i64 %2
+}
+
+define zeroext i8 @func18(i8 zeroext %0) {
+; CHECK-LABEL: func18:
+; CHECK:       .LBB{{[0-9]+}}_2:
+; CHECK-NEXT:    lea %s0, 5(%s0)
+; CHECK-NEXT:    and %s0, %s0, (56)0
+; CHECK-NEXT:    or %s11, 0, %s9
+  %2 = add i8 %0, 5
+  ret i8 %2
+}
+
+define zeroext i16 @func19(i16 zeroext %0) {
+; CHECK-LABEL: func19:
+; CHECK:       .LBB{{[0-9]+}}_2:
+; CHECK-NEXT:    lea %s0, 5(%s0)
+; CHECK-NEXT:    and %s0, %s0, (48)0
+; CHECK-NEXT:    or %s11, 0, %s9
+  %2 = add i16 %0, 5
+  ret i16 %2
+}
+
+define i32 @func20(i32 %0) {
+; CHECK-LABEL: func20:
+; CHECK:       .LBB{{[0-9]+}}_2:
+; CHECK-NEXT:    lea %s0, 5(%s0)
+; CHECK-NEXT:    or %s11, 0, %s9
+  %2 = add i32 %0, 5
+  ret i32 %2
+}
+
+define i64 @func21(i64 %0) {
+; CHECK-LABEL: func21:
+; CHECK:       .LBB{{[0-9]+}}_2:
+; CHECK-NEXT:    lea %s0, 5(%s0)
+; CHECK-NEXT:    or %s11, 0, %s9
+  %2 = add i64 %0, 5
+  ret i64 %2
+}
+
+define i32 @func25(i32 %0) {
+; CHECK-LABEL: func25:
+; CHECK:       .LBB{{[0-9]+}}_2:
+; CHECK-NEXT:    lea %s1, -2147483648
+; CHECK-NEXT:    xor %s0, %s0, %s1
+; CHECK-NEXT:    or %s11, 0, %s9
+  %2 = xor i32 %0, -2147483648
+  ret i32 %2
+}
+
+define i64 @func26(i64 %0) {
+; CHECK-LABEL: func26:
+; CHECK:       .LBB{{[0-9]+}}_2:
+; CHECK-NEXT:    lea %s1, -2147483648
+; CHECK-NEXT:    and %s1, %s1, (32)0
+; CHECK-NEXT:    adds.l %s0, %s0, %s1
+; CHECK-NEXT:    or %s11, 0, %s9
+  %2 = add nsw i64 %0, 2147483648
+  ret i64 %2
+}
+
--- /dev/null
+; RUN: llc < %s -mtriple=ve-unknown-unknown | FileCheck %s
+
+; Left shift (shl) isel tests for VE: sla.w.sx for i32 and narrower types,
+; sll for i64 (shift amount sign-extended to 32 bits first); both register
+; and immediate shift amounts.
+
+define signext i8 @func1(i8 signext %0, i8 signext %1) {
+; CHECK-LABEL: func1:
+; CHECK:       .LBB{{[0-9]+}}_2:
+; CHECK-NEXT:    sla.w.sx %s0, %s0, %s1
+; CHECK-NEXT:    sla.w.sx %s0, %s0, 24
+; CHECK-NEXT:    sra.w.sx %s0, %s0, 24
+; CHECK-NEXT:    or %s11, 0, %s9
+  %3 = sext i8 %0 to i32
+  %4 = sext i8 %1 to i32
+  %5 = shl i32 %3, %4
+  %6 = trunc i32 %5 to i8
+  ret i8 %6
+}
+
+define signext i16 @func2(i16 signext %0, i16 signext %1) {
+; CHECK-LABEL: func2:
+; CHECK:       .LBB{{[0-9]+}}_2:
+; CHECK-NEXT:    sla.w.sx %s0, %s0, %s1
+; CHECK-NEXT:    sla.w.sx %s0, %s0, 16
+; CHECK-NEXT:    sra.w.sx %s0, %s0, 16
+; CHECK-NEXT:    or %s11, 0, %s9
+  %3 = sext i16 %0 to i32
+  %4 = sext i16 %1 to i32
+  %5 = shl i32 %3, %4
+  %6 = trunc i32 %5 to i16
+  ret i16 %6
+}
+
+define i32 @func3(i32 %0, i32 %1) {
+; CHECK-LABEL: func3:
+; CHECK:       .LBB{{[0-9]+}}_2:
+; CHECK-NEXT:    sla.w.sx %s0, %s0, %s1
+; CHECK-NEXT:    or %s11, 0, %s9
+  %3 = shl i32 %0, %1
+  ret i32 %3
+}
+
+define i64 @func4(i64 %0, i64 %1) {
+; CHECK-LABEL: func4:
+; CHECK:       .LBB{{[0-9]+}}_2:
+; CHECK-NEXT:    adds.w.sx %s1, %s1, (0)1
+; CHECK-NEXT:    sll %s0, %s0, %s1
+; CHECK-NEXT:    or %s11, 0, %s9
+  %3 = shl i64 %0, %1
+  ret i64 %3
+}
+
+define zeroext i8 @func6(i8 zeroext %0, i8 zeroext %1) {
+; CHECK-LABEL: func6:
+; CHECK:       .LBB{{[0-9]+}}_2:
+; CHECK-NEXT:    sla.w.sx %s0, %s0, %s1
+; CHECK-NEXT:    and %s0, %s0, (56)0
+; CHECK-NEXT:    or %s11, 0, %s9
+  %3 = zext i8 %0 to i32
+  %4 = zext i8 %1 to i32
+  %5 = shl i32 %3, %4
+  %6 = trunc i32 %5 to i8
+  ret i8 %6
+}
+
+define zeroext i16 @func7(i16 zeroext %0, i16 zeroext %1) {
+; CHECK-LABEL: func7:
+; CHECK:       .LBB{{[0-9]+}}_2:
+; CHECK-NEXT:    sla.w.sx %s0, %s0, %s1
+; CHECK-NEXT:    and %s0, %s0, (48)0
+; CHECK-NEXT:    or %s11, 0, %s9
+  %3 = zext i16 %0 to i32
+  %4 = zext i16 %1 to i32
+  %5 = shl i32 %3, %4
+  %6 = trunc i32 %5 to i16
+  ret i16 %6
+}
+
+define i32 @func8(i32 %0, i32 %1) {
+; CHECK-LABEL: func8:
+; CHECK:       .LBB{{[0-9]+}}_2:
+; CHECK-NEXT:    sla.w.sx %s0, %s0, %s1
+; CHECK-NEXT:    or %s11, 0, %s9
+  %3 = shl i32 %0, %1
+  ret i32 %3
+}
+
+define i64 @func9(i64 %0, i64 %1) {
+; CHECK-LABEL: func9:
+; CHECK:       .LBB{{[0-9]+}}_2:
+; CHECK-NEXT:    adds.w.sx %s1, %s1, (0)1
+; CHECK-NEXT:    sll %s0, %s0, %s1
+; CHECK-NEXT:    or %s11, 0, %s9
+  %3 = shl i64 %0, %1
+  ret i64 %3
+}
+
+define signext i8 @func11(i8 signext %0) {
+; CHECK-LABEL: func11:
+; CHECK:       .LBB{{[0-9]+}}_2:
+; CHECK-NEXT:    sla.w.sx %s0, %s0, 5
+; CHECK-NEXT:    sla.w.sx %s0, %s0, 24
+; CHECK-NEXT:    sra.w.sx %s0, %s0, 24
+; CHECK-NEXT:    or %s11, 0, %s9
+  %2 = shl i8 %0, 5
+  ret i8 %2
+}
+
+define signext i16 @func12(i16 signext %0) {
+; CHECK-LABEL: func12:
+; CHECK:       .LBB{{[0-9]+}}_2:
+; CHECK-NEXT:    sla.w.sx %s0, %s0, 5
+; CHECK-NEXT:    sla.w.sx %s0, %s0, 16
+; CHECK-NEXT:    sra.w.sx %s0, %s0, 16
+; CHECK-NEXT:    or %s11, 0, %s9
+  %2 = shl i16 %0, 5
+  ret i16 %2
+}
+
+define i32 @func13(i32 %0) {
+; CHECK-LABEL: func13:
+; CHECK:       .LBB{{[0-9]+}}_2:
+; CHECK-NEXT:    sla.w.sx %s0, %s0, 5
+; CHECK-NEXT:    or %s11, 0, %s9
+  %2 = shl i32 %0, 5
+  ret i32 %2
+}
+
+define i64 @func14(i64 %0) {
+; CHECK-LABEL: func14:
+; CHECK:       .LBB{{[0-9]+}}_2:
+; CHECK-NEXT:    sll %s0, %s0, 5
+; CHECK-NEXT:    or %s11, 0, %s9
+  %2 = shl i64 %0, 5
+  ret i64 %2
+}
+
+define zeroext i8 @func16(i8 zeroext %0) {
+; CHECK-LABEL: func16:
+; CHECK:       .LBB{{[0-9]+}}_2:
+; CHECK-NEXT:    sla.w.sx %s0, %s0, 5
+; CHECK-NEXT:    and %s0, %s0, (56)0
+; CHECK-NEXT:    or %s11, 0, %s9
+  %2 = shl i8 %0, 5
+  ret i8 %2
+}
+
+define zeroext i16 @func17(i16 zeroext %0) {
+; CHECK-LABEL: func17:
+; CHECK:       .LBB{{[0-9]+}}_2:
+; CHECK-NEXT:    sla.w.sx %s0, %s0, 5
+; CHECK-NEXT:    and %s0, %s0, (48)0
+; CHECK-NEXT:    or %s11, 0, %s9
+  %2 = shl i16 %0, 5
+  ret i16 %2
+}
+
+define i32 @func18(i32 %0) {
+; CHECK-LABEL: func18:
+; CHECK:       .LBB{{[0-9]+}}_2:
+; CHECK-NEXT:    sla.w.sx %s0, %s0, 5
+; CHECK-NEXT:    or %s11, 0, %s9
+  %2 = shl i32 %0, 5
+  ret i32 %2
+}
+
+define i64 @func19(i64 %0) {
+; CHECK-LABEL: func19:
+; CHECK:       .LBB{{[0-9]+}}_2:
+; CHECK-NEXT:    sll %s0, %s0, 5
+; CHECK-NEXT:    or %s11, 0, %s9
+  %2 = shl i64 %0, 5
+  ret i64 %2
+}
+
--- /dev/null
+; RUN: llc < %s -mtriple=ve-unknown-unknown | FileCheck %s
+
+; Right shift isel tests for VE: sra.w.sx/sra.l for ashr; lshr on i32 and
+; narrower has no dedicated instruction, so it is lowered by masking the
+; value with (32)0 (zero-extend to 64 bits) and using the 64-bit srl.
+
+define signext i8 @func1(i8 signext %0, i8 signext %1) {
+; CHECK-LABEL: func1:
+; CHECK:       .LBB{{[0-9]+}}_2:
+; CHECK-NEXT:    sra.w.sx %s0, %s0, %s1
+; CHECK-NEXT:    or %s11, 0, %s9
+  %3 = sext i8 %0 to i32
+  %4 = sext i8 %1 to i32
+  %5 = ashr i32 %3, %4
+  %6 = trunc i32 %5 to i8
+  ret i8 %6
+}
+
+define signext i16 @func2(i16 signext %0, i16 signext %1) {
+; CHECK-LABEL: func2:
+; CHECK:       .LBB{{[0-9]+}}_2:
+; CHECK-NEXT:    sra.w.sx %s0, %s0, %s1
+; CHECK-NEXT:    or %s11, 0, %s9
+  %3 = sext i16 %0 to i32
+  %4 = sext i16 %1 to i32
+  %5 = ashr i32 %3, %4
+  %6 = trunc i32 %5 to i16
+  ret i16 %6
+}
+
+define i32 @func3(i32 %0, i32 %1) {
+; CHECK-LABEL: func3:
+; CHECK:       .LBB{{[0-9]+}}_2:
+; CHECK-NEXT:    sra.w.sx %s0, %s0, %s1
+; CHECK-NEXT:    or %s11, 0, %s9
+  %3 = ashr i32 %0, %1
+  ret i32 %3
+}
+
+define i64 @func4(i64 %0, i64 %1) {
+; CHECK-LABEL: func4:
+; CHECK:       .LBB{{[0-9]+}}_2:
+; CHECK-NEXT:    adds.w.sx %s1, %s1, (0)1
+; CHECK-NEXT:    sra.l %s0, %s0, %s1
+; CHECK-NEXT:    or %s11, 0, %s9
+  %3 = ashr i64 %0, %1
+  ret i64 %3
+}
+
+define zeroext i8 @func7(i8 zeroext %0, i8 zeroext %1) {
+; CHECK-LABEL: func7:
+; CHECK:       .LBB{{[0-9]+}}_2:
+; CHECK-NEXT:    # kill: def $sw0 killed $sw0 def $sx0
+; CHECK-NEXT:    and %s0, %s0, (32)0
+; CHECK-NEXT:    srl %s0, %s0, %s1
+; CHECK-NEXT:    # kill: def $sw0 killed $sw0 killed $sx0
+; CHECK-NEXT:    or %s11, 0, %s9
+  %3 = zext i8 %0 to i32
+  %4 = zext i8 %1 to i32
+  %5 = lshr i32 %3, %4
+  %6 = trunc i32 %5 to i8
+  ret i8 %6
+}
+
+define zeroext i16 @func8(i16 zeroext %0, i16 zeroext %1) {
+; CHECK-LABEL: func8:
+; CHECK:       .LBB{{[0-9]+}}_2:
+; CHECK-NEXT:    # kill: def $sw0 killed $sw0 def $sx0
+; CHECK-NEXT:    and %s0, %s0, (32)0
+; CHECK-NEXT:    srl %s0, %s0, %s1
+; CHECK-NEXT:    # kill: def $sw0 killed $sw0 killed $sx0
+; CHECK-NEXT:    or %s11, 0, %s9
+  %3 = zext i16 %0 to i32
+  %4 = zext i16 %1 to i32
+  %5 = lshr i32 %3, %4
+  %6 = trunc i32 %5 to i16
+  ret i16 %6
+}
+
+define i32 @func9(i32 %0, i32 %1) {
+; CHECK-LABEL: func9:
+; CHECK:       .LBB{{[0-9]+}}_2:
+; CHECK-NEXT:    # kill: def $sw0 killed $sw0 def $sx0
+; CHECK-NEXT:    and %s0, %s0, (32)0
+; CHECK-NEXT:    srl %s0, %s0, %s1
+; CHECK-NEXT:    # kill: def $sw0 killed $sw0 killed $sx0
+; CHECK-NEXT:    or %s11, 0, %s9
+  %3 = lshr i32 %0, %1
+  ret i32 %3
+}
+
+define i64 @func10(i64 %0, i64 %1) {
+; CHECK-LABEL: func10:
+; CHECK:       .LBB{{[0-9]+}}_2:
+; CHECK-NEXT:    adds.w.sx %s1, %s1, (0)1
+; CHECK-NEXT:    srl %s0, %s0, %s1
+; CHECK-NEXT:    or %s11, 0, %s9
+  %3 = lshr i64 %0, %1
+  ret i64 %3
+}
+
+define signext i8 @func12(i8 signext %0) {
+; CHECK-LABEL: func12:
+; CHECK:       .LBB{{[0-9]+}}_2:
+; CHECK-NEXT:    sra.w.sx %s0, %s0, 5
+; CHECK-NEXT:    or %s11, 0, %s9
+  %2 = ashr i8 %0, 5
+  ret i8 %2
+}
+
+define signext i16 @func13(i16 signext %0) {
+; CHECK-LABEL: func13:
+; CHECK:       .LBB{{[0-9]+}}_2:
+; CHECK-NEXT:    sra.w.sx %s0, %s0, 5
+; CHECK-NEXT:    or %s11, 0, %s9
+  %2 = ashr i16 %0, 5
+  ret i16 %2
+}
+
+define i32 @func14(i32 %0) {
+; CHECK-LABEL: func14:
+; CHECK:       .LBB{{[0-9]+}}_2:
+; CHECK-NEXT:    sra.w.sx %s0, %s0, 5
+; CHECK-NEXT:    or %s11, 0, %s9
+  %2 = ashr i32 %0, 5
+  ret i32 %2
+}
+
+define i64 @func15(i64 %0) {
+; CHECK-LABEL: func15:
+; CHECK:       .LBB{{[0-9]+}}_2:
+; CHECK-NEXT:    sra.l %s0, %s0, 5
+; CHECK-NEXT:    or %s11, 0, %s9
+  %2 = ashr i64 %0, 5
+  ret i64 %2
+}
+
+define zeroext i8 @func17(i8 zeroext %0) {
+; CHECK-LABEL: func17:
+; CHECK:       .LBB{{[0-9]+}}_2:
+; CHECK-NEXT:    # kill: def $sw0 killed $sw0 def $sx0
+; CHECK-NEXT:    and %s0, %s0, (32)0
+; CHECK-NEXT:    srl %s0, %s0, 5
+; CHECK-NEXT:    # kill: def $sw0 killed $sw0 killed $sx0
+; CHECK-NEXT:    or %s11, 0, %s9
+  %2 = lshr i8 %0, 5
+  ret i8 %2
+}
+
+define zeroext i16 @func18(i16 zeroext %0) {
+; CHECK-LABEL: func18:
+; CHECK:       .LBB{{[0-9]+}}_2:
+; CHECK-NEXT:    # kill: def $sw0 killed $sw0 def $sx0
+; CHECK-NEXT:    and %s0, %s0, (32)0
+; CHECK-NEXT:    srl %s0, %s0, 5
+; CHECK-NEXT:    # kill: def $sw0 killed $sw0 killed $sx0
+; CHECK-NEXT:    or %s11, 0, %s9
+  %2 = lshr i16 %0, 5
+  ret i16 %2
+}
+
+define i32 @func19(i32 %0) {
+; CHECK-LABEL: func19:
+; CHECK:       .LBB{{[0-9]+}}_2:
+; CHECK-NEXT:    # kill: def $sw0 killed $sw0 def $sx0
+; CHECK-NEXT:    and %s0, %s0, (32)0
+; CHECK-NEXT:    srl %s0, %s0, 5
+; CHECK-NEXT:    # kill: def $sw0 killed $sw0 killed $sx0
+; CHECK-NEXT:    or %s11, 0, %s9
+  %2 = lshr i32 %0, 5
+  ret i32 %2
+}
+
+define i64 @func20(i64 %0) {
+; CHECK-LABEL: func20:
+; CHECK:       .LBB{{[0-9]+}}_2:
+; CHECK-NEXT:    srl %s0, %s0, 5
+; CHECK-NEXT:    or %s11, 0, %s9
+  %2 = lshr i64 %0, 5
+  ret i64 %2
+}
+
--- /dev/null
+; RUN: llc < %s -mtriple=ve-unknown-unknown | FileCheck %s
+
+; Integer sub isel tests for VE: subs.w.sx/subs.l for register operands;
+; add of a negative immediate is selected as lea. Includes an i32 xor of
+; the sign bit and an i64 add of INT32_MIN.
+
+define signext i8 @func1(i8 signext %0, i8 signext %1) {
+; CHECK-LABEL: func1:
+; CHECK:       .LBB{{[0-9]+}}_2:
+; CHECK-NEXT:    subs.w.sx %s0, %s0, %s1
+; CHECK-NEXT:    sla.w.sx %s0, %s0, 24
+; CHECK-NEXT:    sra.w.sx %s0, %s0, 24
+; CHECK-NEXT:    or %s11, 0, %s9
+  %3 = sub i8 %0, %1
+  ret i8 %3
+}
+
+define signext i16 @func2(i16 signext %0, i16 signext %1) {
+; CHECK-LABEL: func2:
+; CHECK:       .LBB{{[0-9]+}}_2:
+; CHECK-NEXT:    subs.w.sx %s0, %s0, %s1
+; CHECK-NEXT:    sla.w.sx %s0, %s0, 16
+; CHECK-NEXT:    sra.w.sx %s0, %s0, 16
+; CHECK-NEXT:    or %s11, 0, %s9
+  %3 = sub i16 %0, %1
+  ret i16 %3
+}
+
+define i32 @func3(i32 %0, i32 %1) {
+; CHECK-LABEL: func3:
+; CHECK:       .LBB{{[0-9]+}}_2:
+; CHECK-NEXT:    subs.w.sx %s0, %s0, %s1
+; CHECK-NEXT:    or %s11, 0, %s9
+  %3 = sub nsw i32 %0, %1
+  ret i32 %3
+}
+
+define i64 @func4(i64 %0, i64 %1) {
+; CHECK-LABEL: func4:
+; CHECK:       .LBB{{[0-9]+}}_2:
+; CHECK-NEXT:    subs.l %s0, %s0, %s1
+; CHECK-NEXT:    or %s11, 0, %s9
+  %3 = sub nsw i64 %0, %1
+  ret i64 %3
+}
+
+define zeroext i8 @func6(i8 zeroext %0, i8 zeroext %1) {
+; CHECK-LABEL: func6:
+; CHECK:       .LBB{{[0-9]+}}_2:
+; CHECK-NEXT:    subs.w.sx %s0, %s0, %s1
+; CHECK-NEXT:    and %s0, %s0, (56)0
+; CHECK-NEXT:    or %s11, 0, %s9
+  %3 = sub i8 %0, %1
+  ret i8 %3
+}
+
+define zeroext i16 @func7(i16 zeroext %0, i16 zeroext %1) {
+; CHECK-LABEL: func7:
+; CHECK:       .LBB{{[0-9]+}}_2:
+; CHECK-NEXT:    subs.w.sx %s0, %s0, %s1
+; CHECK-NEXT:    and %s0, %s0, (48)0
+; CHECK-NEXT:    or %s11, 0, %s9
+  %3 = sub i16 %0, %1
+  ret i16 %3
+}
+
+define i32 @func8(i32 %0, i32 %1) {
+; CHECK-LABEL: func8:
+; CHECK:       .LBB{{[0-9]+}}_2:
+; CHECK-NEXT:    subs.w.sx %s0, %s0, %s1
+; CHECK-NEXT:    or %s11, 0, %s9
+  %3 = sub i32 %0, %1
+  ret i32 %3
+}
+
+define i64 @func9(i64 %0, i64 %1) {
+; CHECK-LABEL: func9:
+; CHECK:       .LBB{{[0-9]+}}_2:
+; CHECK-NEXT:    subs.l %s0, %s0, %s1
+; CHECK-NEXT:    or %s11, 0, %s9
+  %3 = sub i64 %0, %1
+  ret i64 %3
+}
+
+define signext i8 @func13(i8 signext %0, i8 signext %1) {
+; CHECK-LABEL: func13:
+; CHECK:       .LBB{{[0-9]+}}_2:
+; CHECK-NEXT:    lea %s0, -5(%s0)
+; CHECK-NEXT:    sla.w.sx %s0, %s0, 24
+; CHECK-NEXT:    sra.w.sx %s0, %s0, 24
+; CHECK-NEXT:    or %s11, 0, %s9
+  %3 = add i8 %0, -5
+  ret i8 %3
+}
+
+define signext i16 @func14(i16 signext %0, i16 signext %1) {
+; CHECK-LABEL: func14:
+; CHECK:       .LBB{{[0-9]+}}_2:
+; CHECK-NEXT:    lea %s0, -5(%s0)
+; CHECK-NEXT:    sla.w.sx %s0, %s0, 16
+; CHECK-NEXT:    sra.w.sx %s0, %s0, 16
+; CHECK-NEXT:    or %s11, 0, %s9
+  %3 = add i16 %0, -5
+  ret i16 %3
+}
+
+define i32 @func15(i32 %0, i32 %1) {
+; CHECK-LABEL: func15:
+; CHECK:       .LBB{{[0-9]+}}_2:
+; CHECK-NEXT:    lea %s0, -5(%s0)
+; CHECK-NEXT:    or %s11, 0, %s9
+  %3 = add nsw i32 %0, -5
+  ret i32 %3
+}
+
+define i64 @func16(i64 %0, i64 %1) {
+; CHECK-LABEL: func16:
+; CHECK:       .LBB{{[0-9]+}}_2:
+; CHECK-NEXT:    lea %s0, -5(%s0)
+; CHECK-NEXT:    or %s11, 0, %s9
+  %3 = add nsw i64 %0, -5
+  ret i64 %3
+}
+
+define zeroext i8 @func18(i8 zeroext %0, i8 zeroext %1) {
+; CHECK-LABEL: func18:
+; CHECK:       .LBB{{[0-9]+}}_2:
+; CHECK-NEXT:    lea %s0, -5(%s0)
+; CHECK-NEXT:    and %s0, %s0, (56)0
+; CHECK-NEXT:    or %s11, 0, %s9
+  %3 = add i8 %0, -5
+  ret i8 %3
+}
+
+define zeroext i16 @func19(i16 zeroext %0, i16 zeroext %1) {
+; CHECK-LABEL: func19:
+; CHECK:       .LBB{{[0-9]+}}_2:
+; CHECK-NEXT:    lea %s0, -5(%s0)
+; CHECK-NEXT:    and %s0, %s0, (48)0
+; CHECK-NEXT:    or %s11, 0, %s9
+  %3 = add i16 %0, -5
+  ret i16 %3
+}
+
+define i32 @func20(i32 %0, i32 %1) {
+; CHECK-LABEL: func20:
+; CHECK:       .LBB{{[0-9]+}}_2:
+; CHECK-NEXT:    lea %s0, -5(%s0)
+; CHECK-NEXT:    or %s11, 0, %s9
+  %3 = add i32 %0, -5
+  ret i32 %3
+}
+
+define i64 @func21(i64 %0, i64 %1) {
+; CHECK-LABEL: func21:
+; CHECK:       .LBB{{[0-9]+}}_2:
+; CHECK-NEXT:    lea %s0, -5(%s0)
+; CHECK-NEXT:    or %s11, 0, %s9
+  %3 = add i64 %0, -5
+  ret i64 %3
+}
+
+define i32 @func25(i32 %0, i32 %1) {
+; CHECK-LABEL: func25:
+; CHECK:       .LBB{{[0-9]+}}_2:
+; CHECK-NEXT:    lea %s1, -2147483648
+; CHECK-NEXT:    xor %s0, %s0, %s1
+; CHECK-NEXT:    or %s11, 0, %s9
+  %3 = xor i32 %0, -2147483648
+  ret i32 %3
+}
+
+define i64 @func26(i64 %0, i64 %1) {
+; CHECK-LABEL: func26:
+; CHECK:       .LBB{{[0-9]+}}_2:
+; CHECK-NEXT:    lea %s0, -2147483648(%s0)
+; CHECK-NEXT:    or %s11, 0, %s9
+  %3 = add nsw i64 %0, -2147483648
+  ret i64 %3
+}
+