case ISD::FSHR:
ExpandIntRes_FunnelShift(N, Lo, Hi);
break;
+
+ case ISD::VSCALE:
+ ExpandIntRes_VSCALE(N, Lo, Hi);
+ break;
}
// If Lo/Hi is null, the sub-method took care of registering results etc.
SplitInteger(Res, Lo, Hi);
}
+// Expand an ISD::VSCALE node whose integer result type is illegal (wider than
+// any legal integer) into Lo/Hi halves. Strategy: materialize VSCALE(1) in the
+// half-width type, zero-extend it to the full width, multiply by the node's
+// multiplier operand, and split the wide product.
+void DAGTypeLegalizer::ExpandIntRes_VSCALE(SDNode *N, SDValue &Lo,
+ SDValue &Hi) {
+ EVT VT = N->getValueType(0);
+ // Half-width integer type; assumed legal so VSCALE/ZERO_EXTEND/MUL below
+ // need no further expansion of their own.
+ EVT HalfVT =
+ EVT::getIntegerVT(*DAG.getContext(), N->getValueSizeInBits(0) / 2);
+ SDLoc dl(N);
+
+ // We assume VSCALE(1) fits into a legal integer.
+ APInt One(HalfVT.getSizeInBits(), 1);
+ SDValue VScaleBase = DAG.getVScale(dl, HalfVT, One);
+ // Widen the base value, then apply the original multiplier (operand 0 of
+ // the VSCALE node) in the full-width type before splitting.
+ VScaleBase = DAG.getNode(ISD::ZERO_EXTEND, dl, VT, VScaleBase);
+ SDValue Res = DAG.getNode(ISD::MUL, dl, VT, VScaleBase, N->getOperand(0));
+ SplitInteger(Res, Lo, Hi);
+}
+
//===----------------------------------------------------------------------===//
// Integer Operand Expansion
//===----------------------------------------------------------------------===//
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple riscv64 -mattr=+m,+experimental-v < %s \
-; RUN: | FileCheck %s
+; RUN: | FileCheck %s -check-prefix=RV64
+; RUN: llc -mtriple riscv32 -mattr=+m,+experimental-v < %s \
+; RUN: | FileCheck %s -check-prefix=RV32
+
; vscale * 0 folds to zero (both halves on RV32).
define i64 @vscale_zero() nounwind {
-; CHECK-LABEL: vscale_zero:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: mv a0, zero
-; CHECK-NEXT: ret
+; RV64-LABEL: vscale_zero:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: mv a0, zero
+; RV64-NEXT: ret
+;
+; RV32-LABEL: vscale_zero:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: mv a0, zero
+; RV32-NEXT: mv a1, zero
+; RV32-NEXT: ret
entry:
  %0 = call i64 @llvm.vscale.i64()
  %1 = mul i64 %0, 0
  ; Every IR basic block must end in a terminator; return the product the
  ; CHECK lines expect in a0/a1.
  ret i64 %1
}
; vscale * 1 is just vscale: vlenb >> 3 (high half zero on RV32).
define i64 @vscale_one() nounwind {
-; CHECK-LABEL: vscale_one:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: csrr a0, vlenb
-; CHECK-NEXT: srli a0, a0, 3
-; CHECK-NEXT: ret
+; RV64-LABEL: vscale_one:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: srli a0, a0, 3
+; RV64-NEXT: ret
+;
+; RV32-LABEL: vscale_one:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: srli a0, a0, 3
+; RV32-NEXT: mv a1, zero
+; RV32-NEXT: ret
entry:
  %0 = call i64 @llvm.vscale.i64()
  %1 = mul i64 %0, 1
  ; Missing terminator in the original: return the product so the function is
  ; valid IR and the register checks above are meaningful.
  ret i64 %1
}
; Power-of-two multiplier lowers to shifts; RV32 needs an extra shift to form
; the high half of the 64-bit result.
define i64 @vscale_uimmpow2xlen() nounwind {
-; CHECK-LABEL: vscale_uimmpow2xlen:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: csrr a0, vlenb
-; CHECK-NEXT: slli a0, a0, 3
-; CHECK-NEXT: ret
+; RV64-LABEL: vscale_uimmpow2xlen:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 3
+; RV64-NEXT: ret
+;
+; RV32-LABEL: vscale_uimmpow2xlen:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: srli a1, a0, 29
+; RV32-NEXT: srli a0, a0, 3
+; RV32-NEXT: slli a0, a0, 6
+; RV32-NEXT: ret
entry:
  %0 = call i64 @llvm.vscale.i64()
  %1 = mul i64 %0, 64
  ; Missing terminator in the original: return the product so the function is
  ; valid IR and the register checks above are meaningful.
  ret i64 %1
}
define i64 @vscale_non_pow2() nounwind {
-; CHECK-LABEL: vscale_non_pow2:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: csrr a0, vlenb
-; CHECK-NEXT: srli a0, a0, 3
-; CHECK-NEXT: addi a1, zero, 24
-; CHECK-NEXT: mul a0, a0, a1
-; CHECK-NEXT: ret
+; RV64-LABEL: vscale_non_pow2:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: srli a0, a0, 3
+; RV64-NEXT: addi a1, zero, 24
+; RV64-NEXT: mul a0, a0, a1
+; RV64-NEXT: ret
+;
+; RV32-LABEL: vscale_non_pow2:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: srli a1, a0, 3
+; RV32-NEXT: addi a2, zero, 24
+; RV32-NEXT: mul a0, a1, a2
+; RV32-NEXT: mulhu a1, a1, a2
+; RV32-NEXT: ret
entry:
%0 = call i64 @llvm.vscale.i64()
%1 = mul i64 %0, 24