void Select(SDNode *N) override;
+ // Complex Pattern Selectors.
+ bool SelectADDRri(SDValue N, SDValue &Base, SDValue &Offset);
+
StringRef getPassName() const override {
return "VE DAG->DAG Pattern Instruction Selection";
}
};
} // end anonymous namespace
+// Select the register+immediate addressing mode for Addr: fills Base and
+// Offset (Offset is always emitted as an i32 target constant) and returns
+// true, or returns false for target symbols so they are handled as direct
+// calls instead.
+bool VEDAGToDAGISel::SelectADDRri(SDValue Addr, SDValue &Base,
+ SDValue &Offset) {
+ auto AddrTy = Addr->getValueType(0);
+ // A bare frame index becomes "FI + 0".
+ if (FrameIndexSDNode *FIN = dyn_cast<FrameIndexSDNode>(Addr)) {
+ Base = CurDAG->getTargetFrameIndex(FIN->getIndex(), AddrTy);
+ Offset = CurDAG->getTargetConstant(0, SDLoc(Addr), MVT::i32);
+ return true;
+ }
+ if (Addr.getOpcode() == ISD::TargetExternalSymbol ||
+ Addr.getOpcode() == ISD::TargetGlobalAddress ||
+ Addr.getOpcode() == ISD::TargetGlobalTLSAddress)
+ return false; // direct calls.
+
+ // "base + constant" where the constant fits a signed 13-bit field; the
+ // base may itself be a frame index.
+ if (CurDAG->isBaseWithConstantOffset(Addr)) {
+ ConstantSDNode *CN = cast<ConstantSDNode>(Addr.getOperand(1));
+ if (isInt<13>(CN->getSExtValue())) {
+ if (FrameIndexSDNode *FIN =
+ dyn_cast<FrameIndexSDNode>(Addr.getOperand(0))) {
+ // Constant offset from frame ref.
+ Base = CurDAG->getTargetFrameIndex(FIN->getIndex(), AddrTy);
+ } else {
+ Base = Addr.getOperand(0);
+ }
+ // NOTE(review): range-checked with getSExtValue() but emitted with
+ // getZExtValue(); confirm negative offsets survive the i32 emission.
+ Offset =
+ CurDAG->getTargetConstant(CN->getZExtValue(), SDLoc(Addr), MVT::i32);
+ return true;
+ }
+ }
+ // Fallback: use the whole expression as the base with a zero offset.
+ Base = Addr;
+ Offset = CurDAG->getTargetConstant(0, SDLoc(Addr), MVT::i32);
+ return true;
+}
+
void VEDAGToDAGISel::Select(SDNode *N) {
SDLoc dl(N);
if (N->isMachineOpcode()) {
return CurDAG->getTargetConstant(cc, SDLoc(N), MVT::i32);
}]>;
+// Addressing modes.
+// ADDRri: base register + immediate offset.  Matched by the C++ selector
+// "SelectADDRri", yields two operands (base, offset), and may also match a
+// bare frameindex root.
+def ADDRri : ComplexPattern<iPTR, 2, "SelectADDRri", [frameindex], []>;
+
// ASX format of memory address
def MEMri : Operand<iPTR> {
let PrintMethod = "printMemASXOperand";
+// LDS: 64-bit integer load.
let cx = 0 in
def LDSri : RM<
0x01, (outs I64:$sx), (ins MEMri:$addr),
- "ld $sx, $addr">;
+ "ld $sx, $addr",
+ [(set i64:$sx, (load ADDRri:$addr))]>;
+// LDU: f32 load.
+let cx = 0 in
+def LDUri : RM<
+ 0x02, (outs F32:$sx), (ins MEMri:$addr),
+ "ldu $sx, $addr",
+ [(set f32:$sx, (load ADDRri:$addr))]>;
+// LDL.sx: 32-bit load, sign-extending form; selected for plain i32 loads.
+let cx = 0 in
+def LDLri : RM<
+ 0x03, (outs I32:$sx), (ins MEMri:$addr),
+ "ldl.sx $sx, $addr",
+ [(set i32:$sx, (load ADDRri:$addr))]>;
+// LDL.zx: 32-bit load, zero-extending form.
+// NOTE(review): carries the exact same selection pattern as LDLri above; for
+// a plain i32 load only one of the two can win — confirm this is intended.
+let cx = 1 in
+def LDLUri : RM<
+ 0x03, (outs I32:$sx), (ins MEMri:$addr),
+ "ldl.zx $sx, $addr",
+ [(set i32:$sx, (load ADDRri:$addr))]>;
+// LD2B.sx: 16-bit load, sign-extended into i32.
+let cx = 0 in
+def LD2Bri : RM<
+ 0x04, (outs I32:$sx), (ins MEMri:$addr),
+ "ld2b.sx $sx, $addr",
+ [(set i32:$sx, (sextloadi16 ADDRri:$addr))]>;
+// LD2B.zx: 16-bit load, zero-extended into i32.
+let cx = 1 in
+def LD2BUri : RM<
+ 0x04, (outs I32:$sx), (ins MEMri:$addr),
+ "ld2b.zx $sx, $addr",
+ [(set i32:$sx, (zextloadi16 ADDRri:$addr))]>;
+// LD1B.sx: 8-bit load, sign-extended into i32.
+let cx = 0 in
+def LD1Bri : RM<
+ 0x05, (outs I32:$sx), (ins MEMri:$addr),
+ "ld1b.sx $sx, $addr",
+ [(set i32:$sx, (sextloadi8 ADDRri:$addr))]>;
+// LD1B.zx: 8-bit load, zero-extended into i32.
+let cx = 1 in
+def LD1BUri : RM<
+ 0x05, (outs I32:$sx), (ins MEMri:$addr),
+ "ld1b.zx $sx, $addr",
+ [(set i32:$sx, (zextloadi8 ADDRri:$addr))]>;
}
}
let cx = 0, cy = 0, sy = 0, cz = 1 in {
+// STS: 64-bit integer store.
def STSri : RM<
0x11, (outs), (ins MEMri:$addr, I64:$sx),
- "st $sx, $addr">;
+ "st $sx, $addr",
+ [(store i64:$sx, ADDRri:$addr)]>;
+// STU: f32 store.
+def STUri : RM<
+ 0x12, (outs), (ins MEMri:$addr, F32:$sx),
+ "stu $sx, $addr",
+ [(store f32:$sx, ADDRri:$addr)]>;
+// STL: 32-bit store.
+def STLri : RM<
+ 0x13, (outs), (ins MEMri:$addr, I32:$sx),
+ "stl $sx, $addr",
+ [(store i32:$sx, ADDRri:$addr)]>;
+// ST2B: store the low 16 bits of an i32.
+def ST2Bri : RM<
+ 0x14, (outs), (ins MEMri:$addr, I32:$sx),
+ "st2b $sx, $addr",
+ [(truncstorei16 i32:$sx, ADDRri:$addr)]>;
+// ST1B: store the low 8 bits of an i32.
+def ST1Bri : RM<
+ 0x15, (outs), (ins MEMri:$addr, I32:$sx),
+ "st1b $sx, $addr",
+ [(truncstorei8 i32:$sx, ADDRri:$addr)]>;
}
}
+// f64 load/store reuse the 64-bit LDS/STS instructions.
+def : Pat<(f64 (load ADDRri:$addr)), (LDSri ADDRri:$addr)>;
+def : Pat<(store f64:$sx, ADDRri:$addr), (STSri ADDRri:$addr, $sx)>;
+
// Return instruction is also a special case of jump.
let cx = 0, cx2 = 0, bpf = 0 /* NONE */, cf = 15 /* AT */, cy = 0, sy = 0,
cz = 1, sz = 0x10 /* SX10 */, imm32 = 0, Uses = [SX10],
(INSERT_SUBREG (i64 (IMPLICIT_DEF)), $sy, sub_i32)>;
+// extload, sextload and zextload stuff
+// i64-valued extending loads: perform the narrow load into an i32 register,
+// then place it in the sub_i32 subregister of a fresh (undefined) i64.
+def : Pat<(i64 (sextloadi8 ADDRri:$addr)),
+ (INSERT_SUBREG (i64 (IMPLICIT_DEF)), (LD1Bri MEMri:$addr), sub_i32)>;
+def : Pat<(i64 (zextloadi8 ADDRri:$addr)),
+ (INSERT_SUBREG (i64 (IMPLICIT_DEF)), (LD1BUri MEMri:$addr), sub_i32)>;
+def : Pat<(i64 (sextloadi16 ADDRri:$addr)),
+ (INSERT_SUBREG (i64 (IMPLICIT_DEF)), (LD2Bri MEMri:$addr), sub_i32)>;
+def : Pat<(i64 (zextloadi16 ADDRri:$addr)),
+ (INSERT_SUBREG (i64 (IMPLICIT_DEF)), (LD2BUri MEMri:$addr), sub_i32)>;
+def : Pat<(i64 (sextloadi32 ADDRri:$addr)),
+ (INSERT_SUBREG (i64 (IMPLICIT_DEF)), (LDLri MEMri:$addr), sub_i32)>;
+def : Pat<(i64 (zextloadi32 ADDRri:$addr)),
+ (INSERT_SUBREG (i64 (IMPLICIT_DEF)), (LDLUri MEMri:$addr), sub_i32)>;
+// i64-valued anyext loads pick the zero-extending (for i8/i16) or
+// sign-extending (for i32) narrow load.
+def : Pat<(i64 (extloadi8 ADDRri:$addr)),
+ (INSERT_SUBREG (i64 (IMPLICIT_DEF)), (LD1BUri MEMri:$addr), sub_i32)>;
+def : Pat<(i64 (extloadi16 ADDRri:$addr)),
+ (INSERT_SUBREG (i64 (IMPLICIT_DEF)), (LD2BUri MEMri:$addr), sub_i32)>;
+def : Pat<(i64 (extloadi32 ADDRri:$addr)),
+ (INSERT_SUBREG (i64 (IMPLICIT_DEF)), (LDLri MEMri:$addr), sub_i32)>;
+
+// anyextload
+// i32-valued anyext loads: the zero-extending narrow loads serve directly.
+def : Pat<(extloadi8 ADDRri:$addr), (LD1BUri MEMri:$addr)>;
+def : Pat<(extloadi16 ADDRri:$addr), (LD2BUri MEMri:$addr)>;
+
+// truncstore
+// Truncating stores from i64: store the sub_i32 subregister with the
+// matching narrow store instruction.
+def : Pat<(truncstorei8 i64:$src, ADDRri:$addr),
+ (ST1Bri MEMri:$addr, (EXTRACT_SUBREG $src, sub_i32))>;
+def : Pat<(truncstorei16 i64:$src, ADDRri:$addr),
+ (ST2Bri MEMri:$addr, (EXTRACT_SUBREG $src, sub_i32))>;
+def : Pat<(truncstorei32 i64:$src, ADDRri:$addr),
+ (STLri MEMri:$addr, (EXTRACT_SUBREG $src, sub_i32))>;
//===----------------------------------------------------------------------===//
// Pseudo Instructions
--- /dev/null
+; RUN: llc < %s -mtriple=ve-unknown-unknown | FileCheck %s
+
+; Load-selection tests for the VE target: each function checks that a plain,
+; sign-extending, or zero-extending load of one scalar type selects the
+; expected ld* instruction in the "(,%reg)" operand form.
+
+; Function Attrs: norecurse nounwind readonly
+define double @loadf64(double* nocapture readonly %0) {
+; CHECK-LABEL: loadf64:
+; CHECK: .LBB{{[0-9]+}}_2:
+; CHECK-NEXT: ld %s0, (,%s0)
+; CHECK-NEXT: or %s11, 0, %s9
+ %2 = load double, double* %0, align 16
+ ret double %2
+}
+
+; Function Attrs: norecurse nounwind readonly
+define float @loadf32(float* nocapture readonly %0) {
+; CHECK-LABEL: loadf32:
+; CHECK: .LBB{{[0-9]+}}_2:
+; CHECK-NEXT: ldu %s0, (,%s0)
+; CHECK-NEXT: or %s11, 0, %s9
+ %2 = load float, float* %0, align 16
+ ret float %2
+}
+
+; Function Attrs: norecurse nounwind readonly
+define i64 @loadi64(i64* nocapture readonly %0) {
+; CHECK-LABEL: loadi64:
+; CHECK: .LBB{{[0-9]+}}_2:
+; CHECK-NEXT: ld %s0, (,%s0)
+; CHECK-NEXT: or %s11, 0, %s9
+ %2 = load i64, i64* %0, align 16
+ ret i64 %2
+}
+
+; Function Attrs: norecurse nounwind readonly
+define i32 @loadi32(i32* nocapture readonly %0) {
+; CHECK-LABEL: loadi32:
+; CHECK: .LBB{{[0-9]+}}_2:
+; CHECK-NEXT: ldl.sx %s0, (,%s0)
+; CHECK-NEXT: or %s11, 0, %s9
+ %2 = load i32, i32* %0, align 16
+ ret i32 %2
+}
+
+; Function Attrs: norecurse nounwind readonly
+define i64 @loadi32sext(i32* nocapture readonly %0) {
+; CHECK-LABEL: loadi32sext:
+; CHECK: .LBB{{[0-9]+}}_2:
+; CHECK-NEXT: ldl.sx %s0, (,%s0)
+; CHECK-NEXT: or %s11, 0, %s9
+ %2 = load i32, i32* %0, align 16
+ %3 = sext i32 %2 to i64
+ ret i64 %3
+}
+
+; Function Attrs: norecurse nounwind readonly
+define i64 @loadi32zext(i32* nocapture readonly %0) {
+; CHECK-LABEL: loadi32zext:
+; CHECK: .LBB{{[0-9]+}}_2:
+; CHECK-NEXT: ldl.zx %s0, (,%s0)
+; CHECK-NEXT: or %s11, 0, %s9
+ %2 = load i32, i32* %0, align 16
+ %3 = zext i32 %2 to i64
+ ret i64 %3
+}
+
+; Function Attrs: norecurse nounwind readonly
+define i16 @loadi16(i16* nocapture readonly %0) {
+; CHECK-LABEL: loadi16:
+; CHECK: .LBB{{[0-9]+}}_2:
+; CHECK-NEXT: ld2b.zx %s0, (,%s0)
+; CHECK-NEXT: or %s11, 0, %s9
+ %2 = load i16, i16* %0, align 16
+ ret i16 %2
+}
+
+; Function Attrs: norecurse nounwind readonly
+define i64 @loadi16sext(i16* nocapture readonly %0) {
+; CHECK-LABEL: loadi16sext:
+; CHECK: .LBB{{[0-9]+}}_2:
+; CHECK-NEXT: ld2b.sx %s0, (,%s0)
+; CHECK-NEXT: or %s11, 0, %s9
+ %2 = load i16, i16* %0, align 16
+ %3 = sext i16 %2 to i64
+ ret i64 %3
+}
+
+; Function Attrs: norecurse nounwind readonly
+define i64 @loadi16zext(i16* nocapture readonly %0) {
+; CHECK-LABEL: loadi16zext:
+; CHECK: .LBB{{[0-9]+}}_2:
+; CHECK-NEXT: ld2b.zx %s0, (,%s0)
+; CHECK-NEXT: or %s11, 0, %s9
+ %2 = load i16, i16* %0, align 16
+ %3 = zext i16 %2 to i64
+ ret i64 %3
+}
+
+; Function Attrs: norecurse nounwind readonly
+define i8 @loadi8(i8* nocapture readonly %0) {
+; CHECK-LABEL: loadi8:
+; CHECK: .LBB{{[0-9]+}}_2:
+; CHECK-NEXT: ld1b.zx %s0, (,%s0)
+; CHECK-NEXT: or %s11, 0, %s9
+ %2 = load i8, i8* %0, align 16
+ ret i8 %2
+}
+
+; Function Attrs: norecurse nounwind readonly
+define i64 @loadi8sext(i8* nocapture readonly %0) {
+; CHECK-LABEL: loadi8sext:
+; CHECK: .LBB{{[0-9]+}}_2:
+; CHECK-NEXT: ld1b.sx %s0, (,%s0)
+; CHECK-NEXT: or %s11, 0, %s9
+ %2 = load i8, i8* %0, align 16
+ %3 = sext i8 %2 to i64
+ ret i64 %3
+}
+
+; Function Attrs: norecurse nounwind readonly
+define i64 @loadi8zext(i8* nocapture readonly %0) {
+; CHECK-LABEL: loadi8zext:
+; CHECK: .LBB{{[0-9]+}}_2:
+; CHECK-NEXT: ld1b.zx %s0, (,%s0)
+; CHECK-NEXT: or %s11, 0, %s9
+ %2 = load i8, i8* %0, align 16
+ %3 = zext i8 %2 to i64
+ ret i64 %3
+}
+
+; Frame-index (alloca) cases: the address folds to the "176(,%s11)" form.
+
+; Function Attrs: norecurse nounwind readonly
+define double @loadf64stk() {
+; CHECK-LABEL: loadf64stk:
+; CHECK: .LBB{{[0-9]+}}_2:
+; CHECK-NEXT: ld %s0, 176(,%s11)
+; CHECK-NEXT: or %s11, 0, %s9
+ %addr = alloca double, align 16
+ %1 = load double, double* %addr, align 16
+ ret double %1
+}
+
+; Function Attrs: norecurse nounwind readonly
+define float @loadf32stk() {
+; CHECK-LABEL: loadf32stk:
+; CHECK: .LBB{{[0-9]+}}_2:
+; CHECK-NEXT: ldu %s0, 176(,%s11)
+; CHECK-NEXT: or %s11, 0, %s9
+ %addr = alloca float, align 16
+ %1 = load float, float* %addr, align 16
+ ret float %1
+}
+
+; Function Attrs: norecurse nounwind readonly
+define i64 @loadi64stk() {
+; CHECK-LABEL: loadi64stk:
+; CHECK: .LBB{{[0-9]+}}_2:
+; CHECK-NEXT: ld %s0, 176(,%s11)
+; CHECK-NEXT: or %s11, 0, %s9
+ %addr = alloca i64, align 16
+ %1 = load i64, i64* %addr, align 16
+ ret i64 %1
+}
+
+; Function Attrs: norecurse nounwind readonly
+define i32 @loadi32stk() {
+; CHECK-LABEL: loadi32stk:
+; CHECK: .LBB{{[0-9]+}}_2:
+; CHECK-NEXT: ldl.sx %s0, 176(,%s11)
+; CHECK-NEXT: or %s11, 0, %s9
+ %addr = alloca i32, align 16
+ %1 = load i32, i32* %addr, align 16
+ ret i32 %1
+}
+
+; Function Attrs: norecurse nounwind readonly
+define i16 @loadi16stk() {
+; CHECK-LABEL: loadi16stk:
+; CHECK: .LBB{{[0-9]+}}_2:
+; CHECK-NEXT: ld2b.zx %s0, 176(,%s11)
+; CHECK-NEXT: or %s11, 0, %s9
+ %addr = alloca i16, align 16
+ %1 = load i16, i16* %addr, align 16
+ ret i16 %1
+}
+
+; Function Attrs: norecurse nounwind readonly
+define i8 @loadi8stk() {
+; CHECK-LABEL: loadi8stk:
+; CHECK: .LBB{{[0-9]+}}_2:
+; CHECK-NEXT: ld1b.zx %s0, 176(,%s11)
+; CHECK-NEXT: or %s11, 0, %s9
+ %addr = alloca i8, align 16
+ %1 = load i8, i8* %addr, align 16
+ ret i8 %1
+}
+
--- /dev/null
+; RUN: llc < %s -mtriple=ve-unknown-unknown | FileCheck %s
+
+; Store-selection tests for the VE target: each function checks that a plain
+; or truncating store of one scalar type selects the expected st*
+; instruction in the "(,%reg)" operand form.
+; NOTE(review): the "Function Attrs: ... readonly" comments below are stale
+; clang output — these functions write memory.
+
+; Function Attrs: norecurse nounwind readonly
+define void @storef64(double* nocapture %0, double %1) {
+; CHECK-LABEL: storef64:
+; CHECK: .LBB{{[0-9]+}}_2:
+; CHECK-NEXT: st %s1, (,%s0)
+; CHECK-NEXT: or %s11, 0, %s9
+ store double %1, double* %0, align 16
+ ret void
+}
+
+; Function Attrs: norecurse nounwind readonly
+define void @storef32(float* nocapture %0, float %1) {
+; CHECK-LABEL: storef32:
+; CHECK: .LBB{{[0-9]+}}_2:
+; CHECK-NEXT: stu %s1, (,%s0)
+; CHECK-NEXT: or %s11, 0, %s9
+ store float %1, float* %0, align 16
+ ret void
+}
+
+; Function Attrs: norecurse nounwind readonly
+define void @storei64(i64* nocapture %0, i64 %1) {
+; CHECK-LABEL: storei64:
+; CHECK: .LBB{{[0-9]+}}_2:
+; CHECK-NEXT: st %s1, (,%s0)
+; CHECK-NEXT: or %s11, 0, %s9
+ store i64 %1, i64* %0, align 16
+ ret void
+}
+
+; Function Attrs: norecurse nounwind readonly
+define void @storei32(i32* nocapture %0, i32 %1) {
+; CHECK-LABEL: storei32:
+; CHECK: .LBB{{[0-9]+}}_2:
+; CHECK-NEXT: stl %s1, (,%s0)
+; CHECK-NEXT: or %s11, 0, %s9
+ store i32 %1, i32* %0, align 16
+ ret void
+}
+
+; Function Attrs: norecurse nounwind readonly
+define void @storei32tr(i32* nocapture %0, i64 %1) {
+; CHECK-LABEL: storei32tr:
+; CHECK: .LBB{{[0-9]+}}_2:
+; CHECK-NEXT: stl %s1, (,%s0)
+; CHECK-NEXT: or %s11, 0, %s9
+ %3 = trunc i64 %1 to i32
+ store i32 %3, i32* %0, align 16
+ ret void
+}
+
+; Function Attrs: norecurse nounwind readonly
+define void @storei16(i16* nocapture %0, i16 %1) {
+; CHECK-LABEL: storei16:
+; CHECK: .LBB{{[0-9]+}}_2:
+; CHECK-NEXT: st2b %s1, (,%s0)
+; CHECK-NEXT: or %s11, 0, %s9
+ store i16 %1, i16* %0, align 16
+ ret void
+}
+
+; Function Attrs: norecurse nounwind readonly
+define void @storei16tr(i16* nocapture %0, i64 %1) {
+; CHECK-LABEL: storei16tr:
+; CHECK: .LBB{{[0-9]+}}_2:
+; CHECK-NEXT: st2b %s1, (,%s0)
+; CHECK-NEXT: or %s11, 0, %s9
+ %3 = trunc i64 %1 to i16
+ store i16 %3, i16* %0, align 16
+ ret void
+}
+
+; Function Attrs: norecurse nounwind readonly
+define void @storei8(i8* nocapture %0, i8 %1) {
+; CHECK-LABEL: storei8:
+; CHECK: .LBB{{[0-9]+}}_2:
+; CHECK-NEXT: st1b %s1, (,%s0)
+; CHECK-NEXT: or %s11, 0, %s9
+ store i8 %1, i8* %0, align 16
+ ret void
+}
+
+; Function Attrs: norecurse nounwind readonly
+define void @storei8tr(i8* nocapture %0, i64 %1) {
+; CHECK-LABEL: storei8tr:
+; CHECK: .LBB{{[0-9]+}}_2:
+; CHECK-NEXT: st1b %s1, (,%s0)
+; CHECK-NEXT: or %s11, 0, %s9
+ %3 = trunc i64 %1 to i8
+ store i8 %3, i8* %0, align 16
+ ret void
+}
+
+; Frame-index (alloca) cases: the address folds to the "176(,%s11)" form.
+
+; Function Attrs: norecurse nounwind readonly
+define void @storef64stk(double %0) {
+; CHECK-LABEL: storef64stk:
+; CHECK: .LBB{{[0-9]+}}_2:
+; CHECK-NEXT: st %s0, 176(,%s11)
+; CHECK-NEXT: or %s11, 0, %s9
+ %addr = alloca double, align 16
+ store double %0, double* %addr, align 16
+ ret void
+}
+
+; Function Attrs: norecurse nounwind readonly
+define void @storef32stk(float %0) {
+; CHECK-LABEL: storef32stk:
+; CHECK: .LBB{{[0-9]+}}_2:
+; CHECK-NEXT: stu %s0, 176(,%s11)
+; CHECK-NEXT: or %s11, 0, %s9
+ %addr = alloca float, align 16
+ store float %0, float* %addr, align 16
+ ret void
+}
+
+; Function Attrs: norecurse nounwind readonly
+define void @storei64stk(i64 %0) {
+; CHECK-LABEL: storei64stk:
+; CHECK: .LBB{{[0-9]+}}_2:
+; CHECK-NEXT: st %s0, 176(,%s11)
+; CHECK-NEXT: or %s11, 0, %s9
+ %addr = alloca i64, align 16
+ store i64 %0, i64* %addr, align 16
+ ret void
+}
+
+; Function Attrs: norecurse nounwind readonly
+define void @storei32stk(i32 %0) {
+; CHECK-LABEL: storei32stk:
+; CHECK: .LBB{{[0-9]+}}_2:
+; CHECK-NEXT: stl %s0, 176(,%s11)
+; CHECK-NEXT: or %s11, 0, %s9
+ %addr = alloca i32, align 16
+ store i32 %0, i32* %addr, align 16
+ ret void
+}
+
+; Function Attrs: norecurse nounwind readonly
+define void @storei16stk(i16 %0) {
+; CHECK-LABEL: storei16stk:
+; CHECK: .LBB{{[0-9]+}}_2:
+; CHECK-NEXT: st2b %s0, 176(,%s11)
+; CHECK-NEXT: or %s11, 0, %s9
+ %addr = alloca i16, align 16
+ store i16 %0, i16* %addr, align 16
+ ret void
+}
+
+; Function Attrs: norecurse nounwind readonly
+define void @storei8stk(i8 %0) {
+; CHECK-LABEL: storei8stk:
+; CHECK: .LBB{{[0-9]+}}_2:
+; CHECK-NEXT: st1b %s0, 176(,%s11)
+; CHECK-NEXT: or %s11, 0, %s9
+ %addr = alloca i8, align 16
+ store i8 %0, i8* %addr, align 16
+ ret void
+}