Summary: Enable instruction selection for unaligned (and underaligned) loads/stores of integer (i8-i64) and fp32/fp64 types on VE, and add regression tests covering alignments 1, 2, 4, and 8.
Reviewed By: arsenm
Differential Revision: https://reviews.llvm.org/D73448
return VT == MVT::f32 || VT == MVT::f64;
}
+/// Determine if the target supports unaligned memory accesses.
+///
+/// This function returns true if the target allows unaligned memory accesses
+/// of the specified type in the given address space. If true, it also returns
+/// whether the unaligned memory access is "fast" in the last argument by
+/// reference. This is used, for example, in situations where an array
+/// copy/move/set is converted to a sequence of store operations. Its use
+/// helps to ensure that such replacements don't generate code that causes an
+/// alignment error (trap) on the target machine.
+bool VETargetLowering::allowsMisalignedMemoryAccesses(EVT VT,
+ unsigned AddrSpace,
+ unsigned Align,
+ MachineMemOperand::Flags,
+ bool *Fast) const {
+ if (Fast) {
+ // It's fast anytime on VE
+ *Fast = true;
+ }
+ return true;
+}
+
VETargetLowering::VETargetLowering(const TargetMachine &TM,
const VESubtarget &STI)
: TargetLowering(TM), Subtarget(&STI) {
bool isFPImmLegal(const APFloat &Imm, EVT VT,
bool ForCodeSize) const override;
+ /// Returns true if the target allows unaligned memory accesses of the
+ /// specified type.
+ bool allowsMisalignedMemoryAccesses(EVT VT, unsigned AS, unsigned Align,
+ MachineMemOperand::Flags Flags,
+ bool *Fast) const override;
};
} // namespace llvm
--- /dev/null
+; RUN: llc < %s -mtriple=ve-unknown-unknown | FileCheck %s
+
+@vi8 = common dso_local local_unnamed_addr global i8 0, align 1
+@vi16 = common dso_local local_unnamed_addr global i16 0, align 1
+@vi32 = common dso_local local_unnamed_addr global i32 0, align 1
+@vi64 = common dso_local local_unnamed_addr global i64 0, align 1
+@vf32 = common dso_local local_unnamed_addr global float 0.000000e+00, align 1
+@vf64 = common dso_local local_unnamed_addr global double 0.000000e+00, align 1
+
+; Function Attrs: norecurse nounwind readonly
+define double @loadf64stk() {
+; CHECK-LABEL: loadf64stk:
+; CHECK: .LBB{{[0-9]+}}_2:
+; CHECK-NEXT: ld %s0, 184(,%s11)
+; CHECK-NEXT: or %s11, 0, %s9
+ %addr = alloca double, align 1
+ %1 = load double, double* %addr, align 1
+ ret double %1
+}
+
+; Function Attrs: norecurse nounwind readonly
+define float @loadf32stk() {
+; CHECK-LABEL: loadf32stk:
+; CHECK: .LBB{{[0-9]+}}_2:
+; CHECK-NEXT: ldu %s0, 188(,%s11)
+; CHECK-NEXT: or %s11, 0, %s9
+ %addr = alloca float, align 1
+ %1 = load float, float* %addr, align 1
+ ret float %1
+}
+
+; Function Attrs: norecurse nounwind readonly
+define i64 @loadi64stk() {
+; CHECK-LABEL: loadi64stk:
+; CHECK: .LBB{{[0-9]+}}_2:
+; CHECK-NEXT: ld %s0, 184(,%s11)
+; CHECK-NEXT: or %s11, 0, %s9
+ %addr = alloca i64, align 1
+ %1 = load i64, i64* %addr, align 1
+ ret i64 %1
+}
+
+; Function Attrs: norecurse nounwind readonly
+define i32 @loadi32stk() {
+; CHECK-LABEL: loadi32stk:
+; CHECK: .LBB{{[0-9]+}}_2:
+; CHECK-NEXT: ldl.sx %s0, 188(,%s11)
+; CHECK-NEXT: or %s11, 0, %s9
+ %addr = alloca i32, align 1
+ %1 = load i32, i32* %addr, align 1
+ ret i32 %1
+}
+
+; Function Attrs: norecurse nounwind readonly
+define i16 @loadi16stk() {
+; CHECK-LABEL: loadi16stk:
+; CHECK: .LBB{{[0-9]+}}_2:
+; CHECK-NEXT: ld2b.zx %s0, 190(,%s11)
+; CHECK-NEXT: or %s11, 0, %s9
+ %addr = alloca i16, align 1
+ %1 = load i16, i16* %addr, align 1
+ ret i16 %1
+}
+
+; Function Attrs: norecurse nounwind readonly
+define i8 @loadi8stk() {
+; CHECK-LABEL: loadi8stk:
+; CHECK: .LBB{{[0-9]+}}_2:
+; CHECK-NEXT: ld1b.zx %s0, 191(,%s11)
+; CHECK-NEXT: or %s11, 0, %s9
+ %addr = alloca i8, align 1
+ %1 = load i8, i8* %addr, align 1
+ ret i8 %1
+}
+
+; Function Attrs: norecurse nounwind readonly
+define double @loadf64com() {
+; CHECK-LABEL: loadf64com:
+; CHECK: .LBB{{[0-9]+}}_2:
+; CHECK-NEXT: lea %s0, vf64@lo
+; CHECK-NEXT: and %s0, %s0, (32)0
+; CHECK-NEXT: lea.sl %s0, vf64@hi(%s0)
+; CHECK-NEXT: ld %s0, (,%s0)
+; CHECK-NEXT: or %s11, 0, %s9
+ %1 = load double, double* @vf64, align 1
+ ret double %1
+}
+
+; Function Attrs: norecurse nounwind readonly
+define float @loadf32com() {
+; CHECK-LABEL: loadf32com:
+; CHECK: .LBB{{[0-9]+}}_2:
+; CHECK-NEXT: lea %s0, vf32@lo
+; CHECK-NEXT: and %s0, %s0, (32)0
+; CHECK-NEXT: lea.sl %s0, vf32@hi(%s0)
+; CHECK-NEXT: ldu %s0, (,%s0)
+; CHECK-NEXT: or %s11, 0, %s9
+ %1 = load float, float* @vf32, align 1
+ ret float %1
+}
+
+; Function Attrs: norecurse nounwind readonly
+define i64 @loadi64com() {
+; CHECK-LABEL: loadi64com:
+; CHECK: .LBB{{[0-9]+}}_2:
+; CHECK-NEXT: lea %s0, vi64@lo
+; CHECK-NEXT: and %s0, %s0, (32)0
+; CHECK-NEXT: lea.sl %s0, vi64@hi(%s0)
+; CHECK-NEXT: ld %s0, (,%s0)
+; CHECK-NEXT: or %s11, 0, %s9
+ %1 = load i64, i64* @vi64, align 1
+ ret i64 %1
+}
+
+; Function Attrs: norecurse nounwind readonly
+define i32 @loadi32com() {
+; CHECK-LABEL: loadi32com:
+; CHECK: .LBB{{[0-9]+}}_2:
+; CHECK-NEXT: lea %s0, vi32@lo
+; CHECK-NEXT: and %s0, %s0, (32)0
+; CHECK-NEXT: lea.sl %s0, vi32@hi(%s0)
+; CHECK-NEXT: ldl.sx %s0, (,%s0)
+; CHECK-NEXT: or %s11, 0, %s9
+ %1 = load i32, i32* @vi32, align 1
+ ret i32 %1
+}
+
+; Function Attrs: norecurse nounwind readonly
+define i16 @loadi16com() {
+; CHECK-LABEL: loadi16com:
+; CHECK: .LBB{{[0-9]+}}_2:
+; CHECK-NEXT: lea %s0, vi16@lo
+; CHECK-NEXT: and %s0, %s0, (32)0
+; CHECK-NEXT: lea.sl %s0, vi16@hi(%s0)
+; CHECK-NEXT: ld2b.zx %s0, (,%s0)
+; CHECK-NEXT: or %s11, 0, %s9
+ %1 = load i16, i16* @vi16, align 1
+ ret i16 %1
+}
+
+; Function Attrs: norecurse nounwind readonly
+define i8 @loadi8com() {
+; CHECK-LABEL: loadi8com:
+; CHECK: .LBB{{[0-9]+}}_2:
+; CHECK-NEXT: lea %s0, vi8@lo
+; CHECK-NEXT: and %s0, %s0, (32)0
+; CHECK-NEXT: lea.sl %s0, vi8@hi(%s0)
+; CHECK-NEXT: ld1b.zx %s0, (,%s0)
+; CHECK-NEXT: or %s11, 0, %s9
+ %1 = load i8, i8* @vi8, align 1
+ ret i8 %1
+}
+
--- /dev/null
+; RUN: llc < %s -mtriple=ve-unknown-unknown | FileCheck %s
+
+@vi8 = common dso_local local_unnamed_addr global i8 0, align 2
+@vi16 = common dso_local local_unnamed_addr global i16 0, align 2
+@vi32 = common dso_local local_unnamed_addr global i32 0, align 2
+@vi64 = common dso_local local_unnamed_addr global i64 0, align 2
+@vf32 = common dso_local local_unnamed_addr global float 0.000000e+00, align 2
+@vf64 = common dso_local local_unnamed_addr global double 0.000000e+00, align 2
+
+; Function Attrs: norecurse nounwind readonly
+define double @loadf64stk() {
+; CHECK-LABEL: loadf64stk:
+; CHECK: .LBB{{[0-9]+}}_2:
+; CHECK-NEXT: ld %s0, 184(,%s11)
+; CHECK-NEXT: or %s11, 0, %s9
+ %addr = alloca double, align 2
+ %1 = load double, double* %addr, align 2
+ ret double %1
+}
+
+; Function Attrs: norecurse nounwind readonly
+define float @loadf32stk() {
+; CHECK-LABEL: loadf32stk:
+; CHECK: .LBB{{[0-9]+}}_2:
+; CHECK-NEXT: ldu %s0, 188(,%s11)
+; CHECK-NEXT: or %s11, 0, %s9
+ %addr = alloca float, align 2
+ %1 = load float, float* %addr, align 2
+ ret float %1
+}
+
+; Function Attrs: norecurse nounwind readonly
+define i64 @loadi64stk() {
+; CHECK-LABEL: loadi64stk:
+; CHECK: .LBB{{[0-9]+}}_2:
+; CHECK-NEXT: ld %s0, 184(,%s11)
+; CHECK-NEXT: or %s11, 0, %s9
+ %addr = alloca i64, align 2
+ %1 = load i64, i64* %addr, align 2
+ ret i64 %1
+}
+
+; Function Attrs: norecurse nounwind readonly
+define i32 @loadi32stk() {
+; CHECK-LABEL: loadi32stk:
+; CHECK: .LBB{{[0-9]+}}_2:
+; CHECK-NEXT: ldl.sx %s0, 188(,%s11)
+; CHECK-NEXT: or %s11, 0, %s9
+ %addr = alloca i32, align 2
+ %1 = load i32, i32* %addr, align 2
+ ret i32 %1
+}
+
+; Function Attrs: norecurse nounwind readonly
+define i16 @loadi16stk() {
+; CHECK-LABEL: loadi16stk:
+; CHECK: .LBB{{[0-9]+}}_2:
+; CHECK-NEXT: ld2b.zx %s0, 190(,%s11)
+; CHECK-NEXT: or %s11, 0, %s9
+ %addr = alloca i16, align 2
+ %1 = load i16, i16* %addr, align 2
+ ret i16 %1
+}
+
+; Function Attrs: norecurse nounwind readonly
+define i8 @loadi8stk() {
+; CHECK-LABEL: loadi8stk:
+; CHECK: .LBB{{[0-9]+}}_2:
+; CHECK-NEXT: ld1b.zx %s0, 190(,%s11)
+; CHECK-NEXT: or %s11, 0, %s9
+ %addr = alloca i8, align 2
+ %1 = load i8, i8* %addr, align 2
+ ret i8 %1
+}
+
+; Function Attrs: norecurse nounwind readonly
+define double @loadf64com() {
+; CHECK-LABEL: loadf64com:
+; CHECK: .LBB{{[0-9]+}}_2:
+; CHECK-NEXT: lea %s0, vf64@lo
+; CHECK-NEXT: and %s0, %s0, (32)0
+; CHECK-NEXT: lea.sl %s0, vf64@hi(%s0)
+; CHECK-NEXT: ld %s0, (,%s0)
+; CHECK-NEXT: or %s11, 0, %s9
+ %1 = load double, double* @vf64, align 2
+ ret double %1
+}
+
+; Function Attrs: norecurse nounwind readonly
+define float @loadf32com() {
+; CHECK-LABEL: loadf32com:
+; CHECK: .LBB{{[0-9]+}}_2:
+; CHECK-NEXT: lea %s0, vf32@lo
+; CHECK-NEXT: and %s0, %s0, (32)0
+; CHECK-NEXT: lea.sl %s0, vf32@hi(%s0)
+; CHECK-NEXT: ldu %s0, (,%s0)
+; CHECK-NEXT: or %s11, 0, %s9
+ %1 = load float, float* @vf32, align 2
+ ret float %1
+}
+
+; Function Attrs: norecurse nounwind readonly
+define i64 @loadi64com() {
+; CHECK-LABEL: loadi64com:
+; CHECK: .LBB{{[0-9]+}}_2:
+; CHECK-NEXT: lea %s0, vi64@lo
+; CHECK-NEXT: and %s0, %s0, (32)0
+; CHECK-NEXT: lea.sl %s0, vi64@hi(%s0)
+; CHECK-NEXT: ld %s0, (,%s0)
+; CHECK-NEXT: or %s11, 0, %s9
+ %1 = load i64, i64* @vi64, align 2
+ ret i64 %1
+}
+
+; Function Attrs: norecurse nounwind readonly
+define i32 @loadi32com() {
+; CHECK-LABEL: loadi32com:
+; CHECK: .LBB{{[0-9]+}}_2:
+; CHECK-NEXT: lea %s0, vi32@lo
+; CHECK-NEXT: and %s0, %s0, (32)0
+; CHECK-NEXT: lea.sl %s0, vi32@hi(%s0)
+; CHECK-NEXT: ldl.sx %s0, (,%s0)
+; CHECK-NEXT: or %s11, 0, %s9
+ %1 = load i32, i32* @vi32, align 2
+ ret i32 %1
+}
+
+; Function Attrs: norecurse nounwind readonly
+define i16 @loadi16com() {
+; CHECK-LABEL: loadi16com:
+; CHECK: .LBB{{[0-9]+}}_2:
+; CHECK-NEXT: lea %s0, vi16@lo
+; CHECK-NEXT: and %s0, %s0, (32)0
+; CHECK-NEXT: lea.sl %s0, vi16@hi(%s0)
+; CHECK-NEXT: ld2b.zx %s0, (,%s0)
+; CHECK-NEXT: or %s11, 0, %s9
+ %1 = load i16, i16* @vi16, align 2
+ ret i16 %1
+}
+
+; Function Attrs: norecurse nounwind readonly
+define i8 @loadi8com() {
+; CHECK-LABEL: loadi8com:
+; CHECK: .LBB{{[0-9]+}}_2:
+; CHECK-NEXT: lea %s0, vi8@lo
+; CHECK-NEXT: and %s0, %s0, (32)0
+; CHECK-NEXT: lea.sl %s0, vi8@hi(%s0)
+; CHECK-NEXT: ld1b.zx %s0, (,%s0)
+; CHECK-NEXT: or %s11, 0, %s9
+ %1 = load i8, i8* @vi8, align 2
+ ret i8 %1
+}
+
--- /dev/null
+; RUN: llc < %s -mtriple=ve-unknown-unknown | FileCheck %s
+
+@vi8 = common dso_local local_unnamed_addr global i8 0, align 4
+@vi16 = common dso_local local_unnamed_addr global i16 0, align 4
+@vi32 = common dso_local local_unnamed_addr global i32 0, align 4
+@vi64 = common dso_local local_unnamed_addr global i64 0, align 4
+@vf32 = common dso_local local_unnamed_addr global float 0.000000e+00, align 4
+@vf64 = common dso_local local_unnamed_addr global double 0.000000e+00, align 4
+
+; Function Attrs: norecurse nounwind readonly
+define double @loadf64stk() {
+; CHECK-LABEL: loadf64stk:
+; CHECK: .LBB{{[0-9]+}}_2:
+; CHECK-NEXT: ld %s0, 184(,%s11)
+; CHECK-NEXT: or %s11, 0, %s9
+ %addr = alloca double, align 4
+ %1 = load double, double* %addr, align 4
+ ret double %1
+}
+
+; Function Attrs: norecurse nounwind readonly
+define float @loadf32stk() {
+; CHECK-LABEL: loadf32stk:
+; CHECK: .LBB{{[0-9]+}}_2:
+; CHECK-NEXT: ldu %s0, 188(,%s11)
+; CHECK-NEXT: or %s11, 0, %s9
+ %addr = alloca float, align 4
+ %1 = load float, float* %addr, align 4
+ ret float %1
+}
+
+; Function Attrs: norecurse nounwind readonly
+define i64 @loadi64stk() {
+; CHECK-LABEL: loadi64stk:
+; CHECK: .LBB{{[0-9]+}}_2:
+; CHECK-NEXT: ld %s0, 184(,%s11)
+; CHECK-NEXT: or %s11, 0, %s9
+ %addr = alloca i64, align 4
+ %1 = load i64, i64* %addr, align 4
+ ret i64 %1
+}
+
+; Function Attrs: norecurse nounwind readonly
+define i32 @loadi32stk() {
+; CHECK-LABEL: loadi32stk:
+; CHECK: .LBB{{[0-9]+}}_2:
+; CHECK-NEXT: ldl.sx %s0, 188(,%s11)
+; CHECK-NEXT: or %s11, 0, %s9
+ %addr = alloca i32, align 4
+ %1 = load i32, i32* %addr, align 4
+ ret i32 %1
+}
+
+; Function Attrs: norecurse nounwind readonly
+define i16 @loadi16stk() {
+; CHECK-LABEL: loadi16stk:
+; CHECK: .LBB{{[0-9]+}}_2:
+; CHECK-NEXT: ld2b.zx %s0, 188(,%s11)
+; CHECK-NEXT: or %s11, 0, %s9
+ %addr = alloca i16, align 4
+ %1 = load i16, i16* %addr, align 4
+ ret i16 %1
+}
+
+; Function Attrs: norecurse nounwind readonly
+define i8 @loadi8stk() {
+; CHECK-LABEL: loadi8stk:
+; CHECK: .LBB{{[0-9]+}}_2:
+; CHECK-NEXT: ld1b.zx %s0, 188(,%s11)
+; CHECK-NEXT: or %s11, 0, %s9
+ %addr = alloca i8, align 4
+ %1 = load i8, i8* %addr, align 4
+ ret i8 %1
+}
+
+; Function Attrs: norecurse nounwind readonly
+define double @loadf64com() {
+; CHECK-LABEL: loadf64com:
+; CHECK: .LBB{{[0-9]+}}_2:
+; CHECK-NEXT: lea %s0, vf64@lo
+; CHECK-NEXT: and %s0, %s0, (32)0
+; CHECK-NEXT: lea.sl %s0, vf64@hi(%s0)
+; CHECK-NEXT: ld %s0, (,%s0)
+; CHECK-NEXT: or %s11, 0, %s9
+ %1 = load double, double* @vf64, align 4
+ ret double %1
+}
+
+; Function Attrs: norecurse nounwind readonly
+define float @loadf32com() {
+; CHECK-LABEL: loadf32com:
+; CHECK: .LBB{{[0-9]+}}_2:
+; CHECK-NEXT: lea %s0, vf32@lo
+; CHECK-NEXT: and %s0, %s0, (32)0
+; CHECK-NEXT: lea.sl %s0, vf32@hi(%s0)
+; CHECK-NEXT: ldu %s0, (,%s0)
+; CHECK-NEXT: or %s11, 0, %s9
+ %1 = load float, float* @vf32, align 4
+ ret float %1
+}
+
+; Function Attrs: norecurse nounwind readonly
+define i64 @loadi64com() {
+; CHECK-LABEL: loadi64com:
+; CHECK: .LBB{{[0-9]+}}_2:
+; CHECK-NEXT: lea %s0, vi64@lo
+; CHECK-NEXT: and %s0, %s0, (32)0
+; CHECK-NEXT: lea.sl %s0, vi64@hi(%s0)
+; CHECK-NEXT: ld %s0, (,%s0)
+; CHECK-NEXT: or %s11, 0, %s9
+ %1 = load i64, i64* @vi64, align 4
+ ret i64 %1
+}
+
+; Function Attrs: norecurse nounwind readonly
+define i32 @loadi32com() {
+; CHECK-LABEL: loadi32com:
+; CHECK: .LBB{{[0-9]+}}_2:
+; CHECK-NEXT: lea %s0, vi32@lo
+; CHECK-NEXT: and %s0, %s0, (32)0
+; CHECK-NEXT: lea.sl %s0, vi32@hi(%s0)
+; CHECK-NEXT: ldl.sx %s0, (,%s0)
+; CHECK-NEXT: or %s11, 0, %s9
+ %1 = load i32, i32* @vi32, align 4
+ ret i32 %1
+}
+
+; Function Attrs: norecurse nounwind readonly
+define i16 @loadi16com() {
+; CHECK-LABEL: loadi16com:
+; CHECK: .LBB{{[0-9]+}}_2:
+; CHECK-NEXT: lea %s0, vi16@lo
+; CHECK-NEXT: and %s0, %s0, (32)0
+; CHECK-NEXT: lea.sl %s0, vi16@hi(%s0)
+; CHECK-NEXT: ld2b.zx %s0, (,%s0)
+; CHECK-NEXT: or %s11, 0, %s9
+ %1 = load i16, i16* @vi16, align 4
+ ret i16 %1
+}
+
+; Function Attrs: norecurse nounwind readonly
+define i8 @loadi8com() {
+; CHECK-LABEL: loadi8com:
+; CHECK: .LBB{{[0-9]+}}_2:
+; CHECK-NEXT: lea %s0, vi8@lo
+; CHECK-NEXT: and %s0, %s0, (32)0
+; CHECK-NEXT: lea.sl %s0, vi8@hi(%s0)
+; CHECK-NEXT: ld1b.zx %s0, (,%s0)
+; CHECK-NEXT: or %s11, 0, %s9
+ %1 = load i8, i8* @vi8, align 4
+ ret i8 %1
+}
+
--- /dev/null
+; RUN: llc < %s -mtriple=ve-unknown-unknown | FileCheck %s
+
+@vi8 = common dso_local local_unnamed_addr global i8 0, align 8
+@vi16 = common dso_local local_unnamed_addr global i16 0, align 8
+@vi32 = common dso_local local_unnamed_addr global i32 0, align 8
+@vi64 = common dso_local local_unnamed_addr global i64 0, align 8
+@vf32 = common dso_local local_unnamed_addr global float 0.000000e+00, align 8
+@vf64 = common dso_local local_unnamed_addr global double 0.000000e+00, align 8
+
+; Function Attrs: norecurse nounwind readonly
+define double @loadf64stk() {
+; CHECK-LABEL: loadf64stk:
+; CHECK: .LBB{{[0-9]+}}_2:
+; CHECK-NEXT: ld %s0, 184(,%s11)
+; CHECK-NEXT: or %s11, 0, %s9
+ %addr = alloca double, align 8
+ %1 = load double, double* %addr, align 8
+ ret double %1
+}
+
+; Function Attrs: norecurse nounwind readonly
+define float @loadf32stk() {
+; CHECK-LABEL: loadf32stk:
+; CHECK: .LBB{{[0-9]+}}_2:
+; CHECK-NEXT: ldu %s0, 184(,%s11)
+; CHECK-NEXT: or %s11, 0, %s9
+ %addr = alloca float, align 8
+ %1 = load float, float* %addr, align 8
+ ret float %1
+}
+
+; Function Attrs: norecurse nounwind readonly
+define i64 @loadi64stk() {
+; CHECK-LABEL: loadi64stk:
+; CHECK: .LBB{{[0-9]+}}_2:
+; CHECK-NEXT: ld %s0, 184(,%s11)
+; CHECK-NEXT: or %s11, 0, %s9
+ %addr = alloca i64, align 8
+ %1 = load i64, i64* %addr, align 8
+ ret i64 %1
+}
+
+; Function Attrs: norecurse nounwind readonly
+define i32 @loadi32stk() {
+; CHECK-LABEL: loadi32stk:
+; CHECK: .LBB{{[0-9]+}}_2:
+; CHECK-NEXT: ldl.sx %s0, 184(,%s11)
+; CHECK-NEXT: or %s11, 0, %s9
+ %addr = alloca i32, align 8
+ %1 = load i32, i32* %addr, align 8
+ ret i32 %1
+}
+
+; Function Attrs: norecurse nounwind readonly
+define i16 @loadi16stk() {
+; CHECK-LABEL: loadi16stk:
+; CHECK: .LBB{{[0-9]+}}_2:
+; CHECK-NEXT: ld2b.zx %s0, 184(,%s11)
+; CHECK-NEXT: or %s11, 0, %s9
+ %addr = alloca i16, align 8
+ %1 = load i16, i16* %addr, align 8
+ ret i16 %1
+}
+
+; Function Attrs: norecurse nounwind readonly
+define i8 @loadi8stk() {
+; CHECK-LABEL: loadi8stk:
+; CHECK: .LBB{{[0-9]+}}_2:
+; CHECK-NEXT: ld1b.zx %s0, 184(,%s11)
+; CHECK-NEXT: or %s11, 0, %s9
+ %addr = alloca i8, align 8
+ %1 = load i8, i8* %addr, align 8
+ ret i8 %1
+}
+
+; Function Attrs: norecurse nounwind readonly
+define double @loadf64com() {
+; CHECK-LABEL: loadf64com:
+; CHECK: .LBB{{[0-9]+}}_2:
+; CHECK-NEXT: lea %s0, vf64@lo
+; CHECK-NEXT: and %s0, %s0, (32)0
+; CHECK-NEXT: lea.sl %s0, vf64@hi(%s0)
+; CHECK-NEXT: ld %s0, (,%s0)
+; CHECK-NEXT: or %s11, 0, %s9
+ %1 = load double, double* @vf64, align 8
+ ret double %1
+}
+
+; Function Attrs: norecurse nounwind readonly
+define float @loadf32com() {
+; CHECK-LABEL: loadf32com:
+; CHECK: .LBB{{[0-9]+}}_2:
+; CHECK-NEXT: lea %s0, vf32@lo
+; CHECK-NEXT: and %s0, %s0, (32)0
+; CHECK-NEXT: lea.sl %s0, vf32@hi(%s0)
+; CHECK-NEXT: ldu %s0, (,%s0)
+; CHECK-NEXT: or %s11, 0, %s9
+ %1 = load float, float* @vf32, align 8
+ ret float %1
+}
+
+; Function Attrs: norecurse nounwind readonly
+define i64 @loadi64com() {
+; CHECK-LABEL: loadi64com:
+; CHECK: .LBB{{[0-9]+}}_2:
+; CHECK-NEXT: lea %s0, vi64@lo
+; CHECK-NEXT: and %s0, %s0, (32)0
+; CHECK-NEXT: lea.sl %s0, vi64@hi(%s0)
+; CHECK-NEXT: ld %s0, (,%s0)
+; CHECK-NEXT: or %s11, 0, %s9
+ %1 = load i64, i64* @vi64, align 8
+ ret i64 %1
+}
+
+; Function Attrs: norecurse nounwind readonly
+define i32 @loadi32com() {
+; CHECK-LABEL: loadi32com:
+; CHECK: .LBB{{[0-9]+}}_2:
+; CHECK-NEXT: lea %s0, vi32@lo
+; CHECK-NEXT: and %s0, %s0, (32)0
+; CHECK-NEXT: lea.sl %s0, vi32@hi(%s0)
+; CHECK-NEXT: ldl.sx %s0, (,%s0)
+; CHECK-NEXT: or %s11, 0, %s9
+ %1 = load i32, i32* @vi32, align 8
+ ret i32 %1
+}
+
+; Function Attrs: norecurse nounwind readonly
+define i16 @loadi16com() {
+; CHECK-LABEL: loadi16com:
+; CHECK: .LBB{{[0-9]+}}_2:
+; CHECK-NEXT: lea %s0, vi16@lo
+; CHECK-NEXT: and %s0, %s0, (32)0
+; CHECK-NEXT: lea.sl %s0, vi16@hi(%s0)
+; CHECK-NEXT: ld2b.zx %s0, (,%s0)
+; CHECK-NEXT: or %s11, 0, %s9
+ %1 = load i16, i16* @vi16, align 8
+ ret i16 %1
+}
+
+; Function Attrs: norecurse nounwind readonly
+define i8 @loadi8com() {
+; CHECK-LABEL: loadi8com:
+; CHECK: .LBB{{[0-9]+}}_2:
+; CHECK-NEXT: lea %s0, vi8@lo
+; CHECK-NEXT: and %s0, %s0, (32)0
+; CHECK-NEXT: lea.sl %s0, vi8@hi(%s0)
+; CHECK-NEXT: ld1b.zx %s0, (,%s0)
+; CHECK-NEXT: or %s11, 0, %s9
+ %1 = load i8, i8* @vi8, align 8
+ ret i8 %1
+}
+
--- /dev/null
+; RUN: llc < %s -mtriple=ve-unknown-unknown | FileCheck %s
+
+@vi8 = common dso_local local_unnamed_addr global i8 0, align 1
+@vi16 = common dso_local local_unnamed_addr global i16 0, align 1
+@vi32 = common dso_local local_unnamed_addr global i32 0, align 1
+@vi64 = common dso_local local_unnamed_addr global i64 0, align 1
+@vf32 = common dso_local local_unnamed_addr global float 0.000000e+00, align 1
+@vf64 = common dso_local local_unnamed_addr global double 0.000000e+00, align 1
+
+; Function Attrs: norecurse nounwind readonly
+define void @storef64stk(double %0) {
+; CHECK-LABEL: storef64stk:
+; CHECK: .LBB{{[0-9]+}}_2:
+; CHECK-NEXT: st %s0, 184(,%s11)
+; CHECK-NEXT: or %s11, 0, %s9
+ %addr = alloca double, align 1
+ store double %0, double* %addr, align 1
+ ret void
+}
+
+; Function Attrs: norecurse nounwind readonly
+define void @storef32stk(float %0) {
+; CHECK-LABEL: storef32stk:
+; CHECK: .LBB{{[0-9]+}}_2:
+; CHECK-NEXT: stu %s0, 188(,%s11)
+; CHECK-NEXT: or %s11, 0, %s9
+ %addr = alloca float, align 1
+ store float %0, float* %addr, align 1
+ ret void
+}
+
+; Function Attrs: norecurse nounwind readonly
+define void @storei64stk(i64 %0) {
+; CHECK-LABEL: storei64stk:
+; CHECK: .LBB{{[0-9]+}}_2:
+; CHECK-NEXT: st %s0, 184(,%s11)
+; CHECK-NEXT: or %s11, 0, %s9
+ %addr = alloca i64, align 1
+ store i64 %0, i64* %addr, align 1
+ ret void
+}
+
+; Function Attrs: norecurse nounwind readonly
+define void @storei32stk(i32 %0) {
+; CHECK-LABEL: storei32stk:
+; CHECK: .LBB{{[0-9]+}}_2:
+; CHECK-NEXT: stl %s0, 188(,%s11)
+; CHECK-NEXT: or %s11, 0, %s9
+ %addr = alloca i32, align 1
+ store i32 %0, i32* %addr, align 1
+ ret void
+}
+
+; Function Attrs: norecurse nounwind readonly
+define void @storei16stk(i16 %0) {
+; CHECK-LABEL: storei16stk:
+; CHECK: .LBB{{[0-9]+}}_2:
+; CHECK-NEXT: st2b %s0, 190(,%s11)
+; CHECK-NEXT: or %s11, 0, %s9
+ %addr = alloca i16, align 1
+ store i16 %0, i16* %addr, align 1
+ ret void
+}
+
+; Function Attrs: norecurse nounwind readonly
+define void @storei8stk(i8 %0) {
+; CHECK-LABEL: storei8stk:
+; CHECK: .LBB{{[0-9]+}}_2:
+; CHECK-NEXT: st1b %s0, 191(,%s11)
+; CHECK-NEXT: or %s11, 0, %s9
+ %addr = alloca i8, align 1
+ store i8 %0, i8* %addr, align 1
+ ret void
+}
+
+; Function Attrs: norecurse nounwind readonly
+define void @storef64com(double %0) {
+; CHECK-LABEL: storef64com:
+; CHECK: .LBB{{[0-9]+}}_2:
+; CHECK-NEXT: lea %s1, vf64@lo
+; CHECK-NEXT: and %s1, %s1, (32)0
+; CHECK-NEXT: lea.sl %s1, vf64@hi(%s1)
+; CHECK-NEXT: st %s0, (,%s1)
+; CHECK-NEXT: or %s11, 0, %s9
+ store double %0, double* @vf64, align 1
+ ret void
+}
+
+; Function Attrs: norecurse nounwind readonly
+define void @storef32com(float %0) {
+; CHECK-LABEL: storef32com:
+; CHECK: .LBB{{[0-9]+}}_2:
+; CHECK-NEXT: lea %s1, vf32@lo
+; CHECK-NEXT: and %s1, %s1, (32)0
+; CHECK-NEXT: lea.sl %s1, vf32@hi(%s1)
+; CHECK-NEXT: stu %s0, (,%s1)
+; CHECK-NEXT: or %s11, 0, %s9
+ store float %0, float* @vf32, align 1
+ ret void
+}
+
+; Function Attrs: norecurse nounwind readonly
+define void @storei64com(i64 %0) {
+; CHECK-LABEL: storei64com:
+; CHECK: .LBB{{[0-9]+}}_2:
+; CHECK-NEXT: lea %s1, vi64@lo
+; CHECK-NEXT: and %s1, %s1, (32)0
+; CHECK-NEXT: lea.sl %s1, vi64@hi(%s1)
+; CHECK-NEXT: st %s0, (,%s1)
+; CHECK-NEXT: or %s11, 0, %s9
+ store i64 %0, i64* @vi64, align 1
+ ret void
+}
+
+; Function Attrs: norecurse nounwind readonly
+define void @storei32com(i32 %0) {
+; CHECK-LABEL: storei32com:
+; CHECK: .LBB{{[0-9]+}}_2:
+; CHECK-NEXT: lea %s1, vi32@lo
+; CHECK-NEXT: and %s1, %s1, (32)0
+; CHECK-NEXT: lea.sl %s1, vi32@hi(%s1)
+; CHECK-NEXT: stl %s0, (,%s1)
+; CHECK-NEXT: or %s11, 0, %s9
+ store i32 %0, i32* @vi32, align 1
+ ret void
+}
+
+; Function Attrs: norecurse nounwind readonly
+define void @storei16com(i16 %0) {
+; CHECK-LABEL: storei16com:
+; CHECK: .LBB{{[0-9]+}}_2:
+; CHECK-NEXT: lea %s1, vi16@lo
+; CHECK-NEXT: and %s1, %s1, (32)0
+; CHECK-NEXT: lea.sl %s1, vi16@hi(%s1)
+; CHECK-NEXT: st2b %s0, (,%s1)
+; CHECK-NEXT: or %s11, 0, %s9
+ store i16 %0, i16* @vi16, align 1
+ ret void
+}
+
+; Function Attrs: norecurse nounwind readonly
+define void @storei8com(i8 %0) {
+; CHECK-LABEL: storei8com:
+; CHECK: .LBB{{[0-9]+}}_2:
+; CHECK-NEXT: lea %s1, vi8@lo
+; CHECK-NEXT: and %s1, %s1, (32)0
+; CHECK-NEXT: lea.sl %s1, vi8@hi(%s1)
+; CHECK-NEXT: st1b %s0, (,%s1)
+; CHECK-NEXT: or %s11, 0, %s9
+ store i8 %0, i8* @vi8, align 1
+ ret void
+}
+
--- /dev/null
+; RUN: llc < %s -mtriple=ve-unknown-unknown | FileCheck %s
+
+@vi8 = common dso_local local_unnamed_addr global i8 0, align 2
+@vi16 = common dso_local local_unnamed_addr global i16 0, align 2
+@vi32 = common dso_local local_unnamed_addr global i32 0, align 2
+@vi64 = common dso_local local_unnamed_addr global i64 0, align 2
+@vf32 = common dso_local local_unnamed_addr global float 0.000000e+00, align 2
+@vf64 = common dso_local local_unnamed_addr global double 0.000000e+00, align 2
+
+; Function Attrs: norecurse nounwind readonly
+define void @storef64stk(double %0) {
+; CHECK-LABEL: storef64stk:
+; CHECK: .LBB{{[0-9]+}}_2:
+; CHECK-NEXT: st %s0, 184(,%s11)
+; CHECK-NEXT: or %s11, 0, %s9
+ %addr = alloca double, align 2
+ store double %0, double* %addr, align 2
+ ret void
+}
+
+; Function Attrs: norecurse nounwind readonly
+define void @storef32stk(float %0) {
+; CHECK-LABEL: storef32stk:
+; CHECK: .LBB{{[0-9]+}}_2:
+; CHECK-NEXT: stu %s0, 188(,%s11)
+; CHECK-NEXT: or %s11, 0, %s9
+ %addr = alloca float, align 2
+ store float %0, float* %addr, align 2
+ ret void
+}
+
+; Function Attrs: norecurse nounwind readonly
+define void @storei64stk(i64 %0) {
+; CHECK-LABEL: storei64stk:
+; CHECK: .LBB{{[0-9]+}}_2:
+; CHECK-NEXT: st %s0, 184(,%s11)
+; CHECK-NEXT: or %s11, 0, %s9
+ %addr = alloca i64, align 2
+ store i64 %0, i64* %addr, align 2
+ ret void
+}
+
+; Function Attrs: norecurse nounwind readonly
+define void @storei32stk(i32 %0) {
+; CHECK-LABEL: storei32stk:
+; CHECK: .LBB{{[0-9]+}}_2:
+; CHECK-NEXT: stl %s0, 188(,%s11)
+; CHECK-NEXT: or %s11, 0, %s9
+ %addr = alloca i32, align 2
+ store i32 %0, i32* %addr, align 2
+ ret void
+}
+
+; Function Attrs: norecurse nounwind readonly
+define void @storei16stk(i16 %0) {
+; CHECK-LABEL: storei16stk:
+; CHECK: .LBB{{[0-9]+}}_2:
+; CHECK-NEXT: st2b %s0, 190(,%s11)
+; CHECK-NEXT: or %s11, 0, %s9
+ %addr = alloca i16, align 2
+ store i16 %0, i16* %addr, align 2
+ ret void
+}
+
+; Function Attrs: norecurse nounwind readonly
+define void @storei8stk(i8 %0) {
+; CHECK-LABEL: storei8stk:
+; CHECK: .LBB{{[0-9]+}}_2:
+; CHECK-NEXT: st1b %s0, 190(,%s11)
+; CHECK-NEXT: or %s11, 0, %s9
+ %addr = alloca i8, align 2
+ store i8 %0, i8* %addr, align 2
+ ret void
+}
+
+; Function Attrs: norecurse nounwind readonly
+define void @storef64com(double %0) {
+; CHECK-LABEL: storef64com:
+; CHECK: .LBB{{[0-9]+}}_2:
+; CHECK-NEXT: lea %s1, vf64@lo
+; CHECK-NEXT: and %s1, %s1, (32)0
+; CHECK-NEXT: lea.sl %s1, vf64@hi(%s1)
+; CHECK-NEXT: st %s0, (,%s1)
+; CHECK-NEXT: or %s11, 0, %s9
+ store double %0, double* @vf64, align 2
+ ret void
+}
+
+; Function Attrs: norecurse nounwind readonly
+define void @storef32com(float %0) {
+; CHECK-LABEL: storef32com:
+; CHECK: .LBB{{[0-9]+}}_2:
+; CHECK-NEXT: lea %s1, vf32@lo
+; CHECK-NEXT: and %s1, %s1, (32)0
+; CHECK-NEXT: lea.sl %s1, vf32@hi(%s1)
+; CHECK-NEXT: stu %s0, (,%s1)
+; CHECK-NEXT: or %s11, 0, %s9
+ store float %0, float* @vf32, align 2
+ ret void
+}
+
+; Function Attrs: norecurse nounwind readonly
+define void @storei64com(i64 %0) {
+; CHECK-LABEL: storei64com:
+; CHECK: .LBB{{[0-9]+}}_2:
+; CHECK-NEXT: lea %s1, vi64@lo
+; CHECK-NEXT: and %s1, %s1, (32)0
+; CHECK-NEXT: lea.sl %s1, vi64@hi(%s1)
+; CHECK-NEXT: st %s0, (,%s1)
+; CHECK-NEXT: or %s11, 0, %s9
+ store i64 %0, i64* @vi64, align 2
+ ret void
+}
+
+; Function Attrs: norecurse nounwind readonly
+define void @storei32com(i32 %0) {
+; CHECK-LABEL: storei32com:
+; CHECK: .LBB{{[0-9]+}}_2:
+; CHECK-NEXT: lea %s1, vi32@lo
+; CHECK-NEXT: and %s1, %s1, (32)0
+; CHECK-NEXT: lea.sl %s1, vi32@hi(%s1)
+; CHECK-NEXT: stl %s0, (,%s1)
+; CHECK-NEXT: or %s11, 0, %s9
+ store i32 %0, i32* @vi32, align 2
+ ret void
+}
+
+; Function Attrs: norecurse nounwind readonly
+define void @storei16com(i16 %0) {
+; CHECK-LABEL: storei16com:
+; CHECK: .LBB{{[0-9]+}}_2:
+; CHECK-NEXT: lea %s1, vi16@lo
+; CHECK-NEXT: and %s1, %s1, (32)0
+; CHECK-NEXT: lea.sl %s1, vi16@hi(%s1)
+; CHECK-NEXT: st2b %s0, (,%s1)
+; CHECK-NEXT: or %s11, 0, %s9
+ store i16 %0, i16* @vi16, align 2
+ ret void
+}
+
+; Function Attrs: norecurse nounwind readonly
+define void @storei8com(i8 %0) {
+; CHECK-LABEL: storei8com:
+; CHECK: .LBB{{[0-9]+}}_2:
+; CHECK-NEXT: lea %s1, vi8@lo
+; CHECK-NEXT: and %s1, %s1, (32)0
+; CHECK-NEXT: lea.sl %s1, vi8@hi(%s1)
+; CHECK-NEXT: st1b %s0, (,%s1)
+; CHECK-NEXT: or %s11, 0, %s9
+ store i8 %0, i8* @vi8, align 2
+ ret void
+}
+
--- /dev/null
+; RUN: llc < %s -mtriple=ve-unknown-unknown | FileCheck %s
+
+@vi8 = common dso_local local_unnamed_addr global i8 0, align 4
+@vi16 = common dso_local local_unnamed_addr global i16 0, align 4
+@vi32 = common dso_local local_unnamed_addr global i32 0, align 4
+@vi64 = common dso_local local_unnamed_addr global i64 0, align 4
+@vf32 = common dso_local local_unnamed_addr global float 0.000000e+00, align 4
+@vf64 = common dso_local local_unnamed_addr global double 0.000000e+00, align 4
+
+; Function Attrs: norecurse nounwind readonly
+define void @storef64stk(double %0) {
+; CHECK-LABEL: storef64stk:
+; CHECK: .LBB{{[0-9]+}}_2:
+; CHECK-NEXT: st %s0, 184(,%s11)
+; CHECK-NEXT: or %s11, 0, %s9
+ %addr = alloca double, align 4
+ store double %0, double* %addr, align 4
+ ret void
+}
+
+; Function Attrs: norecurse nounwind readonly
+define void @storef32stk(float %0) {
+; CHECK-LABEL: storef32stk:
+; CHECK: .LBB{{[0-9]+}}_2:
+; CHECK-NEXT: stu %s0, 188(,%s11)
+; CHECK-NEXT: or %s11, 0, %s9
+ %addr = alloca float, align 4
+ store float %0, float* %addr, align 4
+ ret void
+}
+
+; Function Attrs: norecurse nounwind readonly
+define void @storei64stk(i64 %0) {
+; CHECK-LABEL: storei64stk:
+; CHECK: .LBB{{[0-9]+}}_2:
+; CHECK-NEXT: st %s0, 184(,%s11)
+; CHECK-NEXT: or %s11, 0, %s9
+ %addr = alloca i64, align 4
+ store i64 %0, i64* %addr, align 4
+ ret void
+}
+
+; Function Attrs: norecurse nounwind readonly
+define void @storei32stk(i32 %0) {
+; CHECK-LABEL: storei32stk:
+; CHECK: .LBB{{[0-9]+}}_2:
+; CHECK-NEXT: stl %s0, 188(,%s11)
+; CHECK-NEXT: or %s11, 0, %s9
+ %addr = alloca i32, align 4
+ store i32 %0, i32* %addr, align 4
+ ret void
+}
+
+; Function Attrs: norecurse nounwind readonly
+define void @storei16stk(i16 %0) {
+; CHECK-LABEL: storei16stk:
+; CHECK: .LBB{{[0-9]+}}_2:
+; CHECK-NEXT: st2b %s0, 188(,%s11)
+; CHECK-NEXT: or %s11, 0, %s9
+ %addr = alloca i16, align 4
+ store i16 %0, i16* %addr, align 4
+ ret void
+}
+
+; Function Attrs: norecurse nounwind readonly
+define void @storei8stk(i8 %0) {
+; CHECK-LABEL: storei8stk:
+; CHECK: .LBB{{[0-9]+}}_2:
+; CHECK-NEXT: st1b %s0, 188(,%s11)
+; CHECK-NEXT: or %s11, 0, %s9
+ %addr = alloca i8, align 4
+ store i8 %0, i8* %addr, align 4
+ ret void
+}
+
+; Function Attrs: norecurse nounwind readonly
+define void @storef64com(double %0) {
+; CHECK-LABEL: storef64com:
+; CHECK: .LBB{{[0-9]+}}_2:
+; CHECK-NEXT: lea %s1, vf64@lo
+; CHECK-NEXT: and %s1, %s1, (32)0
+; CHECK-NEXT: lea.sl %s1, vf64@hi(%s1)
+; CHECK-NEXT: st %s0, (,%s1)
+; CHECK-NEXT: or %s11, 0, %s9
+ store double %0, double* @vf64, align 4
+ ret void
+}
+
+; Function Attrs: norecurse nounwind readonly
+define void @storef32com(float %0) {
+; CHECK-LABEL: storef32com:
+; CHECK: .LBB{{[0-9]+}}_2:
+; CHECK-NEXT: lea %s1, vf32@lo
+; CHECK-NEXT: and %s1, %s1, (32)0
+; CHECK-NEXT: lea.sl %s1, vf32@hi(%s1)
+; CHECK-NEXT: stu %s0, (,%s1)
+; CHECK-NEXT: or %s11, 0, %s9
+ store float %0, float* @vf32, align 4
+ ret void
+}
+
+; Function Attrs: norecurse nounwind readonly
+define void @storei64com(i64 %0) {
+; CHECK-LABEL: storei64com:
+; CHECK: .LBB{{[0-9]+}}_2:
+; CHECK-NEXT: lea %s1, vi64@lo
+; CHECK-NEXT: and %s1, %s1, (32)0
+; CHECK-NEXT: lea.sl %s1, vi64@hi(%s1)
+; CHECK-NEXT: st %s0, (,%s1)
+; CHECK-NEXT: or %s11, 0, %s9
+ store i64 %0, i64* @vi64, align 4
+ ret void
+}
+
+; Function Attrs: norecurse nounwind readonly
+define void @storei32com(i32 %0) {
+; CHECK-LABEL: storei32com:
+; CHECK: .LBB{{[0-9]+}}_2:
+; CHECK-NEXT: lea %s1, vi32@lo
+; CHECK-NEXT: and %s1, %s1, (32)0
+; CHECK-NEXT: lea.sl %s1, vi32@hi(%s1)
+; CHECK-NEXT: stl %s0, (,%s1)
+; CHECK-NEXT: or %s11, 0, %s9
+ store i32 %0, i32* @vi32, align 4
+ ret void
+}
+
+; Function Attrs: norecurse nounwind readonly
+define void @storei16com(i16 %0) {
+; CHECK-LABEL: storei16com:
+; CHECK: .LBB{{[0-9]+}}_2:
+; CHECK-NEXT: lea %s1, vi16@lo
+; CHECK-NEXT: and %s1, %s1, (32)0
+; CHECK-NEXT: lea.sl %s1, vi16@hi(%s1)
+; CHECK-NEXT: st2b %s0, (,%s1)
+; CHECK-NEXT: or %s11, 0, %s9
+ store i16 %0, i16* @vi16, align 4
+ ret void
+}
+
+; Function Attrs: norecurse nounwind
+; i8 store to a global: selects the 1-byte store `st1b` (alignment is
+; irrelevant for single-byte accesses).
+define void @storei8com(i8 %0) {
+; CHECK-LABEL: storei8com:
+; CHECK: .LBB{{[0-9]+}}_2:
+; CHECK-NEXT: lea %s1, vi8@lo
+; CHECK-NEXT: and %s1, %s1, (32)0
+; CHECK-NEXT: lea.sl %s1, vi8@hi(%s1)
+; CHECK-NEXT: st1b %s0, (,%s1)
+; CHECK-NEXT: or %s11, 0, %s9
+ store i8 %0, i8* @vi8, align 4
+ ret void
+}
+
--- /dev/null
+; RUN: llc < %s -mtriple=ve-unknown-unknown | FileCheck %s
+
+@vi8 = common dso_local local_unnamed_addr global i8 0, align 8
+@vi16 = common dso_local local_unnamed_addr global i16 0, align 8
+@vi32 = common dso_local local_unnamed_addr global i32 0, align 8
+@vi64 = common dso_local local_unnamed_addr global i64 0, align 8
+@vf32 = common dso_local local_unnamed_addr global float 0.000000e+00, align 8
+@vf64 = common dso_local local_unnamed_addr global double 0.000000e+00, align 8
+
+; Function Attrs: norecurse nounwind
+; Fully-aligned f64 store to a stack slot: a single `st` at the fixed frame
+; offset, no address materialization needed.
+define void @storef64stk(double %0) {
+; CHECK-LABEL: storef64stk:
+; CHECK: .LBB{{[0-9]+}}_2:
+; CHECK-NEXT: st %s0, 184(,%s11)
+; CHECK-NEXT: or %s11, 0, %s9
+ %addr = alloca double, align 8
+ store double %0, double* %addr, align 8
+ ret void
+}
+
+; Function Attrs: norecurse nounwind
+; Fully-aligned f32 store to a stack slot: a single 4-byte float store `stu`
+; at the fixed frame offset.
+define void @storef32stk(float %0) {
+; CHECK-LABEL: storef32stk:
+; CHECK: .LBB{{[0-9]+}}_2:
+; CHECK-NEXT: stu %s0, 184(,%s11)
+; CHECK-NEXT: or %s11, 0, %s9
+ %addr = alloca float, align 8
+ store float %0, float* %addr, align 8
+ ret void
+}
+
+; Function Attrs: norecurse nounwind
+; Fully-aligned i64 store to a stack slot: a single 8-byte `st` at the fixed
+; frame offset.
+define void @storei64stk(i64 %0) {
+; CHECK-LABEL: storei64stk:
+; CHECK: .LBB{{[0-9]+}}_2:
+; CHECK-NEXT: st %s0, 184(,%s11)
+; CHECK-NEXT: or %s11, 0, %s9
+ %addr = alloca i64, align 8
+ store i64 %0, i64* %addr, align 8
+ ret void
+}
+
+; Function Attrs: norecurse nounwind
+; Fully-aligned i32 store to a stack slot: a single 4-byte `stl` at the
+; fixed frame offset.
+define void @storei32stk(i32 %0) {
+; CHECK-LABEL: storei32stk:
+; CHECK: .LBB{{[0-9]+}}_2:
+; CHECK-NEXT: stl %s0, 184(,%s11)
+; CHECK-NEXT: or %s11, 0, %s9
+ %addr = alloca i32, align 8
+ store i32 %0, i32* %addr, align 8
+ ret void
+}
+
+; Function Attrs: norecurse nounwind
+; Fully-aligned i16 store to a stack slot: a single 2-byte `st2b` at the
+; fixed frame offset.
+define void @storei16stk(i16 %0) {
+; CHECK-LABEL: storei16stk:
+; CHECK: .LBB{{[0-9]+}}_2:
+; CHECK-NEXT: st2b %s0, 184(,%s11)
+; CHECK-NEXT: or %s11, 0, %s9
+ %addr = alloca i16, align 8
+ store i16 %0, i16* %addr, align 8
+ ret void
+}
+
+; Function Attrs: norecurse nounwind
+; Fully-aligned i8 store to a stack slot: a single 1-byte `st1b` at the
+; fixed frame offset.
+define void @storei8stk(i8 %0) {
+; CHECK-LABEL: storei8stk:
+; CHECK: .LBB{{[0-9]+}}_2:
+; CHECK-NEXT: st1b %s0, 184(,%s11)
+; CHECK-NEXT: or %s11, 0, %s9
+ %addr = alloca i8, align 8
+ store i8 %0, i8* %addr, align 8
+ ret void
+}
+
+; Function Attrs: norecurse nounwind
+; Naturally-aligned (align 8) f64 store to a global: address materialized
+; via lea/and/lea.sl, then a single 8-byte `st`.
+define void @storef64com(double %0) {
+; CHECK-LABEL: storef64com:
+; CHECK: .LBB{{[0-9]+}}_2:
+; CHECK-NEXT: lea %s1, vf64@lo
+; CHECK-NEXT: and %s1, %s1, (32)0
+; CHECK-NEXT: lea.sl %s1, vf64@hi(%s1)
+; CHECK-NEXT: st %s0, (,%s1)
+; CHECK-NEXT: or %s11, 0, %s9
+ store double %0, double* @vf64, align 8
+ ret void
+}
+
+; Function Attrs: norecurse nounwind
+; Over-aligned (align 8) f32 store to a global: still the 4-byte float
+; store `stu`.
+define void @storef32com(float %0) {
+; CHECK-LABEL: storef32com:
+; CHECK: .LBB{{[0-9]+}}_2:
+; CHECK-NEXT: lea %s1, vf32@lo
+; CHECK-NEXT: and %s1, %s1, (32)0
+; CHECK-NEXT: lea.sl %s1, vf32@hi(%s1)
+; CHECK-NEXT: stu %s0, (,%s1)
+; CHECK-NEXT: or %s11, 0, %s9
+ store float %0, float* @vf32, align 8
+ ret void
+}
+
+; Function Attrs: norecurse nounwind
+; Naturally-aligned (align 8) i64 store to a global: a single 8-byte `st`.
+define void @storei64com(i64 %0) {
+; CHECK-LABEL: storei64com:
+; CHECK: .LBB{{[0-9]+}}_2:
+; CHECK-NEXT: lea %s1, vi64@lo
+; CHECK-NEXT: and %s1, %s1, (32)0
+; CHECK-NEXT: lea.sl %s1, vi64@hi(%s1)
+; CHECK-NEXT: st %s0, (,%s1)
+; CHECK-NEXT: or %s11, 0, %s9
+ store i64 %0, i64* @vi64, align 8
+ ret void
+}
+
+; Function Attrs: norecurse nounwind
+; Over-aligned (align 8) i32 store to a global: the 4-byte store `stl`.
+define void @storei32com(i32 %0) {
+; CHECK-LABEL: storei32com:
+; CHECK: .LBB{{[0-9]+}}_2:
+; CHECK-NEXT: lea %s1, vi32@lo
+; CHECK-NEXT: and %s1, %s1, (32)0
+; CHECK-NEXT: lea.sl %s1, vi32@hi(%s1)
+; CHECK-NEXT: stl %s0, (,%s1)
+; CHECK-NEXT: or %s11, 0, %s9
+ store i32 %0, i32* @vi32, align 8
+ ret void
+}
+
+; Function Attrs: norecurse nounwind
+; Over-aligned (align 8) i16 store to a global: the 2-byte store `st2b`.
+define void @storei16com(i16 %0) {
+; CHECK-LABEL: storei16com:
+; CHECK: .LBB{{[0-9]+}}_2:
+; CHECK-NEXT: lea %s1, vi16@lo
+; CHECK-NEXT: and %s1, %s1, (32)0
+; CHECK-NEXT: lea.sl %s1, vi16@hi(%s1)
+; CHECK-NEXT: st2b %s0, (,%s1)
+; CHECK-NEXT: or %s11, 0, %s9
+ store i16 %0, i16* @vi16, align 8
+ ret void
+}
+
+; Function Attrs: norecurse nounwind
+; Over-aligned (align 8) i8 store to a global: the 1-byte store `st1b`.
+define void @storei8com(i8 %0) {
+; CHECK-LABEL: storei8com:
+; CHECK: .LBB{{[0-9]+}}_2:
+; CHECK-NEXT: lea %s1, vi8@lo
+; CHECK-NEXT: and %s1, %s1, (32)0
+; CHECK-NEXT: lea.sl %s1, vi8@hi(%s1)
+; CHECK-NEXT: st1b %s0, (,%s1)
+; CHECK-NEXT: or %s11, 0, %s9
+ store i8 %0, i8* @vi8, align 8
+ ret void
+}
+