bool IsScalarizable =
MemVT.isFixedLengthVector() && ISD::isNormalStore(Store) &&
+ Store->isSimple() &&
MemVT.getVectorElementType().bitsLE(Subtarget.getXLenVT()) &&
isPowerOf2_64(MemVT.getSizeInBits()) &&
MemVT.getSizeInBits() <= Subtarget.getXLen();
// vle16.v v8, (a0)
// vse16.v v8, (a1)
if (auto *L = dyn_cast<LoadSDNode>(Val);
- L && DCI.isBeforeLegalize() && IsScalarizable &&
+ L && DCI.isBeforeLegalize() && IsScalarizable && L->isSimple() &&
L->hasNUsesOfValue(1, 0) && L->hasNUsesOfValue(1, 1) &&
Store->getChain() == SDValue(L, 1) && ISD::isNormalLoad(L) &&
L->getMemoryVT() == MemVT) {
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, ma
; CHECK-NEXT: vle8.v v8, (a0)
-; CHECK-NEXT: lh a0, 0(a0)
-; CHECK-NEXT: sh a0, 0(a1)
+; CHECK-NEXT: vse8.v v8, (a1)
; CHECK-NEXT: ret
%v = load volatile <2 x i8>, ptr %p
store <2 x i8> %v, ptr %q
; Volatile *store* case: the backend combine that rewrites a small
; fixed-length vector load/store pair into a scalar load/store must not
; fire here, because the combine now requires Store->isSimple() (see the
; C++ hunk above in this patch).  Codegen therefore keeps the vector
; vle8/vse8 sequence instead of the previously-emitted scalar lh/sh pair
; (the '-' lines are the old expectations, the '+' lines the new ones).
define void @v2i8_volatile_store(ptr %p, ptr %q) {
; CHECK-LABEL: v2i8_volatile_store:
; CHECK: # %bb.0:
-; CHECK-NEXT: lh a0, 0(a0)
-; CHECK-NEXT: sh a0, 0(a1)
+; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, ma
+; CHECK-NEXT: vle8.v v8, (a0)
+; CHECK-NEXT: vse8.v v8, (a1)
; CHECK-NEXT: ret
%v = load <2 x i8>, ptr %p
store volatile <2 x i8> %v, ptr %q
; NOTE(review): the store below is not covered by the CHECK lines above
; and looks like it was spliced in from a different test during
; extraction -- confirm against the original test file before relying
; on this body.
store <2 x i8> <i8 undef, i8 3>, ptr %p
ret void
}
+
+; New test added by this patch: a volatile store of a constant vector.
+; The volatile store must still be emitted; the CHECK lines show it is
+; materialized as a splat of the immediate 1 (vmv.v.i) followed by a
+; vector store (vse8.v), i.e. the store is not elided or scalarized.
+define void @store_constant_v2i8_volatile(ptr %p) {
+; CHECK-LABEL: store_constant_v2i8_volatile:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, ma
+; CHECK-NEXT: vmv.v.i v8, 1
+; CHECK-NEXT: vse8.v v8, (a0)
+; CHECK-NEXT: ret
+ store volatile <2 x i8> <i8 1, i8 1>, ptr %p
+ ret void
+}