setLoadExtAction({ISD::EXTLOAD, ISD::SEXTLOAD, ISD::ZEXTLOAD}, XLenVT,
MVT::i1, Promote);
+ // DAGCombiner can call isLoadExtLegal for types that aren't legal.
+ setLoadExtAction({ISD::EXTLOAD, ISD::SEXTLOAD, ISD::ZEXTLOAD}, MVT::i32,
+ MVT::i1, Promote);
// TODO: add all necessary setOperationAction calls.
setOperationAction(ISD::DYNAMIC_STACKALLOC, XLenVT, Expand);
if (Subtarget.is64Bit()) {
setOperationAction(ISD::EH_DWARF_CFA, MVT::i64, Custom);
+ setOperationAction(ISD::LOAD, MVT::i32, Custom);
+
setOperationAction({ISD::ADD, ISD::SUB, ISD::SHL, ISD::SRA, ISD::SRL},
MVT::i32, Custom);
Results.push_back(RCW.getValue(2));
break;
}
+ case ISD::LOAD: {
+ // Custom result legalization for i32 loads on RV64 (registered via
+ // setOperationAction(ISD::LOAD, MVT::i32, Custom)).  Only plain,
+ // non-extending loads are handled; ext loads fall through to the
+ // default legalization.
+ if (!ISD::isNON_EXTLoad(N))
+ return;
+
+ // Use a SEXTLOAD instead of the default EXTLOAD. Similar to the
+ // sext_inreg we emit for ADD/SUB/MUL/SLLI.
+ LoadSDNode *Ld = cast<LoadSDNode>(N);
+
+ SDLoc dl(N);
+ // Rebuild the load as an i64 sign-extending load of the original
+ // memory type, reusing the original chain, base pointer, and memory
+ // operand (so alignment/volatility/AA info are preserved).
+ SDValue Res = DAG.getExtLoad(ISD::SEXTLOAD, dl, MVT::i64, Ld->getChain(),
+ Ld->getBasePtr(), Ld->getMemoryVT(),
+ Ld->getMemOperand());
+ // Results must mirror the original node's results: result 0 is the
+ // loaded value (truncated back to i32), result 1 is the output chain.
+ Results.push_back(DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, Res));
+ Results.push_back(Res.getValue(1));
+ return;
+ }
case ISD::MUL: {
unsigned Size = N->getSimpleValueType(0).getSizeInBits();
unsigned XLen = Subtarget.getXLen();
declare signext i32 @bar(i32 signext)
-; The load here will be an anyext load in isel and sext.w will be emitted for
-; the ret. Make sure we can look through logic ops to prove the sext.w is
-; unnecessary.
+; The load here was previously an anyext load, but it is now lowered as a
+; sign-extending load, which makes the sext.w redundant before isel. We
+; therefore get the same result with or without the sext.w removal pass.
+; The test is kept for coverage purposes.
define signext i32 @test2(i32* %p, i32 signext %b) nounwind {
; RV64I-LABEL: test2:
; RV64I: # %bb.0:
; NOREMOVAL-NEXT: li a2, -2
; NOREMOVAL-NEXT: rolw a1, a2, a1
; NOREMOVAL-NEXT: and a0, a1, a0
-; NOREMOVAL-NEXT: sext.w a0, a0
; NOREMOVAL-NEXT: ret
%a = load i32, i32* %p
%shl = shl i32 1, %b
; NOREMOVAL-NEXT: li a2, -2
; NOREMOVAL-NEXT: rolw a1, a2, a1
; NOREMOVAL-NEXT: or a0, a1, a0
-; NOREMOVAL-NEXT: sext.w a0, a0
; NOREMOVAL-NEXT: ret
%a = load i32, i32* %p
%shl = shl i32 1, %b
; NOREMOVAL-NEXT: li a2, 1
; NOREMOVAL-NEXT: sllw a1, a2, a1
; NOREMOVAL-NEXT: xnor a0, a1, a0
-; NOREMOVAL-NEXT: sext.w a0, a0
; NOREMOVAL-NEXT: ret
%a = load i32, i32* %p
%shl = shl i32 1, %b
;
; RV64-LABEL: vec3_setcc_crash:
; RV64: # %bb.0:
-; RV64-NEXT: lwu a0, 0(a0)
+; RV64-NEXT: lw a0, 0(a0)
; RV64-NEXT: slli a2, a0, 40
; RV64-NEXT: slli a3, a0, 56
; RV64-NEXT: slli a4, a0, 48
; RV64-NEXT: li a0, 0
; RV64-NEXT: j .LBB0_8
; RV64-NEXT: .LBB0_7:
-; RV64-NEXT: srli a0, a0, 16
+; RV64-NEXT: srliw a0, a0, 16
; RV64-NEXT: .LBB0_8:
; RV64-NEXT: sb a0, 2(a1)
; RV64-NEXT: sh a2, 0(a1)