return !C || !isInt<12>(C->getSExtValue());
}]>;
+// PatLeaf matching a 64-bit immediate of the form 0xFFFFFFFF << C with
+// 0 < C < 32, i.e. a 32-bit all-ones mask shifted left by a non-zero
+// amount. Such an AND mask can be implemented as srli+slli.uw instead of
+// materializing the constant (see the pattern using this leaf).
+def Shifted32OnesMask : PatLeaf<(imm), [{
+  uint64_t Imm = N->getZExtValue();
+  // Must be a single contiguous run of set bits.
+  if (!isShiftedMask_64(Imm))
+    return false;
+
+  unsigned TrailingZeros = countTrailingZeros(Imm);
+  // Require a non-zero shift below 32 and exactly 32 ones; with
+  // TrailingZeros < 32 the shift below cannot lose bits from the mask.
+  return TrailingZeros > 0 && TrailingZeros < 32 &&
+         Imm == UINT64_C(0xFFFFFFFF) << TrailingZeros;
+}], TrailingZeros>;
+
// ComplexPatterns matching operands foldable into sh1add/sh2add/sh3add.
// NOTE(review): selection logic lives in the named C++ hooks
// (selectSH1ADDOp etc.) in the DAG selector — confirm behavior there.
def sh1add_op : ComplexPattern<XLenVT, 1, "selectSH1ADDOp", [], [], 6>;
def sh2add_op : ComplexPattern<XLenVT, 1, "selectSH2ADDOp", [], [], 6>;
def sh3add_op : ComplexPattern<XLenVT, 1, "selectSH3ADDOp", [], [], 6>;
let Predicates = [HasStdExtZba, IsRV64] in {
// shl of a value whose upper 32 bits are cleared by an AND maps directly
// to slli.uw (shift left, taking only the low 32 bits of rs1).
def : Pat<(i64 (shl (and GPR:$rs1, 0xFFFFFFFF), uimm5:$shamt)),
          (SLLI_UW GPR:$rs1, uimm5:$shamt)>;
+// Match an AND with a shifted 0xffffffff mask (0xFFFFFFFF << C, 0 < C < 32,
+// as accepted by Shifted32OnesMask). Use SRLI to clear the C low bits,
+// then SLLI_UW to shift back while clearing the upper 32 bits in one
+// instruction. Both shift amounts are produced by the TrailingZeros
+// SDNodeXForm attached to Shifted32OnesMask.
+def : Pat<(i64 (and GPR:$rs1, Shifted32OnesMask:$mask)),
+          (SLLI_UW (SRLI GPR:$rs1, Shifted32OnesMask:$mask),
+          Shifted32OnesMask:$mask)>;
+
// add of a 32-bit zero-extended operand folds into add.uw; the X0 form
// implements a plain zext.w (and rs, 0xFFFFFFFF) via add.uw rd, rs, x0.
def : Pat<(i64 (add (and GPR:$rs1, 0xFFFFFFFF), non_imm12:$rs2)),
          (ADD_UW GPR:$rs1, GPR:$rs2)>;
def : Pat<(i64 (and GPR:$rs, 0xFFFFFFFF)), (ADD_UW GPR:$rs, X0)>;
ret i16 %6
}
+; Check that (and (lshr exact %x, 4), 0xFFFFFFFF) feeding a GEP index is
+; selected as srli+slli.uw with Zba, instead of materializing the shifted
+; mask 0xFFFFFFFF0 with li+slli+addi and AND-ing as base RV64I does.
+define i128 @slliuw_ptrdiff(i64 %diff, i128* %baseptr) {
+; RV64I-LABEL: slliuw_ptrdiff:
+; RV64I: # %bb.0:
+; RV64I-NEXT: li a2, 1
+; RV64I-NEXT: slli a2, a2, 36
+; RV64I-NEXT: addi a2, a2, -16
+; RV64I-NEXT: and a0, a0, a2
+; RV64I-NEXT: add a1, a1, a0
+; RV64I-NEXT: ld a0, 0(a1)
+; RV64I-NEXT: ld a1, 8(a1)
+; RV64I-NEXT: ret
+;
+; RV64ZBA-LABEL: slliuw_ptrdiff:
+; RV64ZBA: # %bb.0:
+; RV64ZBA-NEXT: srli a0, a0, 4
+; RV64ZBA-NEXT: slli.uw a0, a0, 4
+; RV64ZBA-NEXT: add a1, a1, a0
+; RV64ZBA-NEXT: ld a0, 0(a1)
+; RV64ZBA-NEXT: ld a1, 8(a1)
+; RV64ZBA-NEXT: ret
+ %ptrdiff = lshr exact i64 %diff, 4
+ %cast = and i64 %ptrdiff, 4294967295
+ %ptr = getelementptr inbounds i128, i128* %baseptr, i64 %cast
+ %res = load i128, i128* %ptr
+ ret i128 %res
+}
+
define signext i32 @srliw_2_sh2add(i32* %0, i32 signext %1) {
; RV64I-LABEL: srliw_2_sh2add:
; RV64I: # %bb.0: