return XLen - EltBits + 1;
break;
}
+  case ISD::INTRINSIC_W_CHAIN: {
+    unsigned IntNo = Op.getConstantOperandVal(1);
+    switch (IntNo) {
+    default:
+      break;
+    case Intrinsic::riscv_masked_atomicrmw_xchg_i64:
+    case Intrinsic::riscv_masked_atomicrmw_add_i64:
+    case Intrinsic::riscv_masked_atomicrmw_sub_i64:
+    case Intrinsic::riscv_masked_atomicrmw_nand_i64:
+    case Intrinsic::riscv_masked_atomicrmw_max_i64:
+    case Intrinsic::riscv_masked_atomicrmw_min_i64:
+    case Intrinsic::riscv_masked_atomicrmw_umax_i64:
+    case Intrinsic::riscv_masked_atomicrmw_umin_i64:
+    case Intrinsic::riscv_masked_cmpxchg_i64:
+      // riscv_masked_{atomicrmw_*,cmpxchg} intrinsics represent an emulated
+      // narrow atomic operation. These are implemented using an atomic
+      // operation at the minimum supported atomicrmw/cmpxchg width, whose
+      // result is then sign extended to XLEN. With +A, the minimum width is
+      // 32 for both RV64 and RV32, so on RV64 the result has at least
+      // 64 - 32 + 1 = 33 sign bits.
+      assert(Subtarget.getXLen() == 64);
+      assert(getMinCmpXchgSizeInBits() == 32);
+      assert(Subtarget.hasStdExtA());
+      return 33;
+    }
+  }
}
return 1;
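
To make the connection to the test changes below concrete, here is a minimal standalone C++ sketch. It is not part of the patch and not LLVM API code; numSignBits is a hypothetical stand-in for what SelectionDAG's ComputeNumSignBits now reports for these intrinsic nodes. It demonstrates that a 32-bit atomic result already sign-extended to XLEN=64 has at least 33 identical leading bits, so a following sext.w (sign-extend from bit 31) changes nothing and can be deleted, which is exactly the sext.w removed from the RV64IA check lines below.

// Hedged sketch, not part of the patch: numSignBits mimics what
// SelectionDAG::ComputeNumSignBits reports for the masked atomic intrinsics.
#include <cassert>
#include <cstdint>

// Number of leading bits equal to the sign bit (bit 63), counting the sign
// bit itself.
static unsigned numSignBits(int64_t V) {
  unsigned N = 1;
  while (N < 64 && ((V >> 63) & 1) == ((V >> (63 - N)) & 1))
    ++N;
  return N;
}

int main() {
  // A masked 32-bit atomic result, already sign-extended to XLEN=64 as the
  // riscv_masked_* intrinsics guarantee.
  int32_t Word = -0x7FFFFFFF;       // 0x80000001 as a signed 32-bit value
  int64_t Extended = (int64_t)Word; // what the intrinsic node produces

  // The fact the new ComputeNumSignBitsForTargetNode case encodes.
  assert(numSignBits(Extended) >= 33);

  // sext.w re-extends from bit 31; with >= 33 sign bits it is a no-op, so
  // the DAG combiner can delete it (the '-' lines in the tests below).
  int64_t AfterSextW = (int64_t)(int32_t)Extended;
  assert(AfterSextW == Extended);
  return 0;
}
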
; RV64IA-NEXT: .LBB2_5: # %do_cmpxchg
; RV64IA-NEXT: # in Loop: Header=BB2_1 Depth=1
; RV64IA-NEXT: and a4, a4, a0
-; RV64IA-NEXT: sext.w a4, a4
; RV64IA-NEXT: bne a1, a4, .LBB2_1
; RV64IA-NEXT: # %bb.2: # %exit
; RV64IA-NEXT: ret
; RV64IA-NEXT: .LBB3_5: # %do_cmpxchg
; RV64IA-NEXT: # in Loop: Header=BB3_1 Depth=1
; RV64IA-NEXT: and a4, a4, a0
-; RV64IA-NEXT: sext.w a4, a4
; RV64IA-NEXT: beq a1, a4, .LBB3_1
; RV64IA-NEXT: # %bb.2: # %exit
; RV64IA-NEXT: ret
; RV64IA-NEXT: bnez a5, .LBB48_1
; RV64IA-NEXT: .LBB48_3:
; RV64IA-NEXT: and a0, a2, a4
-; RV64IA-NEXT: sext.w a0, a0
; RV64IA-NEXT: xor a0, a1, a0
; RV64IA-NEXT: seqz a0, a0
; RV64IA-NEXT: ret
; RV64IA-NEXT: bnez a4, .LBB50_1
; RV64IA-NEXT: .LBB50_3:
; RV64IA-NEXT: and a0, a2, a5
-; RV64IA-NEXT: sext.w a0, a0
; RV64IA-NEXT: xor a0, a1, a0
; RV64IA-NEXT: seqz a0, a0
; RV64IA-NEXT: ret