This converts MULW to MUL when only the lower 32 bits of the result are used.
This will give more opportunities to use c.mul with the Zcb extension.
for (auto I = MBB.begin(), IE = MBB.end(); I != IE; ++I) {
MachineInstr &MI = *I;
+ unsigned Opc;
switch (MI.getOpcode()) {
- case RISCV::ADDW:
- case RISCV::SLLIW:
- if (TII.hasAllWUsers(MI, MRI)) {
- unsigned Opc =
- MI.getOpcode() == RISCV::ADDW ? RISCV::ADD : RISCV::SLLI;
- MI.setDesc(TII.get(Opc));
- MadeChange = true;
- }
- break;
+ default:
+ continue;
+ case RISCV::ADDW: Opc = RISCV::ADD; break;
+ case RISCV::MULW: Opc = RISCV::MUL; break;
+ case RISCV::SLLIW: Opc = RISCV::SLLI; break;
+ }
+
+ if (TII.hasAllWUsers(MI, MRI)) {
+ MI.setDesc(TII.get(Opc));
+ MadeChange = true;
}
}
}
; RV64IMB-LABEL: add_mul_combine_accept_a1:
; RV64IMB: # %bb.0:
; RV64IMB-NEXT: li a1, 29
-; RV64IMB-NEXT: mulw a0, a0, a1
+; RV64IMB-NEXT: mul a0, a0, a1
; RV64IMB-NEXT: addiw a0, a0, 1073
; RV64IMB-NEXT: ret
%tmp0 = add i32 %x, 37
; RV64IMB-LABEL: add_mul_combine_accept_a2:
; RV64IMB: # %bb.0:
; RV64IMB-NEXT: li a1, 29
-; RV64IMB-NEXT: mulw a0, a0, a1
+; RV64IMB-NEXT: mul a0, a0, a1
; RV64IMB-NEXT: addiw a0, a0, 1073
; RV64IMB-NEXT: ret
%tmp0 = add i32 %x, 37
; RV64IMB-LABEL: add_mul_combine_accept_b1:
; RV64IMB: # %bb.0:
; RV64IMB-NEXT: li a1, 23
-; RV64IMB-NEXT: mulw a0, a0, a1
+; RV64IMB-NEXT: mul a0, a0, a1
; RV64IMB-NEXT: lui a1, 50
; RV64IMB-NEXT: addiw a1, a1, 1119
; RV64IMB-NEXT: addw a0, a0, a1
; RV64IMB-LABEL: add_mul_combine_accept_b2:
; RV64IMB: # %bb.0:
; RV64IMB-NEXT: li a1, 23
-; RV64IMB-NEXT: mulw a0, a0, a1
+; RV64IMB-NEXT: mul a0, a0, a1
; RV64IMB-NEXT: lui a1, 50
; RV64IMB-NEXT: addiw a1, a1, 1119
; RV64IMB-NEXT: addw a0, a0, a1
; RV64IMB: # %bb.0:
; RV64IMB-NEXT: addiw a0, a0, 1972
; RV64IMB-NEXT: li a1, 29
-; RV64IMB-NEXT: mulw a0, a0, a1
+; RV64IMB-NEXT: mul a0, a0, a1
; RV64IMB-NEXT: addiw a0, a0, 11
; RV64IMB-NEXT: ret
%tmp0 = mul i32 %x, 29
; RV64IMB: # %bb.0:
; RV64IMB-NEXT: addiw a0, a0, 1972
; RV64IMB-NEXT: li a1, 29
-; RV64IMB-NEXT: mulw a0, a0, a1
+; RV64IMB-NEXT: mul a0, a0, a1
; RV64IMB-NEXT: addiw a0, a0, 11
; RV64IMB-NEXT: ret
%tmp0 = mul i32 %x, 29
; RV64IMB-NEXT: addiw a0, a0, 3
; RV64IMB-NEXT: lui a1, 1
; RV64IMB-NEXT: addiw a1, a1, -1096
-; RV64IMB-NEXT: mulw a0, a0, a1
+; RV64IMB-NEXT: mul a0, a0, a1
; RV64IMB-NEXT: addiw a0, a0, -10
; RV64IMB-NEXT: ret
%tmp0 = mul i32 %x, 3000
; RV64IMB-NEXT: addiw a0, a0, 3
; RV64IMB-NEXT: lui a1, 1
; RV64IMB-NEXT: addiw a1, a1, -1096
-; RV64IMB-NEXT: mulw a0, a0, a1
+; RV64IMB-NEXT: mul a0, a0, a1
; RV64IMB-NEXT: addiw a0, a0, -10
; RV64IMB-NEXT: ret
%tmp0 = mul i32 %x, 3000
; RV64IMB-NEXT: addiw a0, a0, -3
; RV64IMB-NEXT: lui a1, 1
; RV64IMB-NEXT: addiw a1, a1, -1096
-; RV64IMB-NEXT: mulw a0, a0, a1
+; RV64IMB-NEXT: mul a0, a0, a1
; RV64IMB-NEXT: addiw a0, a0, 10
; RV64IMB-NEXT: ret
%tmp0 = mul i32 %x, 3000
; RV64IMB-NEXT: addiw a0, a0, -3
; RV64IMB-NEXT: lui a1, 1
; RV64IMB-NEXT: addiw a1, a1, -1096
-; RV64IMB-NEXT: mulw a0, a0, a1
+; RV64IMB-NEXT: mul a0, a0, a1
; RV64IMB-NEXT: addiw a0, a0, 10
; RV64IMB-NEXT: ret
%tmp0 = mul i32 %x, 3000
; RV64IMB-NEXT: addiw a0, a0, -3
; RV64IMB-NEXT: lui a1, 1048575
; RV64IMB-NEXT: addiw a1, a1, 1096
-; RV64IMB-NEXT: mulw a0, a0, a1
+; RV64IMB-NEXT: mul a0, a0, a1
; RV64IMB-NEXT: addiw a0, a0, -10
; RV64IMB-NEXT: ret
%tmp0 = mul i32 %x, -3000
; RV64IMB-NEXT: addiw a0, a0, -3
; RV64IMB-NEXT: lui a1, 1048575
; RV64IMB-NEXT: addiw a1, a1, 1096
-; RV64IMB-NEXT: mulw a0, a0, a1
+; RV64IMB-NEXT: mul a0, a0, a1
; RV64IMB-NEXT: addiw a0, a0, -10
; RV64IMB-NEXT: ret
%tmp0 = mul i32 %x, -3000
; RV64IMB-NEXT: addiw a0, a0, 3
; RV64IMB-NEXT: lui a1, 1048575
; RV64IMB-NEXT: addiw a1, a1, 1096
-; RV64IMB-NEXT: mulw a0, a0, a1
+; RV64IMB-NEXT: mul a0, a0, a1
; RV64IMB-NEXT: addiw a0, a0, 10
; RV64IMB-NEXT: ret
%tmp0 = mul i32 %x, -3000
; RV64IMB-NEXT: addiw a0, a0, 3
; RV64IMB-NEXT: lui a1, 1048575
; RV64IMB-NEXT: addiw a1, a1, 1096
-; RV64IMB-NEXT: mulw a0, a0, a1
+; RV64IMB-NEXT: mul a0, a0, a1
; RV64IMB-NEXT: addiw a0, a0, 10
; RV64IMB-NEXT: ret
%tmp0 = mul i32 %x, -3000
;
; RV64I-LABEL: f:
; RV64I: # %bb.0: # %entry
-; RV64I-NEXT: mulw a0, a1, a0
+; RV64I-NEXT: mul a0, a1, a0
; RV64I-NEXT: slli a1, a0, 58
; RV64I-NEXT: srli a1, a1, 60
; RV64I-NEXT: slli a0, a0, 52
; RV64I-NEXT: srli a0, a0, 57
-; RV64I-NEXT: mulw a0, a1, a0
+; RV64I-NEXT: mul a0, a1, a0
; RV64I-NEXT: addw a0, a0, a2
; RV64I-NEXT: ret
;
; RV64ZBB-LABEL: f:
; RV64ZBB: # %bb.0: # %entry
-; RV64ZBB-NEXT: mulw a0, a1, a0
+; RV64ZBB-NEXT: mul a0, a1, a0
; RV64ZBB-NEXT: slli a1, a0, 58
; RV64ZBB-NEXT: srli a1, a1, 60
; RV64ZBB-NEXT: slli a0, a0, 52
; RV64ZBB-NEXT: srli a0, a0, 57
-; RV64ZBB-NEXT: mulw a0, a1, a0
+; RV64ZBB-NEXT: mul a0, a1, a0
; RV64ZBB-NEXT: addw a0, a0, a2
; RV64ZBB-NEXT: ret
;
; RV64XTHEADMAC-LABEL: f:
; RV64XTHEADMAC: # %bb.0: # %entry
-; RV64XTHEADMAC-NEXT: mulw a0, a1, a0
+; RV64XTHEADMAC-NEXT: mul a0, a1, a0
; RV64XTHEADMAC-NEXT: slli a1, a0, 58
; RV64XTHEADMAC-NEXT: srli a1, a1, 60
; RV64XTHEADMAC-NEXT: slli a0, a0, 52
; RV64XTHEADBB-NEXT: mul a0, a1, a0
; RV64XTHEADBB-NEXT: th.extu a1, a0, 5, 2
; RV64XTHEADBB-NEXT: th.extu a0, a0, 11, 5
-; RV64XTHEADBB-NEXT: mulw a0, a1, a0
+; RV64XTHEADBB-NEXT: mul a0, a1, a0
; RV64XTHEADBB-NEXT: addw a0, a0, a2
; RV64XTHEADBB-NEXT: ret
;
; RV64M-NEXT: and a0, a0, a1
; RV64M-NEXT: lui a1, 30667
; RV64M-NEXT: addiw a1, a1, 1329
-; RV64M-NEXT: mulw a0, a0, a1
+; RV64M-NEXT: mul a0, a0, a1
; RV64M-NEXT: srliw a0, a0, 27
; RV64M-NEXT: lui a1, %hi(.LCPI2_0)
; RV64M-NEXT: addi a1, a1, %lo(.LCPI2_0)
; RV64M-NEXT: and a0, a0, a1
; RV64M-NEXT: lui a1, 30667
; RV64M-NEXT: addiw a1, a1, 1329
-; RV64M-NEXT: mulw a0, a0, a1
+; RV64M-NEXT: mul a0, a0, a1
; RV64M-NEXT: srliw a0, a0, 27
; RV64M-NEXT: lui a1, %hi(.LCPI6_0)
; RV64M-NEXT: addi a1, a1, %lo(.LCPI6_0)
; RV64M-NEXT: and a0, a0, a1
; RV64M-NEXT: lui a1, 4112
; RV64M-NEXT: addiw a1, a1, 257
-; RV64M-NEXT: mulw a0, a0, a1
+; RV64M-NEXT: mul a0, a0, a1
; RV64M-NEXT: srliw a0, a0, 24
; RV64M-NEXT: ret
; RV64M-NEXT: .LBB10_2:
; RV64M-NEXT: and a0, a0, a1
; RV64M-NEXT: lui a1, 4112
; RV64M-NEXT: addiw a1, a1, 257
-; RV64M-NEXT: mulw a0, a0, a1
+; RV64M-NEXT: mul a0, a0, a1
; RV64M-NEXT: srliw a0, a0, 24
; RV64M-NEXT: ret
;
; RV64M-NEXT: and a0, a0, a1
; RV64M-NEXT: lui a1, 4112
; RV64M-NEXT: addiw a1, a1, 257
-; RV64M-NEXT: mulw a0, a0, a1
+; RV64M-NEXT: mul a0, a0, a1
; RV64M-NEXT: srliw a0, a0, 24
; RV64M-NEXT: ret
;
define i32 @test_reassoc_mul_i32(i32 %a0, i32 %a1, i32 %a2, i32 %a3) {
; CHECK-LABEL: test_reassoc_mul_i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: mulw a0, a0, a1
-; CHECK-NEXT: mulw a1, a2, a3
+; CHECK-NEXT: mul a0, a0, a1
+; CHECK-NEXT: mul a1, a2, a3
; CHECK-NEXT: mulw a0, a0, a1
; CHECK-NEXT: ret
%t0 = mul i32 %a0, %a1
; CHECK-NEXT: sd s0, 0(sp) # 8-byte Folded Spill
; CHECK-NEXT: .cfi_offset ra, -8
; CHECK-NEXT: .cfi_offset s0, -16
-; CHECK-NEXT: mulw a0, a0, a0
+; CHECK-NEXT: mul a0, a0, a0
; CHECK-NEXT: addiw s0, a0, 1
; CHECK-NEXT: li a0, 4
; CHECK-NEXT: call __cxa_allocate_exception@plt
; CHECK-NEXT: sd s0, 0(sp) # 8-byte Folded Spill
; CHECK-NEXT: .cfi_offset ra, -8
; CHECK-NEXT: .cfi_offset s0, -16
-; CHECK-NEXT: mulw a0, a0, a0
+; CHECK-NEXT: mul a0, a0, a0
; CHECK-NEXT: addiw s0, a0, 1
; CHECK-NEXT: li a0, 4
; CHECK-NEXT: call __cxa_allocate_exception@plt
define i32 @foo(i32 %x, i32 %y, i32 %z) {
; CHECK-LABEL: foo:
; CHECK: # %bb.0:
-; CHECK-NEXT: mulw a0, a0, a0
+; CHECK-NEXT: mul a0, a0, a0
; CHECK-NEXT: addiw a0, a0, 1
-; CHECK-NEXT: mulw a0, a0, a0
+; CHECK-NEXT: mul a0, a0, a0
; CHECK-NEXT: add a0, a0, a2
; CHECK-NEXT: addiw a0, a0, 1
; CHECK-NEXT: sllw a0, a0, a1
; CHECK-NEXT: not a2, a0
; CHECK-NEXT: add a2, a2, a1
; CHECK-NEXT: addiw a3, a0, 1
-; CHECK-NEXT: mulw a3, a2, a3
+; CHECK-NEXT: mul a3, a2, a3
; CHECK-NEXT: subw a1, a1, a0
; CHECK-NEXT: addiw a1, a1, -2
; CHECK-NEXT: slli a1, a1, 32
; CHECK-NEXT: # %bb.1: # %for.body.preheader
; CHECK-NEXT: not a2, a0
; CHECK-NEXT: add a3, a2, a1
-; CHECK-NEXT: mulw a2, a3, a2
+; CHECK-NEXT: mul a2, a3, a2
; CHECK-NEXT: subw a1, a1, a0
; CHECK-NEXT: addiw a1, a1, -2
; CHECK-NEXT: slli a1, a1, 32
define zeroext i32 @zext_mulw_aext_aext(i32 %a, i32 %b) nounwind {
; RV64IM-LABEL: zext_mulw_aext_aext:
; RV64IM: # %bb.0:
-; RV64IM-NEXT: mulw a0, a0, a1
+; RV64IM-NEXT: mul a0, a0, a1
; RV64IM-NEXT: slli a0, a0, 32
; RV64IM-NEXT: srli a0, a0, 32
; RV64IM-NEXT: ret
define zeroext i32 @zext_mulw_aext_sext(i32 %a, i32 signext %b) nounwind {
; RV64IM-LABEL: zext_mulw_aext_sext:
; RV64IM: # %bb.0:
-; RV64IM-NEXT: mulw a0, a0, a1
+; RV64IM-NEXT: mul a0, a0, a1
; RV64IM-NEXT: slli a0, a0, 32
; RV64IM-NEXT: srli a0, a0, 32
; RV64IM-NEXT: ret
define zeroext i32 @zext_mulw_aext_zext(i32 %a, i32 zeroext %b) nounwind {
; RV64IM-LABEL: zext_mulw_aext_zext:
; RV64IM: # %bb.0:
-; RV64IM-NEXT: mulw a0, a0, a1
+; RV64IM-NEXT: mul a0, a0, a1
; RV64IM-NEXT: slli a0, a0, 32
; RV64IM-NEXT: srli a0, a0, 32
; RV64IM-NEXT: ret
define zeroext i32 @zext_mulw_sext_aext(i32 signext %a, i32 %b) nounwind {
; RV64IM-LABEL: zext_mulw_sext_aext:
; RV64IM: # %bb.0:
-; RV64IM-NEXT: mulw a0, a0, a1
+; RV64IM-NEXT: mul a0, a0, a1
; RV64IM-NEXT: slli a0, a0, 32
; RV64IM-NEXT: srli a0, a0, 32
; RV64IM-NEXT: ret
define zeroext i32 @zext_mulw_sext_sext(i32 signext %a, i32 signext %b) nounwind {
; RV64IM-LABEL: zext_mulw_sext_sext:
; RV64IM: # %bb.0:
-; RV64IM-NEXT: mulw a0, a0, a1
+; RV64IM-NEXT: mul a0, a0, a1
; RV64IM-NEXT: slli a0, a0, 32
; RV64IM-NEXT: srli a0, a0, 32
; RV64IM-NEXT: ret
define zeroext i32 @zext_mulw_sext_zext(i32 signext %a, i32 zeroext %b) nounwind {
; RV64IM-LABEL: zext_mulw_sext_zext:
; RV64IM: # %bb.0:
-; RV64IM-NEXT: mulw a0, a0, a1
+; RV64IM-NEXT: mul a0, a0, a1
; RV64IM-NEXT: slli a0, a0, 32
; RV64IM-NEXT: srli a0, a0, 32
; RV64IM-NEXT: ret
define zeroext i32 @zext_mulw_zext_aext(i32 zeroext %a, i32 %b) nounwind {
; RV64IM-LABEL: zext_mulw_zext_aext:
; RV64IM: # %bb.0:
-; RV64IM-NEXT: mulw a0, a0, a1
+; RV64IM-NEXT: mul a0, a0, a1
; RV64IM-NEXT: slli a0, a0, 32
; RV64IM-NEXT: srli a0, a0, 32
; RV64IM-NEXT: ret
define zeroext i32 @zext_mulw_zext_sext(i32 zeroext %a, i32 signext %b) nounwind {
; RV64IM-LABEL: zext_mulw_zext_sext:
; RV64IM: # %bb.0:
-; RV64IM-NEXT: mulw a0, a0, a1
+; RV64IM-NEXT: mul a0, a0, a1
; RV64IM-NEXT: slli a0, a0, 32
; RV64IM-NEXT: srli a0, a0, 32
; RV64IM-NEXT: ret
define zeroext i32 @zext_mulw_zext_zext(i32 zeroext %a, i32 zeroext %b) nounwind {
; RV64IM-LABEL: zext_mulw_zext_zext:
; RV64IM: # %bb.0:
-; RV64IM-NEXT: mulw a0, a0, a1
+; RV64IM-NEXT: mul a0, a0, a1
; RV64IM-NEXT: slli a0, a0, 32
; RV64IM-NEXT: srli a0, a0, 32
; RV64IM-NEXT: ret
; CHECK-NEXT: .LBB7_6: # %for.body
; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
; CHECK-NEXT: lw a2, 0(a0)
-; CHECK-NEXT: mulw a2, a2, a1
+; CHECK-NEXT: mul a2, a2, a1
; CHECK-NEXT: sw a2, 0(a0)
; CHECK-NEXT: addi a3, a3, 1
; CHECK-NEXT: addi a0, a0, 4
; RV64I: # %bb.0:
; RV64I-NEXT: slli a0, a0, 48
; RV64I-NEXT: srai a0, a0, 48
-; RV64I-NEXT: mulw a1, a1, a2
+; RV64I-NEXT: mul a1, a1, a2
; RV64I-NEXT: slli a1, a1, 48
; RV64I-NEXT: srai a1, a1, 48
; RV64I-NEXT: add a0, a0, a1
; RV64IZbb-LABEL: func16:
; RV64IZbb: # %bb.0:
; RV64IZbb-NEXT: sext.h a0, a0
-; RV64IZbb-NEXT: mulw a1, a1, a2
+; RV64IZbb-NEXT: mul a1, a1, a2
; RV64IZbb-NEXT: sext.h a1, a1
; RV64IZbb-NEXT: add a0, a0, a1
; RV64IZbb-NEXT: lui a1, 8
; RV64I: # %bb.0:
; RV64I-NEXT: slli a0, a0, 56
; RV64I-NEXT: srai a0, a0, 56
-; RV64I-NEXT: mulw a1, a1, a2
+; RV64I-NEXT: mul a1, a1, a2
; RV64I-NEXT: slli a1, a1, 56
; RV64I-NEXT: srai a1, a1, 56
; RV64I-NEXT: add a0, a0, a1
; RV64IZbb-LABEL: func8:
; RV64IZbb: # %bb.0:
; RV64IZbb-NEXT: sext.b a0, a0
-; RV64IZbb-NEXT: mulw a1, a1, a2
+; RV64IZbb-NEXT: mul a1, a1, a2
; RV64IZbb-NEXT: sext.b a1, a1
; RV64IZbb-NEXT: add a0, a0, a1
; RV64IZbb-NEXT: li a1, 127
; RV64I: # %bb.0:
; RV64I-NEXT: slli a0, a0, 60
; RV64I-NEXT: srai a0, a0, 60
-; RV64I-NEXT: mulw a1, a1, a2
+; RV64I-NEXT: mul a1, a1, a2
; RV64I-NEXT: slli a1, a1, 60
; RV64I-NEXT: srai a1, a1, 60
; RV64I-NEXT: add a0, a0, a1
; RV64IZbb: # %bb.0:
; RV64IZbb-NEXT: slli a0, a0, 60
; RV64IZbb-NEXT: srai a0, a0, 60
-; RV64IZbb-NEXT: mulw a1, a1, a2
+; RV64IZbb-NEXT: mul a1, a1, a2
; RV64IZbb-NEXT: slli a1, a1, 60
; RV64IZbb-NEXT: srai a1, a1, 60
; RV64IZbb-NEXT: add a0, a0, a1
; RV64I-NEXT: srli a2, a0, 4
; RV64I-NEXT: add a0, a0, a2
; RV64I-NEXT: and a0, a0, s2
-; RV64I-NEXT: mulw a0, a0, s3
+; RV64I-NEXT: mul a0, a0, s3
; RV64I-NEXT: srliw a0, a0, 24
; RV64I-NEXT: bnez a1, .LBB4_1
; RV64I-NEXT: # %bb.2: # %bb7
; RV64IM-NEXT: sraiw a1, a1, 6
; RV64IM-NEXT: add a1, a1, a2
; RV64IM-NEXT: li a2, 95
-; RV64IM-NEXT: mulw a1, a1, a2
+; RV64IM-NEXT: mul a1, a1, a2
; RV64IM-NEXT: subw a0, a0, a1
; RV64IM-NEXT: ret
%1 = srem i32 %x, 95
; RV64IM-NEXT: srai a1, a1, 40
; RV64IM-NEXT: add a1, a1, a2
; RV64IM-NEXT: li a2, 1060
-; RV64IM-NEXT: mulw a1, a1, a2
+; RV64IM-NEXT: mul a1, a1, a2
; RV64IM-NEXT: subw a0, a0, a1
; RV64IM-NEXT: ret
%1 = srem i32 %x, 1060
; RV64IM-NEXT: srai a1, a1, 40
; RV64IM-NEXT: add a1, a1, a2
; RV64IM-NEXT: li a2, -723
-; RV64IM-NEXT: mulw a1, a1, a2
+; RV64IM-NEXT: mul a1, a1, a2
; RV64IM-NEXT: subw a0, a0, a1
; RV64IM-NEXT: ret
%1 = srem i32 %x, -723
; RV64IM-NEXT: add a1, a1, a2
; RV64IM-NEXT: lui a2, 1048570
; RV64IM-NEXT: addiw a2, a2, 1595
-; RV64IM-NEXT: mulw a1, a1, a2
+; RV64IM-NEXT: mul a1, a1, a2
; RV64IM-NEXT: subw a0, a0, a1
; RV64IM-NEXT: ret
%1 = srem i32 %x, -22981
; RV64IM-NEXT: sraiw a1, a1, 6
; RV64IM-NEXT: add a1, a1, a2
; RV64IM-NEXT: li a2, 95
-; RV64IM-NEXT: mulw a2, a1, a2
+; RV64IM-NEXT: mul a2, a1, a2
; RV64IM-NEXT: add a0, a0, a1
; RV64IM-NEXT: subw a0, a0, a2
; RV64IM-NEXT: ret
; RV64M: # %bb.0:
; RV64M-NEXT: lui a1, 128424
; RV64M-NEXT: addiw a1, a1, 331
-; RV64M-NEXT: mulw a0, a0, a1
+; RV64M-NEXT: mul a0, a0, a1
; RV64M-NEXT: lui a1, 662
; RV64M-NEXT: addiw a1, a1, -83
; RV64M-NEXT: add a0, a0, a1
; RV64MV: # %bb.0:
; RV64MV-NEXT: lui a1, 128424
; RV64MV-NEXT: addiw a1, a1, 331
-; RV64MV-NEXT: mulw a0, a0, a1
+; RV64MV-NEXT: mul a0, a0, a1
; RV64MV-NEXT: lui a1, 662
; RV64MV-NEXT: addiw a1, a1, -83
; RV64MV-NEXT: add a0, a0, a1
; RV64M-NEXT: srli a1, a1, 63
; RV64M-NEXT: add a1, a2, a1
; RV64M-NEXT: li a2, 6
-; RV64M-NEXT: mulw a1, a1, a2
+; RV64M-NEXT: mul a1, a1, a2
; RV64M-NEXT: subw a0, a0, a1
; RV64M-NEXT: andi a0, a0, 15
; RV64M-NEXT: addi a0, a0, -1
; RV64MV-NEXT: srli a1, a1, 63
; RV64MV-NEXT: add a1, a2, a1
; RV64MV-NEXT: li a2, 6
-; RV64MV-NEXT: mulw a1, a1, a2
+; RV64MV-NEXT: mul a1, a1, a2
; RV64MV-NEXT: subw a0, a0, a1
; RV64MV-NEXT: andi a0, a0, 15
; RV64MV-NEXT: addi a0, a0, -1
; RV64IM-NEXT: lui a6, %hi(.LCPI0_1)
; RV64IM-NEXT: ld a6, %lo(.LCPI0_1)(a6)
; RV64IM-NEXT: li a7, 95
-; RV64IM-NEXT: mulw a3, a3, a7
+; RV64IM-NEXT: mul a3, a3, a7
; RV64IM-NEXT: subw a2, a2, a3
; RV64IM-NEXT: mulh a3, a1, a6
; RV64IM-NEXT: sub a3, a3, a1
; RV64IM-NEXT: lui a6, %hi(.LCPI0_2)
; RV64IM-NEXT: ld a6, %lo(.LCPI0_2)(a6)
; RV64IM-NEXT: li a7, -124
-; RV64IM-NEXT: mulw a3, a3, a7
+; RV64IM-NEXT: mul a3, a3, a7
; RV64IM-NEXT: subw a1, a1, a3
; RV64IM-NEXT: mulh a3, a5, a6
; RV64IM-NEXT: srli a6, a3, 63
; RV64IM-NEXT: lui a6, %hi(.LCPI0_3)
; RV64IM-NEXT: ld a6, %lo(.LCPI0_3)(a6)
; RV64IM-NEXT: li a7, 98
-; RV64IM-NEXT: mulw a3, a3, a7
+; RV64IM-NEXT: mul a3, a3, a7
; RV64IM-NEXT: subw a5, a5, a3
; RV64IM-NEXT: mulh a3, a4, a6
; RV64IM-NEXT: srli a6, a3, 63
; RV64IM-NEXT: srli a3, a3, 7
; RV64IM-NEXT: add a3, a3, a6
; RV64IM-NEXT: li a6, -1003
-; RV64IM-NEXT: mulw a3, a3, a6
+; RV64IM-NEXT: mul a3, a3, a6
; RV64IM-NEXT: subw a4, a4, a3
; RV64IM-NEXT: sh a4, 6(a0)
; RV64IM-NEXT: sh a5, 4(a0)
; RV64IM-NEXT: srli a6, a6, 6
; RV64IM-NEXT: add a6, a6, a7
; RV64IM-NEXT: li a7, 95
-; RV64IM-NEXT: mulw a6, a6, a7
+; RV64IM-NEXT: mul a6, a6, a7
; RV64IM-NEXT: subw a2, a2, a6
; RV64IM-NEXT: mulh a6, a1, a3
; RV64IM-NEXT: add a6, a6, a1
; RV64IM-NEXT: srli t0, a6, 63
; RV64IM-NEXT: srli a6, a6, 6
; RV64IM-NEXT: add a6, a6, t0
-; RV64IM-NEXT: mulw a6, a6, a7
+; RV64IM-NEXT: mul a6, a6, a7
; RV64IM-NEXT: subw a1, a1, a6
; RV64IM-NEXT: mulh a6, a5, a3
; RV64IM-NEXT: add a6, a6, a5
; RV64IM-NEXT: srli t0, a6, 63
; RV64IM-NEXT: srli a6, a6, 6
; RV64IM-NEXT: add a6, a6, t0
-; RV64IM-NEXT: mulw a6, a6, a7
+; RV64IM-NEXT: mul a6, a6, a7
; RV64IM-NEXT: subw a5, a5, a6
; RV64IM-NEXT: mulh a3, a4, a3
; RV64IM-NEXT: add a3, a3, a4
; RV64IM-NEXT: srli a6, a3, 63
; RV64IM-NEXT: srli a3, a3, 6
; RV64IM-NEXT: add a3, a3, a6
-; RV64IM-NEXT: mulw a3, a3, a7
+; RV64IM-NEXT: mul a3, a3, a7
; RV64IM-NEXT: subw a4, a4, a3
; RV64IM-NEXT: sh a4, 6(a0)
; RV64IM-NEXT: sh a5, 4(a0)
; RV64IM-NEXT: srai a6, a6, 6
; RV64IM-NEXT: add a6, a6, a7
; RV64IM-NEXT: li a7, 95
-; RV64IM-NEXT: mulw t0, a6, a7
+; RV64IM-NEXT: mul t0, a6, a7
; RV64IM-NEXT: mulh t1, a1, a3
; RV64IM-NEXT: add t1, t1, a1
; RV64IM-NEXT: srli t2, t1, 63
; RV64IM-NEXT: srai t1, t1, 6
; RV64IM-NEXT: add t1, t1, t2
-; RV64IM-NEXT: mulw t2, t1, a7
+; RV64IM-NEXT: mul t2, t1, a7
; RV64IM-NEXT: mulh t3, a5, a3
; RV64IM-NEXT: add t3, t3, a5
; RV64IM-NEXT: srli t4, t3, 63
; RV64IM-NEXT: srai t3, t3, 6
; RV64IM-NEXT: add t3, t3, t4
-; RV64IM-NEXT: mulw t4, t3, a7
+; RV64IM-NEXT: mul t4, t3, a7
; RV64IM-NEXT: mulh a3, a4, a3
; RV64IM-NEXT: add a3, a3, a4
; RV64IM-NEXT: srli t5, a3, 63
; RV64IM-NEXT: srai a3, a3, 6
; RV64IM-NEXT: add a3, a3, t5
-; RV64IM-NEXT: mulw a7, a3, a7
+; RV64IM-NEXT: mul a7, a3, a7
; RV64IM-NEXT: add a3, a4, a3
; RV64IM-NEXT: subw a3, a3, a7
; RV64IM-NEXT: add a5, a5, t3
; RV64IM-NEXT: srli a3, a3, 6
; RV64IM-NEXT: add a3, a3, a6
; RV64IM-NEXT: li a6, 95
-; RV64IM-NEXT: mulw a3, a3, a6
+; RV64IM-NEXT: mul a3, a3, a6
; RV64IM-NEXT: subw a2, a2, a3
; RV64IM-NEXT: srli a3, a1, 58
; RV64IM-NEXT: add a3, a1, a3
; RV64IM-NEXT: lui a5, %hi(.LCPI4_1)
; RV64IM-NEXT: ld a5, %lo(.LCPI4_1)(a5)
; RV64IM-NEXT: li a6, 23
-; RV64IM-NEXT: mulw a3, a3, a6
+; RV64IM-NEXT: mul a3, a3, a6
; RV64IM-NEXT: subw a2, a2, a3
; RV64IM-NEXT: mulh a3, a1, a5
; RV64IM-NEXT: srli a5, a3, 63
; RV64IM-NEXT: lui a5, %hi(.LCPI4_2)
; RV64IM-NEXT: ld a5, %lo(.LCPI4_2)(a5)
; RV64IM-NEXT: li a6, 654
-; RV64IM-NEXT: mulw a3, a3, a6
+; RV64IM-NEXT: mul a3, a3, a6
; RV64IM-NEXT: subw a1, a1, a3
; RV64IM-NEXT: mulh a3, a4, a5
; RV64IM-NEXT: srli a5, a3, 63
; RV64IM-NEXT: add a3, a3, a5
; RV64IM-NEXT: lui a5, 1
; RV64IM-NEXT: addiw a5, a5, 1327
-; RV64IM-NEXT: mulw a3, a3, a5
+; RV64IM-NEXT: mul a3, a3, a5
; RV64IM-NEXT: subw a4, a4, a3
; RV64IM-NEXT: sh zero, 0(a0)
; RV64IM-NEXT: sh a4, 6(a0)
; RV64IM-NEXT: li a5, 23
; RV64IM-NEXT: lui a6, %hi(.LCPI5_1)
; RV64IM-NEXT: ld a6, %lo(.LCPI5_1)(a6)
-; RV64IM-NEXT: mulw a3, a3, a5
+; RV64IM-NEXT: mul a3, a3, a5
; RV64IM-NEXT: lh a1, 8(a1)
; RV64IM-NEXT: subw a2, a2, a3
; RV64IM-NEXT: mulh a3, a4, a6
; RV64IM-NEXT: add a3, a3, a5
; RV64IM-NEXT: lui a5, 1
; RV64IM-NEXT: addiw a5, a5, 1327
-; RV64IM-NEXT: mulw a3, a3, a5
+; RV64IM-NEXT: mul a3, a3, a5
; RV64IM-NEXT: subw a4, a4, a3
; RV64IM-NEXT: srli a3, a1, 49
; RV64IM-NEXT: add a3, a1, a3
; RV64I: # %bb.0:
; RV64I-NEXT: slli a0, a0, 48
; RV64I-NEXT: srai a0, a0, 48
-; RV64I-NEXT: mulw a1, a1, a2
+; RV64I-NEXT: mul a1, a1, a2
; RV64I-NEXT: slli a1, a1, 48
; RV64I-NEXT: srai a1, a1, 48
; RV64I-NEXT: sub a0, a0, a1
; RV64IZbb-LABEL: func16:
; RV64IZbb: # %bb.0:
; RV64IZbb-NEXT: sext.h a0, a0
-; RV64IZbb-NEXT: mulw a1, a1, a2
+; RV64IZbb-NEXT: mul a1, a1, a2
; RV64IZbb-NEXT: sext.h a1, a1
; RV64IZbb-NEXT: sub a0, a0, a1
; RV64IZbb-NEXT: lui a1, 8
; RV64I: # %bb.0:
; RV64I-NEXT: slli a0, a0, 56
; RV64I-NEXT: srai a0, a0, 56
-; RV64I-NEXT: mulw a1, a1, a2
+; RV64I-NEXT: mul a1, a1, a2
; RV64I-NEXT: slli a1, a1, 56
; RV64I-NEXT: srai a1, a1, 56
; RV64I-NEXT: sub a0, a0, a1
; RV64IZbb-LABEL: func8:
; RV64IZbb: # %bb.0:
; RV64IZbb-NEXT: sext.b a0, a0
-; RV64IZbb-NEXT: mulw a1, a1, a2
+; RV64IZbb-NEXT: mul a1, a1, a2
; RV64IZbb-NEXT: sext.b a1, a1
; RV64IZbb-NEXT: sub a0, a0, a1
; RV64IZbb-NEXT: li a1, 127
; RV64I: # %bb.0:
; RV64I-NEXT: slli a0, a0, 60
; RV64I-NEXT: srai a0, a0, 60
-; RV64I-NEXT: mulw a1, a1, a2
+; RV64I-NEXT: mul a1, a1, a2
; RV64I-NEXT: slli a1, a1, 60
; RV64I-NEXT: srai a1, a1, 60
; RV64I-NEXT: sub a0, a0, a1
; RV64IZbb: # %bb.0:
; RV64IZbb-NEXT: slli a0, a0, 60
; RV64IZbb-NEXT: srai a0, a0, 60
-; RV64IZbb-NEXT: mulw a1, a1, a2
+; RV64IZbb-NEXT: mul a1, a1, a2
; RV64IZbb-NEXT: slli a1, a1, 60
; RV64IZbb-NEXT: srai a1, a1, 60
; RV64IZbb-NEXT: sub a0, a0, a1
;
; RV64I-LABEL: func32:
; RV64I: # %bb.0:
-; RV64I-NEXT: mulw a1, a1, a2
+; RV64I-NEXT: mul a1, a1, a2
; RV64I-NEXT: addw a1, a0, a1
; RV64I-NEXT: sext.w a0, a0
; RV64I-NEXT: sltu a0, a1, a0
; RV64IZbb-LABEL: func16:
; RV64IZbb: # %bb.0:
; RV64IZbb-NEXT: zext.h a0, a0
-; RV64IZbb-NEXT: mulw a1, a1, a2
+; RV64IZbb-NEXT: mul a1, a1, a2
; RV64IZbb-NEXT: zext.h a1, a1
; RV64IZbb-NEXT: add a0, a0, a1
; RV64IZbb-NEXT: lui a1, 16
; RV64I-LABEL: func8:
; RV64I: # %bb.0:
; RV64I-NEXT: andi a0, a0, 255
-; RV64I-NEXT: mulw a1, a1, a2
+; RV64I-NEXT: mul a1, a1, a2
; RV64I-NEXT: andi a1, a1, 255
; RV64I-NEXT: add a0, a0, a1
; RV64I-NEXT: li a1, 255
; RV64IZbb-LABEL: func8:
; RV64IZbb: # %bb.0:
; RV64IZbb-NEXT: andi a0, a0, 255
-; RV64IZbb-NEXT: mulw a1, a1, a2
+; RV64IZbb-NEXT: mul a1, a1, a2
; RV64IZbb-NEXT: andi a1, a1, 255
; RV64IZbb-NEXT: add a0, a0, a1
; RV64IZbb-NEXT: li a1, 255
; RV64I-LABEL: func4:
; RV64I: # %bb.0:
; RV64I-NEXT: andi a0, a0, 15
-; RV64I-NEXT: mulw a1, a1, a2
+; RV64I-NEXT: mul a1, a1, a2
; RV64I-NEXT: andi a1, a1, 15
; RV64I-NEXT: add a0, a0, a1
; RV64I-NEXT: li a1, 15
; RV64IZbb-LABEL: func4:
; RV64IZbb: # %bb.0:
; RV64IZbb-NEXT: andi a0, a0, 15
-; RV64IZbb-NEXT: mulw a1, a1, a2
+; RV64IZbb-NEXT: mul a1, a1, a2
; RV64IZbb-NEXT: andi a1, a1, 15
; RV64IZbb-NEXT: add a0, a0, a1
; RV64IZbb-NEXT: li a1, 15
; RV64IM-NEXT: add a1, a2, a1
; RV64IM-NEXT: srli a1, a1, 6
; RV64IM-NEXT: li a2, 95
-; RV64IM-NEXT: mulw a1, a1, a2
+; RV64IM-NEXT: mul a1, a1, a2
; RV64IM-NEXT: subw a0, a0, a1
; RV64IM-NEXT: ret
%1 = urem i32 %x, 95
; RV64IM-NEXT: mulhu a1, a1, a2
; RV64IM-NEXT: srli a1, a1, 42
; RV64IM-NEXT: li a2, 1060
-; RV64IM-NEXT: mulw a1, a1, a2
+; RV64IM-NEXT: mul a1, a1, a2
; RV64IM-NEXT: subw a0, a0, a1
; RV64IM-NEXT: ret
%1 = urem i32 %x, 1060
; RV64IM-NEXT: add a1, a2, a1
; RV64IM-NEXT: srli a1, a1, 6
; RV64IM-NEXT: li a2, 95
-; RV64IM-NEXT: mulw a2, a1, a2
+; RV64IM-NEXT: mul a2, a1, a2
; RV64IM-NEXT: add a0, a0, a1
; RV64IM-NEXT: subw a0, a0, a2
; RV64IM-NEXT: ret
; RV64M: # %bb.0:
; RV64M-NEXT: lui a1, 1
; RV64M-NEXT: addiw a1, a1, -819
-; RV64M-NEXT: mulw a0, a0, a1
+; RV64M-NEXT: mul a0, a0, a1
; RV64M-NEXT: slli a0, a0, 51
; RV64M-NEXT: srli a0, a0, 51
; RV64M-NEXT: sltiu a0, a0, 1639
; RV64MV: # %bb.0:
; RV64MV-NEXT: lui a1, 1
; RV64MV-NEXT: addiw a1, a1, -819
-; RV64MV-NEXT: mulw a0, a0, a1
+; RV64MV-NEXT: mul a0, a0, a1
; RV64MV-NEXT: slli a0, a0, 51
; RV64MV-NEXT: srli a0, a0, 51
; RV64MV-NEXT: sltiu a0, a0, 1639
; RV64M: # %bb.0:
; RV64M-NEXT: lui a1, 28087
; RV64M-NEXT: addiw a1, a1, -585
-; RV64M-NEXT: mulw a0, a0, a1
+; RV64M-NEXT: mul a0, a0, a1
; RV64M-NEXT: slli a1, a0, 26
; RV64M-NEXT: slli a0, a0, 37
; RV64M-NEXT: srli a0, a0, 38
; RV64MV: # %bb.0:
; RV64MV-NEXT: lui a1, 28087
; RV64MV-NEXT: addiw a1, a1, -585
-; RV64MV-NEXT: mulw a0, a0, a1
+; RV64MV-NEXT: mul a0, a0, a1
; RV64MV-NEXT: slli a1, a0, 26
; RV64MV-NEXT: slli a0, a0, 37
; RV64MV-NEXT: srli a0, a0, 38
; RV64M-LABEL: test_urem_negative_odd:
; RV64M: # %bb.0:
; RV64M-NEXT: li a1, 307
-; RV64M-NEXT: mulw a0, a0, a1
+; RV64M-NEXT: mul a0, a0, a1
; RV64M-NEXT: andi a0, a0, 511
; RV64M-NEXT: sltiu a0, a0, 2
; RV64M-NEXT: xori a0, a0, 1
; RV64MV-LABEL: test_urem_negative_odd:
; RV64MV: # %bb.0:
; RV64MV-NEXT: li a1, 307
-; RV64MV-NEXT: mulw a0, a0, a1
+; RV64MV-NEXT: mul a0, a0, a1
; RV64MV-NEXT: andi a0, a0, 511
; RV64MV-NEXT: sltiu a0, a0, 2
; RV64MV-NEXT: xori a0, a0, 1
; RV64M-NEXT: srli a3, a1, 11
; RV64M-NEXT: andi a1, a1, 2047
; RV64M-NEXT: li a4, 683
-; RV64M-NEXT: mulw a1, a1, a4
+; RV64M-NEXT: mul a1, a1, a4
; RV64M-NEXT: slli a4, a1, 10
; RV64M-NEXT: slli a1, a1, 53
; RV64M-NEXT: srli a1, a1, 54
; RV64M-NEXT: andi a1, a1, 2047
; RV64M-NEXT: sltiu a1, a1, 342
; RV64M-NEXT: li a4, 1463
-; RV64M-NEXT: mulw a3, a3, a4
+; RV64M-NEXT: mul a3, a3, a4
; RV64M-NEXT: addiw a3, a3, -1463
; RV64M-NEXT: andi a3, a3, 2047
; RV64M-NEXT: sltiu a3, a3, 293
; RV64M-NEXT: li a4, 819
-; RV64M-NEXT: mulw a2, a2, a4
+; RV64M-NEXT: mul a2, a2, a4
; RV64M-NEXT: addiw a2, a2, -1638
; RV64M-NEXT: andi a2, a2, 2047
; RV64M-NEXT: sltiu a2, a2, 2
; RV64IM-NEXT: lui a6, %hi(.LCPI0_1)
; RV64IM-NEXT: ld a6, %lo(.LCPI0_1)(a6)
; RV64IM-NEXT: li a7, 95
-; RV64IM-NEXT: mulw a3, a3, a7
+; RV64IM-NEXT: mul a3, a3, a7
; RV64IM-NEXT: subw a2, a2, a3
; RV64IM-NEXT: mulhu a3, a1, a6
; RV64IM-NEXT: lui a6, %hi(.LCPI0_2)
; RV64IM-NEXT: ld a6, %lo(.LCPI0_2)(a6)
; RV64IM-NEXT: li a7, 124
-; RV64IM-NEXT: mulw a3, a3, a7
+; RV64IM-NEXT: mul a3, a3, a7
; RV64IM-NEXT: subw a1, a1, a3
; RV64IM-NEXT: mulhu a3, a5, a6
; RV64IM-NEXT: lui a6, %hi(.LCPI0_3)
; RV64IM-NEXT: ld a6, %lo(.LCPI0_3)(a6)
; RV64IM-NEXT: li a7, 98
-; RV64IM-NEXT: mulw a3, a3, a7
+; RV64IM-NEXT: mul a3, a3, a7
; RV64IM-NEXT: subw a5, a5, a3
; RV64IM-NEXT: mulhu a3, a4, a6
; RV64IM-NEXT: li a6, 1003
-; RV64IM-NEXT: mulw a3, a3, a6
+; RV64IM-NEXT: mul a3, a3, a6
; RV64IM-NEXT: subw a4, a4, a3
; RV64IM-NEXT: sh a4, 6(a0)
; RV64IM-NEXT: sh a5, 4(a0)
; RV64IM-NEXT: lhu a1, 8(a1)
; RV64IM-NEXT: mulhu a6, a2, a3
; RV64IM-NEXT: li a7, 95
-; RV64IM-NEXT: mulw a6, a6, a7
+; RV64IM-NEXT: mul a6, a6, a7
; RV64IM-NEXT: subw a2, a2, a6
; RV64IM-NEXT: mulhu a6, a1, a3
-; RV64IM-NEXT: mulw a6, a6, a7
+; RV64IM-NEXT: mul a6, a6, a7
; RV64IM-NEXT: subw a1, a1, a6
; RV64IM-NEXT: mulhu a6, a5, a3
-; RV64IM-NEXT: mulw a6, a6, a7
+; RV64IM-NEXT: mul a6, a6, a7
; RV64IM-NEXT: subw a5, a5, a6
; RV64IM-NEXT: mulhu a3, a4, a3
-; RV64IM-NEXT: mulw a3, a3, a7
+; RV64IM-NEXT: mul a3, a3, a7
; RV64IM-NEXT: subw a4, a4, a3
; RV64IM-NEXT: sh a4, 6(a0)
; RV64IM-NEXT: sh a5, 4(a0)
; RV64IM-NEXT: lhu a1, 16(a1)
; RV64IM-NEXT: mulhu a6, a2, a3
; RV64IM-NEXT: li a7, 95
-; RV64IM-NEXT: mulw t0, a6, a7
+; RV64IM-NEXT: mul t0, a6, a7
; RV64IM-NEXT: mulhu t1, a1, a3
-; RV64IM-NEXT: mulw t2, t1, a7
+; RV64IM-NEXT: mul t2, t1, a7
; RV64IM-NEXT: mulhu t3, a5, a3
-; RV64IM-NEXT: mulw t4, t3, a7
+; RV64IM-NEXT: mul t4, t3, a7
; RV64IM-NEXT: mulhu a3, a4, a3
-; RV64IM-NEXT: mulw a7, a3, a7
+; RV64IM-NEXT: mul a7, a3, a7
; RV64IM-NEXT: add a3, a4, a3
; RV64IM-NEXT: subw a3, a3, a7
; RV64IM-NEXT: add a5, a5, t3
; RV64IM-NEXT: lhu a1, 0(a1)
; RV64IM-NEXT: mulhu a3, a2, a3
; RV64IM-NEXT: li a6, 95
-; RV64IM-NEXT: mulw a3, a3, a6
+; RV64IM-NEXT: mul a3, a3, a6
; RV64IM-NEXT: subw a2, a2, a3
; RV64IM-NEXT: andi a1, a1, 63
; RV64IM-NEXT: andi a5, a5, 31
; RV64IM-NEXT: lui a5, %hi(.LCPI4_1)
; RV64IM-NEXT: ld a5, %lo(.LCPI4_1)(a5)
; RV64IM-NEXT: li a6, 654
-; RV64IM-NEXT: mulw a3, a3, a6
+; RV64IM-NEXT: mul a3, a3, a6
; RV64IM-NEXT: subw a2, a2, a3
; RV64IM-NEXT: mulhu a3, a1, a5
; RV64IM-NEXT: lui a5, %hi(.LCPI4_2)
; RV64IM-NEXT: ld a5, %lo(.LCPI4_2)(a5)
; RV64IM-NEXT: li a6, 23
-; RV64IM-NEXT: mulw a3, a3, a6
+; RV64IM-NEXT: mul a3, a3, a6
; RV64IM-NEXT: subw a1, a1, a3
; RV64IM-NEXT: mulhu a3, a4, a5
; RV64IM-NEXT: lui a5, 1
; RV64IM-NEXT: addiw a5, a5, 1327
-; RV64IM-NEXT: mulw a3, a3, a5
+; RV64IM-NEXT: mul a3, a3, a5
; RV64IM-NEXT: subw a4, a4, a3
; RV64IM-NEXT: sh zero, 0(a0)
; RV64IM-NEXT: sh a4, 6(a0)
;
; RV64I-LABEL: func32:
; RV64I: # %bb.0:
-; RV64I-NEXT: mulw a1, a1, a2
+; RV64I-NEXT: mul a1, a1, a2
; RV64I-NEXT: subw a1, a0, a1
; RV64I-NEXT: sext.w a0, a0
; RV64I-NEXT: sltu a0, a0, a1
; RV64IZbb-LABEL: func16:
; RV64IZbb: # %bb.0:
; RV64IZbb-NEXT: zext.h a0, a0
-; RV64IZbb-NEXT: mulw a1, a1, a2
+; RV64IZbb-NEXT: mul a1, a1, a2
; RV64IZbb-NEXT: zext.h a1, a1
; RV64IZbb-NEXT: maxu a0, a0, a1
; RV64IZbb-NEXT: sub a0, a0, a1
; RV64I-LABEL: func8:
; RV64I: # %bb.0:
; RV64I-NEXT: andi a0, a0, 255
-; RV64I-NEXT: mulw a1, a1, a2
+; RV64I-NEXT: mul a1, a1, a2
; RV64I-NEXT: andi a1, a1, 255
; RV64I-NEXT: sub a1, a0, a1
; RV64I-NEXT: sltu a0, a0, a1
; RV64IZbb-LABEL: func8:
; RV64IZbb: # %bb.0:
; RV64IZbb-NEXT: andi a0, a0, 255
-; RV64IZbb-NEXT: mulw a1, a1, a2
+; RV64IZbb-NEXT: mul a1, a1, a2
; RV64IZbb-NEXT: andi a1, a1, 255
; RV64IZbb-NEXT: maxu a0, a0, a1
; RV64IZbb-NEXT: sub a0, a0, a1
; RV64I-LABEL: func4:
; RV64I: # %bb.0:
; RV64I-NEXT: andi a0, a0, 15
-; RV64I-NEXT: mulw a1, a1, a2
+; RV64I-NEXT: mul a1, a1, a2
; RV64I-NEXT: andi a1, a1, 15
; RV64I-NEXT: sub a1, a0, a1
; RV64I-NEXT: sltu a0, a0, a1
; RV64IZbb-LABEL: func4:
; RV64IZbb: # %bb.0:
; RV64IZbb-NEXT: andi a0, a0, 15
-; RV64IZbb-NEXT: mulw a1, a1, a2
+; RV64IZbb-NEXT: mul a1, a1, a2
; RV64IZbb-NEXT: andi a1, a1, 15
; RV64IZbb-NEXT: maxu a0, a0, a1
; RV64IZbb-NEXT: sub a0, a0, a1