N->getValueType(0));
}]>;
+// Check if (mul r, imm) can be optimized to (SLLI (ALSL r, r, i0), i1),
+// in which imm = (1 + (1 << i0)) << i1.
+def AlslSlliImm : PatLeaf<(imm), [{
+ // Only fold when the multiply is the immediate's sole user; otherwise the
+ // constant is likely materialized anyway and the expansion gains nothing.
+ if (!N->hasOneUse())
+ return false;
+ uint64_t Imm = N->getZExtValue();
+ // Guard against Imm == 0: countr_zero(0) == 64 and a 64-bit shift by 64
+ // below would be undefined behavior.
+ if (Imm == 0)
+ return false;
+ unsigned I1 = llvm::countr_zero(Imm);
+ uint64_t Rem = Imm >> I1;
+ // Rem must equal 1 + (1 << i0) with i0 in [1, 4], the legal ALSL shifts.
+ return Rem == 3 || Rem == 5 || Rem == 9 || Rem == 17;
+}]>;
+
+def AlslSlliImmI1 : SDNodeXForm<imm, [{
+ // i1 is the trailing-zero count of imm = (1 + (1 << i0)) << i1, i.e. the
+ // shift amount fed to SLLI.
+ uint64_t Val = N->getZExtValue();
+ return CurDAG->getTargetConstant(llvm::countr_zero(Val), SDLoc(N),
+ N->getValueType(0));
+}]>;
+
+def AlslSlliImmI0 : SDNodeXForm<imm, [{
+ // Extract i0 from imm = (1 + (1 << i0)) << i1: strip the trailing zeros
+ // (i1), subtract 1, and take the log2 of the remaining power of two.
+ // AlslSlliImm guarantees (Imm >> I1) is one of 3, 5, 9 or 17, so
+ // (Imm >> I1) - 1 is exactly 2, 4, 8 or 16 and I0 lands in [1, 4].
+ uint64_t Imm = N->getZExtValue();
+ unsigned I1 = llvm::countr_zero(Imm);
+ uint64_t I0 = llvm::countr_zero((Imm >> I1) - 1);
+ return CurDAG->getTargetConstant(I0, SDLoc(N),
+ N->getValueType(0));
+}]>;
+
//===----------------------------------------------------------------------===//
// Instruction Formats
//===----------------------------------------------------------------------===//
}
} // Predicates = [IsLA64]
+let Predicates = [IsLA32] in {
+// mul r, imm ==> slli.w (alsl.w r, r, i0), i1 when imm = (1 + (1 << i0)) << i1.
+def : Pat<(mul GPR:$rj, (AlslSlliImm:$im)),
+ (SLLI_W (ALSL_W GPR:$rj, GPR:$rj, (AlslSlliImmI0 AlslSlliImm:$im)),
+ (AlslSlliImmI1 AlslSlliImm:$im))>;
+} // Predicates = [IsLA32]
+
+let Predicates = [IsLA64] in {
+// 32-bit multiply (sign-extended from i32) uses the .w variants on LA64.
+def : Pat<(sext_inreg (mul GPR:$rj, (AlslSlliImm:$im)), i32),
+ (SLLI_W (ALSL_W GPR:$rj, GPR:$rj, (AlslSlliImmI0 AlslSlliImm:$im)),
+ (AlslSlliImmI1 AlslSlliImm:$im))>;
+// Full 64-bit multiply uses the .d variants.
+def : Pat<(mul GPR:$rj, (AlslSlliImm:$im)),
+ (SLLI_D (ALSL_D GPR:$rj, GPR:$rj, (AlslSlliImmI0 AlslSlliImm:$im)),
+ (AlslSlliImmI1 AlslSlliImm:$im))>;
+} // Predicates = [IsLA64]
+
foreach Idx = 1...7 in {
defvar ShamtA = !mul(8, Idx);
defvar ShamtB = !mul(8, !sub(8, Idx));
define signext i32 @mul_i32_768(i32 %a) {
; LA32-LABEL: mul_i32_768:
; LA32: # %bb.0:
-; LA32-NEXT: ori $a1, $zero, 768
-; LA32-NEXT: mul.w $a0, $a0, $a1
+; LA32-NEXT: alsl.w $a0, $a0, $a0, 1
+; LA32-NEXT: slli.w $a0, $a0, 8
; LA32-NEXT: ret
;
; LA64-LABEL: mul_i32_768:
; LA64: # %bb.0:
-; LA64-NEXT: ori $a1, $zero, 768
-; LA64-NEXT: mul.d $a0, $a0, $a1
-; LA64-NEXT: addi.w $a0, $a0, 0
+; LA64-NEXT: alsl.w $a0, $a0, $a0, 1
+; LA64-NEXT: slli.w $a0, $a0, 8
; LA64-NEXT: ret
%b = mul i32 %a, 768
ret i32 %b
define signext i32 @mul_i32_1280(i32 %a) {
; LA32-LABEL: mul_i32_1280:
; LA32: # %bb.0:
-; LA32-NEXT: ori $a1, $zero, 1280
-; LA32-NEXT: mul.w $a0, $a0, $a1
+; LA32-NEXT: alsl.w $a0, $a0, $a0, 2
+; LA32-NEXT: slli.w $a0, $a0, 8
; LA32-NEXT: ret
;
; LA64-LABEL: mul_i32_1280:
; LA64: # %bb.0:
-; LA64-NEXT: ori $a1, $zero, 1280
-; LA64-NEXT: mul.d $a0, $a0, $a1
-; LA64-NEXT: addi.w $a0, $a0, 0
+; LA64-NEXT: alsl.w $a0, $a0, $a0, 2
+; LA64-NEXT: slli.w $a0, $a0, 8
; LA64-NEXT: ret
%b = mul i32 %a, 1280
ret i32 %b
define signext i32 @mul_i32_2304(i32 %a) {
; LA32-LABEL: mul_i32_2304:
; LA32: # %bb.0:
-; LA32-NEXT: ori $a1, $zero, 2304
-; LA32-NEXT: mul.w $a0, $a0, $a1
+; LA32-NEXT: alsl.w $a0, $a0, $a0, 3
+; LA32-NEXT: slli.w $a0, $a0, 8
; LA32-NEXT: ret
;
; LA64-LABEL: mul_i32_2304:
; LA64: # %bb.0:
-; LA64-NEXT: ori $a1, $zero, 2304
-; LA64-NEXT: mul.d $a0, $a0, $a1
-; LA64-NEXT: addi.w $a0, $a0, 0
+; LA64-NEXT: alsl.w $a0, $a0, $a0, 3
+; LA64-NEXT: slli.w $a0, $a0, 8
; LA64-NEXT: ret
%b = mul i32 %a, 2304
ret i32 %b
define signext i32 @mul_i32_4352(i32 %a) {
; LA32-LABEL: mul_i32_4352:
; LA32: # %bb.0:
-; LA32-NEXT: lu12i.w $a1, 1
-; LA32-NEXT: ori $a1, $a1, 256
-; LA32-NEXT: mul.w $a0, $a0, $a1
+; LA32-NEXT: alsl.w $a0, $a0, $a0, 4
+; LA32-NEXT: slli.w $a0, $a0, 8
; LA32-NEXT: ret
;
; LA64-LABEL: mul_i32_4352:
; LA64: # %bb.0:
-; LA64-NEXT: lu12i.w $a1, 1
-; LA64-NEXT: ori $a1, $a1, 256
-; LA64-NEXT: mul.d $a0, $a0, $a1
-; LA64-NEXT: addi.w $a0, $a0, 0
+; LA64-NEXT: alsl.w $a0, $a0, $a0, 4
+; LA64-NEXT: slli.w $a0, $a0, 8
; LA64-NEXT: ret
%b = mul i32 %a, 4352
ret i32 %b
;
; LA64-LABEL: mul_i64_768:
; LA64: # %bb.0:
-; LA64-NEXT: ori $a1, $zero, 768
-; LA64-NEXT: mul.d $a0, $a0, $a1
+; LA64-NEXT: alsl.d $a0, $a0, $a0, 1
+; LA64-NEXT: slli.d $a0, $a0, 8
; LA64-NEXT: ret
%b = mul i64 %a, 768
ret i64 %b
;
; LA64-LABEL: mul_i64_1280:
; LA64: # %bb.0:
-; LA64-NEXT: ori $a1, $zero, 1280
-; LA64-NEXT: mul.d $a0, $a0, $a1
+; LA64-NEXT: alsl.d $a0, $a0, $a0, 2
+; LA64-NEXT: slli.d $a0, $a0, 8
; LA64-NEXT: ret
%b = mul i64 %a, 1280
ret i64 %b
;
; LA64-LABEL: mul_i64_2304:
; LA64: # %bb.0:
-; LA64-NEXT: ori $a1, $zero, 2304
-; LA64-NEXT: mul.d $a0, $a0, $a1
+; LA64-NEXT: alsl.d $a0, $a0, $a0, 3
+; LA64-NEXT: slli.d $a0, $a0, 8
; LA64-NEXT: ret
%b = mul i64 %a, 2304
ret i64 %b
;
; LA64-LABEL: mul_i64_4352:
; LA64: # %bb.0:
-; LA64-NEXT: lu12i.w $a1, 1
-; LA64-NEXT: ori $a1, $a1, 256
-; LA64-NEXT: mul.d $a0, $a0, $a1
+; LA64-NEXT: alsl.d $a0, $a0, $a0, 4
+; LA64-NEXT: slli.d $a0, $a0, 8
; LA64-NEXT: ret
%b = mul i64 %a, 4352
ret i64 %b