    // minimum size. e.g. <vscale x 2 x i32>. VLENB is in bytes so we calculate
    // vscale as VLENB / 8.
    assert(RISCV::RVVBitsPerBlock == 64 && "Unexpected bits per block!");
+    if (isa<ConstantSDNode>(Op.getOperand(0))) {
+      // We assume VLENB is a multiple of 8. We manually choose the best shift
+      // here because SimplifyDemandedBits isn't always able to simplify it.
+      uint64_t Val = Op.getConstantOperandVal(0);
+      if (isPowerOf2_64(Val)) {
+        uint64_t Log2 = Log2_64(Val);
+        if (Log2 < 3)
+          return DAG.getNode(ISD::SRL, DL, VT, VLENB,
+                             DAG.getConstant(3 - Log2, DL, VT));
+        if (Log2 > 3)
+          return DAG.getNode(ISD::SHL, DL, VT, VLENB,
+                             DAG.getConstant(Log2 - 3, DL, VT));
+        return VLENB;
+      }
+      // If the multiplier is a multiple of 8, scale it down to avoid needing
+      // to shift the VLENB value.
+      if ((Val % 8) == 0)
+        return DAG.getNode(ISD::MUL, DL, VT, VLENB,
+                           DAG.getConstant(Val / 8, DL, VT));
+    }
+
    SDValue VScale = DAG.getNode(ISD::SRL, DL, VT, VLENB,
                                 DAG.getConstant(3, DL, VT));
    return DAG.getNode(ISD::MUL, DL, VT, VScale, Op.getOperand(0));
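Since VLENB equals vscale * 8, a constant multiple of vscale can usually be
folded into a single shift or a smaller multiply of VLENB instead of the
generic "srli by 3, then mul" sequence above. A minimal standalone model of
the case analysis, as a sketch (the helper name vscaleTimesConst is
hypothetical, and __builtin_clzll assumes GCC/Clang):

    #include <cstdint>

    uint64_t vscaleTimesConst(uint64_t VLENB, uint64_t Val) {
      // VLEN is a multiple of 64, so VLENB (= vscale * 8) is a multiple of 8.
      if (Val != 0 && (Val & (Val - 1)) == 0) {    // isPowerOf2_64(Val)
        unsigned Log2 = 63 - __builtin_clzll(Val); // Log2_64(Val)
        if (Log2 < 3)
          return VLENB >> (3 - Log2); // e.g. vscale * 2  -> vlenb >> 2
        if (Log2 > 3)
          return VLENB << (Log2 - 3); // e.g. vscale * 64 -> vlenb << 3
        return VLENB;                 // vscale * 8 == vlenb
      }
      if ((Val % 8) == 0)
        return VLENB * (Val / 8);     // e.g. vscale * 24 -> vlenb * 3
      return (VLENB >> 3) * Val;      // generic fallback, as before
    }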
; CHECK-LABEL: ret_split_nxv64i32:
; CHECK: # %bb.0:
; CHECK-NEXT: csrr a2, vlenb
-; CHECK-NEXT: srli a2, a2, 3
-; CHECK-NEXT: slli a3, a2, 6
+; CHECK-NEXT: slli a3, a2, 3
; CHECK-NEXT: add a4, a1, a3
; CHECK-NEXT: vl8re32.v v8, (a4)
-; CHECK-NEXT: slli a4, a2, 7
-; CHECK-NEXT: addi a5, zero, 192
+; CHECK-NEXT: slli a4, a2, 4
+; CHECK-NEXT: addi a5, zero, 24
; CHECK-NEXT: mul a2, a2, a5
; CHECK-NEXT: add a5, a1, a4
; CHECK-NEXT: vl8re32.v v16, (a1)
; CHECK-NEXT: slli a2, a2, 5
; CHECK-NEXT: sub sp, sp, a2
; CHECK-NEXT: csrr a2, vlenb
-; CHECK-NEXT: srli a2, a2, 3
-; CHECK-NEXT: slli a6, a2, 6
+; CHECK-NEXT: slli a6, a2, 3
; CHECK-NEXT: add a4, a1, a6
; CHECK-NEXT: vl8re32.v v8, (a4)
; CHECK-NEXT: csrr a3, vlenb
; CHECK-NEXT: add a3, sp, a3
; CHECK-NEXT: addi a3, a3, 16
; CHECK-NEXT: vs8r.v v8, (a3) # Unknown-size Folded Spill
-; CHECK-NEXT: slli a7, a2, 7
+; CHECK-NEXT: slli a7, a2, 4
; CHECK-NEXT: add a5, a1, a7
; CHECK-NEXT: vl8re32.v v8, (a5)
; CHECK-NEXT: csrr a3, vlenb
; CHECK-NEXT: add a3, sp, a3
; CHECK-NEXT: addi a3, a3, 16
; CHECK-NEXT: vs8r.v v8, (a3) # Unknown-size Folded Spill
-; CHECK-NEXT: addi a5, zero, 192
+; CHECK-NEXT: addi a5, zero, 24
; CHECK-NEXT: mul t1, a2, a5
; CHECK-NEXT: add a3, a1, t1
; CHECK-NEXT: vl8re32.v v8, (a3)
; CHECK-NEXT: add a3, sp, a3
; CHECK-NEXT: addi a3, a3, 16
; CHECK-NEXT: vs8r.v v8, (a3) # Unknown-size Folded Spill
-; CHECK-NEXT: slli t3, a2, 8
+; CHECK-NEXT: slli t3, a2, 5
; CHECK-NEXT: add a4, a1, t3
; CHECK-NEXT: vl8re32.v v8, (a4)
; CHECK-NEXT: addi a3, sp, 16
; CHECK-NEXT: vs8r.v v8, (a3) # Unknown-size Folded Spill
-; CHECK-NEXT: addi a4, zero, 320
+; CHECK-NEXT: addi a4, zero, 40
; CHECK-NEXT: mul a4, a2, a4
; CHECK-NEXT: add t0, a1, a4
-; CHECK-NEXT: addi a5, zero, 384
+; CHECK-NEXT: addi a5, zero, 48
; CHECK-NEXT: mul a5, a2, a5
; CHECK-NEXT: add t2, a1, a5
-; CHECK-NEXT: addi a3, zero, 448
+; CHECK-NEXT: addi a3, zero, 56
; CHECK-NEXT: mul a2, a2, a3
; CHECK-NEXT: add a3, a1, a2
; CHECK-NEXT: vl8re32.v v8, (a1)
; CHECK-LABEL: insert_nxv16i8_nxv1i8_2:
; CHECK: # %bb.0:
; CHECK-NEXT: csrr a0, vlenb
-; CHECK-NEXT: srli a0, a0, 3
-; CHECK-NEXT: slli a1, a0, 1
-; CHECK-NEXT: add a0, a1, a0
-; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, mu
-; CHECK-NEXT: vslideup.vx v8, v10, a1
+; CHECK-NEXT: srli a1, a0, 3
+; CHECK-NEXT: srli a0, a0, 2
+; CHECK-NEXT: add a1, a0, a1
+; CHECK-NEXT: vsetvli zero, a1, e8, m1, tu, mu
+; CHECK-NEXT: vslideup.vx v8, v10, a0
; CHECK-NEXT: ret
%v = call <vscale x 16 x i8> @llvm.experimental.vector.insert.nxv1i8.nxv16i8(<vscale x 16 x i8> %vec, <vscale x 1 x i8> %subvec, i64 2)
ret <vscale x 16 x i8> %v
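Worked arithmetic for the change above, using VLENB = vscale * 8: the slideup
offset for element index 2 of <vscale x 1 x i8> is 2 * vscale = 2 * (VLENB / 8)
= VLENB >> 2, and the new VL is 3 * vscale = (VLENB >> 2) + (VLENB >> 3). Both
shifts now come straight from vlenb, so the old srli -> slli -> add dependency
chain through vscale becomes two independent shifts feeding a single add.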
; CHECK-LABEL: insert_nxv32f16_undef_nxv1f16_26:
; CHECK: # %bb.0:
; CHECK-NEXT: csrr a0, vlenb
-; CHECK-NEXT: srli a0, a0, 3
-; CHECK-NEXT: slli a1, a0, 1
-; CHECK-NEXT: add a0, a1, a0
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
-; CHECK-NEXT: vslideup.vx v22, v8, a1
+; CHECK-NEXT: srli a1, a0, 3
+; CHECK-NEXT: srli a0, a0, 2
+; CHECK-NEXT: add a1, a0, a1
+; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
+; CHECK-NEXT: vslideup.vx v22, v8, a0
; CHECK-NEXT: vmv8r.v v8, v16
; CHECK-NEXT: ret
%v = call <vscale x 32 x half> @llvm.experimental.vector.insert.nxv1f16.nxv32f16(<vscale x 32 x half> undef, <vscale x 1 x half> %subvec, i64 26)
; CHECK-NEXT: vmv.v.i v25, 0
; CHECK-NEXT: vmerge.vim v25, v25, 1, v0
; CHECK-NEXT: csrr a0, vlenb
-; CHECK-NEXT: srli a0, a0, 3
-; CHECK-NEXT: slli a1, a0, 1
-; CHECK-NEXT: add a0, a1, a0
+; CHECK-NEXT: srli a1, a0, 3
+; CHECK-NEXT: srli a0, a0, 2
+; CHECK-NEXT: add a1, a0, a1
; CHECK-NEXT: vsetvli a2, zero, e8, mf8, ta, mu
; CHECK-NEXT: vmv.v.i v26, 0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vmerge.vim v26, v26, 1, v0
-; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, mu
-; CHECK-NEXT: vslideup.vx v25, v26, a1
+; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, mu
+; CHECK-NEXT: vslideup.vx v25, v26, a0
; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, mu
; CHECK-NEXT: vmsne.vi v0, v25, 0
; CHECK-NEXT: ret
; RV32-NEXT: vsetvli a0, zero, e64, m8, tu, mu
; RV32-NEXT: vloxei32.v v16, (zero), v8, v0.t
; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: srli a0, a0, 3
-; RV32-NEXT: vsetvli a2, zero, e8, mf4, ta, mu
-; RV32-NEXT: vslidedown.vx v0, v0, a0
+; RV32-NEXT: srli a2, a0, 3
+; RV32-NEXT: vsetvli a3, zero, e8, mf4, ta, mu
+; RV32-NEXT: vslidedown.vx v0, v0, a2
; RV32-NEXT: vsetvli a2, zero, e64, m8, tu, mu
; RV32-NEXT: vloxei32.v v24, (zero), v12, v0.t
-; RV32-NEXT: slli a0, a0, 6
+; RV32-NEXT: slli a0, a0, 3
; RV32-NEXT: add a0, a1, a0
; RV32-NEXT: vs8r.v v24, (a0)
; RV32-NEXT: vs8r.v v16, (a1)
; RV64-NEXT: vsetvli a0, zero, e64, m8, tu, mu
; RV64-NEXT: vloxei64.v v24, (zero), v16, v0.t
; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: srli a0, a0, 3
-; RV64-NEXT: vsetvli a1, zero, e8, mf4, ta, mu
-; RV64-NEXT: vslidedown.vx v0, v0, a0
+; RV64-NEXT: srli a1, a0, 3
+; RV64-NEXT: vsetvli a3, zero, e8, mf4, ta, mu
+; RV64-NEXT: vslidedown.vx v0, v0, a1
; RV64-NEXT: vsetvli a1, zero, e64, m8, tu, mu
; RV64-NEXT: addi a1, sp, 16
; RV64-NEXT: vl8re8.v v16, (a1) # Unknown-size Folded Reload
; RV64-NEXT: vloxei64.v v8, (zero), v16, v0.t
-; RV64-NEXT: slli a0, a0, 6
+; RV64-NEXT: slli a0, a0, 3
; RV64-NEXT: add a0, a2, a0
; RV64-NEXT: vs8r.v v8, (a0)
; RV64-NEXT: vs8r.v v24, (a2)
; RV64-LABEL: mgather_baseidx_nxv32i8:
; RV64: # %bb.0:
; RV64-NEXT: vmv1r.v v25, v0
-; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu
-; RV64-NEXT: vsext.vf8 v16, v8
-; RV64-NEXT: vsetvli zero, zero, e8, m1, tu, mu
-; RV64-NEXT: vloxei64.v v12, (a0), v16, v0.t
; RV64-NEXT: csrr a1, vlenb
+; RV64-NEXT: srli a2, a1, 2
+; RV64-NEXT: vsetvli a3, zero, e8, mf2, ta, mu
+; RV64-NEXT: vslidedown.vx v26, v0, a2
; RV64-NEXT: srli a1, a1, 3
; RV64-NEXT: vsetvli a2, zero, e8, mf4, ta, mu
-; RV64-NEXT: vslidedown.vx v0, v0, a1
+; RV64-NEXT: vslidedown.vx v0, v26, a1
; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, mu
-; RV64-NEXT: vsext.vf8 v16, v9
-; RV64-NEXT: vsetvli zero, zero, e8, m1, tu, mu
-; RV64-NEXT: vloxei64.v v13, (a0), v16, v0.t
-; RV64-NEXT: slli a2, a1, 1
-; RV64-NEXT: vsetvli a3, zero, e8, mf2, ta, mu
-; RV64-NEXT: vslidedown.vx v25, v25, a2
-; RV64-NEXT: vsetvli a2, zero, e8, mf4, ta, mu
-; RV64-NEXT: vslidedown.vx v0, v25, a1
-; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu
; RV64-NEXT: vsext.vf8 v16, v11
; RV64-NEXT: vsetvli zero, zero, e8, m1, tu, mu
; RV64-NEXT: vloxei64.v v15, (a0), v16, v0.t
; RV64-NEXT: vsetvli zero, zero, e64, m8, ta, mu
; RV64-NEXT: vsext.vf8 v16, v10
; RV64-NEXT: vsetvli zero, zero, e8, m1, tu, mu
-; RV64-NEXT: vmv1r.v v0, v25
+; RV64-NEXT: vmv1r.v v0, v26
; RV64-NEXT: vloxei64.v v14, (a0), v16, v0.t
+; RV64-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV64-NEXT: vsext.vf8 v16, v8
+; RV64-NEXT: vsetvli zero, zero, e8, m1, tu, mu
+; RV64-NEXT: vmv1r.v v0, v25
+; RV64-NEXT: vloxei64.v v12, (a0), v16, v0.t
+; RV64-NEXT: vsetvli a2, zero, e8, mf4, ta, mu
+; RV64-NEXT: vslidedown.vx v0, v25, a1
+; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu
+; RV64-NEXT: vsext.vf8 v16, v9
+; RV64-NEXT: vsetvli zero, zero, e8, m1, tu, mu
+; RV64-NEXT: vloxei64.v v13, (a0), v16, v0.t
; RV64-NEXT: vmv4r.v v8, v12
; RV64-NEXT: ret
%ptrs = getelementptr inbounds i8, i8* %base, <vscale x 32 x i8> %idxs
; CHECK-LABEL: vscale_non_pow2:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: csrr a0, vlenb
-; CHECK-NEXT: srli a0, a0, 3
-; CHECK-NEXT: addi a1, zero, 24
-; CHECK-NEXT: mul a0, a0, a1
+; CHECK-NEXT: slli a1, a0, 1
+; CHECK-NEXT: add a0, a1, a0
; CHECK-NEXT: ret
entry:
%0 = call i32 @llvm.vscale.i32()
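Here the multiplier 24 is not a power of two but is a multiple of 8, so the
lowering emits vscale * 24 = (VLENB / 8) * 24 = VLENB * 3, and the multiply
by 3 is then strength-reduced into the shift-and-add pair seen above
(slli a1, a0, 1; add a0, a1, a0).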
;
; RV32-LABEL: vscale_uimmpow2xlen:
; RV32: # %bb.0: # %entry
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: srli a1, a0, 29
-; RV32-NEXT: srli a0, a0, 3
-; RV32-NEXT: slli a0, a0, 6
+; RV32-NEXT: csrr a1, vlenb
+; RV32-NEXT: slli a0, a1, 3
+; RV32-NEXT: srli a1, a1, 29
; RV32-NEXT: ret
entry:
%0 = call i64 @llvm.vscale.i64()
; RV64-LABEL: vscale_non_pow2:
; RV64: # %bb.0: # %entry
; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: srli a0, a0, 3
-; RV64-NEXT: addi a1, zero, 24
-; RV64-NEXT: mul a0, a0, a1
+; RV64-NEXT: slli a1, a0, 1
+; RV64-NEXT: add a0, a1, a0
; RV64-NEXT: ret
;
; RV32-LABEL: vscale_non_pow2:
; RV32: # %bb.0: # %entry
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: srli a1, a0, 3
+; RV32-NEXT: csrr a1, vlenb
+; RV32-NEXT: slli a0, a1, 1
+; RV32-NEXT: add a0, a0, a1
+; RV32-NEXT: srli a1, a1, 3
; RV32-NEXT: addi a2, zero, 24
-; RV32-NEXT: mul a0, a1, a2
; RV32-NEXT: mulhu a1, a1, a2
; RV32-NEXT: ret
entry:
; CHECK-NEXT: addi a0, zero, 3
; CHECK-NEXT: vmul.vx v8, v8, a0
; CHECK-NEXT: csrr a0, vlenb
-; CHECK-NEXT: srli a0, a0, 3
-; CHECK-NEXT: addi a1, zero, 24
-; CHECK-NEXT: mul a0, a0, a1
+; CHECK-NEXT: slli a1, a0, 1
+; CHECK-NEXT: add a0, a1, a0
; CHECK-NEXT: vadd.vx v16, v8, a0
; CHECK-NEXT: ret
entry:
; CHECK-NEXT: vle1.v v25, (a0)
; CHECK-NEXT: vmand.mm v25, v0, v25
; CHECK-NEXT: csrr a0, vlenb
-; CHECK-NEXT: srli a0, a0, 3
-; CHECK-NEXT: vsetvli a2, zero, e8, mf4, ta, mu
-; CHECK-NEXT: vslidedown.vx v0, v25, a0
+; CHECK-NEXT: srli a2, a0, 3
+; CHECK-NEXT: vsetvli a3, zero, e8, mf4, ta, mu
+; CHECK-NEXT: vslidedown.vx v0, v25, a2
; CHECK-NEXT: vmv1r.v v2, v25
; CHECK-NEXT: vsetvli a2, zero, e64, m8, ta, mu
; CHECK-NEXT: vmv.v.i v24, 0
; CHECK-NEXT: vmv1r.v v0, v2
; CHECK-NEXT: vmerge.vvm v8, v24, v8, v0
; CHECK-NEXT: vs8r.v v8, (a1)
-; CHECK-NEXT: slli a0, a0, 6
+; CHECK-NEXT: slli a0, a0, 3
; CHECK-NEXT: add a0, a1, a0
; CHECK-NEXT: vs8r.v v16, (a0)
; CHECK-NEXT: ret