/* Return 'REQUIREMENT' for machine_mode 'MODE'.
For example: 'MODE' = VNx64BImode needs TARGET_MIN_VLEN > 32. */
#ifndef ENTRY
-#define ENTRY(MODE, REQUIREMENT)
+#define ENTRY(MODE, REQUIREMENT, VLMUL_FOR_MIN_VLEN32, RATIO_FOR_MIN_VLEN32, \
+ VLMUL_FOR_MIN_VLEN64, RATIO_FOR_MIN_VLEN64)
+/* Each ENTRY now also records the mode's VLMUL group and its SEW/LMUL
+   ratio, once for TARGET_MIN_VLEN == 32 and once for TARGET_MIN_VLEN == 64
+   (LMUL_RESERVED/0 when the mode is unsupported for that MIN_VLEN).  */
#endif
/* Flag of FP32 vector. */
#ifndef TARGET_VECTOR_FP32
/* NOTE(review): this #ifndef block is empty -- the fallback
   #define TARGET_VECTOR_FP32 appears to be missing here (the macro is used
   by the SF entries below and #undef'd at the end of the file).  Confirm
   against the full file whether the definition was dropped.  */
#endif
/* Mask modes. Disable VNx64BImode when TARGET_MIN_VLEN == 32. */
-ENTRY (VNx64BI, TARGET_MIN_VLEN > 32)
-ENTRY (VNx32BI, true)
-ENTRY (VNx16BI, true)
-ENTRY (VNx8BI, true)
-ENTRY (VNx4BI, true)
-ENTRY (VNx2BI, true)
-ENTRY (VNx1BI, true)
+/* A mask mode with N units follows the SEW = 8 integer mode with N units,
+   so larger masks get larger LMUL and ratio = 8 / LMUL, exactly as in the
+   SEW = 8 block below.  Column order: (VLMUL, ratio) for MIN_VLEN == 32,
+   then (VLMUL, ratio) for MIN_VLEN == 64.  */
+ENTRY (VNx64BI, TARGET_MIN_VLEN > 32, LMUL_RESERVED, 0, LMUL_8, 1)
+ENTRY (VNx32BI, true, LMUL_8, 1, LMUL_4, 2)
+ENTRY (VNx16BI, true, LMUL_4, 2, LMUL_2, 4)
+ENTRY (VNx8BI, true, LMUL_2, 4, LMUL_1, 8)
+ENTRY (VNx4BI, true, LMUL_1, 8, LMUL_F2, 16)
+ENTRY (VNx2BI, true, LMUL_F2, 16, LMUL_F4, 32)
+ENTRY (VNx1BI, true, LMUL_F4, 32, LMUL_F8, 64)
/* SEW = 8. Disable VNx64QImode when TARGET_MIN_VLEN == 32. */
-ENTRY (VNx64QI, TARGET_MIN_VLEN > 32)
-ENTRY (VNx32QI, true)
-ENTRY (VNx16QI, true)
-ENTRY (VNx8QI, true)
-ENTRY (VNx4QI, true)
-ENTRY (VNx2QI, true)
-ENTRY (VNx1QI, true)
+/* VNx64QI is unsupported when MIN_VLEN == 32, so its first (VLMUL, ratio)
+   pair is LMUL_RESERVED/0 and the valid LMUL_8/1 pair belongs in the
+   MIN_VLEN == 64 columns.  Ratio = SEW / LMUL = 8 / LMUL throughout.  */
+ENTRY (VNx64QI, TARGET_MIN_VLEN > 32, LMUL_RESERVED, 0, LMUL_8, 1)
+ENTRY (VNx32QI, true, LMUL_8, 1, LMUL_4, 2)
+ENTRY (VNx16QI, true, LMUL_4, 2, LMUL_2, 4)
+ENTRY (VNx8QI, true, LMUL_2, 4, LMUL_1, 8)
+ENTRY (VNx4QI, true, LMUL_1, 8, LMUL_F2, 16)
+ENTRY (VNx2QI, true, LMUL_F2, 16, LMUL_F4, 32)
+ENTRY (VNx1QI, true, LMUL_F4, 32, LMUL_F8, 64)
/* SEW = 16. Disable VNx32HImode when TARGET_MIN_VLEN == 32. */
-ENTRY (VNx32HI, TARGET_MIN_VLEN > 32)
-ENTRY (VNx16HI, true)
-ENTRY (VNx8HI, true)
-ENTRY (VNx4HI, true)
-ENTRY (VNx2HI, true)
-ENTRY (VNx1HI, true)
+/* VNx32HI is unsupported when MIN_VLEN == 32: LMUL_RESERVED/0 goes in the
+   MIN_VLEN == 32 columns, LMUL_8/2 in the MIN_VLEN == 64 columns.
+   Ratio = 16 / LMUL.  */
+ENTRY (VNx32HI, TARGET_MIN_VLEN > 32, LMUL_RESERVED, 0, LMUL_8, 2)
+ENTRY (VNx16HI, true, LMUL_8, 2, LMUL_4, 4)
+ENTRY (VNx8HI, true, LMUL_4, 4, LMUL_2, 8)
+ENTRY (VNx4HI, true, LMUL_2, 8, LMUL_1, 16)
+ENTRY (VNx2HI, true, LMUL_1, 16, LMUL_F2, 32)
+ENTRY (VNx1HI, true, LMUL_F2, 32, LMUL_F4, 64)
/* TODO: Disable all FP16 vectors; enable them when 'zvfh' is supported. */
-ENTRY (VNx32HF, false)
-ENTRY (VNx16HF, false)
-ENTRY (VNx8HF, false)
-ENTRY (VNx4HF, false)
-ENTRY (VNx2HF, false)
-ENTRY (VNx1HF, false)
+/* FP16 modes mirror the SEW = 16 integer table: VNx32HF cannot exist when
+   MIN_VLEN == 32, hence LMUL_RESERVED/0 in its first column pair.  */
+ENTRY (VNx32HF, false, LMUL_RESERVED, 0, LMUL_8, 2)
+ENTRY (VNx16HF, false, LMUL_8, 2, LMUL_4, 4)
+ENTRY (VNx8HF, false, LMUL_4, 4, LMUL_2, 8)
+ENTRY (VNx4HF, false, LMUL_2, 8, LMUL_1, 16)
+ENTRY (VNx2HF, false, LMUL_1, 16, LMUL_F2, 32)
+ENTRY (VNx1HF, false, LMUL_F2, 32, LMUL_F4, 64)
/* SEW = 32. Disable VNx16SImode when TARGET_MIN_VLEN == 32.
For single-precision floating-point, we need TARGET_VECTOR_FP32 ==
RVV_ENABLE. */
-ENTRY (VNx16SI, TARGET_MIN_VLEN > 32)
-ENTRY (VNx8SI, true)
-ENTRY (VNx4SI, true)
-ENTRY (VNx2SI, true)
-ENTRY (VNx1SI, true)
-
-ENTRY (VNx16SF, TARGET_VECTOR_FP32 && (TARGET_MIN_VLEN > 32))
-ENTRY (VNx8SF, TARGET_VECTOR_FP32)
-ENTRY (VNx4SF, TARGET_VECTOR_FP32)
-ENTRY (VNx2SF, TARGET_VECTOR_FP32)
-ENTRY (VNx1SF, TARGET_VECTOR_FP32)
+/* VNx16SI/VNx16SF are unsupported when MIN_VLEN == 32, so LMUL_RESERVED/0
+   occupies their MIN_VLEN == 32 columns.  The ratio column is always
+   SEW / LMUL = 32 / LMUL, consistent with every other SEW block.  */
+ENTRY (VNx16SI, TARGET_MIN_VLEN > 32, LMUL_RESERVED, 0, LMUL_8, 4)
+ENTRY (VNx8SI, true, LMUL_8, 4, LMUL_4, 8)
+ENTRY (VNx4SI, true, LMUL_4, 8, LMUL_2, 16)
+ENTRY (VNx2SI, true, LMUL_2, 16, LMUL_1, 32)
+ENTRY (VNx1SI, true, LMUL_1, 32, LMUL_F2, 64)
+
+ENTRY (VNx16SF, TARGET_VECTOR_FP32 && (TARGET_MIN_VLEN > 32), LMUL_RESERVED,
+ 0, LMUL_8, 4)
+ENTRY (VNx8SF, TARGET_VECTOR_FP32, LMUL_8, 4, LMUL_4, 8)
+ENTRY (VNx4SF, TARGET_VECTOR_FP32, LMUL_4, 8, LMUL_2, 16)
+ENTRY (VNx2SF, TARGET_VECTOR_FP32, LMUL_2, 16, LMUL_1, 32)
+ENTRY (VNx1SF, TARGET_VECTOR_FP32, LMUL_1, 32, LMUL_F2, 64)
/* SEW = 64. Enable when TARGET_MIN_VLEN > 32.
For double-precision floating-point, we need TARGET_VECTOR_FP64 ==
RVV_ENABLE. */
+/* No SEW = 64 mode exists when MIN_VLEN == 32, so the MIN_VLEN == 32
+   (VLMUL, ratio) pair is always LMUL_RESERVED/0; the MIN_VLEN == 64 pair
+   uses ratio = SEW / LMUL = 64 / LMUL.  */
-ENTRY (VNx8DI, TARGET_MIN_VLEN > 32)
-ENTRY (VNx4DI, TARGET_MIN_VLEN > 32)
-ENTRY (VNx2DI, TARGET_MIN_VLEN > 32)
-ENTRY (VNx1DI, TARGET_MIN_VLEN > 32)
-
-ENTRY (VNx8DF, TARGET_VECTOR_FP64 && (TARGET_MIN_VLEN > 32))
-ENTRY (VNx4DF, TARGET_VECTOR_FP64)
-ENTRY (VNx2DF, TARGET_VECTOR_FP64)
-ENTRY (VNx1DF, TARGET_VECTOR_FP64)
+ENTRY (VNx8DI, TARGET_MIN_VLEN > 32, LMUL_RESERVED, 0, LMUL_8, 8)
+ENTRY (VNx4DI, TARGET_MIN_VLEN > 32, LMUL_RESERVED, 0, LMUL_4, 16)
+ENTRY (VNx2DI, TARGET_MIN_VLEN > 32, LMUL_RESERVED, 0, LMUL_2, 32)
+ENTRY (VNx1DI, TARGET_MIN_VLEN > 32, LMUL_RESERVED, 0, LMUL_1, 64)
+
+ENTRY (VNx8DF, TARGET_VECTOR_FP64 && (TARGET_MIN_VLEN > 32), LMUL_RESERVED, 0,
+ LMUL_8, 8)
+ENTRY (VNx4DF, TARGET_VECTOR_FP64, LMUL_RESERVED, 0, LMUL_4, 16)
+ENTRY (VNx2DF, TARGET_VECTOR_FP64, LMUL_RESERVED, 0, LMUL_2, 32)
+ENTRY (VNx1DF, TARGET_VECTOR_FP64, LMUL_RESERVED, 0, LMUL_1, 64)
#undef TARGET_VECTOR_FP32
#undef TARGET_VECTOR_FP64
UNSPEC_VPREDICATE
])
+;; Sentinel used as the default of the RVV attributes in this file
+;; (sew, vlmul, ratio, *_op_idx) meaning "attribute does not apply
+;; to this instruction".
+(define_constants [
+ (INVALID_ATTRIBUTE 255)
+])
+
+;; True if the instruction type is an RVV instruction that includes the
+;; VTYPE global status register in its use op list.
+;; We know VTYPE has 4 fields: SEW, LMUL, TA, MA.
+;; A type is listed here when the instruction needs any of the VTYPE
+;; fields to be set.
(define_attr "has_vtype_op" "false,true"
  (cond [(eq_attr "type" "vlde,vste,vldm,vstm,vlds,vsts,\
			  vldux,vldox,vstux,vstox,vldff,\
			  vialu,viwalu,vext,vicalu,vshift,vnshift,vicmp,\
			  vimul,vidiv,viwmul,vimuladd,viwmuladd,vimerge,vimov,\
			  vsalu,vaalu,vsmul,vsshift,vnclip,\
			  vfalu,vfwalu,vfmul,vfdiv,vfwmul,vfmuladd,vfwmuladd,vfsqrt,vfrecp,\
			  vfcmp,vfsgnj,vfclass,vfmerge,vfmov,\
			  vfcvtitof,vfcvtftoi,vfwcvtitof,vfwcvtftoi,\
			  vfwcvtftof,vfncvtitof,vfncvtftoi,vfncvtftof,\
			  vired,viwred,vfred,vfredo,vfwred,vfwredo,\
			  vmalu,vmpop,vmffs,vmsfs,vmiota,vmidx,vimovvx,vimovxv,vfmovvf,vfmovfv,\
			  vislide,vislide1,vfslide1,vgather,vcompress")
	 (const_string "true")]
	(const_string "false")))
+
+;; True if the instruction type is an RVV instruction that includes the
+;; VL global status register in its use op list, i.e. the instruction
+;; needs a vector length to be specified.
+;; NOTE(review): unlike has_vtype_op, this list omits vimovvx and vfmovvf
+;; -- presumably the vector->scalar moves do not depend on VL; confirm.
(define_attr "has_vl_op" "false,true"
  (cond [(eq_attr "type" "vlde,vste,vldm,vstm,vlds,vsts,\
			  vldux,vldox,vstux,vstox,vldff,\
			  vialu,viwalu,vext,vicalu,vshift,vnshift,vicmp,\
			  vimul,vidiv,viwmul,vimuladd,viwmuladd,vimerge,vimov,\
			  vsalu,vaalu,vsmul,vsshift,vnclip,\
			  vfalu,vfwalu,vfmul,vfdiv,vfwmul,vfmuladd,vfwmuladd,vfsqrt,vfrecp,\
			  vfcmp,vfsgnj,vfclass,vfmerge,vfmov,\
			  vfcvtitof,vfcvtftoi,vfwcvtitof,vfwcvtftoi,\
			  vfwcvtftof,vfncvtitof,vfncvtftoi,vfncvtftof,\
			  vired,viwred,vfred,vfredo,vfwred,vfwredo,\
			  vmalu,vmpop,vmffs,vmsfs,vmiota,vmidx,vimovxv,vfmovfv,\
			  vislide,vislide1,vfslide1,vgather,vcompress")
	 (const_string "true")]
	(const_string "false")))
+
+;; The default SEW of an RVV instruction, derived from its mode.  This
+;; attribute doesn't mean the instruction necessarily requires a SEW check
+;; -- e.g. vlm.v only requires a ratio check.  However, we need a default
+;; SEW value for the vsetvl instruction, since there is no field for the
+;; ratio in the vsetvl instruction encoding.
+;; Mask modes (VNxNBI) are grouped with the SEW = 8 modes.
(define_attr "sew" ""
  (cond [(eq_attr "mode" "VNx1QI,VNx2QI,VNx4QI,VNx8QI,VNx16QI,VNx32QI,VNx64QI,\
			  VNx1BI,VNx2BI,VNx4BI,VNx8BI,VNx16BI,VNx32BI,VNx64BI")
	 (const_int 8)
	 (eq_attr "mode" "VNx1HI,VNx2HI,VNx4HI,VNx8HI,VNx16HI,VNx32HI")
	 (const_int 16)
	 (eq_attr "mode" "VNx1SI,VNx2SI,VNx4SI,VNx8SI,VNx16SI,\
			  VNx1SF,VNx2SF,VNx4SF,VNx8SF,VNx16SF")
	 (const_int 32)
	 (eq_attr "mode" "VNx1DI,VNx2DI,VNx4DI,VNx8DI,\
			  VNx1DF,VNx2DF,VNx4DF,VNx8DF")
	 (const_int 64)]
	(const_int INVALID_ATTRIBUTE)))
+
+;; Ditto for LMUL: the VLMUL group of the instruction's mode, resolved at
+;; run time through riscv_vector::get_vlmul because it depends on
+;; TARGET_MIN_VLEN.  Mask (BI) and floating-point modes share the entry of
+;; the corresponding integer mode.
(define_attr "vlmul" ""
  (cond [(eq_attr "mode" "VNx1QI,VNx1BI")
	 (symbol_ref "riscv_vector::get_vlmul(E_VNx1QImode)")
	 (eq_attr "mode" "VNx2QI,VNx2BI")
	 (symbol_ref "riscv_vector::get_vlmul(E_VNx2QImode)")
	 (eq_attr "mode" "VNx4QI,VNx4BI")
	 (symbol_ref "riscv_vector::get_vlmul(E_VNx4QImode)")
	 (eq_attr "mode" "VNx8QI,VNx8BI")
	 (symbol_ref "riscv_vector::get_vlmul(E_VNx8QImode)")
	 (eq_attr "mode" "VNx16QI,VNx16BI")
	 (symbol_ref "riscv_vector::get_vlmul(E_VNx16QImode)")
	 (eq_attr "mode" "VNx32QI,VNx32BI")
	 (symbol_ref "riscv_vector::get_vlmul(E_VNx32QImode)")
	 (eq_attr "mode" "VNx64QI,VNx64BI")
	 (symbol_ref "riscv_vector::get_vlmul(E_VNx64QImode)")
	 (eq_attr "mode" "VNx1HI")
	 (symbol_ref "riscv_vector::get_vlmul(E_VNx1HImode)")
	 (eq_attr "mode" "VNx2HI")
	 (symbol_ref "riscv_vector::get_vlmul(E_VNx2HImode)")
	 (eq_attr "mode" "VNx4HI")
	 (symbol_ref "riscv_vector::get_vlmul(E_VNx4HImode)")
	 (eq_attr "mode" "VNx8HI")
	 (symbol_ref "riscv_vector::get_vlmul(E_VNx8HImode)")
	 (eq_attr "mode" "VNx16HI")
	 (symbol_ref "riscv_vector::get_vlmul(E_VNx16HImode)")
	 (eq_attr "mode" "VNx32HI")
	 (symbol_ref "riscv_vector::get_vlmul(E_VNx32HImode)")
	 (eq_attr "mode" "VNx1SI,VNx1SF")
	 (symbol_ref "riscv_vector::get_vlmul(E_VNx1SImode)")
	 (eq_attr "mode" "VNx2SI,VNx2SF")
	 (symbol_ref "riscv_vector::get_vlmul(E_VNx2SImode)")
	 (eq_attr "mode" "VNx4SI,VNx4SF")
	 (symbol_ref "riscv_vector::get_vlmul(E_VNx4SImode)")
	 (eq_attr "mode" "VNx8SI,VNx8SF")
	 (symbol_ref "riscv_vector::get_vlmul(E_VNx8SImode)")
	 (eq_attr "mode" "VNx16SI,VNx16SF")
	 (symbol_ref "riscv_vector::get_vlmul(E_VNx16SImode)")
	 (eq_attr "mode" "VNx1DI,VNx1DF")
	 (symbol_ref "riscv_vector::get_vlmul(E_VNx1DImode)")
	 (eq_attr "mode" "VNx2DI,VNx2DF")
	 (symbol_ref "riscv_vector::get_vlmul(E_VNx2DImode)")
	 (eq_attr "mode" "VNx4DI,VNx4DF")
	 (symbol_ref "riscv_vector::get_vlmul(E_VNx4DImode)")
	 (eq_attr "mode" "VNx8DI,VNx8DF")
	 (symbol_ref "riscv_vector::get_vlmul(E_VNx8DImode)")]
	(const_int INVALID_ATTRIBUTE)))
+
+;; The SEW/LMUL ratio of the instruction's mode, resolved at run time
+;; through riscv_vector::get_ratio.  Only valid for instructions that
+;; require a SEW/LMUL ratio.
+;; NOTE(review): vimov/vfmov are forced to INVALID_ATTRIBUTE first --
+;; presumably those moves do not care about the ratio; confirm against
+;; the VSETVL pass.
(define_attr "ratio" ""
  (cond [(eq_attr "type" "vimov,vfmov")
	 (const_int INVALID_ATTRIBUTE)
	 (eq_attr "mode" "VNx1QI,VNx1BI")
	 (symbol_ref "riscv_vector::get_ratio(E_VNx1QImode)")
	 (eq_attr "mode" "VNx2QI,VNx2BI")
	 (symbol_ref "riscv_vector::get_ratio(E_VNx2QImode)")
	 (eq_attr "mode" "VNx4QI,VNx4BI")
	 (symbol_ref "riscv_vector::get_ratio(E_VNx4QImode)")
	 (eq_attr "mode" "VNx8QI,VNx8BI")
	 (symbol_ref "riscv_vector::get_ratio(E_VNx8QImode)")
	 (eq_attr "mode" "VNx16QI,VNx16BI")
	 (symbol_ref "riscv_vector::get_ratio(E_VNx16QImode)")
	 (eq_attr "mode" "VNx32QI,VNx32BI")
	 (symbol_ref "riscv_vector::get_ratio(E_VNx32QImode)")
	 (eq_attr "mode" "VNx64QI,VNx64BI")
	 (symbol_ref "riscv_vector::get_ratio(E_VNx64QImode)")
	 (eq_attr "mode" "VNx1HI")
	 (symbol_ref "riscv_vector::get_ratio(E_VNx1HImode)")
	 (eq_attr "mode" "VNx2HI")
	 (symbol_ref "riscv_vector::get_ratio(E_VNx2HImode)")
	 (eq_attr "mode" "VNx4HI")
	 (symbol_ref "riscv_vector::get_ratio(E_VNx4HImode)")
	 (eq_attr "mode" "VNx8HI")
	 (symbol_ref "riscv_vector::get_ratio(E_VNx8HImode)")
	 (eq_attr "mode" "VNx16HI")
	 (symbol_ref "riscv_vector::get_ratio(E_VNx16HImode)")
	 (eq_attr "mode" "VNx32HI")
	 (symbol_ref "riscv_vector::get_ratio(E_VNx32HImode)")
	 (eq_attr "mode" "VNx1SI,VNx1SF")
	 (symbol_ref "riscv_vector::get_ratio(E_VNx1SImode)")
	 (eq_attr "mode" "VNx2SI,VNx2SF")
	 (symbol_ref "riscv_vector::get_ratio(E_VNx2SImode)")
	 (eq_attr "mode" "VNx4SI,VNx4SF")
	 (symbol_ref "riscv_vector::get_ratio(E_VNx4SImode)")
	 (eq_attr "mode" "VNx8SI,VNx8SF")
	 (symbol_ref "riscv_vector::get_ratio(E_VNx8SImode)")
	 (eq_attr "mode" "VNx16SI,VNx16SF")
	 (symbol_ref "riscv_vector::get_ratio(E_VNx16SImode)")
	 (eq_attr "mode" "VNx1DI,VNx1DF")
	 (symbol_ref "riscv_vector::get_ratio(E_VNx1DImode)")
	 (eq_attr "mode" "VNx2DI,VNx2DF")
	 (symbol_ref "riscv_vector::get_ratio(E_VNx2DImode)")
	 (eq_attr "mode" "VNx4DI,VNx4DF")
	 (symbol_ref "riscv_vector::get_ratio(E_VNx4DImode)")
	 (eq_attr "mode" "VNx8DI,VNx8DF")
	 (symbol_ref "riscv_vector::get_ratio(E_VNx8DImode)")]
	(const_int INVALID_ATTRIBUTE)))
+
+;; The index of operand[] to get the merge op.
+;; NOTE(review): these indices must match the operand layout of the RVV
+;; insn patterns (not visible in this file) -- keep them in sync.
(define_attr "merge_op_idx" ""
	(cond [(eq_attr "type" "vlde,vste,vimov,vfmov,vldm,vstm,vlds,vmalu")
	 (const_int 2)]
	(const_int INVALID_ATTRIBUTE)))
+
+;; The index of operand[] to get the avl (application vector length) op.
+;; NOTE(review): indices assume the RVV pattern operand layout defined
+;; elsewhere -- keep in sync.
(define_attr "vl_op_idx" ""
	(cond [(eq_attr "type" "vlde,vste,vimov,vfmov,vldm,vstm,vlds,vmalu")
	 (const_int 4)]
	(const_int INVALID_ATTRIBUTE)))
+
+;; The index of operand[] to get the tail policy op.
+;; NOTE(review): vldm/vstm/vmalu are absent here although they have a
+;; vl_op_idx -- presumably mask operations carry no tail policy; confirm.
(define_attr "tail_policy_op_idx" ""
	(cond [(eq_attr "type" "vlde,vste,vimov,vfmov,vlds")
	 (const_int 5)]
	(const_int INVALID_ATTRIBUTE)))
+
+;; The index of operand[] to get the mask policy op.
+;; NOTE(review): only the load/store types keep a mask policy operand
+;; at index 6 -- keep in sync with the pattern operand layout.
(define_attr "mask_policy_op_idx" ""
	(cond [(eq_attr "type" "vlde,vste,vlds")
	 (const_int 6)]
	(const_int INVALID_ATTRIBUTE)))
+
;; -----------------------------------------------------------------
;; ---- Miscellaneous Operations
;; -----------------------------------------------------------------