setOperationAction(ISD::MUL, MVT::v4i32, Custom);
setOperationAction(ISD::MUL, MVT::v2i64, Custom);
+ // Saturates
for (MVT VT : { MVT::v8i8, MVT::v4i16, MVT::v2i32,
MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v2i64 }) {
- // Vector reductions
- setOperationAction(ISD::VECREDUCE_ADD, VT, Custom);
- setOperationAction(ISD::VECREDUCE_SMAX, VT, Custom);
- setOperationAction(ISD::VECREDUCE_SMIN, VT, Custom);
- setOperationAction(ISD::VECREDUCE_UMAX, VT, Custom);
- setOperationAction(ISD::VECREDUCE_UMIN, VT, Custom);
-
- // Saturates
setOperationAction(ISD::SADDSAT, VT, Legal);
setOperationAction(ISD::UADDSAT, VT, Legal);
setOperationAction(ISD::SSUBSAT, VT, Legal);
setOperationAction(ISD::USUBSAT, VT, Legal);
}
+
+ // Vector reductions
for (MVT VT : { MVT::v4f16, MVT::v2f32,
MVT::v8f16, MVT::v4f32, MVT::v2f64 }) {
setOperationAction(ISD::VECREDUCE_FMAX, VT, Custom);
setOperationAction(ISD::VECREDUCE_FMIN, VT, Custom);
}
+ for (MVT VT : { MVT::v8i8, MVT::v4i16, MVT::v2i32,
+ MVT::v16i8, MVT::v8i16, MVT::v4i32 }) {
+ setOperationAction(ISD::VECREDUCE_ADD, VT, Custom);
+ setOperationAction(ISD::VECREDUCE_SMAX, VT, Custom);
+ setOperationAction(ISD::VECREDUCE_SMIN, VT, Custom);
+ setOperationAction(ISD::VECREDUCE_UMAX, VT, Custom);
+ setOperationAction(ISD::VECREDUCE_UMIN, VT, Custom);
+ }
+ setOperationAction(ISD::VECREDUCE_ADD, MVT::v2i64, Custom);
setOperationAction(ISD::ANY_EXTEND, MVT::v4i32, Legal);
setTruncStoreAction(MVT::v2i32, MVT::v2i16, Expand);
declare i64 @llvm.experimental.vector.reduce.umax.v1i64(<1 x i64> %a)
declare i128 @llvm.experimental.vector.reduce.umax.v1i128(<1 x i128> %a)
+declare i64 @llvm.experimental.vector.reduce.umax.v2i64(<2 x i64> %a)
declare i8 @llvm.experimental.vector.reduce.umax.v3i8(<3 x i8> %a)
declare i8 @llvm.experimental.vector.reduce.umax.v9i8(<9 x i8> %a)
declare i32 @llvm.experimental.vector.reduce.umax.v3i32(<3 x i32> %a)
ret i128 %b
}
+; There is no NEON vector UMAX instruction for 64-bit elements, so the
+; v2i64 reduction is expanded to a scalar unsigned compare-and-select
+; (`cmp` + `csel ... hi`) of the two extracted lanes.
+define i64 @test_v2i64(<2 x i64> %a) nounwind {
+; CHECK-LABEL: test_v2i64:
+; CHECK: // %bb.0:
+; CHECK-NEXT: mov x8, v0.d[1]
+; CHECK-NEXT: fmov x9, d0
+; CHECK-NEXT: cmp x9, x8
+; CHECK-NEXT: csel x0, x9, x8, hi
+; CHECK-NEXT: ret
+ %b = call i64 @llvm.experimental.vector.reduce.umax.v2i64(<2 x i64> %a)
+ ret i64 %b
+}
+
define i8 @test_v3i8(<3 x i8> %a) nounwind {
; CHECK-LABEL: test_v3i8:
; CHECK: // %bb.0: