As discussed on D97478: removing the Custom lowering tag for the saturating add/sub opcodes on vXi1 types changes the add/sub-overflow expansion, since those nodes no longer expand through the saturated-arithmetic codegen path.
setOperationAction(ISD::ANY_EXTEND, VT, Custom);
}
- for (auto VT : { MVT::v1i1, MVT::v2i1, MVT::v4i1, MVT::v8i1, MVT::v16i1 }) {
- setOperationAction(ISD::UADDSAT, VT, Custom);
- setOperationAction(ISD::SADDSAT, VT, Custom);
- setOperationAction(ISD::USUBSAT, VT, Custom);
- setOperationAction(ISD::SSUBSAT, VT, Custom);
- setOperationAction(ISD::VSELECT, VT, Expand);
- }
+ for (auto VT : { MVT::v1i1, MVT::v2i1, MVT::v4i1, MVT::v8i1, MVT::v16i1 })
+ setOperationAction(ISD::VSELECT, VT, Expand);
for (auto VT : { MVT::v2i1, MVT::v4i1, MVT::v8i1, MVT::v16i1 }) {
setOperationAction(ISD::SETCC, VT, Custom);
for (auto VT : { MVT::v32i1, MVT::v64i1 }) {
setOperationAction(ISD::VSELECT, VT, Expand);
- setOperationAction(ISD::UADDSAT, VT, Custom);
- setOperationAction(ISD::SADDSAT, VT, Custom);
- setOperationAction(ISD::USUBSAT, VT, Custom);
- setOperationAction(ISD::SSUBSAT, VT, Custom);
-
setOperationAction(ISD::TRUNCATE, VT, Custom);
setOperationAction(ISD::SETCC, VT, Custom);
setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
unsigned Opcode = Op.getOpcode();
SDLoc DL(Op);
- if (VT.getScalarType() == MVT::i1) {
- switch (Opcode) {
- default: llvm_unreachable("Expected saturated arithmetic opcode");
- case ISD::UADDSAT:
- case ISD::SADDSAT:
- // *addsat i1 X, Y --> X | Y
- return DAG.getNode(ISD::OR, DL, VT, X, Y);
- case ISD::USUBSAT:
- case ISD::SSUBSAT:
- // *subsat i1 X, Y --> X & ~Y
- return DAG.getNode(ISD::AND, DL, VT, X, DAG.getNOT(DL, Y, VT));
- }
- }
-
if (VT == MVT::v32i16 || VT == MVT::v64i8 ||
(VT.is256BitVector() && !Subtarget.hasInt256())) {
assert(Op.getSimpleValueType().isInteger() &&
;
; AVX512-LABEL: saddo_v4i1:
; AVX512: # %bb.0:
-; AVX512-NEXT: vpslld $31, %xmm1, %xmm1
; AVX512-NEXT: vpslld $31, %xmm0, %xmm0
-; AVX512-NEXT: vpor %xmm1, %xmm0, %xmm2
; AVX512-NEXT: vptestmd %xmm0, %xmm0, %k0
+; AVX512-NEXT: vpslld $31, %xmm1, %xmm1
; AVX512-NEXT: vptestmd %xmm1, %xmm1, %k1
-; AVX512-NEXT: vptestmd %xmm2, %xmm2, %k2
-; AVX512-NEXT: kxorw %k1, %k0, %k0
-; AVX512-NEXT: kxorw %k2, %k0, %k1
+; AVX512-NEXT: kxorw %k1, %k0, %k2
+; AVX512-NEXT: vptestnmd %xmm0, %xmm0, %k0 {%k2}
+; AVX512-NEXT: kxorw %k0, %k1, %k1
; AVX512-NEXT: vpcmpeqd %xmm0, %xmm0, %xmm0
; AVX512-NEXT: vmovdqa32 %xmm0, %xmm0 {%k1} {z}
-; AVX512-NEXT: kshiftlw $12, %k0, %k0
+; AVX512-NEXT: kshiftlw $12, %k2, %k0
; AVX512-NEXT: kshiftrw $12, %k0, %k0
; AVX512-NEXT: kmovd %k0, %eax
; AVX512-NEXT: movb %al, (%rdi)
;
; AVX512-LABEL: ssubo_v4i1:
; AVX512: # %bb.0:
-; AVX512-NEXT: vpslld $31, %xmm1, %xmm1
-; AVX512-NEXT: vptestmd %xmm1, %xmm1, %k0
; AVX512-NEXT: vpslld $31, %xmm0, %xmm0
-; AVX512-NEXT: vptestmd %xmm0, %xmm0, %k1
-; AVX512-NEXT: vptestnmd %xmm1, %xmm1, %k2 {%k1}
-; AVX512-NEXT: kxorw %k0, %k1, %k0
-; AVX512-NEXT: kxorw %k2, %k0, %k1
+; AVX512-NEXT: vptestmd %xmm0, %xmm0, %k0
+; AVX512-NEXT: vpslld $31, %xmm1, %xmm1
+; AVX512-NEXT: vptestmd %xmm1, %xmm1, %k1
+; AVX512-NEXT: kxorw %k1, %k0, %k1
+; AVX512-NEXT: vptestnmd %xmm0, %xmm0, %k2 {%k1}
; AVX512-NEXT: vpcmpeqd %xmm0, %xmm0, %xmm0
-; AVX512-NEXT: vmovdqa32 %xmm0, %xmm0 {%k1} {z}
-; AVX512-NEXT: kshiftlw $12, %k0, %k0
+; AVX512-NEXT: vmovdqa32 %xmm0, %xmm0 {%k2} {z}
+; AVX512-NEXT: kshiftlw $12, %k1, %k0
; AVX512-NEXT: kshiftrw $12, %k0, %k0
; AVX512-NEXT: kmovd %k0, %eax
; AVX512-NEXT: movb %al, (%rdi)