TARGET_BUILTIN(__builtin_s390_vfsqsb, "V4fV4f", "nc", "vector-enhancements-1")
TARGET_BUILTIN(__builtin_s390_vftcisb, "V4SiV4fIii*", "nc", "vector-enhancements-1")
+// Vector-enhancements facility 2 intrinsics.
+TARGET_BUILTIN(__builtin_s390_vsld, "V16UcV16UcV16UcIi", "nc", "vector-enhancements-2")
+TARGET_BUILTIN(__builtin_s390_vsrd, "V16UcV16UcV16UcIi", "nc", "vector-enhancements-2")
+TARGET_BUILTIN(__builtin_s390_vstrsb, "V16UcV16UcV16UcV16Uci*", "nc", "vector-enhancements-2")
+TARGET_BUILTIN(__builtin_s390_vstrsh, "V16UcV8UsV8UsV16Uci*", "nc", "vector-enhancements-2")
+TARGET_BUILTIN(__builtin_s390_vstrsf, "V16UcV4UiV4UiV16Uci*", "nc", "vector-enhancements-2")
+TARGET_BUILTIN(__builtin_s390_vstrszb, "V16UcV16UcV16UcV16Uci*", "nc", "vector-enhancements-2")
+TARGET_BUILTIN(__builtin_s390_vstrszh, "V16UcV8UsV8UsV16Uci*", "nc", "vector-enhancements-2")
+TARGET_BUILTIN(__builtin_s390_vstrszf, "V16UcV4UiV4UiV16Uci*", "nc", "vector-enhancements-2")
+
+// Helpers to implement vec_revb.
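+// They are expanded to the generic bswap intrinsic rather than to
+// target-specific IR (see the CodeGen change below).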
+TARGET_BUILTIN(__builtin_s390_vlbrh, "V8UsV8Us", "nc", "vector")
+TARGET_BUILTIN(__builtin_s390_vlbrf, "V4UiV4Ui", "nc", "vector")
+TARGET_BUILTIN(__builtin_s390_vlbrg, "V2ULLiV2ULLi", "nc", "vector")
+
#undef BUILTIN
#undef TARGET_BUILTIN
{{"arch9"}, 9}, {{"z196"}, 9},
{{"arch10"}, 10}, {{"zEC12"}, 10},
{{"arch11"}, 11}, {{"z13"}, 11},
- {{"arch12"}, 12}, {{"z14"}, 12}
+ {{"arch12"}, 12}, {{"z14"}, 12},
+ {{"arch13"}, 13},
};
int SystemZTargetInfo::getISARevision(StringRef Name) const {
.Case("arch10", ISARevision >= 10)
.Case("arch11", ISARevision >= 11)
.Case("arch12", ISARevision >= 12)
+ .Case("arch13", ISARevision >= 13)
.Case("htm", HasTransactionalExecution)
.Case("vx", HasVector)
.Default(false);
if (HasVector)
Builder.defineMacro("__VX__");
if (Opts.ZVector)
- Builder.defineMacro("__VEC__", "10302");
+ Builder.defineMacro("__VEC__", "10303");
}
ArrayRef<Builtin::Info> SystemZTargetInfo::getTargetBuiltins() const {
Features["vector"] = true;
if (ISARevision >= 12)
Features["vector-enhancements-1"] = true;
+ if (ISARevision >= 13)
+ Features["vector-enhancements-2"] = true;
return TargetInfo::initFeatureMap(Features, Diags, CPU, FeaturesVec);
}
return Builder.CreateCall(F, {X, Y, M4Value});
}
+ case SystemZ::BI__builtin_s390_vlbrh:
+ case SystemZ::BI__builtin_s390_vlbrf:
+ case SystemZ::BI__builtin_s390_vlbrg: {
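+    // Byte-swap each element by emitting the generic bswap intrinsic
+    // on the vector result type.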
+ llvm::Type *ResultType = ConvertType(E->getType());
+ Value *X = EmitScalarExpr(E->getArg(0));
+ Function *F = CGM.getIntrinsic(Intrinsic::bswap, ResultType);
+ return Builder.CreateCall(F, X);
+ }
+
// Vector intrinsics that output the post-instruction CC value.
#define INTRINSIC_WITH_CC(NAME) \
INTRINSIC_WITH_CC(s390_vftcisb);
INTRINSIC_WITH_CC(s390_vftcidb);
+ INTRINSIC_WITH_CC(s390_vstrsb);
+ INTRINSIC_WITH_CC(s390_vstrsh);
+ INTRINSIC_WITH_CC(s390_vstrsf);
+
+ INTRINSIC_WITH_CC(s390_vstrszb);
+ INTRINSIC_WITH_CC(s390_vstrszh);
+ INTRINSIC_WITH_CC(s390_vstrszf);
+
#undef INTRINSIC_WITH_CC
default:
}
#endif
+/*-- vec_revb ---------------------------------------------------------------*/
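+// vec_revb reverses the byte order within each element of the vector.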
+
+static inline __ATTRS_o_ai vector signed short
+vec_revb(vector signed short __vec) {
+ return (vector signed short)
+ __builtin_s390_vlbrh((vector unsigned short)__vec);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_revb(vector unsigned short __vec) {
+ return __builtin_s390_vlbrh(__vec);
+}
+
+static inline __ATTRS_o_ai vector signed int
+vec_revb(vector signed int __vec) {
+ return (vector signed int)
+ __builtin_s390_vlbrf((vector unsigned int)__vec);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_revb(vector unsigned int __vec) {
+ return __builtin_s390_vlbrf(__vec);
+}
+
+static inline __ATTRS_o_ai vector signed long long
+vec_revb(vector signed long long __vec) {
+ return (vector signed long long)
+ __builtin_s390_vlbrg((vector unsigned long long)__vec);
+}
+
+static inline __ATTRS_o_ai vector unsigned long long
+vec_revb(vector unsigned long long __vec) {
+ return __builtin_s390_vlbrg(__vec);
+}
+
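+// vector float is only available with the vector-enhancements-1 facility
+// (arch12 and later).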
+#if __ARCH__ >= 12
+static inline __ATTRS_o_ai vector float
+vec_revb(vector float __vec) {
+ return (vector float)
+ __builtin_s390_vlbrf((vector unsigned int)__vec);
+}
+#endif
+
+static inline __ATTRS_o_ai vector double
+vec_revb(vector double __vec) {
+ return (vector double)
+ __builtin_s390_vlbrg((vector unsigned long long)__vec);
+}
+
+/*-- vec_reve ---------------------------------------------------------------*/
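+// vec_reve reverses the order of the elements within the vector.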
+
+static inline __ATTRS_o_ai vector signed char
+vec_reve(vector signed char __vec) {
+ return (vector signed char) { __vec[15], __vec[14], __vec[13], __vec[12],
+ __vec[11], __vec[10], __vec[9], __vec[8],
+ __vec[7], __vec[6], __vec[5], __vec[4],
+ __vec[3], __vec[2], __vec[1], __vec[0] };
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_reve(vector unsigned char __vec) {
+ return (vector unsigned char) { __vec[15], __vec[14], __vec[13], __vec[12],
+ __vec[11], __vec[10], __vec[9], __vec[8],
+ __vec[7], __vec[6], __vec[5], __vec[4],
+ __vec[3], __vec[2], __vec[1], __vec[0] };
+}
+
+static inline __ATTRS_o_ai vector bool char
+vec_reve(vector bool char __vec) {
+ return (vector bool char) { __vec[15], __vec[14], __vec[13], __vec[12],
+ __vec[11], __vec[10], __vec[9], __vec[8],
+ __vec[7], __vec[6], __vec[5], __vec[4],
+ __vec[3], __vec[2], __vec[1], __vec[0] };
+}
+
+static inline __ATTRS_o_ai vector signed short
+vec_reve(vector signed short __vec) {
+ return (vector signed short) { __vec[7], __vec[6], __vec[5], __vec[4],
+ __vec[3], __vec[2], __vec[1], __vec[0] };
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_reve(vector unsigned short __vec) {
+ return (vector unsigned short) { __vec[7], __vec[6], __vec[5], __vec[4],
+ __vec[3], __vec[2], __vec[1], __vec[0] };
+}
+
+static inline __ATTRS_o_ai vector bool short
+vec_reve(vector bool short __vec) {
+ return (vector bool short) { __vec[7], __vec[6], __vec[5], __vec[4],
+ __vec[3], __vec[2], __vec[1], __vec[0] };
+}
+
+static inline __ATTRS_o_ai vector signed int
+vec_reve(vector signed int __vec) {
+ return (vector signed int) { __vec[3], __vec[2], __vec[1], __vec[0] };
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_reve(vector unsigned int __vec) {
+ return (vector unsigned int) { __vec[3], __vec[2], __vec[1], __vec[0] };
+}
+
+static inline __ATTRS_o_ai vector bool int
+vec_reve(vector bool int __vec) {
+ return (vector bool int) { __vec[3], __vec[2], __vec[1], __vec[0] };
+}
+
+static inline __ATTRS_o_ai vector signed long long
+vec_reve(vector signed long long __vec) {
+ return (vector signed long long) { __vec[1], __vec[0] };
+}
+
+static inline __ATTRS_o_ai vector unsigned long long
+vec_reve(vector unsigned long long __vec) {
+ return (vector unsigned long long) { __vec[1], __vec[0] };
+}
+
+static inline __ATTRS_o_ai vector bool long long
+vec_reve(vector bool long long __vec) {
+ return (vector bool long long) { __vec[1], __vec[0] };
+}
+
+#if __ARCH__ >= 12
+static inline __ATTRS_o_ai vector float
+vec_reve(vector float __vec) {
+ return (vector float) { __vec[3], __vec[2], __vec[1], __vec[0] };
+}
+#endif
+
+static inline __ATTRS_o_ai vector double
+vec_reve(vector double __vec) {
+ return (vector double) { __vec[1], __vec[0] };
+}
+
/*-- vec_sel ----------------------------------------------------------------*/
static inline __ATTRS_o_ai vector signed char
__builtin_s390_vsldb((vector unsigned char)(X), \
(vector unsigned char)(Y), (Z) * 4))
+/*-- vec_sldb ---------------------------------------------------------------*/
+
+#if __ARCH__ >= 13
+
+extern __ATTRS_o vector signed char
+vec_sldb(vector signed char __a, vector signed char __b, int __c)
+ __constant_range(__c, 0, 7);
+
+extern __ATTRS_o vector unsigned char
+vec_sldb(vector unsigned char __a, vector unsigned char __b, int __c)
+ __constant_range(__c, 0, 7);
+
+extern __ATTRS_o vector signed short
+vec_sldb(vector signed short __a, vector signed short __b, int __c)
+ __constant_range(__c, 0, 7);
+
+extern __ATTRS_o vector unsigned short
+vec_sldb(vector unsigned short __a, vector unsigned short __b, int __c)
+ __constant_range(__c, 0, 7);
+
+extern __ATTRS_o vector signed int
+vec_sldb(vector signed int __a, vector signed int __b, int __c)
+ __constant_range(__c, 0, 7);
+
+extern __ATTRS_o vector unsigned int
+vec_sldb(vector unsigned int __a, vector unsigned int __b, int __c)
+ __constant_range(__c, 0, 7);
+
+extern __ATTRS_o vector signed long long
+vec_sldb(vector signed long long __a, vector signed long long __b, int __c)
+ __constant_range(__c, 0, 7);
+
+extern __ATTRS_o vector unsigned long long
+vec_sldb(vector unsigned long long __a, vector unsigned long long __b, int __c)
+ __constant_range(__c, 0, 7);
+
+extern __ATTRS_o vector float
+vec_sldb(vector float __a, vector float __b, int __c)
+ __constant_range(__c, 0, 7);
+
+extern __ATTRS_o vector double
+vec_sldb(vector double __a, vector double __b, int __c)
+ __constant_range(__c, 0, 7);
+
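+// The prototypes above are never defined; they exist only to drive overload
+// resolution (supplying the result type for the __typeof__ below and the
+// constant-range diagnostics).  Actual calls expand through this macro so
+// that the bit count reaches the builtin as a compile-time constant.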
+#define vec_sldb(X, Y, Z) ((__typeof__((vec_sldb)((X), (Y), (Z)))) \
+ __builtin_s390_vsld((vector unsigned char)(X), \
+ (vector unsigned char)(Y), (Z)))
+
+#endif
+
/*-- vec_sral ---------------------------------------------------------------*/
static inline __ATTRS_o_ai vector signed char
(vector unsigned char)__a, (vector unsigned char)__b);
}
+/*-- vec_srdb ---------------------------------------------------------------*/
+
+#if __ARCH__ >= 13
+
+extern __ATTRS_o vector signed char
+vec_srdb(vector signed char __a, vector signed char __b, int __c)
+ __constant_range(__c, 0, 7);
+
+extern __ATTRS_o vector unsigned char
+vec_srdb(vector unsigned char __a, vector unsigned char __b, int __c)
+ __constant_range(__c, 0, 7);
+
+extern __ATTRS_o vector signed short
+vec_srdb(vector signed short __a, vector signed short __b, int __c)
+ __constant_range(__c, 0, 7);
+
+extern __ATTRS_o vector unsigned short
+vec_srdb(vector unsigned short __a, vector unsigned short __b, int __c)
+ __constant_range(__c, 0, 7);
+
+extern __ATTRS_o vector signed int
+vec_srdb(vector signed int __a, vector signed int __b, int __c)
+ __constant_range(__c, 0, 7);
+
+extern __ATTRS_o vector unsigned int
+vec_srdb(vector unsigned int __a, vector unsigned int __b, int __c)
+ __constant_range(__c, 0, 7);
+
+extern __ATTRS_o vector signed long long
+vec_srdb(vector signed long long __a, vector signed long long __b, int __c)
+ __constant_range(__c, 0, 7);
+
+extern __ATTRS_o vector unsigned long long
+vec_srdb(vector unsigned long long __a, vector unsigned long long __b, int __c)
+ __constant_range(__c, 0, 7);
+
+extern __ATTRS_o vector float
+vec_srdb(vector float __a, vector float __b, int __c)
+ __constant_range(__c, 0, 7);
+
+extern __ATTRS_o vector double
+vec_srdb(vector double __a, vector double __b, int __c)
+ __constant_range(__c, 0, 7);
+
+#define vec_srdb(X, Y, Z) ((__typeof__((vec_srdb)((X), (Y), (Z)))) \
+ __builtin_s390_vsrd((vector unsigned char)(X), \
+ (vector unsigned char)(Y), (Z)))
+
+#endif
+
/*-- vec_abs ----------------------------------------------------------------*/
static inline __ATTRS_o_ai vector signed char
return __builtin_convertvector(__a, vector double);
}
+/*-- vec_float --------------------------------------------------------------*/
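+// Only provided from arch13 on, where 4 x 32-bit float/integer conversions
+// are available in hardware (vcefb/vcelfb).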
+
+#if __ARCH__ >= 13
+
+static inline __ATTRS_o_ai vector float
+vec_float(vector signed int __a) {
+ return __builtin_convertvector(__a, vector float);
+}
+
+static inline __ATTRS_o_ai vector float
+vec_float(vector unsigned int __a) {
+ return __builtin_convertvector(__a, vector float);
+}
+
+#endif
+
/*-- vec_signed -------------------------------------------------------------*/
static inline __ATTRS_o_ai vector signed long long
return __builtin_convertvector(__a, vector signed long long);
}
+#if __ARCH__ >= 13
+static inline __ATTRS_o_ai vector signed int
+vec_signed(vector float __a) {
+ return __builtin_convertvector(__a, vector signed int);
+}
+#endif
+
/*-- vec_unsigned -----------------------------------------------------------*/
static inline __ATTRS_o_ai vector unsigned long long
return __builtin_convertvector(__a, vector unsigned long long);
}
+#if __ARCH__ >= 13
+static inline __ATTRS_o_ai vector unsigned int
+vec_unsigned(vector float __a) {
+ return __builtin_convertvector(__a, vector unsigned int);
+}
+#endif
+
/*-- vec_roundp -------------------------------------------------------------*/
#if __ARCH__ >= 12
return __builtin_s390_vfaezfs(__a, __b, 8, __cc);
}
+/*-- vec_search_string_cc ---------------------------------------------------*/
+
+#if __ARCH__ >= 13
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_search_string_cc(vector signed char __a, vector signed char __b,
+ vector unsigned char __c, int *__cc) {
+ return __builtin_s390_vstrsb((vector unsigned char)__a,
+ (vector unsigned char)__b, __c, __cc);
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_search_string_cc(vector bool char __a, vector bool char __b,
+ vector unsigned char __c, int *__cc) {
+ return __builtin_s390_vstrsb((vector unsigned char)__a,
+ (vector unsigned char)__b, __c, __cc);
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_search_string_cc(vector unsigned char __a, vector unsigned char __b,
+ vector unsigned char __c, int *__cc) {
+ return __builtin_s390_vstrsb(__a, __b, __c, __cc);
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_search_string_cc(vector signed short __a, vector signed short __b,
+ vector unsigned char __c, int *__cc) {
+ return __builtin_s390_vstrsh((vector unsigned short)__a,
+ (vector unsigned short)__b, __c, __cc);
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_search_string_cc(vector bool short __a, vector bool short __b,
+ vector unsigned char __c, int *__cc) {
+ return __builtin_s390_vstrsh((vector unsigned short)__a,
+ (vector unsigned short)__b, __c, __cc);
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_search_string_cc(vector unsigned short __a, vector unsigned short __b,
+ vector unsigned char __c, int *__cc) {
+ return __builtin_s390_vstrsh(__a, __b, __c, __cc);
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_search_string_cc(vector signed int __a, vector signed int __b,
+ vector unsigned char __c, int *__cc) {
+ return __builtin_s390_vstrsf((vector unsigned int)__a,
+ (vector unsigned int)__b, __c, __cc);
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_search_string_cc(vector bool int __a, vector bool int __b,
+ vector unsigned char __c, int *__cc) {
+ return __builtin_s390_vstrsf((vector unsigned int)__a,
+ (vector unsigned int)__b, __c, __cc);
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_search_string_cc(vector unsigned int __a, vector unsigned int __b,
+ vector unsigned char __c, int *__cc) {
+ return __builtin_s390_vstrsf(__a, __b, __c, __cc);
+}
+
+#endif
+
+/*-- vec_search_string_until_zero_cc ----------------------------------------*/
+
+#if __ARCH__ >= 13
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_search_string_until_zero_cc(vector signed char __a,
+ vector signed char __b,
+ vector unsigned char __c, int *__cc) {
+ return __builtin_s390_vstrszb((vector unsigned char)__a,
+ (vector unsigned char)__b, __c, __cc);
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_search_string_until_zero_cc(vector bool char __a,
+ vector bool char __b,
+ vector unsigned char __c, int *__cc) {
+ return __builtin_s390_vstrszb((vector unsigned char)__a,
+ (vector unsigned char)__b, __c, __cc);
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_search_string_until_zero_cc(vector unsigned char __a,
+ vector unsigned char __b,
+ vector unsigned char __c, int *__cc) {
+ return __builtin_s390_vstrszb(__a, __b, __c, __cc);
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_search_string_until_zero_cc(vector signed short __a,
+ vector signed short __b,
+ vector unsigned char __c, int *__cc) {
+ return __builtin_s390_vstrszh((vector unsigned short)__a,
+ (vector unsigned short)__b, __c, __cc);
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_search_string_until_zero_cc(vector bool short __a,
+ vector bool short __b,
+ vector unsigned char __c, int *__cc) {
+ return __builtin_s390_vstrszh((vector unsigned short)__a,
+ (vector unsigned short)__b, __c, __cc);
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_search_string_until_zero_cc(vector unsigned short __a,
+ vector unsigned short __b,
+ vector unsigned char __c, int *__cc) {
+ return __builtin_s390_vstrszh(__a, __b, __c, __cc);
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_search_string_until_zero_cc(vector signed int __a,
+ vector signed int __b,
+ vector unsigned char __c, int *__cc) {
+ return __builtin_s390_vstrszf((vector unsigned int)__a,
+ (vector unsigned int)__b, __c, __cc);
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_search_string_until_zero_cc(vector bool int __a,
+ vector bool int __b,
+ vector unsigned char __c, int *__cc) {
+ return __builtin_s390_vstrszf((vector unsigned int)__a,
+ (vector unsigned int)__b, __c, __cc);
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_search_string_until_zero_cc(vector unsigned int __a,
+ vector unsigned int __b,
+ vector unsigned char __c, int *__cc) {
+ return __builtin_s390_vstrszf(__a, __b, __c, __cc);
+}
+
+#endif
+
#undef __constant_pow2_range
#undef __constant_range
#undef __constant
case SystemZ::BI__builtin_s390_vfmaxsb:
case SystemZ::BI__builtin_s390_vfmindb:
case SystemZ::BI__builtin_s390_vfmaxdb: i = 2; l = 0; u = 15; break;
+ case SystemZ::BI__builtin_s390_vsld: i = 2; l = 0; u = 7; break;
+ case SystemZ::BI__builtin_s390_vsrd: i = 2; l = 0; u = 7; break;
}
return SemaBuiltinConstantArgRange(TheCall, i, l, u);
}
--- /dev/null
+// REQUIRES: systemz-registered-target
+// RUN: %clang_cc1 -target-cpu arch13 -triple s390x-unknown-unknown \
+// RUN: -Wall -Wno-unused -Werror -fsyntax-only -verify %s
+
+typedef __attribute__((vector_size(16))) signed char vec_schar;
+typedef __attribute__((vector_size(16))) signed short vec_sshort;
+typedef __attribute__((vector_size(16))) signed int vec_sint;
+typedef __attribute__((vector_size(16))) signed long long vec_slong;
+typedef __attribute__((vector_size(16))) unsigned char vec_uchar;
+typedef __attribute__((vector_size(16))) unsigned short vec_ushort;
+typedef __attribute__((vector_size(16))) unsigned int vec_uint;
+typedef __attribute__((vector_size(16))) unsigned long long vec_ulong;
+typedef __attribute__((vector_size(16))) double vec_double;
+typedef __attribute__((vector_size(16))) float vec_float;
+
+volatile vec_schar vsc;
+volatile vec_sshort vss;
+volatile vec_sint vsi;
+volatile vec_slong vsl;
+volatile vec_uchar vuc;
+volatile vec_ushort vus;
+volatile vec_uint vui;
+volatile vec_ulong vul;
+volatile vec_double vd;
+volatile vec_float vf;
+
+volatile unsigned int len;
+int cc;
+
+void test_integer(void) {
+ __builtin_s390_vsld(vuc, vuc, -1); // expected-error-re {{argument value {{.*}} is outside the valid range}}
+ __builtin_s390_vsld(vuc, vuc, 8); // expected-error-re {{argument value {{.*}} is outside the valid range}}
+ __builtin_s390_vsld(vuc, vuc, len); // expected-error {{must be a constant integer}}
+
+ __builtin_s390_vsrd(vuc, vuc, -1); // expected-error-re {{argument value {{.*}} is outside the valid range}}
+ __builtin_s390_vsrd(vuc, vuc, 8); // expected-error-re {{argument value {{.*}} is outside the valid range}}
+ __builtin_s390_vsrd(vuc, vuc, len); // expected-error {{must be a constant integer}}
+}
+
--- /dev/null
+// REQUIRES: systemz-registered-target
+// RUN: %clang_cc1 -target-cpu arch13 -triple s390x-ibm-linux -fno-lax-vector-conversions \
+// RUN: -Wall -Wno-unused -Werror -emit-llvm %s -o - | FileCheck %s
+
+typedef __attribute__((vector_size(16))) signed char vec_schar;
+typedef __attribute__((vector_size(16))) signed short vec_sshort;
+typedef __attribute__((vector_size(16))) signed int vec_sint;
+typedef __attribute__((vector_size(16))) signed long long vec_slong;
+typedef __attribute__((vector_size(16))) unsigned char vec_uchar;
+typedef __attribute__((vector_size(16))) unsigned short vec_ushort;
+typedef __attribute__((vector_size(16))) unsigned int vec_uint;
+typedef __attribute__((vector_size(16))) unsigned long long vec_ulong;
+typedef __attribute__((vector_size(16))) double vec_double;
+typedef __attribute__((vector_size(16))) float vec_float;
+
+volatile vec_schar vsc;
+volatile vec_sshort vss;
+volatile vec_sint vsi;
+volatile vec_slong vsl;
+volatile vec_uchar vuc;
+volatile vec_ushort vus;
+volatile vec_uint vui;
+volatile vec_ulong vul;
+volatile vec_double vd;
+volatile vec_float vf;
+
+volatile unsigned int len;
+const void * volatile cptr;
+void * volatile ptr;
+int cc;
+
+void test_integer(void) {
+ vuc = __builtin_s390_vsld(vuc, vuc, 0);
+ // CHECK: call <16 x i8> @llvm.s390.vsld(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 0)
+ vuc = __builtin_s390_vsld(vuc, vuc, 7);
+ // CHECK: call <16 x i8> @llvm.s390.vsld(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 7)
+
+ vuc = __builtin_s390_vsrd(vuc, vuc, 0);
+ // CHECK: call <16 x i8> @llvm.s390.vsrd(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 0)
+ vuc = __builtin_s390_vsrd(vuc, vuc, 7);
+ // CHECK: call <16 x i8> @llvm.s390.vsrd(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 7)
+}
+
+void test_string(void) {
+ vuc = __builtin_s390_vstrsb(vuc, vuc, vuc, &cc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vstrsb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vuc = __builtin_s390_vstrsh(vus, vus, vuc, &cc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vstrsh(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}, <16 x i8> %{{.*}})
+ vuc = __builtin_s390_vstrsf(vui, vui, vuc, &cc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vstrsf(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}, <16 x i8> %{{.*}})
+
+ vuc = __builtin_s390_vstrszb(vuc, vuc, vuc, &cc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vstrszb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ vuc = __builtin_s390_vstrszh(vus, vus, vuc, &cc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vstrszh(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}, <16 x i8> %{{.*}})
+ vuc = __builtin_s390_vstrszf(vui, vui, vuc, &cc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vstrszf(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}, <16 x i8> %{{.*}})
+}
+
// CHECK: call <2 x i64> @llvm.s390.vpdi(<2 x i64> %{{.*}}, <2 x i64> %{{.*}}, i32 5)
// CHECK-ASM: vpdi
+ vss = vec_revb(vss);
+ // CHECK-ASM: vperm
+ vus = vec_revb(vus);
+ // CHECK-ASM: vperm
+ vsi = vec_revb(vsi);
+ // CHECK-ASM: vperm
+ vui = vec_revb(vui);
+ // CHECK-ASM: vperm
+ vsl = vec_revb(vsl);
+ // CHECK-ASM: vperm
+ vul = vec_revb(vul);
+ // CHECK-ASM: vperm
+ vd = vec_revb(vd);
+ // CHECK-ASM: vperm
+
+ vsc = vec_reve(vsc);
+ // CHECK-ASM: vperm
+ vuc = vec_reve(vuc);
+ // CHECK-ASM: vperm
+ vbc = vec_reve(vbc);
+ // CHECK-ASM: vperm
+ vss = vec_reve(vss);
+ // CHECK-ASM: vperm
+ vus = vec_reve(vus);
+ // CHECK-ASM: vperm
+ vbs = vec_reve(vbs);
+ // CHECK-ASM: vperm
+ vsi = vec_reve(vsi);
+ // CHECK-ASM: vperm
+ vui = vec_reve(vui);
+ // CHECK-ASM: vperm
+ vbi = vec_reve(vbi);
+ // CHECK-ASM: vperm
+ vsl = vec_reve(vsl);
+ // CHECK-ASM: {{vperm|vpdi}}
+ vul = vec_reve(vul);
+ // CHECK-ASM: {{vperm|vpdi}}
+ vbl = vec_reve(vbl);
+ // CHECK-ASM: {{vperm|vpdi}}
+ vd = vec_reve(vd);
+ // CHECK-ASM: {{vperm|vpdi}}
+
vsc = vec_sel(vsc, vsc, vuc);
// CHECK-ASM: vsel
vsc = vec_sel(vsc, vsc, vbc);
// CHECK: call <2 x i64> @llvm.s390.vbperm(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
// CHECK-ASM: vbperm
+ vf = vec_revb(vf);
+ // CHECK-ASM: vperm
+ vd = vec_revb(vd);
+ // CHECK-ASM: vperm
+
+ vf = vec_reve(vf);
+ // CHECK-ASM: vperm
+ vd = vec_reve(vd);
+ // CHECK-ASM: {{vperm|vpdi}}
+
vf = vec_sel(vf, vf, vui);
// CHECK-ASM: vsel
vf = vec_sel(vf, vf, vbi);
--- /dev/null
+// REQUIRES: systemz-registered-target
+// RUN: %clang_cc1 -target-cpu arch13 -triple s390x-linux-gnu \
+// RUN: -fzvector -fno-lax-vector-conversions \
+// RUN: -Wall -Wno-unused -Werror -fsyntax-only -verify %s
+
+#include <vecintrin.h>
+
+volatile vector signed char vsc;
+volatile vector signed short vss;
+volatile vector signed int vsi;
+volatile vector signed long long vsl;
+volatile vector unsigned char vuc;
+volatile vector unsigned short vus;
+volatile vector unsigned int vui;
+volatile vector unsigned long long vul;
+volatile vector bool char vbc;
+volatile vector bool short vbs;
+volatile vector bool int vbi;
+volatile vector bool long long vbl;
+volatile vector float vf;
+volatile vector double vd;
+
+volatile signed char sc;
+volatile signed short ss;
+volatile signed int si;
+volatile signed long long sl;
+volatile unsigned char uc;
+volatile unsigned short us;
+volatile unsigned int ui;
+volatile unsigned long long ul;
+volatile float f;
+volatile double d;
+
+const void * volatile cptr;
+const signed char * volatile cptrsc;
+const signed short * volatile cptrss;
+const signed int * volatile cptrsi;
+const signed long long * volatile cptrsl;
+const unsigned char * volatile cptruc;
+const unsigned short * volatile cptrus;
+const unsigned int * volatile cptrui;
+const unsigned long long * volatile cptrul;
+const float * volatile cptrf;
+const double * volatile cptrd;
+
+void * volatile ptr;
+signed char * volatile ptrsc;
+signed short * volatile ptrss;
+signed int * volatile ptrsi;
+signed long long * volatile ptrsl;
+unsigned char * volatile ptruc;
+unsigned short * volatile ptrus;
+unsigned int * volatile ptrui;
+unsigned long long * volatile ptrul;
+float * volatile ptrf;
+double * volatile ptrd;
+
+volatile unsigned int len;
+volatile int idx;
+int cc;
+
+void test_integer(void) {
+ vsc = vec_sldb(vsc, vsc, idx); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 9 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 1 {{must be a constant integer from 0 to 7}}
+ vuc = vec_sldb(vuc, vuc, idx); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 9 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 1 {{must be a constant integer from 0 to 7}}
+ vss = vec_sldb(vss, vss, idx); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 9 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 1 {{must be a constant integer from 0 to 7}}
+ vus = vec_sldb(vus, vus, idx); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 9 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 1 {{must be a constant integer from 0 to 7}}
+ vsi = vec_sldb(vsi, vsi, idx); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 9 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 1 {{must be a constant integer from 0 to 7}}
+ vui = vec_sldb(vui, vui, idx); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 9 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 1 {{must be a constant integer from 0 to 7}}
+ vsl = vec_sldb(vsl, vsl, idx); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 9 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 1 {{must be a constant integer from 0 to 7}}
+ vul = vec_sldb(vul, vul, idx); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 9 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 1 {{must be a constant integer from 0 to 7}}
+ vf = vec_sldb(vf, vf, idx); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 9 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 1 {{must be a constant integer from 0 to 7}}
+ vd = vec_sldb(vd, vd, idx); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 9 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 1 {{must be a constant integer from 0 to 7}}
+
+ vsc = vec_srdb(vsc, vsc, idx); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 9 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 1 {{must be a constant integer from 0 to 7}}
+ vuc = vec_srdb(vuc, vuc, idx); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 9 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 1 {{must be a constant integer from 0 to 7}}
+ vss = vec_srdb(vss, vss, idx); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 9 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 1 {{must be a constant integer from 0 to 7}}
+ vus = vec_srdb(vus, vus, idx); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 9 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 1 {{must be a constant integer from 0 to 7}}
+ vsi = vec_srdb(vsi, vsi, idx); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 9 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 1 {{must be a constant integer from 0 to 7}}
+ vui = vec_srdb(vui, vui, idx); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 9 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 1 {{must be a constant integer from 0 to 7}}
+ vsl = vec_srdb(vsl, vsl, idx); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 9 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 1 {{must be a constant integer from 0 to 7}}
+ vul = vec_srdb(vul, vul, idx); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 9 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 1 {{must be a constant integer from 0 to 7}}
+ vf = vec_srdb(vf, vf, idx); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 9 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 1 {{must be a constant integer from 0 to 7}}
+ vd = vec_srdb(vd, vd, idx); // expected-error {{no matching function}}
+ // expected-note@vecintrin.h:* 9 {{candidate function not viable}}
+ // expected-note@vecintrin.h:* 1 {{must be a constant integer from 0 to 7}}
+}
+
--- /dev/null
+// REQUIRES: systemz-registered-target
+// RUN: %clang_cc1 -target-cpu arch13 -triple s390x-linux-gnu \
+// RUN: -O -fzvector -fno-lax-vector-conversions \
+// RUN: -Wall -Wno-unused -Werror -emit-llvm %s -o - | FileCheck %s
+// RUN: %clang_cc1 -target-cpu arch13 -triple s390x-linux-gnu \
+// RUN: -O -fzvector -fno-lax-vector-conversions \
+// RUN: -Wall -Wno-unused -Werror -S %s -o - | FileCheck %s --check-prefix=CHECK-ASM
+
+#include <vecintrin.h>
+
+volatile vector signed char vsc;
+volatile vector signed short vss;
+volatile vector signed int vsi;
+volatile vector signed long long vsl;
+volatile vector unsigned char vuc;
+volatile vector unsigned short vus;
+volatile vector unsigned int vui;
+volatile vector unsigned long long vul;
+volatile vector bool char vbc;
+volatile vector bool short vbs;
+volatile vector bool int vbi;
+volatile vector bool long long vbl;
+volatile vector float vf;
+volatile vector double vd;
+
+volatile signed char sc;
+volatile signed short ss;
+volatile signed int si;
+volatile signed long long sl;
+volatile unsigned char uc;
+volatile unsigned short us;
+volatile unsigned int ui;
+volatile unsigned long long ul;
+volatile float f;
+volatile double d;
+
+const void * volatile cptr;
+const signed char * volatile cptrsc;
+const signed short * volatile cptrss;
+const signed int * volatile cptrsi;
+const signed long long * volatile cptrsl;
+const unsigned char * volatile cptruc;
+const unsigned short * volatile cptrus;
+const unsigned int * volatile cptrui;
+const unsigned long long * volatile cptrul;
+const float * volatile cptrf;
+const double * volatile cptrd;
+
+void * volatile ptr;
+signed char * volatile ptrsc;
+signed short * volatile ptrss;
+signed int * volatile ptrsi;
+signed long long * volatile ptrsl;
+unsigned char * volatile ptruc;
+unsigned short * volatile ptrus;
+unsigned int * volatile ptrui;
+unsigned long long * volatile ptrul;
+float * volatile ptrf;
+double * volatile ptrd;
+
+volatile unsigned int len;
+volatile int idx;
+int cc;
+
+void test_core(void) {
+ // CHECK-ASM-LABEL: test_core
+ vector signed short vss2;
+ vector signed int vsi2;
+ vector signed long long vsl2;
+ vector unsigned short vus2;
+ vector unsigned int vui2;
+ vector unsigned long long vul2;
+ vector float vf2;
+ vector double vd2;
+
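+  // With optimization enabled, vec_revb/vec_reve combined with a vector
+  // load or store is expected to fold into the arch13 byte-reversed or
+  // element-reversed load/store instructions checked below.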
+ vss += vec_revb(vec_xl(idx, cptrss));
+ // CHECK-ASM: vlbrh
+ vus += vec_revb(vec_xl(idx, cptrus));
+ // CHECK-ASM: vlbrh
+ vsi += vec_revb(vec_xl(idx, cptrsi));
+ // CHECK-ASM: vlbrf
+ vui += vec_revb(vec_xl(idx, cptrui));
+ // CHECK-ASM: vlbrf
+ vsl += vec_revb(vec_xl(idx, cptrsl));
+ // CHECK-ASM: vlbrg
+ vul += vec_revb(vec_xl(idx, cptrul));
+ // CHECK-ASM: vlbrg
+ vf += vec_revb(vec_xl(idx, cptrf));
+ // CHECK-ASM: vlbrf
+ vd += vec_revb(vec_xl(idx, cptrd));
+ // CHECK-ASM: vlbrg
+
+ vec_xst(vec_revb(vss), idx, ptrss);
+ // CHECK-ASM: vstbrh
+ vec_xst(vec_revb(vus), idx, ptrus);
+ // CHECK-ASM: vstbrh
+ vec_xst(vec_revb(vsi), idx, ptrsi);
+ // CHECK-ASM: vstbrf
+ vec_xst(vec_revb(vui), idx, ptrui);
+ // CHECK-ASM: vstbrf
+ vec_xst(vec_revb(vsl), idx, ptrsl);
+ // CHECK-ASM: vstbrg
+ vec_xst(vec_revb(vul), idx, ptrul);
+ // CHECK-ASM: vstbrg
+ vec_xst(vec_revb(vf), idx, ptrf);
+ // CHECK-ASM: vstbrf
+ vec_xst(vec_revb(vd), idx, ptrd);
+ // CHECK-ASM: vstbrg
+
+ vss += vec_revb(vec_insert_and_zero(cptrss));
+ // CHECK-ASM: vllebrzh
+ vus += vec_revb(vec_insert_and_zero(cptrus));
+ // CHECK-ASM: vllebrzh
+ vsi += vec_revb(vec_insert_and_zero(cptrsi));
+ // CHECK-ASM: vllebrzf
+ vui += vec_revb(vec_insert_and_zero(cptrui));
+ // CHECK-ASM: vllebrzf
+ vsl += vec_revb(vec_insert_and_zero(cptrsl));
+ // CHECK-ASM: vllebrzg
+ vul += vec_revb(vec_insert_and_zero(cptrul));
+ // CHECK-ASM: vllebrzg
+ vf += vec_revb(vec_insert_and_zero(cptrf));
+ // CHECK-ASM: vllebrzf
+ vd += vec_revb(vec_insert_and_zero(cptrd));
+ // CHECK-ASM: vllebrzg
+
+ vss += vec_revb(vec_splats(ss));
+ // CHECK-ASM: vlbrreph
+ vus += vec_revb(vec_splats(us));
+ // CHECK-ASM: vlbrreph
+ vsi += vec_revb(vec_splats(si));
+ // CHECK-ASM: vlbrrepf
+ vui += vec_revb(vec_splats(ui));
+ // CHECK-ASM: vlbrrepf
+ vsl += vec_revb(vec_splats(sl));
+ // CHECK-ASM: vlbrrepg
+ vul += vec_revb(vec_splats(ul));
+ // CHECK-ASM: vlbrrepg
+ vf += vec_revb(vec_splats(f));
+ // CHECK-ASM: vlbrrepf
+ vd += vec_revb(vec_splats(d));
+ // CHECK-ASM: vlbrrepg
+
+ vus = vec_splats(__builtin_bswap16(us));
+ // CHECK-ASM: vlbrreph
+ vui = vec_splats(__builtin_bswap32(ui));
+ // CHECK-ASM: vlbrrepf
+ vul = vec_splats((unsigned long long)__builtin_bswap64(ul));
+ // CHECK-ASM: vlbrrepg
+
+ vss2 = vss;
+ vss += vec_revb(vec_insert(ss, vec_revb(vss2), 0));
+ // CHECK-ASM: vlebrh
+ vus2 = vus;
+ vus += vec_revb(vec_insert(us, vec_revb(vus2), 0));
+ // CHECK-ASM: vlebrh
+ vsi2 = vsi;
+ vsi += vec_revb(vec_insert(si, vec_revb(vsi2), 0));
+ // CHECK-ASM: vlebrf
+ vui2 = vui;
+ vui += vec_revb(vec_insert(ui, vec_revb(vui2), 0));
+ // CHECK-ASM: vlebrf
+ vsl2 = vsl;
+ vsl += vec_revb(vec_insert(sl, vec_revb(vsl2), 0));
+ // CHECK-ASM: vlebrg
+ vul2 = vul;
+ vul += vec_revb(vec_insert(ul, vec_revb(vul2), 0));
+ // CHECK-ASM: vlebrg
+ vf2 = vf;
+ vf += vec_revb(vec_insert(f, vec_revb(vf2), 0));
+ // CHECK-ASM: vlebrf
+ vd2 = vd;
+ vd += vec_revb(vec_insert(d, vec_revb(vd2), 0));
+ // CHECK-ASM: vlebrg
+
+ vus2 = vus;
+ vus = vec_insert(__builtin_bswap16(us), vus2, 0);
+ // CHECK-ASM: vlebrh
+ vui2 = vui;
+ vui = vec_insert(__builtin_bswap32(ui), vui2, 0);
+ // CHECK-ASM: vlebrf
+ vul2 = vul;
+ vul = vec_insert(__builtin_bswap64(ul), vul2, 0);
+ // CHECK-ASM: vlebrg
+
+ ss = vec_extract(vec_revb(vss), 0);
+ // CHECK-ASM: vstebrh
+ us = vec_extract(vec_revb(vus), 0);
+ // CHECK-ASM: vstebrh
+ si = vec_extract(vec_revb(vsi), 0);
+ // CHECK-ASM: vstebrf
+ ui = vec_extract(vec_revb(vui), 0);
+ // CHECK-ASM: vstebrf
+ sl = vec_extract(vec_revb(vsl), 0);
+ // CHECK-ASM: vstebrg
+ ul = vec_extract(vec_revb(vul), 0);
+ // CHECK-ASM: vstebrg
+ f = vec_extract(vec_revb(vf), 0);
+ // CHECK-ASM: vstebrf
+ d = vec_extract(vec_revb(vd), 0);
+ // CHECK-ASM: vstebrg
+
+ us = __builtin_bswap16(vec_extract(vus, 0));
+ // CHECK-ASM: vstebrh
+ ui = __builtin_bswap32(vec_extract(vui, 0));
+ // CHECK-ASM: vstebrf
+ ul = __builtin_bswap64(vec_extract(vul, 0));
+ // CHECK-ASM: vstebrg
+
+ vsc += vec_reve(vec_xl(idx, cptrsc));
+ // CHECK-ASM: vlbrq
+ vuc += vec_reve(vec_xl(idx, cptruc));
+ // CHECK-ASM: vlbrq
+ vss += vec_reve(vec_xl(idx, cptrss));
+ // CHECK-ASM: vlerh
+ vus += vec_reve(vec_xl(idx, cptrus));
+ // CHECK-ASM: vlerh
+ vsi += vec_reve(vec_xl(idx, cptrsi));
+ // CHECK-ASM: vlerf
+ vui += vec_reve(vec_xl(idx, cptrui));
+ // CHECK-ASM: vlerf
+ vsl += vec_reve(vec_xl(idx, cptrsl));
+ // CHECK-ASM: vlerg
+ vul += vec_reve(vec_xl(idx, cptrul));
+ // CHECK-ASM: vlerg
+ vf += vec_reve(vec_xl(idx, cptrf));
+ // CHECK-ASM: vlerf
+ vd += vec_reve(vec_xl(idx, cptrd));
+ // CHECK-ASM: vlerg
+
+ vec_xst(vec_reve(vsc), idx, ptrsc);
+ // CHECK-ASM: vstbrq
+ vec_xst(vec_reve(vuc), idx, ptruc);
+ // CHECK-ASM: vstbrq
+ vec_xst(vec_reve(vss), idx, ptrss);
+ // CHECK-ASM: vsterh
+ vec_xst(vec_reve(vus), idx, ptrus);
+ // CHECK-ASM: vsterh
+ vec_xst(vec_reve(vsi), idx, ptrsi);
+ // CHECK-ASM: vsterf
+ vec_xst(vec_reve(vui), idx, ptrui);
+ // CHECK-ASM: vsterf
+ vec_xst(vec_reve(vsl), idx, ptrsl);
+ // CHECK-ASM: vsterg
+ vec_xst(vec_reve(vul), idx, ptrul);
+ // CHECK-ASM: vsterg
+ vec_xst(vec_reve(vf), idx, ptrf);
+ // CHECK-ASM: vsterf
+ vec_xst(vec_reve(vd), idx, ptrd);
+ // CHECK-ASM: vsterg
+}
+
+void test_integer(void) {
+ // CHECK-ASM-LABEL: test_integer
+
+ vsc = vec_sldb(vsc, vsc, 0);
+ // CHECK: call <16 x i8> @llvm.s390.vsld(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 0)
+ // CHECK-ASM: vsld
+ vsc = vec_sldb(vsc, vsc, 7);
+ // CHECK: call <16 x i8> @llvm.s390.vsld(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 7)
+ // CHECK-ASM: vsld
+ vuc = vec_sldb(vuc, vuc, 0);
+ // CHECK: call <16 x i8> @llvm.s390.vsld(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 0)
+ // CHECK-ASM: vsld
+ vuc = vec_sldb(vuc, vuc, 7);
+ // CHECK: call <16 x i8> @llvm.s390.vsld(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 7)
+ // CHECK-ASM: vsld
+ vss = vec_sldb(vss, vss, 0);
+ // CHECK: call <16 x i8> @llvm.s390.vsld(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 0)
+ // CHECK-ASM: vsld
+ vss = vec_sldb(vss, vss, 7);
+ // CHECK: call <16 x i8> @llvm.s390.vsld(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 7)
+ // CHECK-ASM: vsld
+ vus = vec_sldb(vus, vus, 0);
+ // CHECK: call <16 x i8> @llvm.s390.vsld(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 0)
+ // CHECK-ASM: vsld
+ vus = vec_sldb(vus, vus, 7);
+ // CHECK: call <16 x i8> @llvm.s390.vsld(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 7)
+ // CHECK-ASM: vsld
+ vsi = vec_sldb(vsi, vsi, 0);
+ // CHECK: call <16 x i8> @llvm.s390.vsld(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 0)
+ // CHECK-ASM: vsld
+ vsi = vec_sldb(vsi, vsi, 7);
+ // CHECK: call <16 x i8> @llvm.s390.vsld(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 7)
+ // CHECK-ASM: vsld
+ vui = vec_sldb(vui, vui, 0);
+ // CHECK: call <16 x i8> @llvm.s390.vsld(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 0)
+ // CHECK-ASM: vsld
+ vui = vec_sldb(vui, vui, 7);
+ // CHECK: call <16 x i8> @llvm.s390.vsld(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 7)
+ // CHECK-ASM: vsld
+ vsl = vec_sldb(vsl, vsl, 0);
+ // CHECK: call <16 x i8> @llvm.s390.vsld(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 0)
+ // CHECK-ASM: vsld
+ vsl = vec_sldb(vsl, vsl, 7);
+ // CHECK: call <16 x i8> @llvm.s390.vsld(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 7)
+ // CHECK-ASM: vsld
+ vul = vec_sldb(vul, vul, 0);
+ // CHECK: call <16 x i8> @llvm.s390.vsld(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 0)
+ // CHECK-ASM: vsld
+ vul = vec_sldb(vul, vul, 7);
+ // CHECK: call <16 x i8> @llvm.s390.vsld(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 7)
+ // CHECK-ASM: vsld
+ vf = vec_sldb(vf, vf, 0);
+ // CHECK: call <16 x i8> @llvm.s390.vsld(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 0)
+ // CHECK-ASM: vsld
+ vf = vec_sldb(vf, vf, 7);
+ // CHECK: call <16 x i8> @llvm.s390.vsld(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 7)
+ // CHECK-ASM: vsld
+ vd = vec_sldb(vd, vd, 0);
+ // CHECK: call <16 x i8> @llvm.s390.vsld(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 0)
+ // CHECK-ASM: vsld
+ vd = vec_sldb(vd, vd, 7);
+ // CHECK: call <16 x i8> @llvm.s390.vsld(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 7)
+ // CHECK-ASM: vsld
+
+ vsc = vec_srdb(vsc, vsc, 0);
+ // CHECK: call <16 x i8> @llvm.s390.vsrd(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 0)
+ // CHECK-ASM: vsrd
+ vsc = vec_srdb(vsc, vsc, 7);
+ // CHECK: call <16 x i8> @llvm.s390.vsrd(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 7)
+ // CHECK-ASM: vsrd
+ vuc = vec_srdb(vuc, vuc, 0);
+ // CHECK: call <16 x i8> @llvm.s390.vsrd(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 0)
+ // CHECK-ASM: vsrd
+ vuc = vec_srdb(vuc, vuc, 7);
+ // CHECK: call <16 x i8> @llvm.s390.vsrd(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 7)
+ // CHECK-ASM: vsrd
+ vss = vec_srdb(vss, vss, 0);
+ // CHECK: call <16 x i8> @llvm.s390.vsrd(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 0)
+ // CHECK-ASM: vsrd
+ vss = vec_srdb(vss, vss, 7);
+ // CHECK: call <16 x i8> @llvm.s390.vsrd(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 7)
+ // CHECK-ASM: vsrd
+ vus = vec_srdb(vus, vus, 0);
+ // CHECK: call <16 x i8> @llvm.s390.vsrd(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 0)
+ // CHECK-ASM: vsrd
+ vus = vec_srdb(vus, vus, 7);
+ // CHECK: call <16 x i8> @llvm.s390.vsrd(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 7)
+ // CHECK-ASM: vsrd
+ vsi = vec_srdb(vsi, vsi, 0);
+ // CHECK: call <16 x i8> @llvm.s390.vsrd(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 0)
+ // CHECK-ASM: vsrd
+ vsi = vec_srdb(vsi, vsi, 7);
+ // CHECK: call <16 x i8> @llvm.s390.vsrd(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 7)
+ // CHECK-ASM: vsrd
+ vui = vec_srdb(vui, vui, 0);
+ // CHECK: call <16 x i8> @llvm.s390.vsrd(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 0)
+ // CHECK-ASM: vsrd
+ vui = vec_srdb(vui, vui, 7);
+ // CHECK: call <16 x i8> @llvm.s390.vsrd(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 7)
+ // CHECK-ASM: vsrd
+ vsl = vec_srdb(vsl, vsl, 0);
+ // CHECK: call <16 x i8> @llvm.s390.vsrd(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 0)
+ // CHECK-ASM: vsrd
+ vsl = vec_srdb(vsl, vsl, 7);
+ // CHECK: call <16 x i8> @llvm.s390.vsrd(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 7)
+ // CHECK-ASM: vsrd
+ vul = vec_srdb(vul, vul, 0);
+ // CHECK: call <16 x i8> @llvm.s390.vsrd(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 0)
+ // CHECK-ASM: vsrd
+ vul = vec_srdb(vul, vul, 7);
+ // CHECK: call <16 x i8> @llvm.s390.vsrd(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 7)
+ // CHECK-ASM: vsrd
+ vf = vec_srdb(vf, vf, 0);
+ // CHECK: call <16 x i8> @llvm.s390.vsrd(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 0)
+ // CHECK-ASM: vsrd
+ vf = vec_srdb(vf, vf, 7);
+ // CHECK: call <16 x i8> @llvm.s390.vsrd(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 7)
+ // CHECK-ASM: vsrd
+ vd = vec_srdb(vd, vd, 0);
+ // CHECK: call <16 x i8> @llvm.s390.vsrd(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 0)
+ // CHECK-ASM: vsrd
+ vd = vec_srdb(vd, vd, 7);
+ // CHECK: call <16 x i8> @llvm.s390.vsrd(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 7)
+ // CHECK-ASM: vsrd
+}
+
+void test_string(void) {
+ // CHECK-ASM-LABEL: test_string
+
+ vuc = vec_search_string_cc(vsc, vsc, vuc, &cc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vstrsb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ // CHECK-ASM: vstrsb %{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}, 0
+ vuc = vec_search_string_cc(vbc, vbc, vuc, &cc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vstrsb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ // CHECK-ASM: vstrsb %{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}, 0
+ vuc = vec_search_string_cc(vuc, vuc, vuc, &cc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vstrsb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ // CHECK-ASM: vstrsb %{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}, 0
+ vuc = vec_search_string_cc(vss, vss, vuc, &cc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vstrsh(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}, <16 x i8> %{{.*}})
+ // CHECK-ASM: vstrsh %{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}, 0
+ vuc = vec_search_string_cc(vbs, vbs, vuc, &cc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vstrsh(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}, <16 x i8> %{{.*}})
+ // CHECK-ASM: vstrsh %{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}, 0
+ vuc = vec_search_string_cc(vus, vus, vuc, &cc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vstrsh(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}, <16 x i8> %{{.*}})
+ // CHECK-ASM: vstrsh %{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}, 0
+ vuc = vec_search_string_cc(vsi, vsi, vuc, &cc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vstrsf(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}, <16 x i8> %{{.*}})
+ // CHECK-ASM: vstrsf %{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}, 0
+ vuc = vec_search_string_cc(vbi, vbi, vuc, &cc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vstrsf(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}, <16 x i8> %{{.*}})
+ // CHECK-ASM: vstrsf %{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}, 0
+ vuc = vec_search_string_cc(vui, vui, vuc, &cc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vstrsf(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}, <16 x i8> %{{.*}})
+ // CHECK-ASM: vstrsf %{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}, 0
+
+ vuc = vec_search_string_until_zero_cc(vsc, vsc, vuc, &cc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vstrszb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ // CHECK-ASM: vstrszb %{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}
+ vuc = vec_search_string_until_zero_cc(vbc, vbc, vuc, &cc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vstrszb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ // CHECK-ASM: vstrszb %{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}
+ vuc = vec_search_string_until_zero_cc(vuc, vuc, vuc, &cc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vstrszb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, <16 x i8> %{{.*}})
+ // CHECK-ASM: vstrszb %{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}
+ vuc = vec_search_string_until_zero_cc(vss, vss, vuc, &cc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vstrszh(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}, <16 x i8> %{{.*}})
+ // CHECK-ASM: vstrszh %{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}
+ vuc = vec_search_string_until_zero_cc(vbs, vbs, vuc, &cc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vstrszh(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}, <16 x i8> %{{.*}})
+ // CHECK-ASM: vstrszh %{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}
+ vuc = vec_search_string_until_zero_cc(vus, vus, vuc, &cc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vstrszh(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}, <16 x i8> %{{.*}})
+ // CHECK-ASM: vstrszh %{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}
+ vuc = vec_search_string_until_zero_cc(vsi, vsi, vuc, &cc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vstrszf(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}, <16 x i8> %{{.*}})
+ // CHECK-ASM: vstrszf %{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}
+ vuc = vec_search_string_until_zero_cc(vbi, vbi, vuc, &cc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vstrszf(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}, <16 x i8> %{{.*}})
+ // CHECK-ASM: vstrszf %{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}
+ vuc = vec_search_string_until_zero_cc(vui, vui, vuc, &cc);
+ // CHECK: call { <16 x i8>, i32 } @llvm.s390.vstrszf(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}, <16 x i8> %{{.*}})
+ // CHECK-ASM: vstrszf %{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}
+}
+
+void test_float(void) {
+ // CHECK-ASM-LABEL: test_float
+
+ vd = vec_double(vsl);
+ // CHECK: sitofp <2 x i64> %{{.*}} to <2 x double>
+ // CHECK-ASM: vcdgb
+ vd = vec_double(vul);
+ // CHECK: uitofp <2 x i64> %{{.*}} to <2 x double>
+ // CHECK-ASM: vcdlgb
+ vf = vec_float(vsi);
+ // CHECK: sitofp <4 x i32> %{{.*}} to <4 x float>
+ // CHECK-ASM: vcefb
+ vf = vec_float(vui);
+ // CHECK: uitofp <4 x i32> %{{.*}} to <4 x float>
+ // CHECK-ASM: vcelfb
+
+ vsl = vec_signed(vd);
+ // CHECK: fptosi <2 x double> %{{.*}} to <2 x i64>
+ // CHECK-ASM: vcgdb
+ vsi = vec_signed(vf);
+ // CHECK: fptosi <4 x float> %{{.*}} to <4 x i32>
+ // CHECK-ASM: vcfeb
+ vul = vec_unsigned(vd);
+ // CHECK: fptoui <2 x double> %{{.*}} to <2 x i64>
+ // CHECK-ASM: vclgdb
+ vui = vec_unsigned(vf);
+ // CHECK: fptoui <4 x float> %{{.*}} to <4 x i32>
+ // CHECK-ASM: vclfeb
+}
+
// RUN: -emit-llvm -o - %s | FileCheck --check-prefix=CHECK-VECTOR %s
// RUN: %clang_cc1 -triple s390x-linux-gnu -target-cpu arch12 \
// RUN: -emit-llvm -o - %s | FileCheck --check-prefix=CHECK-VECTOR %s
+// RUN: %clang_cc1 -triple s390x-linux-gnu -target-cpu arch13 \
+// RUN: -emit-llvm -o - %s | FileCheck --check-prefix=CHECK-VECTOR %s
// Vector types
// RUN: -emit-llvm -o - %s | FileCheck %s
// RUN: %clang_cc1 -triple s390x-linux-gnu -target-cpu arch12 \
// RUN: -emit-llvm -o - %s | FileCheck %s
+// RUN: %clang_cc1 -triple s390x-linux-gnu -target-cpu arch13 \
+// RUN: -emit-llvm -o - %s | FileCheck %s
// Scalar types
// RUN: FileCheck %s -check-prefix=SYSTEMZ-VECTOR
// RUN: %clang_cc1 -triple s390x-unknown -target-cpu arch12 -o - -emit-llvm %s | \
// RUN: FileCheck %s -check-prefix=SYSTEMZ-VECTOR
+// RUN: %clang_cc1 -triple s390x-unknown -target-cpu arch13 -o - -emit-llvm %s | \
+// RUN: FileCheck %s -check-prefix=SYSTEMZ-VECTOR
// SYSTEMZ-VECTOR: target datalayout = "E-m:e-i1:8:16-i8:8:16-i64:64-f128:64-v128:64-a:8:16-n32:64"
// RUN: %clang_cc1 -triple msp430-unknown -o - -emit-llvm %s | \
// RUN: %clang -target s390x -### -S -emit-llvm -march=arch11 %s 2>&1 | FileCheck --check-prefix=CHECK-ARCH11 %s
// RUN: %clang -target s390x -### -S -emit-llvm -march=z14 %s 2>&1 | FileCheck --check-prefix=CHECK-Z14 %s
// RUN: %clang -target s390x -### -S -emit-llvm -march=arch12 %s 2>&1 | FileCheck --check-prefix=CHECK-ARCH12 %s
+// RUN: %clang -target s390x -### -S -emit-llvm -march=arch13 %s 2>&1 | FileCheck --check-prefix=CHECK-ARCH13 %s
// CHECK-Z9: error: unknown target CPU 'z9'
// CHECK-Z10: "-target-cpu" "z10"
// CHECK-ARCH11: "-target-cpu" "arch11"
// CHECK-Z14: "-target-cpu" "z14"
// CHECK-ARCH12: "-target-cpu" "arch12"
+// CHECK-ARCH13: "-target-cpu" "arch13"
int x;
// RUN: not %clang_cc1 -triple systemz--- -target-cpu not-a-cpu -fsyntax-only %s 2>&1 | FileCheck %s --check-prefix SYSTEMZ
// SYSTEMZ: error: unknown target CPU 'not-a-cpu'
// SYSTEMZ: note: valid target CPU values are: arch8, z10, arch9, z196, arch10,
-// SYSTEMZ-SAME: zEC12, arch11, z13, arch12, z14
+// SYSTEMZ-SAME: zEC12, arch11, z13, arch12, z14, arch13
// RUN: not %clang_cc1 -triple sparc--- -target-cpu not-a-cpu -fsyntax-only %s 2>&1 | FileCheck %s --check-prefix SPARC
// SPARC: error: unknown target CPU 'not-a-cpu'
// CHECK_SYSTEMZ_ARCH12: #define __s390x__ 1
// CHECK_SYSTEMZ_ARCH12: #define __zarch__ 1
+// RUN: %clang -march=arch13 -E -dM %s -o - 2>&1 \
+// RUN: -target s390x-unknown-linux \
+// RUN: | FileCheck -match-full-lines %s -check-prefix=CHECK_SYSTEMZ_ARCH13
+// CHECK_SYSTEMZ_ARCH13: #define __ARCH__ 13
+// CHECK_SYSTEMZ_ARCH13: #define __GCC_HAVE_SYNC_COMPARE_AND_SWAP_1 1
+// CHECK_SYSTEMZ_ARCH13: #define __GCC_HAVE_SYNC_COMPARE_AND_SWAP_2 1
+// CHECK_SYSTEMZ_ARCH13: #define __GCC_HAVE_SYNC_COMPARE_AND_SWAP_4 1
+// CHECK_SYSTEMZ_ARCH13: #define __GCC_HAVE_SYNC_COMPARE_AND_SWAP_8 1
+// CHECK_SYSTEMZ_ARCH13: #define __HTM__ 1
+// CHECK_SYSTEMZ_ARCH13: #define __LONG_DOUBLE_128__ 1
+// CHECK_SYSTEMZ_ARCH13: #define __VX__ 1
+// CHECK_SYSTEMZ_ARCH13: #define __s390__ 1
+// CHECK_SYSTEMZ_ARCH13: #define __s390x__ 1
+// CHECK_SYSTEMZ_ARCH13: #define __zarch__ 1
+
// RUN: %clang -mhtm -E -dM %s -o - 2>&1 \
// RUN: -target s390x-unknown-linux \
// RUN: | FileCheck -match-full-lines %s -check-prefix=CHECK_SYSTEMZ_HTM
// RUN: %clang -mzvector -E -dM %s -o - 2>&1 \
// RUN: -target s390x-unknown-linux \
// RUN: | FileCheck -match-full-lines %s -check-prefix=CHECK_SYSTEMZ_ZVECTOR
-// CHECK_SYSTEMZ_ZVECTOR: #define __VEC__ 10302
+// CHECK_SYSTEMZ_ZVECTOR: #define __VEC__ 10303
// Begin amdgcn tests ----------------