#ifdef __VSX__
+static __inline__ vector bool int __ATTRS_o_ai
+vec_vsx_ld(int __a, const vector bool int *__b) {
+ return (vector bool int)__builtin_vsx_lxvw4x(__a, __b);
+}
+
static __inline__ vector signed int __ATTRS_o_ai
vec_vsx_ld(int __a, const vector signed int *__b) {
return (vector signed int)__builtin_vsx_lxvw4x(__a, __b);
}
+static __inline__ vector signed int __ATTRS_o_ai
+vec_vsx_ld(int __a, const signed int *__b) {
+ return (vector signed int)__builtin_vsx_lxvw4x(__a, __b);
+}
+
static __inline__ vector unsigned int __ATTRS_o_ai
vec_vsx_ld(int __a, const vector unsigned int *__b) {
return (vector unsigned int)__builtin_vsx_lxvw4x(__a, __b);
}
+static __inline__ vector unsigned int __ATTRS_o_ai
+vec_vsx_ld(int __a, const unsigned int *__b) {
+ return (vector unsigned int)__builtin_vsx_lxvw4x(__a, __b);
+}
+
static __inline__ vector float __ATTRS_o_ai
vec_vsx_ld(int __a, const vector float *__b) {
  return (vector float)__builtin_vsx_lxvw4x(__a, __b);
}

static __inline__ vector double __ATTRS_o_ai
vec_vsx_ld(int __a, const vector double *__b) {
  return (vector double)__builtin_vsx_lxvd2x(__a, __b);
}
+static __inline__ vector double __ATTRS_o_ai
+vec_vsx_ld(int __a, const double *__b) {
+ return (vector double)__builtin_vsx_lxvd2x(__a, __b);
+}
+
+static __inline__ vector bool short __ATTRS_o_ai
+vec_vsx_ld(int __a, const vector bool short *__b) {
+ return (vector bool short)__builtin_vsx_lxvw4x(__a, __b);
+}
+
static __inline__ vector signed short __ATTRS_o_ai
vec_vsx_ld(int __a, const vector signed short *__b) {
  return (vector signed short)__builtin_vsx_lxvw4x(__a, __b);
}

#ifdef __VSX__
+static __inline__ void __ATTRS_o_ai vec_vsx_st(vector bool int __a, int __b,
+ vector bool int *__c) {
+ __builtin_vsx_stxvw4x((vector int)__a, __b, __c);
+}
+
+static __inline__ void __ATTRS_o_ai vec_vsx_st(vector bool int __a, int __b,
+ signed int *__c) {
+ __builtin_vsx_stxvw4x((vector int)__a, __b, __c);
+}
+
+static __inline__ void __ATTRS_o_ai vec_vsx_st(vector bool int __a, int __b,
+ unsigned int *__c) {
+ __builtin_vsx_stxvw4x((vector int)__a, __b, __c);
+}
+
static __inline__ void __ATTRS_o_ai vec_vsx_st(vector signed int __a, int __b,
                                               vector signed int *__c) {
  __builtin_vsx_stxvw4x((vector int)__a, __b, __c);
}

static __inline__ void __ATTRS_o_ai vec_vsx_st(vector double __a, int __b,
                                               vector double *__c) {
  __builtin_vsx_stxvd2x((vector double)__a, __b, __c);
}
+static __inline__ void __ATTRS_o_ai vec_vsx_st(vector double __a, int __b,
+ double *__c) {
+ __builtin_vsx_stxvd2x((vector double)__a, __b, __c);
+}
+
+static __inline__ void __ATTRS_o_ai vec_vsx_st(vector bool short __a, int __b,
+ vector bool short *__c) {
+ __builtin_vsx_stxvw4x((vector int)__a, __b, __c);
+}
+
+static __inline__ void __ATTRS_o_ai vec_vsx_st(vector bool short __a, int __b,
+ signed short *__c) {
+ __builtin_vsx_stxvw4x((vector int)__a, __b, __c);
+}
+
+static __inline__ void __ATTRS_o_ai vec_vsx_st(vector bool short __a, int __b,
+ unsigned short *__c) {
+ __builtin_vsx_stxvw4x((vector int)__a, __b, __c);
+}
static __inline__ void __ATTRS_o_ai vec_vsx_st(vector signed short __a, int __b,
                                               vector signed short *__c) {
  __builtin_vsx_stxvw4x((vector int)__a, __b, __c);
}
static __inline__ void __ATTRS_o_ai vec_vsx_st(vector signed short __a, int __b,
                                               signed short *__c) {
  __builtin_vsx_stxvw4x((vector int)__a, __b, __c);
}
-static __inline__ void __ATTRS_o_ai vec_vsx_st(vector signed char __a, int __b,
- vector signed char *__c) {
+static __inline__ void __ATTRS_o_ai vec_vsx_st(vector bool char __a, int __b,
+ vector bool char *__c) {
__builtin_vsx_stxvw4x((vector int)__a, __b, __c);
}
-static __inline__ void __ATTRS_o_ai vec_vsx_st(vector signed char __a, int __b,
+static __inline__ void __ATTRS_o_ai vec_vsx_st(vector bool char __a, int __b,
signed char *__c) {
__builtin_vsx_stxvw4x((vector int)__a, __b, __c);
}
-static __inline__ void __ATTRS_o_ai vec_vsx_st(vector unsigned char __a,
- int __b,
- vector unsigned char *__c) {
+static __inline__ void __ATTRS_o_ai vec_vsx_st(vector bool char __a, int __b,
+ unsigned char *__c) {
__builtin_vsx_stxvw4x((vector int)__a, __b, __c);
}
-static __inline__ void __ATTRS_o_ai vec_vsx_st(vector unsigned char __a,
- int __b, unsigned char *__c) {
+static __inline__ void __ATTRS_o_ai vec_vsx_st(vector signed char __a, int __b,
+ vector signed char *__c) {
__builtin_vsx_stxvw4x((vector int)__a, __b, __c);
}
-static __inline__ void __ATTRS_o_ai vec_vsx_st(vector bool char __a, int __b,
- vector bool char *__c) {
+static __inline__ void __ATTRS_o_ai vec_vsx_st(vector signed char __a, int __b,
+ signed char *__c) {
__builtin_vsx_stxvw4x((vector int)__a, __b, __c);
}
-static __inline__ void __ATTRS_o_ai vec_vsx_st(vector bool char __a, int __b,
- signed char *__c) {
+static __inline__ void __ATTRS_o_ai vec_vsx_st(vector unsigned char __a,
+ int __b,
+ vector unsigned char *__c) {
__builtin_vsx_stxvw4x((vector int)__a, __b, __c);
}
-static __inline__ void __ATTRS_o_ai vec_vsx_st(vector bool char __a, int __b,
- unsigned char *__c) {
+static __inline__ void __ATTRS_o_ai vec_vsx_st(vector unsigned char __a,
+ int __b, unsigned char *__c) {
__builtin_vsx_stxvw4x((vector int)__a, __b, __c);
}
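/* Usage sketch (illustrative only, not part of the patch): how client code on a
   VSX-enabled PowerPC target is expected to call the overloads added above.
   The function names below are hypothetical; the first argument of
   vec_vsx_ld/vec_vsx_st is an offset (in bytes) added to the pointer operand. */
#include <altivec.h>

vector double load_pair(const double *p) {
  return vec_vsx_ld(0, p);            /* const double * overload -> lxvd2x */
}

void store_flags(vector bool int flags, unsigned int *out) {
  vec_vsx_st(flags, 0, out);          /* vector bool int overload -> stxvw4x */
}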
// RUN: %clang_cc1 -faltivec -target-feature +vsx -triple powerpc64-unknown-unknown -emit-llvm %s -o - | FileCheck %s
// RUN: %clang_cc1 -faltivec -target-feature +vsx -triple powerpc64le-unknown-unknown -emit-llvm %s -o - | FileCheck %s -check-prefix=CHECK-LE
#include <altivec.h>
+vector bool char vbc = { 0, 1, 0, 1, 0, 1, 0, 1,
+ 0, 1, 0, 1, 0, 1, 0, 1 };
vector signed char vsc = { -8, 9, -10, 11, -12, 13, -14, 15,
-0, 1, -2, 3, -4, 5, -6, 7};
vector unsigned char vuc = { 8, 9, 10, 11, 12, 13, 14, 15,
0, 1, 2, 3, 4, 5, 6, 7};
-vector bool char vbc = { 0, 1, 0, 1, 0, 1, 0, 1,
- 0, 1, 0, 1, 0, 1, 0, 1 };
vector float vf = { -1.5, 2.5, -3.5, 4.5 };
vector double vd = { 3.5, -7.5 };
+vector bool short vbs = { 0, 1, 0, 1, 0, 1, 0, 1 };
+vector signed short vss = { -1, 2, -3, 4, -5, 6, -7, 8 };
+vector unsigned short vus = { 0, 1, 2, 3, 4, 5, 6, 7 };
+vector bool int vbi = { 0, 1, 0, 1 };
vector signed int vsi = { -1, 2, -3, 4 };
vector unsigned int vui = { 0, 1, 2, 3 };
vector bool long long vbll = { 1, 0 };
vector signed long long vsll = { 255LL, -937LL };
vector unsigned long long vull = { 1447LL, 2894LL };
-vector signed short vss = { -1, 2, -3, 4, -5, 6, -7, 8 };
-vector unsigned short vus = { 0, 1, 2, 3, 4, 5, 6, 7 };
-float f = 12.34;
double d = 23.4;
-signed char sc = -128;
-unsigned char uc = 1;
-signed short ss = -32768;
-unsigned short us = 1;
+float af[4] = {23.4f, 56.7f, 89.0f, 12.3f};
+double ad[2] = {23.4, 56.7};
+signed char asc[16] = { -8, 9, -10, 11, -12, 13, -14, 15,
+ -0, 1, -2, 3, -4, 5, -6, 7};
+unsigned char auc[16] = { 8, 9, 10, 11, 12, 13, 14, 15,
+ 0, 1, 2, 3, 4, 5, 6, 7};
+signed short ass[8] = { -1, 2, -3, 4, -5, 6, -7, 8 };
+unsigned short aus[8] = { 0, 1, 2, 3, 4, 5, 6, 7 };
+signed int asi[4] = { -1, 2, -3, 4 };
+unsigned int aui[4] = { 0, 1, 2, 3 };
+signed long asl[2] = { -1L, 2L };
+unsigned long aul[2] = { 1L, 2L };
vector float res_vf;
vector double res_vd;
+vector bool char res_vbc;
+vector signed char res_vsc;
+vector unsigned char res_vuc;
+vector bool short res_vbs;
+vector signed short res_vss;
+vector unsigned short res_vus;
+vector bool int res_vbi;
vector signed int res_vsi;
vector unsigned int res_vui;
-vector bool int res_vbi;
vector bool long long res_vbll;
vector signed long long res_vsll;
vector unsigned long long res_vull;
-vector signed short res_vss;
-vector unsigned short res_vus;
-vector bool char res_vbc;
-vector signed char res_vsc;
-vector unsigned char res_vuc;
-float res_f;
double res_d;
-signed int res_si;
-unsigned int res_ui;
-signed char res_sc;
-unsigned char res_uc;
-signed short res_ss;
-unsigned short res_us;
+float res_af[4];
+double res_ad[2];
+signed char res_asc[16];
+unsigned char res_auc[16];
+signed short res_ass[8];
+unsigned short res_aus[8];
+signed int res_asi[4];
+unsigned int res_aui[4];
void dummy() { }
/* vec_vsx_ld */
+ res_vbi = vec_vsx_ld(0, &vbi);
+// CHECK: @llvm.ppc.vsx.lxvw4x
+// CHECK-LE: @llvm.ppc.vsx.lxvw4x
+
res_vsi = vec_vsx_ld(0, &vsi);
// CHECK: @llvm.ppc.vsx.lxvw4x
// CHECK-LE: @llvm.ppc.vsx.lxvw4x
+ res_vsi = vec_vsx_ld(0, asi);
+// CHECK: @llvm.ppc.vsx.lxvw4x
+// CHECK-LE: @llvm.ppc.vsx.lxvw4x
+
res_vui = vec_vsx_ld(0, &vui);
// CHECK: @llvm.ppc.vsx.lxvw4x
// CHECK-LE: @llvm.ppc.vsx.lxvw4x
+ res_vui = vec_vsx_ld(0, aui);
+// CHECK: @llvm.ppc.vsx.lxvw4x
+// CHECK-LE: @llvm.ppc.vsx.lxvw4x
+
res_vf = vec_vsx_ld (0, &vf);
// CHECK: @llvm.ppc.vsx.lxvw4x
// CHECK-LE: @llvm.ppc.vsx.lxvw4x
- res_vf = vec_vsx_ld (0, &f);
+ res_vf = vec_vsx_ld (0, af);
// CHECK: @llvm.ppc.vsx.lxvw4x
// CHECK-LE: @llvm.ppc.vsx.lxvw4x
  res_vull = vec_vsx_ld(0, &vull);
// CHECK: @llvm.ppc.vsx.lxvd2x
// CHECK-LE: @llvm.ppc.vsx.lxvd2x
  res_vd = vec_vsx_ld(0, &vd);
// CHECK: @llvm.ppc.vsx.lxvd2x
// CHECK-LE: @llvm.ppc.vsx.lxvd2x
+  res_vd = vec_vsx_ld(0, ad);
+// CHECK: @llvm.ppc.vsx.lxvd2x
+// CHECK-LE: @llvm.ppc.vsx.lxvd2x
+
+ res_vbs = vec_vsx_ld(0, &vbs);
+// CHECK: @llvm.ppc.vsx.lxvw4x
+// CHECK-LE: @llvm.ppc.vsx.lxvw4x
res_vss = vec_vsx_ld(0, &vss);
// CHECK: @llvm.ppc.vsx.lxvw4x
// CHECK-LE: @llvm.ppc.vsx.lxvw4x
- res_vss = vec_vsx_ld(0, &ss);
+ res_vss = vec_vsx_ld(0, ass);
// CHECK: @llvm.ppc.vsx.lxvw4x
// CHECK-LE: @llvm.ppc.vsx.lxvw4x
  res_vus = vec_vsx_ld(0, &vus);
// CHECK: @llvm.ppc.vsx.lxvw4x
// CHECK-LE: @llvm.ppc.vsx.lxvw4x
- res_vus = vec_vsx_ld(0, &us);
+ res_vus = vec_vsx_ld(0, aus);
// CHECK: @llvm.ppc.vsx.lxvw4x
// CHECK-LE: @llvm.ppc.vsx.lxvw4x
  res_vsc = vec_vsx_ld(0, &vsc);
// CHECK: @llvm.ppc.vsx.lxvw4x
// CHECK-LE: @llvm.ppc.vsx.lxvw4x
- res_vsc = vec_vsx_ld(0, &sc);
+ res_vsc = vec_vsx_ld(0, asc);
// CHECK: @llvm.ppc.vsx.lxvw4x
// CHECK-LE: @llvm.ppc.vsx.lxvw4x
- res_vuc = vec_vsx_ld(0, &uc);
+ res_vuc = vec_vsx_ld(0, auc);
// CHECK: @llvm.ppc.vsx.lxvw4x
// CHECK-LE: @llvm.ppc.vsx.lxvw4x
/* vec_vsx_st */
+ vec_vsx_st(vbi, 0, &res_vbi);
+// CHECK: @llvm.ppc.vsx.stxvw4x
+// CHECK-LE: @llvm.ppc.vsx.stxvw4x
+
+ vec_vsx_st(vbi, 0, res_aui);
+// CHECK: @llvm.ppc.vsx.stxvw4x
+// CHECK-LE: @llvm.ppc.vsx.stxvw4x
+
+ vec_vsx_st(vbi, 0, res_asi);
+// CHECK: @llvm.ppc.vsx.stxvw4x
+// CHECK-LE: @llvm.ppc.vsx.stxvw4x
+
vec_vsx_st(vsi, 0, &res_vsi);
// CHECK: @llvm.ppc.vsx.stxvw4x
// CHECK-LE: @llvm.ppc.vsx.stxvw4x
- vec_vsx_st(vsi, 0, &res_si);
+ vec_vsx_st(vsi, 0, res_asi);
// CHECK: @llvm.ppc.vsx.stxvw4x
// CHECK-LE: @llvm.ppc.vsx.stxvw4x
  vec_vsx_st(vui, 0, &res_vui);
// CHECK: @llvm.ppc.vsx.stxvw4x
// CHECK-LE: @llvm.ppc.vsx.stxvw4x
- vec_vsx_st(vui, 0, &res_ui);
+ vec_vsx_st(vui, 0, res_aui);
// CHECK: @llvm.ppc.vsx.stxvw4x
// CHECK-LE: @llvm.ppc.vsx.stxvw4x
  vec_vsx_st(vf, 0, &res_vf);
// CHECK: @llvm.ppc.vsx.stxvw4x
// CHECK-LE: @llvm.ppc.vsx.stxvw4x
- vec_vsx_st(vf, 0, &res_f);
+ vec_vsx_st(vf, 0, res_af);
// CHECK: @llvm.ppc.vsx.stxvw4x
// CHECK-LE: @llvm.ppc.vsx.stxvw4x
  vec_vsx_st(vd, 0, &res_vd);
// CHECK: @llvm.ppc.vsx.stxvd2x
// CHECK-LE: @llvm.ppc.vsx.stxvd2x
+ vec_vsx_st(vd, 0, res_ad);
+// CHECK: @llvm.ppc.vsx.stxvd2x
+// CHECK-LE: @llvm.ppc.vsx.stxvd2x
+
+ vec_vsx_st(vbs, 0, &res_vbs);
+// CHECK: @llvm.ppc.vsx.stxvw4x
+// CHECK-LE: @llvm.ppc.vsx.stxvw4x
+
+ vec_vsx_st(vbs, 0, res_aus);
+// CHECK: @llvm.ppc.vsx.stxvw4x
+// CHECK-LE: @llvm.ppc.vsx.stxvw4x
+
+ vec_vsx_st(vbs, 0, res_ass);
+// CHECK: @llvm.ppc.vsx.stxvw4x
+// CHECK-LE: @llvm.ppc.vsx.stxvw4x
+
vec_vsx_st(vss, 0, &res_vss);
// CHECK: @llvm.ppc.vsx.stxvw4x
// CHECK-LE: @llvm.ppc.vsx.stxvw4x
- vec_vsx_st(vss, 0, &res_ss);
+ vec_vsx_st(vss, 0, res_ass);
// CHECK: @llvm.ppc.vsx.stxvw4x
// CHECK-LE: @llvm.ppc.vsx.stxvw4x
  vec_vsx_st(vus, 0, &res_vus);
// CHECK: @llvm.ppc.vsx.stxvw4x
// CHECK-LE: @llvm.ppc.vsx.stxvw4x
- vec_vsx_st(vus, 0, &res_us);
+ vec_vsx_st(vus, 0, res_aus);
// CHECK: @llvm.ppc.vsx.stxvw4x
// CHECK-LE: @llvm.ppc.vsx.stxvw4x
  vec_vsx_st(vsc, 0, &res_vsc);
// CHECK: @llvm.ppc.vsx.stxvw4x
// CHECK-LE: @llvm.ppc.vsx.stxvw4x
- vec_vsx_st(vsc, 0, &res_sc);
+ vec_vsx_st(vsc, 0, res_asc);
// CHECK: @llvm.ppc.vsx.stxvw4x
// CHECK-LE: @llvm.ppc.vsx.stxvw4x
  vec_vsx_st(vuc, 0, &res_vuc);
// CHECK: @llvm.ppc.vsx.stxvw4x
// CHECK-LE: @llvm.ppc.vsx.stxvw4x
- vec_vsx_st(vuc, 0, &res_uc);
+ vec_vsx_st(vuc, 0, res_auc);
// CHECK: @llvm.ppc.vsx.stxvw4x
// CHECK-LE: @llvm.ppc.vsx.stxvw4x
  vec_vsx_st(vbc, 0, &res_vbc);
// CHECK: @llvm.ppc.vsx.stxvw4x
// CHECK-LE: @llvm.ppc.vsx.stxvw4x
- vec_vsx_st(vbc, 0, &res_sc);
+ vec_vsx_st(vbc, 0, res_asc);
// CHECK: @llvm.ppc.vsx.stxvw4x
// CHECK-LE: @llvm.ppc.vsx.stxvw4x
- vec_vsx_st(vbc, 0, &res_uc);
+ vec_vsx_st(vbc, 0, res_auc);
// CHECK: @llvm.ppc.vsx.stxvw4x
// CHECK-LE: @llvm.ppc.vsx.stxvw4x
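/* Illustrative only, not part of the test file: a hypothetical helper that
   walks a signed-int array in 16-byte strides, using the byte-offset first
   argument of vec_vsx_ld/vec_vsx_st. It assumes a VSX-enabled target and that
   n_vectors * 16 bytes are accessible from p. */
#include <altivec.h>

void negate_words(signed int *p, int n_vectors) {
  for (int i = 0; i < n_vectors; ++i) {
    vector signed int v = vec_vsx_ld(16 * i, p);   /* lxvw4x load at byte offset */
    v = vec_sub(vec_splat_s32(0), v);              /* negate each lane */
    vec_vsx_st(v, 16 * i, p);                      /* stxvw4x store at byte offset */
  }
}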