forked from OSchip/llvm-project
Add missing builtins to altivec.h for ABI compliance (vol. 1)
This patch corresponds to review: http://reviews.llvm.org/D10637 This is the first round of additions of missing builtins listed in the ABI document. More to come (this builds onto what seurer already adds). This patch adds: vector signed long long vec_abs(vector signed long long) vector double vec_abs(vector double) vector signed long long vec_add(vector signed long long, vector signed long long) vector unsigned long long vec_add(vector unsigned long long, vector unsigned long long) vector double vec_add(vector double, vector double) vector double vec_and(vector bool long long, vector double) vector double vec_and(vector double, vector bool long long) vector double vec_and(vector double, vector double) vector signed long long vec_and(vector signed long long, vector signed long long) vector double vec_andc(vector bool long long, vector double) vector double vec_andc(vector double, vector bool long long) vector double vec_andc(vector double, vector double) vector signed long long vec_andc(vector signed long long, vector signed long long) vector double vec_ceil(vector double) vector bool long long vec_cmpeq(vector double, vector double) vector bool long long vec_cmpge(vector double, vector double) vector bool long long vec_cmpge(vector signed long long, vector signed long long) vector bool long long vec_cmpge(vector unsigned long long, vector unsigned long long) vector bool long long vec_cmpgt(vector double, vector double) vector bool long long vec_cmple(vector double, vector double) vector bool long long vec_cmple(vector signed long long, vector signed long long) vector bool long long vec_cmple(vector unsigned long long, vector unsigned long long) vector bool long long vec_cmplt(vector double, vector double) vector bool long long vec_cmplt(vector signed long long, vector signed long long) vector bool long long vec_cmplt(vector unsigned long long, vector unsigned long long) llvm-svn: 240821
This commit is contained in:
parent
f502a428e6
commit
2f1f926e34
|
@ -267,6 +267,18 @@ BUILTIN(__builtin_vsx_xsmindp, "ddd", "")
|
|||
BUILTIN(__builtin_vsx_xvdivdp, "V2dV2dV2d", "")
|
||||
BUILTIN(__builtin_vsx_xvdivsp, "V4fV4fV4f", "")
|
||||
|
||||
BUILTIN(__builtin_vsx_xvrdpip, "V2dV2d", "")
|
||||
BUILTIN(__builtin_vsx_xvrspip, "V4fV4f", "")
|
||||
|
||||
BUILTIN(__builtin_vsx_xvcmpeqdp, "V2ULLiV2dV2d", "")
|
||||
BUILTIN(__builtin_vsx_xvcmpeqsp, "V4UiV4fV4f", "")
|
||||
|
||||
BUILTIN(__builtin_vsx_xvcmpgedp, "V2ULLiV2dV2d", "")
|
||||
BUILTIN(__builtin_vsx_xvcmpgesp, "V4UiV4fV4f", "")
|
||||
|
||||
BUILTIN(__builtin_vsx_xvcmpgtdp, "V2ULLiV2dV2d", "")
|
||||
BUILTIN(__builtin_vsx_xvcmpgtsp, "V4UiV4fV4f", "")
|
||||
|
||||
// HTM builtins
|
||||
BUILTIN(__builtin_tbegin, "UiUIi", "")
|
||||
BUILTIN(__builtin_tend, "UiUIi", "")
|
||||
|
|
|
@ -6560,6 +6560,13 @@ Value *CodeGenFunction::EmitPPCBuiltinExpr(unsigned BuiltinID,
|
|||
llvm::Function *F = CGM.getIntrinsic(ID);
|
||||
return Builder.CreateCall(F, Ops, "");
|
||||
}
|
||||
case PPC::BI__builtin_vsx_xvrspip:
|
||||
case PPC::BI__builtin_vsx_xvrdpip:
|
||||
llvm::Type *ResultType = ConvertType(E->getType());
|
||||
Value *X = EmitScalarExpr(E->getArg(0));
|
||||
ID = Intrinsic::ceil;
|
||||
llvm::Function *F = CGM.getIntrinsic(ID, ResultType);
|
||||
return Builder.CreateCall(F, X);
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -110,14 +110,28 @@ static vector signed int __ATTRS_o_ai vec_abs(vector signed int __a) {
|
|||
return __builtin_altivec_vmaxsw(__a, -__a);
|
||||
}
|
||||
|
||||
#if defined(__POWER8_VECTOR__) && defined(__powerpc64__)
|
||||
static vector signed long long __ATTRS_o_ai
|
||||
vec_abs(vector signed long long __a) {
|
||||
return __builtin_altivec_vmaxsd(__a, -__a);
|
||||
}
|
||||
#endif
|
||||
|
||||
/* vec_abs for 4 x float: clear the IEEE-754 sign bit of each element by
   masking with 0x7FFFFFFF, then bitcast back to float. */
static vector float __ATTRS_o_ai vec_abs(vector float __a) {
  vector unsigned int __res =
      (vector unsigned int)__a & (vector unsigned int)(0x7FFFFFFF);
  return (vector float)__res;
}
|
||||
|
||||
/* vec_abs (VSX double overload) */
|
||||
#if defined(__POWER8_VECTOR__) && defined(__powerpc64__)
/* vec_abs for 2 x double: clear the IEEE-754 sign bit of each element by
   masking with 0x7FFF...F (all bits except the sign bit). */
static vector double __ATTRS_o_ai vec_abs(vector double __a) {
  vector unsigned long long __res = { 0x7FFFFFFFFFFFFFFF, 0x7FFFFFFFFFFFFFFF };
  /* The mask must be applied in the same 2 x unsigned long long element type
     as __res; the original cast to vector unsigned int combined a v4i32
     operand with a v2i64 accumulator. */
  __res &= (vector unsigned long long)__a;
  return (vector double)__res;
}
#endif
|
||||
|
||||
/* vec_abss */
|
||||
#define __builtin_altivec_abss_v16qi vec_abss
|
||||
#define __builtin_altivec_abss_v8hi vec_abss
|
||||
#define __builtin_altivec_abss_v4si vec_abss
|
||||
|
@ -226,6 +240,16 @@ static vector unsigned int __ATTRS_o_ai vec_add(vector unsigned int __a,
|
|||
}
|
||||
|
||||
#if defined(__POWER8_VECTOR__) && defined(__powerpc64__)
|
||||
/* vec_add for 2 x signed long long (POWER8): element-wise addition. */
static vector signed long long __ATTRS_o_ai
vec_add(vector signed long long __a, vector signed long long __b) {
  return __a + __b;
}
|
||||
|
||||
/* vec_add for 2 x unsigned long long (POWER8): element-wise addition. */
static vector unsigned long long __ATTRS_o_ai
vec_add(vector unsigned long long __a, vector unsigned long long __b) {
  return __a + __b;
}
|
||||
|
||||
static vector signed __int128 __ATTRS_o_ai vec_add(vector signed __int128 __a,
|
||||
vector signed __int128 __b) {
|
||||
return __a + __b;
|
||||
|
@ -241,6 +265,13 @@ static vector float __ATTRS_o_ai vec_add(vector float __a, vector float __b) {
|
|||
return __a + __b;
|
||||
}
|
||||
|
||||
#ifdef __VSX__
|
||||
static vector double __ATTRS_o_ai
|
||||
vec_add(vector double __a, vector double __b) {
|
||||
return __a + __b;
|
||||
}
|
||||
#endif // __VSX__
|
||||
|
||||
/* vec_vaddubm */
|
||||
|
||||
#define __builtin_altivec_vaddubm vec_vaddubm
|
||||
|
@ -746,6 +777,24 @@ static vector float __ATTRS_o_ai vec_and(vector float __a,
|
|||
}
|
||||
|
||||
#ifdef __VSX__
|
||||
/* vec_and overloads involving vector double (VSX): the bitwise AND is
   performed in the 2 x unsigned long long domain and the result is bitcast
   back to vector double. */
static vector double __ATTRS_o_ai vec_and(vector bool long long __a, vector double __b) {
  vector unsigned long long __res =
      (vector unsigned long long)__a & (vector unsigned long long)__b;
  return (vector double)__res;
}

static vector double __ATTRS_o_ai vec_and(vector double __a, vector bool long long __b) {
  vector unsigned long long __res =
      (vector unsigned long long)__a & (vector unsigned long long)__b;
  return (vector double)__res;
}

static vector double __ATTRS_o_ai vec_and(vector double __a, vector double __b) {
  vector unsigned long long __res =
      (vector unsigned long long)__a & (vector unsigned long long)__b;
  return (vector double)__res;
}
|
||||
|
||||
static vector signed long long __ATTRS_o_ai
|
||||
vec_and(vector signed long long __a, vector signed long long __b) {
|
||||
return __a & __b;
|
||||
|
@ -1068,6 +1117,26 @@ static vector float __ATTRS_o_ai vec_andc(vector float __a,
|
|||
}
|
||||
|
||||
#ifdef __VSX__
|
||||
/* vec_andc overloads involving vector double (VSX): computes __a & ~__b in
   the 2 x unsigned long long domain, then bitcasts back to vector double. */
static vector double __ATTRS_o_ai
vec_andc(vector bool long long __a, vector double __b) {
  vector unsigned long long __res =
      (vector unsigned long long)__a & ~(vector unsigned long long)__b;
  return (vector double)__res;
}

static vector double __ATTRS_o_ai
vec_andc(vector double __a, vector bool long long __b) {
  vector unsigned long long __res =
      (vector unsigned long long)__a & ~(vector unsigned long long)__b;
  return (vector double)__res;
}

static vector double __ATTRS_o_ai vec_andc(vector double __a, vector double __b) {
  vector unsigned long long __res =
      (vector unsigned long long)__a & ~(vector unsigned long long)__b;
  return (vector double)__res;
}
|
||||
|
||||
static vector signed long long __ATTRS_o_ai
|
||||
vec_andc(vector signed long long __a, vector signed long long __b) {
|
||||
return __a & ~__b;
|
||||
|
@ -1338,11 +1407,20 @@ vec_vavguw(vector unsigned int __a, vector unsigned int __b) {
|
|||
|
||||
/* vec_ceil */
|
||||
|
||||
static vector float __attribute__((__always_inline__))
|
||||
vec_ceil(vector float __a) {
|
||||
/* vec_ceil: round each float element toward +infinity.  Uses the VSX
   xvrspip instruction when available, otherwise the classic AltiVec vrfip. */
static vector float __ATTRS_o_ai vec_ceil(vector float __a) {
#ifdef __VSX__
  return __builtin_vsx_xvrspip(__a);
#else
  return __builtin_altivec_vrfip(__a);
#endif
}
|
||||
|
||||
#ifdef __VSX__
|
||||
static vector double __ATTRS_o_ai vec_ceil(vector double __a) {
|
||||
return __builtin_vsx_xvrdpip(__a);
|
||||
}
|
||||
#endif
|
||||
|
||||
/* vec_vrfip */
|
||||
|
||||
static vector float __attribute__((__always_inline__))
|
||||
|
@ -1414,16 +1492,56 @@ vec_cmpeq(vector unsigned long long __a, vector unsigned long long __b) {
|
|||
|
||||
static vector bool int __ATTRS_o_ai vec_cmpeq(vector float __a,
|
||||
vector float __b) {
|
||||
#ifdef __VSX__
|
||||
return (vector bool int)__builtin_vsx_xvcmpeqsp(__a, __b);
|
||||
#else
|
||||
return (vector bool int)__builtin_altivec_vcmpeqfp(__a, __b);
|
||||
#endif
|
||||
}
|
||||
|
||||
#ifdef __VSX__
|
||||
static vector bool long long __ATTRS_o_ai
|
||||
vec_cmpeq(vector double __a, vector double __b) {
|
||||
return (vector bool long long)__builtin_vsx_xvcmpeqdp(__a, __b);
|
||||
}
|
||||
#endif
|
||||
|
||||
/* vec_cmpge */
|
||||
|
||||
static vector bool int __attribute__((__always_inline__))
|
||||
/* vec_cmpge for 4 x float: element-wise greater-than-or-equal, using the
   VSX xvcmpgesp instruction when available and vcmpgefp otherwise. */
static vector bool int __ATTRS_o_ai
vec_cmpge(vector float __a, vector float __b) {
#ifdef __VSX__
  return (vector bool int)__builtin_vsx_xvcmpgesp(__a, __b);
#else
  return (vector bool int)__builtin_altivec_vcmpgefp(__a, __b);
#endif
}
|
||||
|
||||
#ifdef __VSX__
|
||||
static vector bool long long __ATTRS_o_ai
|
||||
vec_cmpge(vector double __a, vector double __b) {
|
||||
return (vector bool long long)__builtin_vsx_xvcmpgedp(__a, __b);
|
||||
}
|
||||
#endif
|
||||
|
||||
#ifdef __POWER8_VECTOR__
|
||||
/* Forwrad declarations as the functions are used here */
|
||||
static vector bool long long __ATTRS_o_ai
|
||||
vec_cmpgt(vector unsigned long long __a, vector unsigned long long __b);
|
||||
static vector bool long long __ATTRS_o_ai
|
||||
vec_cmpgt(vector signed long long __a, vector signed long long __b);
|
||||
|
||||
static vector bool long long __ATTRS_o_ai
|
||||
vec_cmpge(vector signed long long __a, vector signed long long __b) {
|
||||
return ~(vec_cmpgt(__b, __a));
|
||||
}
|
||||
|
||||
static vector bool long long __ATTRS_o_ai
|
||||
vec_cmpge(vector unsigned long long __a, vector unsigned long long __b) {
|
||||
return ~(vec_cmpgt(__b, __a));
|
||||
}
|
||||
#endif
|
||||
|
||||
/* vec_vcmpgefp */
|
||||
|
||||
static vector bool int __attribute__((__always_inline__))
|
||||
|
@ -1476,9 +1594,19 @@ vec_cmpgt(vector unsigned long long __a, vector unsigned long long __b) {
|
|||
|
||||
static vector bool int __ATTRS_o_ai vec_cmpgt(vector float __a,
|
||||
vector float __b) {
|
||||
#ifdef __VSX__
|
||||
return (vector bool int)__builtin_vsx_xvcmpgtsp(__a, __b);
|
||||
#else
|
||||
return (vector bool int)__builtin_altivec_vcmpgtfp(__a, __b);
|
||||
#endif
|
||||
}
|
||||
|
||||
#ifdef __VSX__
|
||||
static vector bool long long __ATTRS_o_ai
|
||||
vec_cmpgt(vector double __a, vector double __b) {
|
||||
return (vector bool long long)__builtin_vsx_xvcmpgtdp(__a, __b);
|
||||
}
|
||||
#endif
|
||||
/* vec_vcmpgtsb */
|
||||
|
||||
static vector bool char __attribute__((__always_inline__))
|
||||
|
@ -1530,47 +1658,85 @@ vec_vcmpgtfp(vector float __a, vector float __b) {
|
|||
|
||||
/* vec_cmple */
|
||||
|
||||
static vector bool int __attribute__((__always_inline__))
|
||||
static vector bool int __ATTRS_o_ai
|
||||
vec_cmple(vector float __a, vector float __b) {
|
||||
return (vector bool int)__builtin_altivec_vcmpgefp(__b, __a);
|
||||
return vec_cmpge(__b, __a);
|
||||
}
|
||||
|
||||
#ifdef __VSX__
/* vec_cmple for 2 x double: a <= b is implemented as b >= a. */
static vector bool long long __ATTRS_o_ai
vec_cmple(vector double __a, vector double __b) {
  return vec_cmpge(__b, __a);
}
#endif

#ifdef __POWER8_VECTOR__
/* vec_cmple for 2 x long long (signed/unsigned): a <= b is b >= a. */
static vector bool long long __ATTRS_o_ai
vec_cmple(vector signed long long __a, vector signed long long __b) {
  return vec_cmpge(__b, __a);
}

static vector bool long long __ATTRS_o_ai
vec_cmple(vector unsigned long long __a, vector unsigned long long __b) {
  return vec_cmpge(__b, __a);
}
#endif
|
||||
|
||||
/* vec_cmplt */
|
||||
|
||||
static vector bool char __ATTRS_o_ai vec_cmplt(vector signed char __a,
|
||||
vector signed char __b) {
|
||||
return (vector bool char)__builtin_altivec_vcmpgtsb(__b, __a);
|
||||
return vec_cmpgt(__b, __a);
|
||||
}
|
||||
|
||||
static vector bool char __ATTRS_o_ai vec_cmplt(vector unsigned char __a,
|
||||
vector unsigned char __b) {
|
||||
return (vector bool char)__builtin_altivec_vcmpgtub(__b, __a);
|
||||
return vec_cmpgt(__b, __a);
|
||||
}
|
||||
|
||||
static vector bool short __ATTRS_o_ai vec_cmplt(vector short __a,
|
||||
vector short __b) {
|
||||
return (vector bool short)__builtin_altivec_vcmpgtsh(__b, __a);
|
||||
return vec_cmpgt(__b, __a);
|
||||
}
|
||||
|
||||
static vector bool short __ATTRS_o_ai vec_cmplt(vector unsigned short __a,
|
||||
vector unsigned short __b) {
|
||||
return (vector bool short)__builtin_altivec_vcmpgtuh(__b, __a);
|
||||
return vec_cmpgt(__b, __a);
|
||||
}
|
||||
|
||||
static vector bool int __ATTRS_o_ai vec_cmplt(vector int __a, vector int __b) {
|
||||
return (vector bool int)__builtin_altivec_vcmpgtsw(__b, __a);
|
||||
return vec_cmpgt(__b, __a);
|
||||
}
|
||||
|
||||
static vector bool int __ATTRS_o_ai vec_cmplt(vector unsigned int __a,
|
||||
vector unsigned int __b) {
|
||||
return (vector bool int)__builtin_altivec_vcmpgtuw(__b, __a);
|
||||
return vec_cmpgt(__b, __a);
|
||||
}
|
||||
|
||||
static vector bool int __ATTRS_o_ai vec_cmplt(vector float __a,
|
||||
vector float __b) {
|
||||
return (vector bool int)__builtin_altivec_vcmpgtfp(__b, __a);
|
||||
return vec_cmpgt(__b, __a);
|
||||
}
|
||||
|
||||
#ifdef __VSX__
/* vec_cmplt for 2 x double: a < b is implemented as b > a. */
static vector bool long long __ATTRS_o_ai
vec_cmplt(vector double __a, vector double __b) {
  return vec_cmpgt(__b, __a);
}
#endif

#ifdef __POWER8_VECTOR__
/* vec_cmplt for 2 x long long (signed/unsigned): a < b is b > a. */
static vector bool long long __ATTRS_o_ai
vec_cmplt(vector signed long long __a, vector signed long long __b) {
  return vec_cmpgt(__b, __a);
}

static vector bool long long __ATTRS_o_ai
vec_cmplt(vector unsigned long long __a, vector unsigned long long __b) {
  return vec_cmpgt(__b, __a);
}
#endif
|
||||
|
||||
/* vec_ctf */
|
||||
|
||||
static vector float __ATTRS_o_ai vec_ctf(vector int __a, int __b) {
|
||||
|
|
|
@ -1,7 +1,11 @@
|
|||
// REQUIRES: powerpc-registered-target
|
||||
// RUN: %clang_cc1 -faltivec -target-feature +power8-vector -triple powerpc64-unknown-unknown -emit-llvm %s -o - | FileCheck %s
|
||||
// RUN: %clang_cc1 -faltivec -target-feature +power8-vector -triple powerpc64le-unknown-unknown -emit-llvm %s -o - | FileCheck %s -check-prefix=CHECK-LE
|
||||
// RUN: not %clang_cc1 -faltivec -triple powerpc64-unknown-unknown -emit-llvm %s -o - 2>&1 | FileCheck %s -check-prefix=CHECK-PPC
|
||||
// RUN: not %clang_cc1 -faltivec -target-feature +vsx -triple powerpc64-unknown-unknown -emit-llvm %s -o - 2>&1 | FileCheck %s -check-prefix=CHECK-PPC
|
||||
// Added -target-feature +vsx above to avoid errors about "vector double" and to
|
||||
// generate the correct errors for functions that are only overloaded with VSX
|
||||
// (vec_cmpge, vec_cmple). Without this option, there is only one overload so
|
||||
// it is selected.
|
||||
|
||||
vector signed char vsc = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 1, 2, 3, 4, 5 };
|
||||
vector unsigned char vuc = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 1, 2, 3, 4, 5 };
|
||||
|
@ -11,6 +15,7 @@ vector bool int vbi = {0, -1, -1, 0};
|
|||
vector bool long long vbll = { 1, 0 };
|
||||
vector signed long long vsll = { 1, 2 };
|
||||
vector unsigned long long vull = { 1, 2 };
|
||||
vector double vda = { 1.e-11, -132.23e10 };
|
||||
|
||||
int res_i;
|
||||
vector signed char res_vsc;
|
||||
|
@ -21,10 +26,35 @@ vector bool int res_vbi;
|
|||
vector bool long long res_vbll;
|
||||
vector signed long long res_vsll;
|
||||
vector unsigned long long res_vull;
|
||||
vector double res_vd;
|
||||
|
||||
// CHECK-LABEL: define void @test1
|
||||
void test1() {
|
||||
|
||||
/* vec_abs */
|
||||
res_vsll = vec_abs(vsll);
|
||||
// CHECK: call <2 x i64> @llvm.ppc.altivec.vmaxsd(<2 x i64> %{{[0-9]]*}}, <2 x i64>
|
||||
// CHECK-LE: call <2 x i64> @llvm.ppc.altivec.vmaxsd(<2 x i64> %{{[0-9]]*}}, <2 x i64>
|
||||
// CHECK-PPC: error: call to 'vec_abs' is ambiguous
|
||||
|
||||
res_vd = vec_abs(vda);
|
||||
// CHECK: store <2 x i64> <i64 9223372036854775807, i64 9223372036854775807>, <2 x i64>*
|
||||
// CHECK: and <2 x i64>
|
||||
// CHECK-LE: store <2 x i64> <i64 9223372036854775807, i64 9223372036854775807>, <2 x i64>*
|
||||
// CHECK-LE: and <2 x i64>
|
||||
// CHECK-PPC: error: call to 'vec_abs' is ambiguous
|
||||
|
||||
/* vec_add */
|
||||
res_vsll = vec_add(vsll, vsll);
|
||||
// CHECK: add <2 x i64>
|
||||
// CHECK-LE: add <2 x i64>
|
||||
// CHECK-PPC: error: call to 'vec_add' is ambiguous
|
||||
|
||||
res_vull = vec_add(vull, vull);
|
||||
// CHECK: add <2 x i64>
|
||||
// CHECK-LE: add <2 x i64>
|
||||
// CHECK-PPC: error: call to 'vec_add' is ambiguous
|
||||
|
||||
/* vec_cmpeq */
|
||||
res_vbll = vec_cmpeq(vsll, vsll);
|
||||
// CHECK: @llvm.ppc.altivec.vcmpequd
|
||||
|
@ -36,6 +66,28 @@ void test1() {
|
|||
// CHECK-LE: @llvm.ppc.altivec.vcmpequd
|
||||
// CHECK-PPC: error: call to 'vec_cmpeq' is ambiguous
|
||||
|
||||
/* vec_cmpge */
|
||||
res_vbll = vec_cmpge(vsll, vsll);
|
||||
// CHECK: @llvm.ppc.altivec.vcmpgtsd
|
||||
// CHECK-LE: @llvm.ppc.altivec.vcmpgtsd
|
||||
// CHECK-PPC: error: call to 'vec_cmpge' is ambiguous
|
||||
|
||||
res_vbll = vec_cmpge(vull, vull);
|
||||
// CHECK: @llvm.ppc.altivec.vcmpgtud
|
||||
// CHECK-LE: @llvm.ppc.altivec.vcmpgtud
|
||||
// CHECK-PPC: error: call to 'vec_cmpge' is ambiguous
|
||||
|
||||
/* vec_cmple */
|
||||
res_vbll = vec_cmple(vsll, vsll);
|
||||
// CHECK: @llvm.ppc.altivec.vcmpgtsd
|
||||
// CHECK-LE: @llvm.ppc.altivec.vcmpgtsd
|
||||
// CHECK-PPC: error: call to 'vec_cmple' is ambiguous
|
||||
|
||||
res_vbll = vec_cmple(vull, vull);
|
||||
// CHECK: @llvm.ppc.altivec.vcmpgtud
|
||||
// CHECK-LE: @llvm.ppc.altivec.vcmpgtud
|
||||
// CHECK-PPC: error: call to 'vec_cmple' is ambiguous
|
||||
|
||||
/* vec_cmpgt */
|
||||
res_vbll = vec_cmpgt(vsll, vsll);
|
||||
// CHECK: @llvm.ppc.altivec.vcmpgtsd
|
||||
|
@ -47,6 +99,17 @@ void test1() {
|
|||
// CHECK-LE: @llvm.ppc.altivec.vcmpgtud
|
||||
// CHECK-PPC: error: call to 'vec_cmpgt' is ambiguous
|
||||
|
||||
/* vec_cmplt */
|
||||
res_vbll = vec_cmplt(vsll, vsll);
|
||||
// CHECK: call <2 x i64> @llvm.ppc.altivec.vcmpgtsd(<2 x i64> %{{[0-9]*}}, <2 x i64> %{{[0-9]*}})
|
||||
// CHECK-LE: call <2 x i64> @llvm.ppc.altivec.vcmpgtsd(<2 x i64> %{{[0-9]*}}, <2 x i64> %{{[0-9]*}})
|
||||
// CHECK-PPC: error: call to 'vec_cmplt' is ambiguous
|
||||
|
||||
res_vbll = vec_cmplt(vull, vull);
|
||||
// CHECK: call <2 x i64> @llvm.ppc.altivec.vcmpgtud(<2 x i64> %{{[0-9]*}}, <2 x i64> %{{[0-9]*}})
|
||||
// CHECK-LE: call <2 x i64> @llvm.ppc.altivec.vcmpgtud(<2 x i64> %{{[0-9]*}}, <2 x i64> %{{[0-9]*}})
|
||||
// CHECK-PPC: error: call to 'vec_cmplt' is ambiguous
|
||||
|
||||
/* ----------------------- predicates --------------------------- */
|
||||
/* vec_all_eq */
|
||||
res_i = vec_all_eq(vsll, vsll);
|
||||
|
|
|
@ -1,5 +1,6 @@
|
|||
// REQUIRES: powerpc-registered-target
|
||||
// RUN: %clang_cc1 -faltivec -target-feature +vsx -triple powerpc64-unknown-unknown -emit-llvm %s -o - | FileCheck %s
|
||||
// RUN: %clang_cc1 -faltivec -target-feature +vsx -triple powerpc64le-unknown-unknown -emit-llvm %s -o - | FileCheck %s
|
||||
|
||||
vector unsigned char vuc = { 8, 9, 10, 11, 12, 13, 14, 15,
|
||||
0, 1, 2, 3, 4, 5, 6, 7};
|
||||
|
@ -16,14 +17,98 @@ vector float res_vf;
|
|||
vector double res_vd;
|
||||
vector signed int res_vsi;
|
||||
vector unsigned int res_vui;
|
||||
vector bool int res_vbi;
|
||||
vector bool long long res_vbll;
|
||||
vector signed long long res_vsll;
|
||||
vector unsigned long long res_vull;
|
||||
double res_d;
|
||||
|
||||
void dummy() { }
|
||||
|
||||
void test1() {
|
||||
// CHECK-LABEL: define void @test1
|
||||
|
||||
res_vd = vec_add(vd, vd);
|
||||
// CHECK: fadd <2 x double>
|
||||
|
||||
res_vd = vec_and(vbll, vd);
|
||||
// CHECK: and <2 x i64>
|
||||
// CHECK: bitcast <2 x i64> %{{[0-9]*}} to <2 x double>
|
||||
|
||||
res_vd = vec_and(vd, vbll);
|
||||
// CHECK: and <2 x i64>
|
||||
// CHECK: bitcast <2 x i64> %{{[0-9]*}} to <2 x double>
|
||||
|
||||
res_vd = vec_and(vd, vd);
|
||||
// CHECK: and <2 x i64>
|
||||
// CHECK: bitcast <2 x i64> %{{[0-9]*}} to <2 x double>
|
||||
|
||||
dummy();
|
||||
// CHECK: call void @dummy()
|
||||
|
||||
res_vd = vec_andc(vbll, vd);
|
||||
// CHECK: bitcast <2 x double> %{{[0-9]*}} to <2 x i64>
|
||||
// CHECK: xor <2 x i64> %{{[0-9]*}}, <i64 -1, i64 -1>
|
||||
// CHECK: and <2 x i64>
|
||||
// CHECK: bitcast <2 x i64> %{{[0-9]*}} to <2 x double>
|
||||
|
||||
dummy();
|
||||
// CHECK: call void @dummy()
|
||||
|
||||
res_vd = vec_andc(vd, vbll);
|
||||
// CHECK: bitcast <2 x double> %{{[0-9]*}} to <2 x i64>
|
||||
// CHECK: xor <2 x i64> %{{[0-9]*}}, <i64 -1, i64 -1>
|
||||
// CHECK: and <2 x i64>
|
||||
// CHECK: bitcast <2 x i64> %{{[0-9]*}} to <2 x double>
|
||||
|
||||
dummy();
|
||||
// CHECK: call void @dummy()
|
||||
|
||||
res_vd = vec_andc(vd, vd);
|
||||
// CHECK: bitcast <2 x double> %{{[0-9]*}} to <2 x i64>
|
||||
// CHECK: xor <2 x i64> %{{[0-9]*}}, <i64 -1, i64 -1>
|
||||
// CHECK: and <2 x i64>
|
||||
// CHECK: bitcast <2 x i64> %{{[0-9]*}} to <2 x double>
|
||||
|
||||
dummy();
|
||||
// CHECK: call void @dummy()
|
||||
|
||||
res_vd = vec_ceil(vd);
|
||||
// CHECK: call <2 x double> @llvm.ceil.v2f64(<2 x double> %{{[0-9]*}})
|
||||
|
||||
res_vf = vec_ceil(vf);
|
||||
// CHECK: call <4 x float> @llvm.ceil.v4f32(<4 x float> %{{[0-9]*}})
|
||||
|
||||
res_vbll = vec_cmpeq(vd, vd);
|
||||
// CHECK: call <2 x i64> @llvm.ppc.vsx.xvcmpeqdp(<2 x double> %{{[0-9]*}}, <2 x double> %{{[0-9]*}})
|
||||
|
||||
res_vbi = vec_cmpeq(vf, vf);
|
||||
// CHECK: call <4 x i32> @llvm.ppc.vsx.xvcmpeqsp(<4 x float> %{{[0-9]*}}, <4 x float> %{{[0-9]*}})
|
||||
|
||||
res_vbll = vec_cmpge(vd, vd);
|
||||
// CHECK: call <2 x i64> @llvm.ppc.vsx.xvcmpgedp(<2 x double> %{{[0-9]*}}, <2 x double> %{{[0-9]*}})
|
||||
|
||||
res_vbi = vec_cmpge(vf, vf);
|
||||
// CHECK: call <4 x i32> @llvm.ppc.vsx.xvcmpgesp(<4 x float> %{{[0-9]*}}, <4 x float> %{{[0-9]*}})
|
||||
|
||||
res_vbll = vec_cmpgt(vd, vd);
|
||||
// CHECK: call <2 x i64> @llvm.ppc.vsx.xvcmpgtdp(<2 x double> %{{[0-9]*}}, <2 x double> %{{[0-9]*}})
|
||||
|
||||
res_vbi = vec_cmpgt(vf, vf);
|
||||
// CHECK: call <4 x i32> @llvm.ppc.vsx.xvcmpgtsp(<4 x float> %{{[0-9]*}}, <4 x float> %{{[0-9]*}})
|
||||
|
||||
res_vbll = vec_cmple(vd, vd);
|
||||
// CHECK: call <2 x i64> @llvm.ppc.vsx.xvcmpgedp(<2 x double> %{{[0-9]*}}, <2 x double> %{{[0-9]*}})
|
||||
|
||||
res_vbi = vec_cmple(vf, vf);
|
||||
// CHECK: call <4 x i32> @llvm.ppc.vsx.xvcmpgesp(<4 x float> %{{[0-9]*}}, <4 x float> %{{[0-9]*}})
|
||||
|
||||
res_vbll = vec_cmplt(vd, vd);
|
||||
// CHECK: call <2 x i64> @llvm.ppc.vsx.xvcmpgtdp(<2 x double> %{{[0-9]*}}, <2 x double> %{{[0-9]*}})
|
||||
|
||||
res_vbi = vec_cmplt(vf, vf);
|
||||
// CHECK: call <4 x i32> @llvm.ppc.vsx.xvcmpgtsp(<4 x float> %{{[0-9]*}}, <4 x float> %{{[0-9]*}})
|
||||
|
||||
/* vec_div */
|
||||
res_vf = vec_div(vf, vf);
|
||||
// CHECK: @llvm.ppc.vsx.xvdivsp
|
||||
|
|
Loading…
Reference in New Issue