forked from OSchip/llvm-project
[PowerPC] Remove unnecessary 64-bit guards from altivec.h
A number of functions in the header are guarded for 64-bit mode only, presumably because some of the functions in those blocks use vector __int128, which is only available in 64-bit mode. A more appropriate guard (__SIZEOF_INT128__) has since been added for those functions, making the 64-bit guards redundant. This patch removes those guards, as they inadvertently also hid code that uses vector long long, which does not actually require 64-bit mode.
This commit is contained in:
parent 0da172b176
commit 41ce5ec5f6
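The pattern is easiest to see condensed. Below is a minimal sketch of the guard layout after this patch; it is illustrative rather than a verbatim excerpt of the header (the my_vec_* names and the local __ATTRS_o_ai definition are stand-ins for the header's own), and it assumes clang with -maltivec -mpower8-vector so the AltiVec `vector` keyword and the Power8 builtins are available:

/* Condensed sketch of the guard layout after this patch (illustrative,
 * not copied from altivec.h). */
#define __ATTRS_o_ai __attribute__((__overloadable__, __always_inline__))

/* Formerly: #if defined(__POWER8_VECTOR__) && defined(__powerpc64__) */
#ifdef __POWER8_VECTOR__
/* vector long long does not require 64-bit mode, so no __powerpc64__ guard. */
static __inline__ vector signed long long __ATTRS_o_ai
my_vec_abs(vector signed long long __a) {
  return __builtin_altivec_vmaxsd(__a, -__a);
}

/* Only the __int128 overloads need 128-bit integer support, and
 * __SIZEOF_INT128__ tracks exactly that. */
#ifdef __SIZEOF_INT128__
static __inline__ vector signed __int128 __ATTRS_o_ai
my_vec_addc(vector signed __int128 __a, vector signed __int128 __b) {
  return __builtin_altivec_vaddcuq(__a, __b);
}
#endif /* __SIZEOF_INT128__ */
#endif /* __POWER8_VECTOR__ */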
altivec.h:

@@ -124,7 +124,7 @@ vec_abs(vector signed int __a) {
   return __builtin_altivec_vmaxsw(__a, -__a);
 }
 
-#if defined(__POWER8_VECTOR__) && defined(__powerpc64__)
+#ifdef __POWER8_VECTOR__
 static __inline__ vector signed long long __ATTRS_o_ai
 vec_abs(vector signed long long __a) {
   return __builtin_altivec_vmaxsd(__a, -__a);
@@ -282,7 +282,7 @@ vec_add(vector unsigned int __a, vector bool int __b) {
   return __a + (vector unsigned int)__b;
 }
 
-#if defined(__POWER8_VECTOR__) && defined(__powerpc64__)
+#ifdef __POWER8_VECTOR__
 static __inline__ vector signed long long __ATTRS_o_ai
 vec_add(vector signed long long __a, vector signed long long __b) {
   return __a + __b;
@@ -333,7 +333,7 @@ vec_add(vector unsigned long long __a, vector unsigned long long __b) {
   return (vector unsigned long long)vec_add((vector signed long long)__a,
                                             (vector signed long long)__b);
 }
-#endif // defined(__POWER8_VECTOR__) && defined(__powerpc64__)
+#endif // __POWER8_VECTOR__
 
 static __inline__ vector float __ATTRS_o_ai vec_add(vector float __a,
                                                     vector float __b) {
@@ -349,7 +349,7 @@ static __inline__ vector double __ATTRS_o_ai vec_add(vector double __a,
 
 /* vec_adde */
 
-#if defined(__POWER8_VECTOR__) && defined(__powerpc64__)
+#ifdef __POWER8_VECTOR__
 #ifdef __SIZEOF_INT128__
 static __inline__ vector signed __int128 __ATTRS_o_ai
 vec_adde(vector signed __int128 __a, vector signed __int128 __b,
@@ -389,7 +389,7 @@ vec_adde(vector unsigned int __a, vector unsigned int __b,
 
 /* vec_addec */
 
-#if defined(__POWER8_VECTOR__) && defined(__powerpc64__)
+#ifdef __POWER8_VECTOR__
 #ifdef __SIZEOF_INT128__
 static __inline__ vector signed __int128 __ATTRS_o_ai
 vec_addec(vector signed __int128 __a, vector signed __int128 __b,
@@ -410,6 +410,7 @@ vec_addec_u128(vector unsigned char __a, vector unsigned char __b,
   return (vector unsigned char)__builtin_altivec_vaddecuq(__a, __b, __c);
 }
 
+#ifdef __powerpc64__
 static __inline__ vector signed int __ATTRS_o_ai
 vec_addec(vector signed int __a, vector signed int __b,
           vector signed int __c) {
@@ -452,8 +453,8 @@ vec_addec(vector unsigned int __a, vector unsigned int __b,
   vector unsigned int ret = { __result[0], __result[1], __result[2], __result[3] };
   return ret;
 }
-
-#endif
+#endif // __powerpc64__
+#endif // __POWER8_VECTOR__
 
 /* vec_vaddubm */
 
@@ -579,7 +580,7 @@ vec_addc(vector unsigned int __a, vector unsigned int __b) {
   return __builtin_altivec_vaddcuw(__a, __b);
 }
 
-#if defined(__POWER8_VECTOR__) && defined(__powerpc64__)
+#ifdef __POWER8_VECTOR__
 #ifdef __SIZEOF_INT128__
 static __inline__ vector signed __int128 __ATTRS_o_ai
 vec_addc(vector signed __int128 __a, vector signed __int128 __b) {
@@ -12026,7 +12027,7 @@ vec_subc(vector unsigned int __a, vector unsigned int __b) {
   return __builtin_altivec_vsubcuw(__a, __b);
 }
 
-#if defined(__POWER8_VECTOR__) && defined(__powerpc64__)
+#ifdef __POWER8_VECTOR__
 #ifdef __SIZEOF_INT128__
 static __inline__ vector unsigned __int128 __ATTRS_o_ai
 vec_subc(vector unsigned __int128 __a, vector unsigned __int128 __b) {
@@ -12043,7 +12044,7 @@ static __inline__ vector unsigned char __attribute__((__always_inline__))
 vec_subc_u128(vector unsigned char __a, vector unsigned char __b) {
   return (vector unsigned char)__builtin_altivec_vsubcuq(__a, __b);
 }
-#endif // defined(__POWER8_VECTOR__) && defined(__powerpc64__)
+#endif // __POWER8_VECTOR__
 
 /* vec_vsubcuw */
 
@@ -12246,7 +12247,7 @@ vec_vsubuws(vector unsigned int __a, vector bool int __b) {
   return __builtin_altivec_vsubuws(__a, (vector unsigned int)__b);
 }
 
-#if defined(__POWER8_VECTOR__) && defined(__powerpc64__)
+#ifdef __POWER8_VECTOR__
 /* vec_vsubuqm */
 
 #ifdef __SIZEOF_INT128__
@@ -12328,6 +12329,7 @@ vec_vsubecuq(vector unsigned __int128 __a, vector unsigned __int128 __b,
 }
 #endif
 
+#ifdef __powerpc64__
 static __inline__ vector signed int __ATTRS_o_ai
 vec_subec(vector signed int __a, vector signed int __b,
           vector signed int __c) {
@@ -12339,6 +12341,7 @@ vec_subec(vector unsigned int __a, vector unsigned int __b,
           vector unsigned int __c) {
   return vec_addec(__a, ~__b, __c);
 }
+#endif
 
 #ifdef __SIZEOF_INT128__
 static __inline__ vector signed __int128 __ATTRS_o_ai
@@ -12359,7 +12362,7 @@ vec_subec_u128(vector unsigned char __a, vector unsigned char __b,
                vector unsigned char __c) {
   return (vector unsigned char)__builtin_altivec_vsubecuq(__a, __b, __c);
 }
-#endif // defined(__POWER8_VECTOR__) && defined(__powerpc64__)
+#endif // __POWER8_VECTOR__
 
 static __inline__ vector signed int __ATTRS_o_ai
 vec_sube(vector signed int __a, vector signed int __b,
@@ -18083,7 +18086,7 @@ static vector double __ATTRS_o_ai vec_neg(vector double __a) {
 
 #endif
 
-#if defined(__POWER8_VECTOR__) && defined(__powerpc64__)
+#ifdef __VSX__
 static vector long long __ATTRS_o_ai vec_neg(vector long long __a) {
   return -__a;
 }
@@ -18112,7 +18115,7 @@ static vector double __ATTRS_o_ai vec_nabs(vector double __a) {
 
 #endif
 
-#if defined(__POWER8_VECTOR__) && defined(__powerpc64__)
+#ifdef __POWER8_VECTOR__
 static vector long long __ATTRS_o_ai vec_nabs(vector long long __a) {
   return __builtin_altivec_vminsd(__a, -__a);
 }
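The new test below compiles with the 32-bit triple powerpc-unknown-unknown and checks that each vector long long overload now lowers without __powerpc64__. As a quick illustration of what the relaxed guards enable, user code along these lines (a hypothetical snippet, not part of the patch; assumes flags such as -maltivec -mpower8-vector) now builds for 32-bit PowerPC targets:

/* Hypothetical user code: with the relaxed guards, these overloads are
 * available on 32-bit PowerPC given -maltivec -mpower8-vector. */
#include <altivec.h>

vector signed long long sum_abs(vector signed long long a,
                                vector signed long long b) {
  vector signed long long s = vec_add(a, b); /* previously 64-bit only */
  return vec_abs(s);                         /* lowers to vmaxsd */
}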
(new test file)

@@ -0,0 +1,293 @@
+// REQUIRES: powerpc-registered-target
+// RUN: %clang_cc1 -target-feature +altivec -target-feature +power8-vector \
+// RUN:   -triple powerpc-unknown-unknown -emit-llvm %s -o - | FileCheck %s
+
+#include <altivec.h>
+vector signed long long vsll1, vsll2, vsll3;
+vector signed char vsc;
+vector bool long long vbll;
+
+void dummy();
+void test() {
+  vec_abs(vsll1);
+  // CHECK: call <2 x i64> @llvm.ppc.altivec.vmaxsd
+  dummy();
+  // CHECK-NEXT: call void bitcast (void (...)* @dummy to void ()*)()
+  vec_add(vsll1, vsll2);
+  // CHECK: add <2 x i64>
+  dummy();
+  // CHECK: call void bitcast (void (...)* @dummy to void ()*)()
+  vec_and(vsll1, vsll2);
+  // CHECK: and <2 x i64>
+  dummy();
+  // CHECK: call void bitcast (void (...)* @dummy to void ()*)()
+  vec_vand(vsll1, vsll2);
+  // CHECK: and <2 x i64>
+  dummy();
+  // CHECK: call void bitcast (void (...)* @dummy to void ()*)()
+  vec_andc(vsll1, vsll2);
+  // CHECK: xor <2 x i64>
+  // CHECK: and <2 x i64>
+  dummy();
+  // CHECK: call void bitcast (void (...)* @dummy to void ()*)()
+  vec_vandc(vsll1, vsll2);
+  // CHECK: xor <2 x i64>
+  // CHECK: and <2 x i64>
+  dummy();
+  // CHECK: call void bitcast (void (...)* @dummy to void ()*)()
+  vec_cmpeq(vsll1, vsll2);
+  // CHECK: call <2 x i64> @llvm.ppc.altivec.vcmpequd
+  dummy();
+  // CHECK: call void bitcast (void (...)* @dummy to void ()*)()
+  vec_cmpne(vsll1, vsll2);
+  // CHECK: call <2 x i64> @llvm.ppc.altivec.vcmpequd
+  // CHECK: xor <2 x i64>
+  dummy();
+  // CHECK: call void bitcast (void (...)* @dummy to void ()*)()
+  vec_cmpgt(vsll1, vsll2);
+  // CHECK: call <2 x i64> @llvm.ppc.altivec.vcmpgtsd
+  dummy();
+  // CHECK: call void bitcast (void (...)* @dummy to void ()*)()
+  vec_cmpge(vsll1, vsll2);
+  // CHECK: call <2 x i64> @llvm.ppc.altivec.vcmpgtsd
+  // CHECK: xor <2 x i64>
+  dummy();
+  // CHECK: call void bitcast (void (...)* @dummy to void ()*)()
+  vec_cmple(vsll1, vsll2);
+  // CHECK: call <2 x i64> @llvm.ppc.altivec.vcmpgtsd
+  // CHECK: xor <2 x i64>
+  dummy();
+  // CHECK: call void bitcast (void (...)* @dummy to void ()*)()
+  vec_cmplt(vsll1, vsll2);
+  // CHECK: call <2 x i64> @llvm.ppc.altivec.vcmpgtsd
+  dummy();
+  // CHECK: call void bitcast (void (...)* @dummy to void ()*)()
+  vec_popcnt(vsll1);
+  // CHECK: call <2 x i64> @llvm.ctpop.v2i64
+  dummy();
+  // CHECK: call void bitcast (void (...)* @dummy to void ()*)()
+  vec_cntlz(vsll1);
+  // CHECK: call <2 x i64> @llvm.ctlz.v2i64
+  dummy();
+  // CHECK: call void bitcast (void (...)* @dummy to void ()*)()
+  vec_float2(vsll1, vsll2);
+  // CHECK: sitofp i64 %{{.*}} to float
+  dummy();
+  // CHECK: call void bitcast (void (...)* @dummy to void ()*)()
+  vec_floate(vsll1);
+  // CHECK: call <4 x float> @llvm.ppc.vsx.xvcvsxdsp
+  dummy();
+  // CHECK: call void bitcast (void (...)* @dummy to void ()*)()
+  vec_floato(vsll1);
+  // CHECK: call <4 x float> @llvm.ppc.vsx.xvcvsxdsp
+  dummy();
+  // CHECK: call void bitcast (void (...)* @dummy to void ()*)()
+  vec_double(vsll1);
+  // CHECK: sitofp <2 x i64> %{{.*}} to <2 x double>
+  dummy();
+  // CHECK: call void bitcast (void (...)* @dummy to void ()*)()
+  vec_div(vsll1, vsll2);
+  // CHECK: sdiv <2 x i64>
+  dummy();
+  // CHECK: call void bitcast (void (...)* @dummy to void ()*)()
+  vec_eqv(vsll1, vsll2);
+  // CHECK: call <4 x i32> @llvm.ppc.vsx.xxleqv
+  dummy();
+  // CHECK: call void bitcast (void (...)* @dummy to void ()*)()
+  vec_max(vsll1, vsll2);
+  // CHECK: call <2 x i64> @llvm.ppc.altivec.vmaxsd
+  dummy();
+  // CHECK: call void bitcast (void (...)* @dummy to void ()*)()
+  vec_mergeh(vsll1, vsll2);
+  // CHECK: call <4 x i32> @llvm.ppc.altivec.vperm
+  dummy();
+  // CHECK: call void bitcast (void (...)* @dummy to void ()*)()
+  vec_mergel(vsll1, vsll2);
+  // CHECK: call <4 x i32> @llvm.ppc.altivec.vperm
+  dummy();
+  // CHECK: call void bitcast (void (...)* @dummy to void ()*)()
+  vec_mergee(vsll1, vsll2);
+  // CHECK: call <4 x i32> @llvm.ppc.altivec.vperm
+  dummy();
+  // CHECK: call void bitcast (void (...)* @dummy to void ()*)()
+  vec_mergeo(vsll1, vsll2);
+  // CHECK: call <4 x i32> @llvm.ppc.altivec.vperm
+  dummy();
+  // CHECK: call void bitcast (void (...)* @dummy to void ()*)()
+  vec_min(vsll1, vsll2);
+  // CHECK: call <2 x i64> @llvm.ppc.altivec.vminsd
+  dummy();
+  // CHECK: call void bitcast (void (...)* @dummy to void ()*)()
+  vec_mul(vsll1, vsll2);
+  // CHECK: mul <2 x i64>
+  dummy();
+  // CHECK: call void bitcast (void (...)* @dummy to void ()*)()
+  vec_nand(vsll1, vsll2);
+  // CHECK: and <2 x i64>
+  // CHECK: xor <2 x i64>
+  dummy();
+  // CHECK: call void bitcast (void (...)* @dummy to void ()*)()
+  vec_nor(vsll1, vsll2);
+  // CHECK: or <2 x i64>
+  // CHECK: xor <2 x i64>
+  dummy();
+  // CHECK: call void bitcast (void (...)* @dummy to void ()*)()
+  vec_or(vsll1, vsll2);
+  // CHECK: or <2 x i64>
+  dummy();
+  // CHECK: call void bitcast (void (...)* @dummy to void ()*)()
+  vec_orc(vsll1, vsll2);
+  // CHECK: xor <2 x i64>
+  // CHECK: or <2 x i64>
+  dummy();
+  // CHECK: call void bitcast (void (...)* @dummy to void ()*)()
+  vec_vor(vsll1, vsll2);
+  // CHECK: or <2 x i64>
+  dummy();
+  // CHECK: call void bitcast (void (...)* @dummy to void ()*)()
+  vec_pack(vsll1, vsll2);
+  // CHECK: call <4 x i32> @llvm.ppc.altivec.vperm
+  dummy();
+  // CHECK: call void bitcast (void (...)* @dummy to void ()*)()
+  vec_vpkudum(vsll1, vsll2);
+  // CHECK: call <4 x i32> @llvm.ppc.altivec.vperm
+  dummy();
+  // CHECK: call void bitcast (void (...)* @dummy to void ()*)()
+  vec_packs(vsll1, vsll2);
+  // CHECK: call <4 x i32> @llvm.ppc.altivec.vpksdss
+  dummy();
+  // CHECK: call void bitcast (void (...)* @dummy to void ()*)()
+  vec_vpkudus(vsll1, vsll2);
+  // CHECK: call <4 x i32> @llvm.ppc.altivec.vpkudus
+  dummy();
+  // CHECK: call void bitcast (void (...)* @dummy to void ()*)()
+  vec_packsu(vsll1, vsll2);
+  // CHECK: call <4 x i32> @llvm.ppc.altivec.vpksdus
+  dummy();
+  // CHECK: call void bitcast (void (...)* @dummy to void ()*)()
+  vec_rl(vsll1, vsll2);
+  // CHECK: call <2 x i64> @llvm.ppc.altivec.vrld
+  dummy();
+  // CHECK: call void bitcast (void (...)* @dummy to void ()*)()
+  vec_sel(vsll1, vsll2, vbll);
+  // CHECK: xor <2 x i64>
+  // CHECK: and <2 x i64>
+  // CHECK: and <2 x i64>
+  // CHECK: or <2 x i64>
+  dummy();
+  // CHECK: call void bitcast (void (...)* @dummy to void ()*)()
+  vec_sl(vsll1, vsll2);
+  // CHECK: shl <2 x i64>
+  dummy();
+  // CHECK: call void bitcast (void (...)* @dummy to void ()*)()
+  vec_sld(vsll1, vsll2, 2);
+  // CHECK: call <4 x i32> @llvm.ppc.altivec.vperm
+  dummy();
+  // CHECK: call void bitcast (void (...)* @dummy to void ()*)()
+  vec_sldw(vsll1, vsll2, 2);
+  // CHECK: call <4 x i32> @llvm.ppc.altivec.vperm
+  dummy();
+  // CHECK: call void bitcast (void (...)* @dummy to void ()*)()
+  vec_sll(vsll1, vsll2);
+  // CHECK: call <4 x i32> @llvm.ppc.altivec.vsl
+  dummy();
+  // CHECK: call void bitcast (void (...)* @dummy to void ()*)()
+  vec_slo(vsll1, vsc);
+  // CHECK: call <4 x i32> @llvm.ppc.altivec.vslo
+  dummy();
+  // CHECK: call void bitcast (void (...)* @dummy to void ()*)()
+  vec_splat(vsll1, 2);
+  // CHECK: call <4 x i32> @llvm.ppc.altivec.vperm
+  dummy();
+  // CHECK: call void bitcast (void (...)* @dummy to void ()*)()
+  vec_sr(vsll1, vsll2);
+  // CHECK: lshr <2 x i64>
+  dummy();
+  // CHECK: call void bitcast (void (...)* @dummy to void ()*)()
+  vec_sra(vsll1, vsll2);
+  // CHECK: ashr <2 x i64>
+  dummy();
+  // CHECK: call void bitcast (void (...)* @dummy to void ()*)()
+  vec_srl(vsll1, vsc);
+  // CHECK: call <4 x i32> @llvm.ppc.altivec.vsr
+  dummy();
+  // CHECK: call void bitcast (void (...)* @dummy to void ()*)()
+  vec_sro(vsll1, vsc);
+  // CHECK: call <4 x i32> @llvm.ppc.altivec.vsro
+  dummy();
+  // CHECK: call void bitcast (void (...)* @dummy to void ()*)()
+  vec_sub(vsll1, vsll2);
+  // CHECK: sub <2 x i64>
+  dummy();
+  // CHECK: call void bitcast (void (...)* @dummy to void ()*)()
+  vec_xor(vsll1, vsll2);
+  // CHECK: xor <2 x i64>
+  dummy();
+  // CHECK: call void bitcast (void (...)* @dummy to void ()*)()
+  vec_vxor(vsll1, vsll2);
+  // CHECK: xor <2 x i64>
+  dummy();
+  // CHECK: call void bitcast (void (...)* @dummy to void ()*)()
+  vec_extract(vsll1, 2);
+  // CHECK: extractelement <2 x i64>
+  dummy();
+  // CHECK: call void bitcast (void (...)* @dummy to void ()*)()
+  vec_all_eq(vsll1, vsll2);
+  // CHECK: call i32 @llvm.ppc.altivec.vcmpequd.p
+  dummy();
+  // CHECK: call void bitcast (void (...)* @dummy to void ()*)()
+  vec_all_ge(vsll1, vsll2);
+  // CHECK: call i32 @llvm.ppc.altivec.vcmpgtsd.p
+  dummy();
+  // CHECK: call void bitcast (void (...)* @dummy to void ()*)()
+  vec_all_gt(vsll1, vsll2);
+  // CHECK: call i32 @llvm.ppc.altivec.vcmpgtsd.p
+  dummy();
+  // CHECK: call void bitcast (void (...)* @dummy to void ()*)()
+  vec_all_le(vsll1, vsll2);
+  // CHECK: call i32 @llvm.ppc.altivec.vcmpgtsd.p
+  dummy();
+  // CHECK: call void bitcast (void (...)* @dummy to void ()*)()
+  vec_all_lt(vsll1, vsll2);
+  // CHECK: call i32 @llvm.ppc.altivec.vcmpgtsd.p
+  dummy();
+  // CHECK: call void bitcast (void (...)* @dummy to void ()*)()
+  vec_all_ne(vsll1, vsll2);
+  // CHECK: call i32 @llvm.ppc.altivec.vcmpequd.p
+  dummy();
+  // CHECK: call void bitcast (void (...)* @dummy to void ()*)()
+  vec_any_eq(vsll1, vsll2);
+  // CHECK: call i32 @llvm.ppc.altivec.vcmpequd.p
+  dummy();
+  // CHECK: call void bitcast (void (...)* @dummy to void ()*)()
+  vec_any_ge(vsll1, vsll2);
+  // CHECK: call i32 @llvm.ppc.altivec.vcmpgtsd.p
+  dummy();
+  // CHECK: call void bitcast (void (...)* @dummy to void ()*)()
+  vec_any_gt(vsll1, vsll2);
+  // CHECK: call i32 @llvm.ppc.altivec.vcmpgtsd.p
+  dummy();
+  // CHECK: call void bitcast (void (...)* @dummy to void ()*)()
+  vec_any_le(vsll1, vsll2);
+  // CHECK: call i32 @llvm.ppc.altivec.vcmpgtsd.p
+  dummy();
+  // CHECK: call void bitcast (void (...)* @dummy to void ()*)()
+  vec_any_lt(vsll1, vsll2);
+  // CHECK: call i32 @llvm.ppc.altivec.vcmpgtsd.p
+  dummy();
+  // CHECK: call void bitcast (void (...)* @dummy to void ()*)()
+  vec_any_ne(vsll1, vsll2);
+  // CHECK: call i32 @llvm.ppc.altivec.vcmpequd.p
+  dummy();
+  // CHECK: call void bitcast (void (...)* @dummy to void ()*)()
+  vec_gbb(vsll1);
+  // CHECK: call <16 x i8> @llvm.ppc.altivec.vgbbd
+  dummy();
+  // CHECK: call void bitcast (void (...)* @dummy to void ()*)()
+  vec_reve(vsll1);
+  // CHECK: shufflevector <2 x i64> %{{.*}}, <2 x i64> %{{.*}}, <2 x i32> <i32 1, i32 0>
+  dummy();
+  // CHECK: call void bitcast (void (...)* @dummy to void ()*)()
+  vec_revb(vsll1);
+  // CHECK: call <4 x i32> @llvm.ppc.altivec.vperm
+}
(existing test file)

@@ -6,6 +6,9 @@
 // RUN: %clang_cc1 -O2 -target-feature +altivec -target-feature +power8-vector \
 // RUN:   -triple powerpc64-aix-unknown -emit-llvm %s -o - | FileCheck \
 // RUN:   %s -check-prefix=CHECK-AIX
+// RUN: %clang_cc1 -O2 -target-feature +altivec -target-feature +power8-vector \
+// RUN:   -triple powerpc-aix-unknown -emit-llvm %s -o - | FileCheck \
+// RUN:   %s -check-prefix=CHECK-AIX
 #include <altivec.h>
 // CHECK-LE-LABEL: @test_subc(
 // CHECK-LE-NEXT: entry: