Add missing builtins to altivec.h for ABI compliance (vol. 4)
This patch corresponds to review: http://reviews.llvm.org/D11184

A number of new interfaces for altivec.h (as mandated by the ABI):

vector float vec_cpsgn(vector float, vector float)
vector double vec_cpsgn(vector double, vector double)
vector double vec_or(vector bool long long, vector double)
vector double vec_or(vector double, vector bool long long)
vector double vec_re(vector double)
vector signed char vec_cntlz(vector signed char)
vector unsigned char vec_cntlz(vector unsigned char)
vector short vec_cntlz(vector short)
vector unsigned short vec_cntlz(vector unsigned short)
vector int vec_cntlz(vector int)
vector unsigned int vec_cntlz(vector unsigned int)
vector signed long long vec_cntlz(vector signed long long)
vector unsigned long long vec_cntlz(vector unsigned long long)
vector signed char vec_nand(vector bool signed char, vector signed char)
vector signed char vec_nand(vector signed char, vector bool signed char)
vector signed char vec_nand(vector signed char, vector signed char)
vector unsigned char vec_nand(vector bool unsigned char, vector unsigned char)
vector unsigned char vec_nand(vector unsigned char, vector bool unsigned char)
vector unsigned char vec_nand(vector unsigned char, vector unsigned char)
vector short vec_nand(vector bool short, vector short)
vector short vec_nand(vector short, vector bool short)
vector short vec_nand(vector short, vector short)
vector unsigned short vec_nand(vector bool unsigned short, vector unsigned short)
vector unsigned short vec_nand(vector unsigned short, vector bool unsigned short)
vector unsigned short vec_nand(vector unsigned short, vector unsigned short)
vector int vec_nand(vector bool int, vector int)
vector int vec_nand(vector int, vector bool int)
vector int vec_nand(vector int, vector int)
vector unsigned int vec_nand(vector bool unsigned int, vector unsigned int)
vector unsigned int vec_nand(vector unsigned int, vector bool unsigned int)
vector unsigned int vec_nand(vector unsigned int, vector unsigned int)
vector signed long long vec_nand(vector bool long long, vector signed long long)
vector signed long long vec_nand(vector signed long long, vector bool long long)
vector signed long long vec_nand(vector signed long long, vector signed long long)
vector unsigned long long vec_nand(vector bool long long, vector unsigned long long)
vector unsigned long long vec_nand(vector unsigned long long, vector bool long long)
vector unsigned long long vec_nand(vector unsigned long long, vector unsigned long long)
vector signed char vec_orc(vector bool signed char, vector signed char)
vector signed char vec_orc(vector signed char, vector bool signed char)
vector signed char vec_orc(vector signed char, vector signed char)
vector unsigned char vec_orc(vector bool unsigned char, vector unsigned char)
vector unsigned char vec_orc(vector unsigned char, vector bool unsigned char)
vector unsigned char vec_orc(vector unsigned char, vector unsigned char)
vector short vec_orc(vector bool short, vector short)
vector short vec_orc(vector short, vector bool short)
vector short vec_orc(vector short, vector short)
vector unsigned short vec_orc(vector bool unsigned short, vector unsigned short)
vector unsigned short vec_orc(vector unsigned short, vector bool unsigned short)
vector unsigned short vec_orc(vector unsigned short, vector unsigned short)
vector int vec_orc(vector bool int, vector int)
vector int vec_orc(vector int, vector bool int)
vector int vec_orc(vector int, vector int)
vector unsigned int vec_orc(vector bool unsigned int, vector unsigned int)
vector unsigned int vec_orc(vector unsigned int, vector bool unsigned int)
vector unsigned int vec_orc(vector unsigned int, vector unsigned int)
vector signed long long vec_orc(vector bool long long, vector signed long long)
vector signed long long vec_orc(vector signed long long, vector bool long long)
vector signed long long vec_orc(vector signed long long, vector signed long long)
vector unsigned long long vec_orc(vector bool long long, vector unsigned long long)
vector unsigned long long vec_orc(vector unsigned long long, vector bool long long)
vector unsigned long long vec_orc(vector unsigned long long, vector unsigned long long)
vector signed char vec_div(vector signed char, vector signed char)
vector unsigned char vec_div(vector unsigned char, vector unsigned char)
vector signed short vec_div(vector signed short, vector signed short)
vector unsigned short vec_div(vector unsigned short, vector unsigned short)
vector signed int vec_div(vector signed int, vector signed int)
vector unsigned int vec_div(vector unsigned int, vector unsigned int)
vector signed long long vec_div(vector signed long long, vector signed long long)
vector unsigned long long vec_div(vector unsigned long long, vector unsigned long long)
vector unsigned char vec_mul(vector unsigned char, vector unsigned char)
vector unsigned int vec_mul(vector unsigned int, vector unsigned int)
vector unsigned long long vec_mul(vector unsigned long long, vector unsigned long long)
vector unsigned short vec_mul(vector unsigned short, vector unsigned short)
vector signed char vec_mul(vector signed char, vector signed char)
vector signed int vec_mul(vector signed int, vector signed int)
vector signed long long vec_mul(vector signed long long, vector signed long long)
vector signed short vec_mul(vector signed short, vector signed short)
vector signed long long vec_mergeh(vector signed long long, vector signed long long)
vector signed long long vec_mergeh(vector signed long long, vector bool long long)
vector signed long long vec_mergeh(vector bool long long, vector signed long long)
vector unsigned long long vec_mergeh(vector unsigned long long, vector unsigned long long)
vector unsigned long long vec_mergeh(vector unsigned long long, vector bool long long)
vector unsigned long long vec_mergeh(vector bool long long, vector unsigned long long)
vector double vec_mergeh(vector double, vector double)
vector double vec_mergeh(vector double, vector bool long long)
vector double vec_mergeh(vector bool long long, vector double)
vector signed long long vec_mergel(vector signed long long, vector signed long long)
vector signed long long vec_mergel(vector signed long long, vector bool long long)
vector signed long long vec_mergel(vector bool long long, vector signed long long)
vector unsigned long long vec_mergel(vector unsigned long long, vector unsigned long long)
vector unsigned long long vec_mergel(vector unsigned long long, vector bool long long)
vector unsigned long long vec_mergel(vector bool long long, vector unsigned long long)
vector double vec_mergel(vector double, vector double)
vector double vec_mergel(vector double, vector bool long long)
vector double vec_mergel(vector bool long long, vector double)
vector signed int vec_pack(vector signed long long, vector signed long long)
vector unsigned int vec_pack(vector unsigned long long, vector unsigned long long)
vector bool int vec_pack(vector bool long long, vector bool long long)

llvm-svn: 242171
This commit is contained in: parent 9f8ff9c473, commit 6c363ed67a
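For orientation (not part of the commit): a minimal usage sketch of a few of the interfaces listed above. It assumes a POWER8/VSX-enabled compile against this version of the header (for example clang -mcpu=pwr8 -maltivec); the function and variable names are made up for illustration.

#include <altivec.h>

void example(void) {
  vector unsigned int a = {1, 2, 4, 8};
  vector unsigned int b = {3, 3, 3, 3};

  vector unsigned int lz   = vec_cntlz(a);   /* leading zeros, per element      */
  vector unsigned int q    = vec_div(a, b);  /* element-wise integer division   */
  vector unsigned int p    = vec_mul(a, b);  /* element-wise truncating product */
  vector unsigned int nand = vec_nand(a, b); /* ~(a & b), element-wise          */

  (void)lz; (void)q; (void)p; (void)nand;
}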
@@ -248,6 +248,11 @@ BUILTIN(__builtin_altivec_crypto_vpmsumh, "V8UsV8UsV8Us", "")
BUILTIN(__builtin_altivec_crypto_vpmsumw, "V4UiV4UiV4Ui", "")
BUILTIN(__builtin_altivec_crypto_vpmsumd, "V2ULLiV2ULLiV2ULLi", "")

BUILTIN(__builtin_altivec_vclzb, "V16UcV16Uc", "")
BUILTIN(__builtin_altivec_vclzh, "V8UsV8Us", "")
BUILTIN(__builtin_altivec_vclzw, "V4UiV4Ui", "")
BUILTIN(__builtin_altivec_vclzd, "V2ULLiV2ULLi", "")

// VSX built-ins.

BUILTIN(__builtin_vsx_lxvd2x, "V2divC*", "")

@@ -306,6 +311,9 @@ BUILTIN(__builtin_vsx_xvnmaddasp, "V4fV4fV4fV4f", "")
BUILTIN(__builtin_vsx_xvnmsubadp, "V2dV2dV2dV2d", "")
BUILTIN(__builtin_vsx_xvnmsubasp, "V4fV4fV4fV4f", "")

BUILTIN(__builtin_vsx_xvredp, "V2dV2d", "")
BUILTIN(__builtin_vsx_xvresp, "V4fV4f", "")

BUILTIN(__builtin_vsx_xvrsqrtedp, "V2dV2d", "")
BUILTIN(__builtin_vsx_xvrsqrtesp, "V4fV4f", "")

@@ -314,6 +322,9 @@ BUILTIN(__builtin_vsx_xvsqrtsp, "V4fV4f", "")

BUILTIN(__builtin_vsx_xxleqv, "V4UiV4UiV4Ui", "")

BUILTIN(__builtin_vsx_xvcpsgndp, "V2dV2dV2d", "")
BUILTIN(__builtin_vsx_xvcpsgnsp, "V4fV4fV4f", "")

// HTM builtins
BUILTIN(__builtin_tbegin, "UiUIi", "")
BUILTIN(__builtin_tend, "UiUIi", "")
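A reading aid for the signature strings above (my own gloss, not part of the diff): in Clang's builtin definition files the second BUILTIN argument encodes the prototype, with V<n><type> meaning a vector of n elements, the first entry being the return type and the rest the parameters. On that reading:

/* Assumed decoding of the prototype strings (illustration only):
 *   "V16UcV16Uc"   -> vector unsigned char      fn(vector unsigned char)
 *   "V8UsV8Us"     -> vector unsigned short     fn(vector unsigned short)
 *   "V4UiV4Ui"     -> vector unsigned int       fn(vector unsigned int)
 *   "V2ULLiV2ULLi" -> vector unsigned long long fn(vector unsigned long long)
 *   "V2dV2dV2d" / "V4fV4fV4f" -> two-operand double / float vector builtins
 * e.g. __builtin_altivec_vclzb takes and returns a vector of 16 unsigned chars. */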
@@ -6642,6 +6642,27 @@ Value *CodeGenFunction::EmitPPCBuiltinExpr(unsigned BuiltinID,
    llvm::Function *F = CGM.getIntrinsic(ID, ResultType);
    return Builder.CreateCall(F, X);
  }
  // Count leading zeros
  case PPC::BI__builtin_altivec_vclzb:
  case PPC::BI__builtin_altivec_vclzh:
  case PPC::BI__builtin_altivec_vclzw:
  case PPC::BI__builtin_altivec_vclzd: {
    llvm::Type *ResultType = ConvertType(E->getType());
    Value *X = EmitScalarExpr(E->getArg(0));
    Value *Undef = ConstantInt::get(Builder.getInt1Ty(), false);
    Function *F = CGM.getIntrinsic(Intrinsic::ctlz, ResultType);
    return Builder.CreateCall(F, {X, Undef});
  }
  // Copy sign
  case PPC::BI__builtin_vsx_xvcpsgnsp:
  case PPC::BI__builtin_vsx_xvcpsgndp: {
    llvm::Type *ResultType = ConvertType(E->getType());
    Value *X = EmitScalarExpr(E->getArg(0));
    Value *Y = EmitScalarExpr(E->getArg(1));
    ID = Intrinsic::copysign;
    llvm::Function *F = CGM.getIntrinsic(ID, ResultType);
    return Builder.CreateCall(F, {X, Y});
  }
  // Rounding/truncation
  case PPC::BI__builtin_vsx_xvrspip:
  case PPC::BI__builtin_vsx_xvrdpip:
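The effect of the lowering above, in plain terms: each vclz* builtin becomes a single llvm.ctlz call whose second operand is false, so a zero element yields the element width instead of an undefined value, and the VSX copy-sign builtins become llvm.copysign. A per-lane reference model for the ctlz case, as an illustrative sketch only (not code from the patch):

#include <stdint.h>

/* What @llvm.ctlz.v16i8(x, i1 false) computes for one 8-bit lane: */
static uint8_t clz8(uint8_t x) {
  uint8_t n = 0;
  if (x == 0)
    return 8;            /* second operand false: zero input has a defined result */
  while (!(x & 0x80)) {  /* count high-order zero bits */
    x <<= 1;
    ++n;
  }
  return n;
}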
@@ -1741,6 +1741,48 @@ static vector bool long long __ATTRS_o_ai
vec_cmplt(vector unsigned long long __a, vector unsigned long long __b) {
  return vec_cmpgt(__b, __a);
}

/* vec_cntlz */

static vector signed char __ATTRS_o_ai vec_cntlz(vector signed char __a) {
  return __builtin_altivec_vclzb(__a);
}
static vector unsigned char __ATTRS_o_ai vec_cntlz(vector unsigned char __a) {
  return __builtin_altivec_vclzb(__a);
}
static vector signed short __ATTRS_o_ai vec_cntlz(vector signed short __a) {
  return __builtin_altivec_vclzh(__a);
}
static vector unsigned short __ATTRS_o_ai vec_cntlz(vector unsigned short __a) {
  return __builtin_altivec_vclzh(__a);
}
static vector signed int __ATTRS_o_ai vec_cntlz(vector signed int __a) {
  return __builtin_altivec_vclzw(__a);
}
static vector unsigned int __ATTRS_o_ai vec_cntlz(vector unsigned int __a) {
  return __builtin_altivec_vclzw(__a);
}
static vector signed long long __ATTRS_o_ai
vec_cntlz(vector signed long long __a) {
  return __builtin_altivec_vclzd(__a);
}
static vector unsigned long long __ATTRS_o_ai
vec_cntlz(vector unsigned long long __a) {
  return __builtin_altivec_vclzd(__a);
}
#endif

/* vec_cpsgn */

#ifdef __VSX__
static vector float __ATTRS_o_ai vec_cpsgn(vector float __a, vector float __b) {
  return __builtin_vsx_xvcpsgnsp(__a, __b);
}

static vector double __ATTRS_o_ai vec_cpsgn(vector double __a,
                                            vector double __b) {
  return __builtin_vsx_xvcpsgndp(__a, __b);
}
#endif

/* vec_ctf */
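Circling back to the vec_cpsgn overloads just added above: with the lowering this patch installs (llvm.copysign), each result element takes its magnitude from the first operand and its sign from the second. A per-element reference model, offered only as an illustrative sketch:

#include <math.h>

/* Model of vec_cpsgn(a, b) for the vector float overload as lowered here:
   result[i] = copysignf(a[i], b[i]). */
static void cpsgn_model(const float a[4], const float b[4], float out[4]) {
  for (int i = 0; i < 4; ++i)
    out[i] = copysignf(a[i], b[i]);  /* magnitude of a[i], sign of b[i] */
}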
@@ -1796,14 +1838,58 @@ vec_vctuxs(vector float __a, int __b) {
}

/* vec_div */

/* Integer vector divides (vectors are scalarized, elements divided
   and the vectors reassembled).
*/
static vector signed char __ATTRS_o_ai vec_div(vector signed char __a,
                                               vector signed char __b) {
  return __a / __b;
}

static vector unsigned char __ATTRS_o_ai vec_div(vector unsigned char __a,
                                                 vector unsigned char __b) {
  return __a / __b;
}

static vector signed short __ATTRS_o_ai vec_div(vector signed short __a,
                                                vector signed short __b) {
  return __a / __b;
}

static vector unsigned short __ATTRS_o_ai vec_div(vector unsigned short __a,
                                                  vector unsigned short __b) {
  return __a / __b;
}

static vector signed int __ATTRS_o_ai vec_div(vector signed int __a,
                                              vector signed int __b) {
  return __a / __b;
}

static vector unsigned int __ATTRS_o_ai vec_div(vector unsigned int __a,
                                                vector unsigned int __b) {
  return __a / __b;
}

#ifdef __VSX__
static vector signed long long __ATTRS_o_ai
vec_div(vector signed long long __a, vector signed long long __b) {
  return __a / __b;
}

static vector unsigned long long __ATTRS_o_ai
vec_div(vector unsigned long long __a, vector unsigned long long __b) {
  return __a / __b;
}

static vector float __ATTRS_o_ai vec_div(vector float __a, vector float __b) {
  return __builtin_vsx_xvdivsp(__a, __b);
  return __a / __b;
}

static vector double __ATTRS_o_ai vec_div(vector double __a,
                                          vector double __b) {
  return __builtin_vsx_xvdivdp(__a, __b);
  return __a / __b;
}
#endif
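As the comment above says, the integer vec_div overloads are scalarized: there is no vector integer divide, so `__a / __b` is legalized element by element. A reference model for the vector signed int overload, purely as illustration:

/* Illustrative model of the integer vec_div above (4 x signed int): */
static void div_model(const int a[4], const int b[4], int out[4]) {
  for (int i = 0; i < 4; ++i)
    out[i] = a[i] / b[i];  /* each element divided independently, then the
                              results are reassembled into one vector      */
}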
@ -3116,6 +3202,86 @@ static vector float __ATTRS_o_ai vec_mergeh(vector float __a,
|
|||
0x14, 0x15, 0x16, 0x17));
|
||||
}
|
||||
|
||||
#ifdef __VSX__
|
||||
static vector signed long long __ATTRS_o_ai
|
||||
vec_mergeh(vector signed long long __a, vector signed long long __b) {
|
||||
return vec_perm(__a, __b,
|
||||
(vector unsigned char)(0x00, 0x01, 0x02, 0x03,
|
||||
0x04, 0x05, 0x06, 0x07,
|
||||
0x10, 0x11, 0x12, 0x13,
|
||||
0x14, 0x15, 0x16, 0x17));
|
||||
}
|
||||
|
||||
static vector signed long long __ATTRS_o_ai
|
||||
vec_mergeh(vector signed long long __a, vector bool long long __b) {
|
||||
return vec_perm(__a, (vector signed long long)__b,
|
||||
(vector unsigned char)(0x00, 0x01, 0x02, 0x03,
|
||||
0x04, 0x05, 0x06, 0x07,
|
||||
0x10, 0x11, 0x12, 0x13,
|
||||
0x14, 0x15, 0x16, 0x17));
|
||||
}
|
||||
|
||||
static vector signed long long __ATTRS_o_ai
|
||||
vec_mergeh(vector bool long long __a, vector signed long long __b) {
|
||||
return vec_perm((vector signed long long)__a, __b,
|
||||
(vector unsigned char)(0x00, 0x01, 0x02, 0x03,
|
||||
0x04, 0x05, 0x06, 0x07,
|
||||
0x10, 0x11, 0x12, 0x13,
|
||||
0x14, 0x15, 0x16, 0x17));
|
||||
}
|
||||
|
||||
static vector unsigned long long __ATTRS_o_ai
|
||||
vec_mergeh(vector unsigned long long __a, vector unsigned long long __b) {
|
||||
return vec_perm(__a, __b,
|
||||
(vector unsigned char)(0x00, 0x01, 0x02, 0x03,
|
||||
0x04, 0x05, 0x06, 0x07,
|
||||
0x10, 0x11, 0x12, 0x13,
|
||||
0x14, 0x15, 0x16, 0x17));
|
||||
}
|
||||
|
||||
static vector unsigned long long __ATTRS_o_ai
|
||||
vec_mergeh(vector unsigned long long __a, vector bool long long __b) {
|
||||
return vec_perm(__a, (vector unsigned long long)__b,
|
||||
(vector unsigned char)(0x00, 0x01, 0x02, 0x03,
|
||||
0x04, 0x05, 0x06, 0x07,
|
||||
0x10, 0x11, 0x12, 0x13,
|
||||
0x14, 0x15, 0x16, 0x17));
|
||||
}
|
||||
|
||||
static vector unsigned long long __ATTRS_o_ai
|
||||
vec_mergeh(vector bool long long __a, vector unsigned long long __b) {
|
||||
return vec_perm((vector unsigned long long)__a, __b,
|
||||
(vector unsigned char)(0x00, 0x01, 0x02, 0x03,
|
||||
0x04, 0x05, 0x06, 0x07,
|
||||
0x10, 0x11, 0x12, 0x13,
|
||||
0x14, 0x15, 0x16, 0x17));
|
||||
}
|
||||
static vector double __ATTRS_o_ai vec_mergeh(vector double __a,
|
||||
vector double __b) {
|
||||
return vec_perm(__a, __b,
|
||||
(vector unsigned char)(0x00, 0x01, 0x02, 0x03,
|
||||
0x04, 0x05, 0x06, 0x07,
|
||||
0x10, 0x11, 0x12, 0x13,
|
||||
0x14, 0x15, 0x16, 0x17));
|
||||
}
|
||||
static vector double __ATTRS_o_ai vec_mergeh(vector double __a,
|
||||
vector bool long long __b) {
|
||||
return vec_perm(__a, (vector double)__b,
|
||||
(vector unsigned char)(0x00, 0x01, 0x02, 0x03,
|
||||
0x04, 0x05, 0x06, 0x07,
|
||||
0x10, 0x11, 0x12, 0x13,
|
||||
0x14, 0x15, 0x16, 0x17));
|
||||
}
|
||||
static vector double __ATTRS_o_ai vec_mergeh(vector bool long long __a,
|
||||
vector double __b) {
|
||||
return vec_perm((vector double)__a, __b,
|
||||
(vector unsigned char)(0x00, 0x01, 0x02, 0x03,
|
||||
0x04, 0x05, 0x06, 0x07,
|
||||
0x10, 0x11, 0x12, 0x13,
|
||||
0x14, 0x15, 0x16, 0x17));
|
||||
}
|
||||
#endif
|
||||
|
||||
/* vec_vmrghb */
|
||||
|
||||
#define __builtin_altivec_vmrghb vec_vmrghb
|
||||
|
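A note on the permute control vectors used by the new vec_mergeh overloads above (my reading of the AltiVec vec_perm convention, not text from the patch): each control byte selects one byte of the 32-byte concatenation of the two sources, so values 0x00-0x0F pick bytes of __a and 0x10-0x1F pick bytes of __b. The (0x00..0x07, 0x10..0x17) pattern therefore pairs the first doubleword of __a with the first doubleword of __b, which is exactly a "merge high" of two-element vectors. An illustrative one-byte model:

/* One result byte of vec_perm(a, b, control) under the convention above: */
static unsigned char perm_model(const unsigned char a[16],
                                const unsigned char b[16],
                                unsigned char sel) {
  return (sel < 0x10) ? a[sel] : b[sel - 0x10];
}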
@ -3304,6 +3470,81 @@ static vector float __ATTRS_o_ai vec_mergel(vector float __a,
|
|||
0x1C, 0x1D, 0x1E, 0x1F));
|
||||
}
|
||||
|
||||
#ifdef __VSX__
|
||||
static vector signed long long __ATTRS_o_ai
|
||||
vec_mergel(vector signed long long __a, vector signed long long __b) {
|
||||
return vec_perm(__a, __b,
|
||||
(vector unsigned char)(0x08, 0x09, 0x0A, 0x0B,
|
||||
0x0C, 0x0D, 0x0E, 0x0F,
|
||||
0x18, 0X19, 0x1A, 0x1B,
|
||||
0x1C, 0x1D, 0x1E, 0x1F));
|
||||
}
|
||||
static vector signed long long __ATTRS_o_ai
|
||||
vec_mergel(vector signed long long __a, vector bool long long __b) {
|
||||
return vec_perm(__a, (vector signed long long)__b,
|
||||
(vector unsigned char)(0x08, 0x09, 0x0A, 0x0B,
|
||||
0x0C, 0x0D, 0x0E, 0x0F,
|
||||
0x18, 0X19, 0x1A, 0x1B,
|
||||
0x1C, 0x1D, 0x1E, 0x1F));
|
||||
}
|
||||
static vector signed long long __ATTRS_o_ai
|
||||
vec_mergel(vector bool long long __a, vector signed long long __b) {
|
||||
return vec_perm((vector signed long long)__a, __b,
|
||||
(vector unsigned char)(0x08, 0x09, 0x0A, 0x0B,
|
||||
0x0C, 0x0D, 0x0E, 0x0F,
|
||||
0x18, 0X19, 0x1A, 0x1B,
|
||||
0x1C, 0x1D, 0x1E, 0x1F));
|
||||
}
|
||||
static vector unsigned long long __ATTRS_o_ai
|
||||
vec_mergel(vector unsigned long long __a, vector unsigned long long __b) {
|
||||
return vec_perm(__a, __b,
|
||||
(vector unsigned char)(0x08, 0x09, 0x0A, 0x0B,
|
||||
0x0C, 0x0D, 0x0E, 0x0F,
|
||||
0x18, 0X19, 0x1A, 0x1B,
|
||||
0x1C, 0x1D, 0x1E, 0x1F));
|
||||
}
|
||||
static vector unsigned long long __ATTRS_o_ai
|
||||
vec_mergel(vector unsigned long long __a, vector bool long long __b) {
|
||||
return vec_perm(__a, (vector unsigned long long)__b,
|
||||
(vector unsigned char)(0x08, 0x09, 0x0A, 0x0B,
|
||||
0x0C, 0x0D, 0x0E, 0x0F,
|
||||
0x18, 0X19, 0x1A, 0x1B,
|
||||
0x1C, 0x1D, 0x1E, 0x1F));
|
||||
}
|
||||
static vector unsigned long long __ATTRS_o_ai
|
||||
vec_mergel(vector bool long long __a, vector unsigned long long __b) {
|
||||
return vec_perm((vector unsigned long long)__a, __b,
|
||||
(vector unsigned char)(0x08, 0x09, 0x0A, 0x0B,
|
||||
0x0C, 0x0D, 0x0E, 0x0F,
|
||||
0x18, 0X19, 0x1A, 0x1B,
|
||||
0x1C, 0x1D, 0x1E, 0x1F));
|
||||
}
|
||||
static vector double __ATTRS_o_ai
|
||||
vec_mergel(vector double __a, vector double __b) {
|
||||
return vec_perm(__a, __b,
|
||||
(vector unsigned char)(0x08, 0x09, 0x0A, 0x0B,
|
||||
0x0C, 0x0D, 0x0E, 0x0F,
|
||||
0x18, 0X19, 0x1A, 0x1B,
|
||||
0x1C, 0x1D, 0x1E, 0x1F));
|
||||
}
|
||||
static vector double __ATTRS_o_ai
|
||||
vec_mergel(vector double __a, vector bool long long __b) {
|
||||
return vec_perm(__a, (vector double)__b,
|
||||
(vector unsigned char)(0x08, 0x09, 0x0A, 0x0B,
|
||||
0x0C, 0x0D, 0x0E, 0x0F,
|
||||
0x18, 0X19, 0x1A, 0x1B,
|
||||
0x1C, 0x1D, 0x1E, 0x1F));
|
||||
}
|
||||
static vector double __ATTRS_o_ai
|
||||
vec_mergel(vector bool long long __a, vector double __b) {
|
||||
return vec_perm((vector double)__a, __b,
|
||||
(vector unsigned char)(0x08, 0x09, 0x0A, 0x0B,
|
||||
0x0C, 0x0D, 0x0E, 0x0F,
|
||||
0x18, 0X19, 0x1A, 0x1B,
|
||||
0x1C, 0x1D, 0x1E, 0x1F));
|
||||
}
|
||||
#endif
|
||||
|
||||
/* vec_vmrglb */
|
||||
|
||||
#define __builtin_altivec_vmrglb vec_vmrglb
|
||||
|
@@ -3902,6 +4143,52 @@ static void __ATTRS_o_ai vec_mtvscr(vector float __a) {

/* vec_mul */

/* Integer vector multiplication will involve multiplication of the odd/even
   elements separately, then truncating the results and moving to the
   result vector.
*/
static vector signed char __ATTRS_o_ai vec_mul(vector signed char __a,
                                               vector signed char __b) {
  return __a * __b;
}

static vector unsigned char __ATTRS_o_ai vec_mul(vector unsigned char __a,
                                                 vector unsigned char __b) {
  return __a * __b;
}

static vector signed short __ATTRS_o_ai vec_mul(vector signed short __a,
                                                vector signed short __b) {
  return __a * __b;
}

static vector unsigned short __ATTRS_o_ai vec_mul(vector unsigned short __a,
                                                  vector unsigned short __b) {
  return __a * __b;
}

static vector signed int __ATTRS_o_ai vec_mul(vector signed int __a,
                                              vector signed int __b) {
  return __a * __b;
}

static vector unsigned int __ATTRS_o_ai vec_mul(vector unsigned int __a,
                                                vector unsigned int __b) {
  return __a * __b;
}

#ifdef __VSX__
static vector signed long long __ATTRS_o_ai
vec_mul(vector signed long long __a, vector signed long long __b) {
  return __a * __b;
}

static vector unsigned long long __ATTRS_o_ai
vec_mul(vector unsigned long long __a, vector unsigned long long __b) {
  return __a * __b;
}
#endif

static vector float __ATTRS_o_ai vec_mul(vector float __a, vector float __b) {
  return __a * __b;
}
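As the comment above notes, vec_mul is a plain element-wise multiply that keeps only the low half of each product (unlike vec_mule/vec_mulo, which widen the result). A small illustration of the truncating behaviour, as an assumed example rather than code from the patch:

/* For the vector unsigned char overload, each lane wraps modulo 256: */
static void mul_model(const unsigned char a[16], const unsigned char b[16],
                      unsigned char out[16]) {
  for (int i = 0; i < 16; ++i)
    out[i] = (unsigned char)(a[i] * b[i]);  /* e.g. 200 * 2 -> 144, not 400 */
}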
@ -4118,6 +4405,134 @@ vec_vmulouh(vector unsigned short __a, vector unsigned short __b) {
|
|||
#endif
|
||||
}
|
||||
|
||||
/* vec_nand */
|
||||
|
||||
#ifdef __POWER8_VECTOR__
|
||||
static vector signed char __ATTRS_o_ai vec_nand(vector signed char __a,
|
||||
vector signed char __b) {
|
||||
return ~(__a & __b);
|
||||
}
|
||||
|
||||
static vector signed char __ATTRS_o_ai vec_nand(vector signed char __a,
|
||||
vector bool char __b) {
|
||||
return ~(__a & __b);
|
||||
}
|
||||
|
||||
static vector signed char __ATTRS_o_ai vec_nand(vector bool char __a,
|
||||
vector signed char __b) {
|
||||
return ~(__a & __b);
|
||||
}
|
||||
|
||||
static vector unsigned char __ATTRS_o_ai vec_nand(vector unsigned char __a,
|
||||
vector unsigned char __b) {
|
||||
return ~(__a & __b);
|
||||
}
|
||||
|
||||
static vector unsigned char __ATTRS_o_ai vec_nand(vector unsigned char __a,
|
||||
vector bool char __b) {
|
||||
return ~(__a & __b);
|
||||
|
||||
}
|
||||
|
||||
static vector unsigned char __ATTRS_o_ai vec_nand(vector bool char __a,
|
||||
vector unsigned char __b) {
|
||||
return ~(__a & __b);
|
||||
}
|
||||
|
||||
static vector signed short __ATTRS_o_ai vec_nand(vector signed short __a,
|
||||
vector signed short __b) {
|
||||
return ~(__a & __b);
|
||||
}
|
||||
|
||||
static vector signed short __ATTRS_o_ai vec_nand(vector signed short __a,
|
||||
vector bool short __b) {
|
||||
return ~(__a & __b);
|
||||
}
|
||||
|
||||
static vector signed short __ATTRS_o_ai vec_nand(vector bool short __a,
|
||||
vector signed short __b) {
|
||||
return ~(__a & __b);
|
||||
}
|
||||
|
||||
static vector unsigned short __ATTRS_o_ai vec_nand(vector unsigned short __a,
|
||||
vector unsigned short __b) {
|
||||
return ~(__a & __b);
|
||||
}
|
||||
|
||||
static vector unsigned short __ATTRS_o_ai vec_nand(vector unsigned short __a,
|
||||
vector bool short __b) {
|
||||
return ~(__a & __b);
|
||||
|
||||
}
|
||||
|
||||
static vector unsigned short __ATTRS_o_ai vec_nand(vector bool short __a,
|
||||
vector unsigned short __b) {
|
||||
return ~(__a & __b);
|
||||
|
||||
}
|
||||
|
||||
static vector signed int __ATTRS_o_ai vec_nand(vector signed int __a,
|
||||
vector signed int __b) {
|
||||
return ~(__a & __b);
|
||||
}
|
||||
|
||||
static vector signed int __ATTRS_o_ai vec_nand(vector signed int __a,
|
||||
vector bool int __b) {
|
||||
return ~(__a & __b);
|
||||
}
|
||||
|
||||
static vector signed int __ATTRS_o_ai vec_nand(vector bool int __a,
|
||||
vector signed int __b) {
|
||||
return ~(__a & __b);
|
||||
}
|
||||
|
||||
static vector unsigned int __ATTRS_o_ai vec_nand(vector unsigned int __a,
|
||||
vector unsigned int __b) {
|
||||
return ~(__a & __b);
|
||||
}
|
||||
|
||||
static vector unsigned int __ATTRS_o_ai vec_nand(vector unsigned int __a,
|
||||
vector bool int __b) {
|
||||
return ~(__a & __b);
|
||||
}
|
||||
|
||||
static vector unsigned int __ATTRS_o_ai vec_nand(vector bool int __a,
|
||||
vector unsigned int __b) {
|
||||
return ~(__a & __b);
|
||||
}
|
||||
|
||||
static vector signed long long __ATTRS_o_ai
|
||||
vec_nand(vector signed long long __a, vector signed long long __b) {
|
||||
return ~(__a & __b);
|
||||
}
|
||||
|
||||
static vector signed long long __ATTRS_o_ai
|
||||
vec_nand(vector signed long long __a, vector bool long long __b) {
|
||||
return ~(__a & __b);
|
||||
}
|
||||
|
||||
static vector signed long long __ATTRS_o_ai
|
||||
vec_nand(vector bool long long __a, vector signed long long __b) {
|
||||
return ~(__a & __b);
|
||||
}
|
||||
|
||||
static vector unsigned long long __ATTRS_o_ai
|
||||
vec_nand(vector unsigned long long __a, vector unsigned long long __b) {
|
||||
return ~(__a & __b);
|
||||
}
|
||||
|
||||
static vector unsigned long long __ATTRS_o_ai
|
||||
vec_nand(vector unsigned long long __a, vector bool long long __b) {
|
||||
return ~(__a & __b);
|
||||
}
|
||||
|
||||
static vector unsigned long long __ATTRS_o_ai
|
||||
vec_nand(vector bool long long __a, vector unsigned long long __b) {
|
||||
return ~(__a & __b);
|
||||
}
|
||||
|
||||
#endif
|
||||
|
||||
/* vec_nmadd */
|
||||
|
||||
#ifdef __VSX__
|
||||
|
@ -4411,6 +4826,16 @@ static vector float __ATTRS_o_ai vec_or(vector float __a, vector bool int __b) {
|
|||
}
|
||||
|
||||
#ifdef __VSX__
|
||||
static vector double __ATTRS_o_ai vec_or(vector bool long long __a,
|
||||
vector double __b) {
|
||||
return (vector unsigned long long)__a | (vector unsigned long long)__b;
|
||||
}
|
||||
|
||||
static vector double __ATTRS_o_ai vec_or(vector double __a,
|
||||
vector bool long long __b) {
|
||||
return (vector unsigned long long)__a | (vector unsigned long long)__b;
|
||||
}
|
||||
|
||||
static vector double __ATTRS_o_ai vec_or(vector double __a, vector double __b) {
|
||||
vector unsigned long long __res =
|
||||
(vector unsigned long long)__a | (vector unsigned long long)__b;
|
||||
|
@ -4453,6 +4878,128 @@ static vector bool long long __ATTRS_o_ai vec_or(vector bool long long __a,
|
|||
}
|
||||
#endif
|
||||
|
||||
#ifdef __POWER8_VECTOR__
|
||||
static vector signed char __ATTRS_o_ai vec_orc(vector signed char __a,
|
||||
vector signed char __b) {
|
||||
return __a | ~__b;
|
||||
}
|
||||
|
||||
static vector signed char __ATTRS_o_ai vec_orc(vector signed char __a,
|
||||
vector bool char __b) {
|
||||
return __a | ~__b;
|
||||
}
|
||||
|
||||
static vector signed char __ATTRS_o_ai vec_orc(vector bool char __a,
|
||||
vector signed char __b) {
|
||||
return __a | ~__b;
|
||||
}
|
||||
|
||||
static vector unsigned char __ATTRS_o_ai vec_orc(vector unsigned char __a,
|
||||
vector unsigned char __b) {
|
||||
return __a | ~__b;
|
||||
}
|
||||
|
||||
static vector unsigned char __ATTRS_o_ai vec_orc(vector unsigned char __a,
|
||||
vector bool char __b) {
|
||||
return __a | ~__b;
|
||||
}
|
||||
|
||||
static vector unsigned char __ATTRS_o_ai vec_orc(vector bool char __a,
|
||||
vector unsigned char __b) {
|
||||
return __a | ~__b;
|
||||
}
|
||||
|
||||
static vector signed short __ATTRS_o_ai vec_orc(vector signed short __a,
|
||||
vector signed short __b) {
|
||||
return __a | ~__b;
|
||||
}
|
||||
|
||||
static vector signed short __ATTRS_o_ai vec_orc(vector signed short __a,
|
||||
vector bool short __b) {
|
||||
return __a | ~__b;
|
||||
}
|
||||
|
||||
static vector signed short __ATTRS_o_ai vec_orc(vector bool short __a,
|
||||
vector signed short __b) {
|
||||
return __a | ~__b;
|
||||
}
|
||||
|
||||
static vector unsigned short __ATTRS_o_ai vec_orc(vector unsigned short __a,
|
||||
vector unsigned short __b) {
|
||||
return __a | ~__b;
|
||||
}
|
||||
|
||||
static vector unsigned short __ATTRS_o_ai vec_orc(vector unsigned short __a,
|
||||
vector bool short __b) {
|
||||
return __a | ~__b;
|
||||
}
|
||||
|
||||
static vector unsigned short __ATTRS_o_ai
|
||||
vec_orc(vector bool short __a, vector unsigned short __b) {
|
||||
return __a | ~__b;
|
||||
}
|
||||
|
||||
static vector signed int __ATTRS_o_ai vec_orc(vector signed int __a,
|
||||
vector signed int __b) {
|
||||
return __a | ~__b;
|
||||
}
|
||||
|
||||
static vector signed int __ATTRS_o_ai vec_orc(vector signed int __a,
|
||||
vector bool int __b) {
|
||||
return __a | ~__b;
|
||||
}
|
||||
|
||||
static vector signed int __ATTRS_o_ai vec_orc(vector bool int __a,
|
||||
vector signed int __b) {
|
||||
return __a | ~__b;
|
||||
}
|
||||
|
||||
static vector unsigned int __ATTRS_o_ai vec_orc(vector unsigned int __a,
|
||||
vector unsigned int __b) {
|
||||
return __a | ~__b;
|
||||
}
|
||||
|
||||
static vector unsigned int __ATTRS_o_ai vec_orc(vector unsigned int __a,
|
||||
vector bool int __b) {
|
||||
return __a | ~__b;
|
||||
}
|
||||
|
||||
static vector unsigned int __ATTRS_o_ai vec_orc(vector bool int __a,
|
||||
vector unsigned int __b) {
|
||||
return __a | ~__b;
|
||||
}
|
||||
|
||||
static vector signed long long __ATTRS_o_ai
|
||||
vec_orc(vector signed long long __a, vector signed long long __b) {
|
||||
return __a | ~__b;
|
||||
}
|
||||
|
||||
static vector signed long long __ATTRS_o_ai vec_orc(vector signed long long __a,
|
||||
vector bool long long __b) {
|
||||
return __a | ~__b;
|
||||
}
|
||||
|
||||
static vector signed long long __ATTRS_o_ai
|
||||
vec_orc(vector bool long long __a, vector signed long long __b) {
|
||||
return __a | ~__b;
|
||||
}
|
||||
|
||||
static vector unsigned long long __ATTRS_o_ai
|
||||
vec_orc(vector unsigned long long __a, vector unsigned long long __b) {
|
||||
return __a | ~__b;
|
||||
}
|
||||
|
||||
static vector unsigned long long __ATTRS_o_ai
|
||||
vec_orc(vector unsigned long long __a, vector bool long long __b) {
|
||||
return __a | ~__b;
|
||||
}
|
||||
|
||||
static vector unsigned long long __ATTRS_o_ai
|
||||
vec_orc(vector bool long long __a, vector unsigned long long __b) {
|
||||
return __a | ~__b;
|
||||
}
|
||||
#endif
|
||||
|
||||
/* vec_vor */
|
||||
|
||||
static vector signed char __ATTRS_o_ai vec_vor(vector signed char __a,
|
||||
|
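The vec_orc family above (and vec_nand earlier) are plain bitwise identities, guarded by __POWER8_VECTOR__ presumably because POWER8 can realize each as a single logical instruction. A scalar reference model, for illustration only:

/* Element-wise semantics of the two families:
   vec_nand(a, b) -> ~(a & b)      vec_orc(a, b) -> a | ~b */
static unsigned int nand_model(unsigned int a, unsigned int b) {
  return ~(a & b);
}
static unsigned int orc_model(unsigned int a, unsigned int b) {
  return a | ~b;
}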
@ -4707,6 +5254,53 @@ static vector bool short __ATTRS_o_ai vec_pack(vector bool int __a,
|
|||
#endif
|
||||
}
|
||||
|
||||
#ifdef __VSX__
|
||||
static vector signed int __ATTRS_o_ai vec_pack(vector signed long long __a,
|
||||
vector signed long long __b) {
|
||||
#ifdef __LITTLE_ENDIAN__
|
||||
return (vector signed int)vec_perm(
|
||||
__a, __b,
|
||||
(vector unsigned char)(0x00, 0x01, 0x02, 0x03, 0x08, 0x09, 0x0A, 0x0B,
|
||||
0x10, 0x11, 0x12, 0x13, 0x18, 0x19, 0x1A, 0x1B));
|
||||
#else
|
||||
return (vector signed int)vec_perm(
|
||||
__a, __b,
|
||||
(vector unsigned char)(0x04, 0x05, 0x06, 0x07, 0x0C, 0x0D, 0x0E, 0x0F,
|
||||
0x14, 0x15, 0x16, 0x17, 0x1C, 0x1D, 0x1E, 0x1F));
|
||||
#endif
|
||||
}
|
||||
static vector unsigned int __ATTRS_o_ai
|
||||
vec_pack(vector unsigned long long __a, vector unsigned long long __b) {
|
||||
#ifdef __LITTLE_ENDIAN__
|
||||
return (vector unsigned int)vec_perm(
|
||||
__a, __b,
|
||||
(vector unsigned char)(0x00, 0x01, 0x02, 0x03, 0x08, 0x09, 0x0A, 0x0B,
|
||||
0x10, 0x11, 0x12, 0x13, 0x18, 0x19, 0x1A, 0x1B));
|
||||
#else
|
||||
return (vector unsigned int)vec_perm(
|
||||
__a, __b,
|
||||
(vector unsigned char)(0x04, 0x05, 0x06, 0x07, 0x0C, 0x0D, 0x0E, 0x0F,
|
||||
0x14, 0x15, 0x16, 0x17, 0x1C, 0x1D, 0x1E, 0x1F));
|
||||
#endif
|
||||
}
|
||||
|
||||
static vector bool int __ATTRS_o_ai vec_pack(vector bool long long __a,
|
||||
vector bool long long __b) {
|
||||
#ifdef __LITTLE_ENDIAN__
|
||||
return (vector bool int)vec_perm(
|
||||
__a, __b,
|
||||
(vector unsigned char)(0x00, 0x01, 0x02, 0x03, 0x08, 0x09, 0x0A, 0x0B,
|
||||
0x10, 0x11, 0x12, 0x13, 0x18, 0x19, 0x1A, 0x1B));
|
||||
#else
|
||||
return (vector bool int)vec_perm(
|
||||
__a, __b,
|
||||
(vector unsigned char)(0x04, 0x05, 0x06, 0x07, 0x0C, 0x0D, 0x0E, 0x0F,
|
||||
0x14, 0x15, 0x16, 0x17, 0x1C, 0x1D, 0x1E, 0x1F));
|
||||
#endif
|
||||
}
|
||||
|
||||
#endif
|
||||
|
||||
/* vec_vpkuhum */
|
||||
|
||||
#define __builtin_altivec_vpkuhum vec_vpkuhum
|
||||
|
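The new vec_pack overloads in the hunk above narrow two vectors of doublewords into one vector of words by keeping the low 32 bits of each element; the endian-specific permute indices simply pick out those low words on either byte order. A reference model, as an illustrative sketch only:

#include <stdint.h>

/* Model of vec_pack(a, b) for two vectors of unsigned long long:
   result = { low32(a[0]), low32(a[1]), low32(b[0]), low32(b[1]) }. */
static void pack_model(const uint64_t a[2], const uint64_t b[2],
                       uint32_t out[4]) {
  out[0] = (uint32_t)a[0];
  out[1] = (uint32_t)a[1];
  out[2] = (uint32_t)b[0];
  out[3] = (uint32_t)b[1];
}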
@ -5178,11 +5772,11 @@ static vector short __ATTRS_o_ai vec_perm(vector signed short __a,
|
|||
vector unsigned char __d = {255, 255, 255, 255, 255, 255, 255, 255,
|
||||
255, 255, 255, 255, 255, 255, 255, 255};
|
||||
__d = vec_xor(__c, __d);
|
||||
return (vector short)__builtin_altivec_vperm_4si((vector int)__b,
|
||||
(vector int)__a, __d);
|
||||
return (vector signed short)__builtin_altivec_vperm_4si((vector int)__b,
|
||||
(vector int)__a, __d);
|
||||
#else
|
||||
return (vector short)__builtin_altivec_vperm_4si((vector int)__a,
|
||||
(vector int)__b, __c);
|
||||
return (vector signed short)__builtin_altivec_vperm_4si((vector int)__a,
|
||||
(vector int)__b, __c);
|
||||
#endif
|
||||
}
|
||||
|
||||
|
@ -5237,9 +5831,9 @@ static vector int __ATTRS_o_ai vec_perm(vector signed int __a,
|
|||
vector unsigned char __d = {255, 255, 255, 255, 255, 255, 255, 255,
|
||||
255, 255, 255, 255, 255, 255, 255, 255};
|
||||
__d = vec_xor(__c, __d);
|
||||
return (vector int)__builtin_altivec_vperm_4si(__b, __a, __d);
|
||||
return (vector signed int)__builtin_altivec_vperm_4si(__b, __a, __d);
|
||||
#else
|
||||
return (vector int)__builtin_altivec_vperm_4si(__a, __b, __c);
|
||||
return (vector signed int)__builtin_altivec_vperm_4si(__a, __b, __c);
|
||||
#endif
|
||||
}
|
||||
|
||||
|
@ -5295,9 +5889,11 @@ static vector long long __ATTRS_o_ai vec_perm(vector signed long long __a,
|
|||
vector unsigned char __d = {255, 255, 255, 255, 255, 255, 255, 255,
|
||||
255, 255, 255, 255, 255, 255, 255, 255};
|
||||
__d = vec_xor(__c, __d);
|
||||
return (vector long long)__builtin_altivec_vperm_4si(__b, __a, __d);
|
||||
return (vector signed long long)__builtin_altivec_vperm_4si(
|
||||
(vector int)__b, (vector int)__a, __d);
|
||||
#else
|
||||
return (vector long long)__builtin_altivec_vperm_4si(__a, __b, __c);
|
||||
return (vector signed long long)__builtin_altivec_vperm_4si(
|
||||
(vector int)__a, (vector int)__b, __c);
|
||||
#endif
|
||||
}
|
||||
|
||||
|
@@ -5432,11 +6028,21 @@ static vector double __ATTRS_o_ai vec_vperm(vector double __a,

/* vec_re */

static vector float __attribute__((__always_inline__))
static vector float __ATTRS_o_ai
vec_re(vector float __a) {
#ifdef __VSX__
  return __builtin_vsx_xvresp(__a);
#else
  return __builtin_altivec_vrefp(__a);
#endif
}

#ifdef __VSX__
static vector double __ATTRS_o_ai vec_re(vector double __a) {
  return __builtin_vsx_xvredp(__a);
}
#endif

/* vec_vrefp */

static vector float __attribute__((__always_inline__))
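vec_re above returns a hardware reciprocal estimate (xvresp/xvredp under VSX, vrefp otherwise), not an exact 1.0/x; callers that need full accuracy typically refine the estimate. One Newton-Raphson step, sketched for illustration and not part of the patch:

/* Refine a reciprocal estimate e ~= 1/x: e' = e * (2 - x * e),
   which roughly doubles the number of correct bits. */
static float refine_recip(float x, float e) {
  return e * (2.0f - x * e);
}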
@ -1081,6 +1081,31 @@ void test6() {
|
|||
// CHECK: @llvm.ppc.altivec.vctuxs
|
||||
// CHECK-LE: @llvm.ppc.altivec.vctuxs
|
||||
|
||||
/* vec_div */
|
||||
res_vsc = vec_div(vsc, vsc);
|
||||
// CHECK: sdiv <16 x i8>
|
||||
// CHECK-LE: sdiv <16 x i8>
|
||||
|
||||
res_vuc = vec_div(vuc, vuc);
|
||||
// CHECK: udiv <16 x i8>
|
||||
// CHECK-LE: udiv <16 x i8>
|
||||
|
||||
res_vs = vec_div(vs, vs);
|
||||
// CHECK: sdiv <8 x i16>
|
||||
// CHECK-LE: sdiv <8 x i16>
|
||||
|
||||
res_vus = vec_div(vus, vus);
|
||||
// CHECK: udiv <8 x i16>
|
||||
// CHECK-LE: udiv <8 x i16>
|
||||
|
||||
res_vi = vec_div(vi, vi);
|
||||
// CHECK: sdiv <4 x i32>
|
||||
// CHECK-LE: sdiv <4 x i32>
|
||||
|
||||
res_vui = vec_div(vui, vui);
|
||||
// CHECK: udiv <4 x i32>
|
||||
// CHECK-LE: udiv <4 x i32>
|
||||
|
||||
/* vec_dss */
|
||||
vec_dss(0);
|
||||
// CHECK: @llvm.ppc.altivec.dss
|
||||
|
@ -2127,6 +2152,31 @@ void test6() {
|
|||
// CHECK: @llvm.ppc.altivec.mtvscr
|
||||
// CHECK-LE: @llvm.ppc.altivec.mtvscr
|
||||
|
||||
/* vec_mul */
|
||||
res_vsc = vec_mul(vsc, vsc);
|
||||
// CHECK: mul <16 x i8>
|
||||
// CHECK-LE: mul <16 x i8>
|
||||
|
||||
res_vuc = vec_mul(vuc, vuc);
|
||||
// CHECK: mul <16 x i8>
|
||||
// CHECK-LE: mul <16 x i8>
|
||||
|
||||
res_vs = vec_mul(vs, vs);
|
||||
// CHECK: mul <8 x i16>
|
||||
// CHECK-LE: mul <8 x i16>
|
||||
|
||||
res_vus = vec_mul(vus, vus);
|
||||
// CHECK: mul <8 x i16>
|
||||
// CHECK-LE: mul <8 x i16>
|
||||
|
||||
res_vi = vec_mul(vi, vi);
|
||||
// CHECK: mul <4 x i32>
|
||||
// CHECK-LE: mul <4 x i32>
|
||||
|
||||
res_vui = vec_mul(vui, vui);
|
||||
// CHECK: mul <4 x i32>
|
||||
// CHECK-LE: mul <4 x i32>
|
||||
|
||||
/* vec_mule */
|
||||
res_vs = vec_mule(vsc, vsc);
|
||||
// CHECK: @llvm.ppc.altivec.vmulesb
|
||||
|
|
|
@@ -35,7 +35,7 @@ vector signed short res_vss;
vector unsigned short res_vus;
vector bool short res_vbs;

vector int res_vsi;
vector signed int res_vsi;
vector unsigned int res_vui;
vector bool int res_vbi;

@ -442,6 +442,39 @@ void test1() {
|
|||
// CHECK-LE: bitcast <4 x i32> [[T3]] to <2 x double>
|
||||
// CHECK-PPC: error: assigning to
|
||||
|
||||
res_vsc = vec_cntlz(vsc);
|
||||
// CHECK: call <16 x i8> @llvm.ctlz.v16i8(<16 x i8> %{{.+}}, i1 false)
|
||||
// CHECK-LE: call <16 x i8> @llvm.ctlz.v16i8(<16 x i8> %{{.+}}, i1 false)
|
||||
// CHECK-PPC: warning: implicit declaration of function 'vec_cntlz' is invalid in C99
|
||||
|
||||
res_vuc = vec_cntlz(vuc);
|
||||
// CHECK: call <16 x i8> @llvm.ctlz.v16i8(<16 x i8> %{{.+}}, i1 false)
|
||||
// CHECK-LE: call <16 x i8> @llvm.ctlz.v16i8(<16 x i8> %{{.+}}, i1 false)
|
||||
|
||||
res_vss = vec_cntlz(vss);
|
||||
// CHECK: call <8 x i16> @llvm.ctlz.v8i16(<8 x i16> %{{.+}}, i1 false)
|
||||
// CHECK-LE: call <8 x i16> @llvm.ctlz.v8i16(<8 x i16> %{{.+}}, i1 false)
|
||||
|
||||
res_vus = vec_cntlz(vus);
|
||||
// CHECK: call <8 x i16> @llvm.ctlz.v8i16(<8 x i16> %{{.+}}, i1 false)
|
||||
// CHECK-LE: call <8 x i16> @llvm.ctlz.v8i16(<8 x i16> %{{.+}}, i1 false)
|
||||
|
||||
res_vsi = vec_cntlz(vsi);
|
||||
// CHECK: call <4 x i32> @llvm.ctlz.v4i32(<4 x i32> %{{.+}}, i1 false)
|
||||
// CHECK-LE: call <4 x i32> @llvm.ctlz.v4i32(<4 x i32> %{{.+}}, i1 false)
|
||||
|
||||
res_vui = vec_cntlz(vui);
|
||||
// CHECK: call <4 x i32> @llvm.ctlz.v4i32(<4 x i32> %{{.+}}, i1 false)
|
||||
// CHECK-LE: call <4 x i32> @llvm.ctlz.v4i32(<4 x i32> %{{.+}}, i1 false)
|
||||
|
||||
res_vsll = vec_cntlz(vsll);
|
||||
// CHECK: call <2 x i64> @llvm.ctlz.v2i64(<2 x i64> %{{.+}}, i1 false)
|
||||
// CHECK-LE: call <2 x i64> @llvm.ctlz.v2i64(<2 x i64> %{{.+}}, i1 false)
|
||||
|
||||
res_vull = vec_cntlz(vull);
|
||||
// CHECK: call <2 x i64> @llvm.ctlz.v2i64(<2 x i64> %{{.+}}, i1 false)
|
||||
// CHECK-LE: call <2 x i64> @llvm.ctlz.v2i64(<2 x i64> %{{.+}}, i1 false)
|
||||
|
||||
/* ----------------------- predicates --------------------------- */
|
||||
/* vec_all_eq */
|
||||
res_i = vec_all_eq(vsll, vsll);
|
||||
|
@ -1136,6 +1169,298 @@ void test1() {
|
|||
// CHECK: @llvm.ppc.altivec.vminud
|
||||
// CHECK-LE: @llvm.ppc.altivec.vminud
|
||||
|
||||
/* vec_nand */
|
||||
res_vsc = vec_nand(vsc, vsc);
|
||||
// CHECK: [[T1:%.+]] = and <16 x i8>
|
||||
// CHECK: xor <16 x i8> [[T1]], <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>
|
||||
// CHECK-LE: [[T1:%.+]] = and <16 x i8>
|
||||
// CHECK-LE: xor <16 x i8> [[T1]], <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>
|
||||
// CHECK-PPC: warning: implicit declaration of function 'vec_nand' is invalid in C99
|
||||
|
||||
res_vsc = vec_nand(vsc, vbc);
|
||||
// CHECK: [[T1:%.+]] = and <16 x i8>
|
||||
// CHECK: xor <16 x i8> [[T1]], <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>
|
||||
// CHECK-LE: [[T1:%.+]] = and <16 x i8>
|
||||
// CHECK-LE: xor <16 x i8> [[T1]], <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>
|
||||
|
||||
res_vsc = vec_nand(vbc, vsc);
|
||||
// CHECK: [[T1:%.+]] = and <16 x i8>
|
||||
// CHECK: xor <16 x i8> [[T1]], <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>
|
||||
// CHECK-LE: [[T1:%.+]] = and <16 x i8>
|
||||
// CHECK-LE: xor <16 x i8> [[T1]], <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>
|
||||
|
||||
res_vuc = vec_nand(vuc, vuc);
|
||||
// CHECK: [[T1:%.+]] = and <16 x i8>
|
||||
// CHECK: xor <16 x i8> [[T1]], <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>
|
||||
// CHECK-LE: [[T1:%.+]] = and <16 x i8>
|
||||
// CHECK-LE: xor <16 x i8> [[T1]], <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>
|
||||
|
||||
res_vuc = vec_nand(vuc, vbc);
|
||||
// CHECK: [[T1:%.+]] = and <16 x i8>
|
||||
// CHECK: xor <16 x i8> [[T1]], <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>
|
||||
// CHECK-LE: [[T1:%.+]] = and <16 x i8>
|
||||
// CHECK-LE: xor <16 x i8> [[T1]], <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>
|
||||
|
||||
res_vuc = vec_nand(vbc, vuc);
|
||||
// CHECK: [[T1:%.+]] = and <16 x i8>
|
||||
// CHECK: xor <16 x i8> [[T1]], <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>
|
||||
// CHECK-LE: [[T1:%.+]] = and <16 x i8>
|
||||
// CHECK-LE: xor <16 x i8> [[T1]], <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>
|
||||
|
||||
res_vss = vec_nand(vss, vss);
|
||||
// CHECK: [[T1:%.+]] = and <8 x i16>
|
||||
// CHECK: xor <8 x i16> [[T1]], <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>
|
||||
// CHECK-LE: [[T1:%.+]] = and <8 x i16>
|
||||
// CHECK-LE: xor <8 x i16> [[T1]], <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>
|
||||
|
||||
res_vss = vec_nand(vss, vbs);
|
||||
// CHECK: [[T1:%.+]] = and <8 x i16>
|
||||
// CHECK: xor <8 x i16> [[T1]], <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>
|
||||
// CHECK-LE: [[T1:%.+]] = and <8 x i16>
|
||||
// CHECK-LE: xor <8 x i16> [[T1]], <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>
|
||||
|
||||
res_vss = vec_nand(vbs, vss);
|
||||
// CHECK: [[T1:%.+]] = and <8 x i16>
|
||||
// CHECK: xor <8 x i16> [[T1]], <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>
|
||||
// CHECK-LE: [[T1:%.+]] = and <8 x i16>
|
||||
// CHECK-LE: xor <8 x i16> [[T1]], <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>
|
||||
|
||||
res_vus = vec_nand(vus, vus);
|
||||
// CHECK: [[T1:%.+]] = and <8 x i16>
|
||||
// CHECK: xor <8 x i16> [[T1]], <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>
|
||||
// CHECK-LE: [[T1:%.+]] = and <8 x i16>
|
||||
// CHECK-LE: xor <8 x i16> [[T1]], <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>
|
||||
|
||||
res_vus = vec_nand(vus, vbs);
|
||||
// CHECK: [[T1:%.+]] = and <8 x i16>
|
||||
// CHECK: xor <8 x i16> [[T1]], <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>
|
||||
// CHECK-LE: [[T1:%.+]] = and <8 x i16>
|
||||
// CHECK-LE: xor <8 x i16> [[T1]], <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>
|
||||
|
||||
res_vus = vec_nand(vbs, vus);
|
||||
// CHECK: [[T1:%.+]] = and <8 x i16>
|
||||
// CHECK: xor <8 x i16> [[T1]], <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>
|
||||
// CHECK-LE: [[T1:%.+]] = and <8 x i16>
|
||||
// CHECK-LE: xor <8 x i16> [[T1]], <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>
|
||||
|
||||
res_vsi = vec_nand(vsi, vsi);
|
||||
// CHECK: [[T1:%.+]] = and <4 x i32>
|
||||
// CHECK: xor <4 x i32> [[T1]], <i32 -1, i32 -1, i32 -1, i32 -1>
|
||||
// CHECK-LE: [[T1:%.+]] = and <4 x i32>
|
||||
// CHECK-LE: xor <4 x i32> [[T1]], <i32 -1, i32 -1, i32 -1, i32 -1>
|
||||
|
||||
res_vsi = vec_nand(vsi, vbi);
|
||||
// CHECK: [[T1:%.+]] = and <4 x i32>
|
||||
// CHECK: xor <4 x i32> [[T1]], <i32 -1, i32 -1, i32 -1, i32 -1>
|
||||
// CHECK-LE: [[T1:%.+]] = and <4 x i32>
|
||||
// CHECK-LE: xor <4 x i32> [[T1]], <i32 -1, i32 -1, i32 -1, i32 -1>
|
||||
|
||||
res_vsi = vec_nand(vbi, vsi);
|
||||
// CHECK: [[T1:%.+]] = and <4 x i32>
|
||||
// CHECK: xor <4 x i32> [[T1]], <i32 -1, i32 -1, i32 -1, i32 -1>
|
||||
// CHECK-LE: [[T1:%.+]] = and <4 x i32>
|
||||
// CHECK-LE: xor <4 x i32> [[T1]], <i32 -1, i32 -1, i32 -1, i32 -1>
|
||||
|
||||
res_vui = vec_nand(vui, vui);
|
||||
// CHECK: [[T1:%.+]] = and <4 x i32>
|
||||
// CHECK: xor <4 x i32> [[T1]], <i32 -1, i32 -1, i32 -1, i32 -1>
|
||||
// CHECK-LE: [[T1:%.+]] = and <4 x i32>
|
||||
// CHECK-LE: xor <4 x i32> [[T1]], <i32 -1, i32 -1, i32 -1, i32 -1>
|
||||
|
||||
res_vui = vec_nand(vui, vbi);
|
||||
// CHECK: [[T1:%.+]] = and <4 x i32>
|
||||
// CHECK: xor <4 x i32> [[T1]], <i32 -1, i32 -1, i32 -1, i32 -1>
|
||||
// CHECK-LE: [[T1:%.+]] = and <4 x i32>
|
||||
// CHECK-LE: xor <4 x i32> [[T1]], <i32 -1, i32 -1, i32 -1, i32 -1>
|
||||
|
||||
res_vui = vec_nand(vbi, vui);
|
||||
// CHECK: [[T1:%.+]] = and <4 x i32>
|
||||
// CHECK: xor <4 x i32> [[T1]], <i32 -1, i32 -1, i32 -1, i32 -1>
|
||||
// CHECK-LE: [[T1:%.+]] = and <4 x i32>
|
||||
// CHECK-LE: xor <4 x i32> [[T1]], <i32 -1, i32 -1, i32 -1, i32 -1>
|
||||
|
||||
res_vsll = vec_nand(vsll, vsll);
|
||||
// CHECK: [[T1:%.+]] = and <2 x i64>
|
||||
// CHECK: xor <2 x i64> [[T1]], <i64 -1, i64 -1>
|
||||
// CHECK-LE: [[T1:%.+]] = and <2 x i64>
|
||||
// CHECK-LE: xor <2 x i64> [[T1]], <i64 -1, i64 -1>
|
||||
|
||||
res_vsll = vec_nand(vsll, vbll);
|
||||
// CHECK: [[T1:%.+]] = and <2 x i64>
|
||||
// CHECK: xor <2 x i64> [[T1]], <i64 -1, i64 -1>
|
||||
// CHECK-LE: [[T1:%.+]] = and <2 x i64>
|
||||
// CHECK-LE: xor <2 x i64> [[T1]], <i64 -1, i64 -1>
|
||||
|
||||
res_vsll = vec_nand(vbll, vsll);
|
||||
// CHECK: [[T1:%.+]] = and <2 x i64>
|
||||
// CHECK: xor <2 x i64> [[T1]], <i64 -1, i64 -1>
|
||||
// CHECK-LE: [[T1:%.+]] = and <2 x i64>
|
||||
// CHECK-LE: xor <2 x i64> [[T1]], <i64 -1, i64 -1>
|
||||
|
||||
res_vull = vec_nand(vull, vull);
|
||||
// CHECK: [[T1:%.+]] = and <2 x i64>
|
||||
// CHECK: xor <2 x i64> [[T1]], <i64 -1, i64 -1>
|
||||
// CHECK-LE: [[T1:%.+]] = and <2 x i64>
|
||||
// CHECK-LE: xor <2 x i64> [[T1]], <i64 -1, i64 -1>
|
||||
|
||||
res_vull = vec_nand(vull, vbll);
|
||||
// CHECK: [[T1:%.+]] = and <2 x i64>
|
||||
// CHECK: xor <2 x i64> [[T1]], <i64 -1, i64 -1>
|
||||
// CHECK-LE: [[T1:%.+]] = and <2 x i64>
|
||||
// CHECK-LE: xor <2 x i64> [[T1]], <i64 -1, i64 -1>
|
||||
|
||||
res_vull = vec_nand(vbll, vull);
|
||||
// CHECK: [[T1:%.+]] = and <2 x i64>
|
||||
// CHECK: xor <2 x i64> [[T1]], <i64 -1, i64 -1>
|
||||
// CHECK-LE: [[T1:%.+]] = and <2 x i64>
|
||||
// CHECK-LE: xor <2 x i64> [[T1]], <i64 -1, i64 -1>
|
||||
|
||||
/* vec_orc */
|
||||
res_vsc = vec_orc(vsc, vsc);
|
||||
// CHECK: [[T1:%.+]] = xor <16 x i8> {{%.+}}, <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>
|
||||
// CHECK: or <16 x i8> {{%.+}}, [[T1]]
|
||||
// CHECK-LE: [[T1:%.+]] = xor <16 x i8> {{%.+}}, <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>
|
||||
// CHECK-LE: or <16 x i8> {{%.+}}, [[T1]]
|
||||
// CHECK-PPC: warning: implicit declaration of function 'vec_orc' is invalid in C99
|
||||
|
||||
res_vsc = vec_orc(vsc, vbc);
|
||||
// CHECK: [[T1:%.+]] = xor <16 x i8> {{%.+}}, <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>
|
||||
// CHECK: or <16 x i8> {{%.+}}, [[T1]]
|
||||
// CHECK-LE: [[T1:%.+]] = xor <16 x i8> {{%.+}}, <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>
|
||||
// CHECK-LE: or <16 x i8> {{%.+}}, [[T1]]
|
||||
|
||||
res_vsc = vec_orc(vbc, vsc);
|
||||
// CHECK: [[T1:%.+]] = xor <16 x i8> {{%.+}}, <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>
|
||||
// CHECK: or <16 x i8> {{%.+}}, [[T1]]
|
||||
// CHECK-LE: [[T1:%.+]] = xor <16 x i8> {{%.+}}, <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>
|
||||
// CHECK-LE: or <16 x i8> {{%.+}}, [[T1]]
|
||||
|
||||
res_vuc = vec_orc(vuc, vuc);
|
||||
// CHECK: [[T1:%.+]] = xor <16 x i8> {{%.+}}, <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>
|
||||
// CHECK: or <16 x i8> {{%.+}}, [[T1]]
|
||||
// CHECK-LE: [[T1:%.+]] = xor <16 x i8> {{%.+}}, <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>
|
||||
// CHECK-LE: or <16 x i8> {{%.+}}, [[T1]]
|
||||
|
||||
res_vuc = vec_orc(vuc, vbc);
|
||||
// CHECK: [[T1:%.+]] = xor <16 x i8> {{%.+}}, <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>
|
||||
// CHECK: or <16 x i8> {{%.+}}, [[T1]]
|
||||
// CHECK-LE: [[T1:%.+]] = xor <16 x i8> {{%.+}}, <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>
|
||||
// CHECK-LE: or <16 x i8> {{%.+}}, [[T1]]
|
||||
|
||||
res_vuc = vec_orc(vbc, vuc);
|
||||
// CHECK: [[T1:%.+]] = xor <16 x i8> {{%.+}}, <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>
|
||||
// CHECK: or <16 x i8> {{%.+}}, [[T1]]
|
||||
// CHECK-LE: [[T1:%.+]] = xor <16 x i8> {{%.+}}, <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>
|
||||
// CHECK-LE: or <16 x i8> {{%.+}}, [[T1]]
|
||||
|
||||
res_vss = vec_orc(vss, vss);
|
||||
// CHECK: [[T1:%.+]] = xor <8 x i16> {{%.+}}, <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>
|
||||
// CHECK: or <8 x i16> {{%.+}}, [[T1]]
|
||||
// CHECK-LE: [[T1:%.+]] = xor <8 x i16> {{%.+}}, <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>
|
||||
// CHECK-LE: or <8 x i16> {{%.+}}, [[T1]]
|
||||
|
||||
res_vss = vec_orc(vss, vbs);
|
||||
// CHECK: [[T1:%.+]] = xor <8 x i16> {{%.+}}, <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>
|
||||
// CHECK: or <8 x i16> {{%.+}}, [[T1]]
|
||||
// CHECK-LE: [[T1:%.+]] = xor <8 x i16> {{%.+}}, <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>
|
||||
// CHECK-LE: or <8 x i16> {{%.+}}, [[T1]]
|
||||
|
||||
res_vss = vec_orc(vbs, vss);
|
||||
// CHECK: [[T1:%.+]] = xor <8 x i16> {{%.+}}, <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>
|
||||
// CHECK: or <8 x i16> {{%.+}}, [[T1]]
|
||||
// CHECK-LE: [[T1:%.+]] = xor <8 x i16> {{%.+}}, <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>
|
||||
// CHECK-LE: or <8 x i16> {{%.+}}, [[T1]]
|
||||
|
||||
res_vus = vec_orc(vus, vus);
|
||||
// CHECK: [[T1:%.+]] = xor <8 x i16> {{%.+}}, <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>
|
||||
// CHECK: or <8 x i16> {{%.+}}, [[T1]]
|
||||
// CHECK-LE: [[T1:%.+]] = xor <8 x i16> {{%.+}}, <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>
|
||||
// CHECK-LE: or <8 x i16> {{%.+}}, [[T1]]
|
||||
|
||||
res_vus = vec_orc(vus, vbs);
|
||||
// CHECK: [[T1:%.+]] = xor <8 x i16> {{%.+}}, <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>
|
||||
// CHECK: or <8 x i16> {{%.+}}, [[T1]]
|
||||
// CHECK-LE: [[T1:%.+]] = xor <8 x i16> {{%.+}}, <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>
|
||||
// CHECK-LE: or <8 x i16> {{%.+}}, [[T1]]
|
||||
|
||||
res_vus = vec_orc(vbs, vus);
|
||||
// CHECK: [[T1:%.+]] = xor <8 x i16> {{%.+}}, <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>
|
||||
// CHECK: or <8 x i16> {{%.+}}, [[T1]]
|
||||
// CHECK-LE: [[T1:%.+]] = xor <8 x i16> {{%.+}}, <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>
|
||||
// CHECK-LE: or <8 x i16> {{%.+}}, [[T1]]
|
||||
|
||||
res_vsi = vec_orc(vsi, vsi);
|
||||
// CHECK: [[T1:%.+]] = xor <4 x i32> {{%.+}}, <i32 -1, i32 -1, i32 -1, i32 -1>
|
||||
// CHECK: or <4 x i32> {{%.+}}, [[T1]]
|
||||
// CHECK-LE: [[T1:%.+]] = xor <4 x i32> {{%.+}}, <i32 -1, i32 -1, i32 -1, i32 -1>
|
||||
// CHECK-LE: or <4 x i32> {{%.+}}, [[T1]]
|
||||
|
||||
res_vsi = vec_orc(vsi, vbi);
|
||||
// CHECK: [[T1:%.+]] = xor <4 x i32> {{%.+}}, <i32 -1, i32 -1, i32 -1, i32 -1>
|
||||
// CHECK: or <4 x i32> {{%.+}}, [[T1]]
|
||||
// CHECK-LE: [[T1:%.+]] = xor <4 x i32> {{%.+}}, <i32 -1, i32 -1, i32 -1, i32 -1>
|
||||
// CHECK-LE: or <4 x i32> {{%.+}}, [[T1]]
|
||||
|
||||
res_vsi = vec_orc(vbi, vsi);
|
||||
// CHECK: [[T1:%.+]] = xor <4 x i32> {{%.+}}, <i32 -1, i32 -1, i32 -1, i32 -1>
|
||||
// CHECK: or <4 x i32> {{%.+}}, [[T1]]
|
||||
// CHECK-LE: [[T1:%.+]] = xor <4 x i32> {{%.+}}, <i32 -1, i32 -1, i32 -1, i32 -1>
|
||||
// CHECK-LE: or <4 x i32> {{%.+}}, [[T1]]
|
||||
|
||||
res_vui = vec_orc(vui, vui);
|
||||
// CHECK: [[T1:%.+]] = xor <4 x i32> {{%.+}}, <i32 -1, i32 -1, i32 -1, i32 -1>
|
||||
// CHECK: or <4 x i32> {{%.+}}, [[T1]]
|
||||
// CHECK-LE: [[T1:%.+]] = xor <4 x i32> {{%.+}}, <i32 -1, i32 -1, i32 -1, i32 -1>
|
||||
// CHECK-LE: or <4 x i32> {{%.+}}, [[T1]]
|
||||
|
||||
res_vui = vec_orc(vui, vbi);
|
||||
// CHECK: [[T1:%.+]] = xor <4 x i32> {{%.+}}, <i32 -1, i32 -1, i32 -1, i32 -1>
|
||||
// CHECK: or <4 x i32> {{%.+}}, [[T1]]
|
||||
// CHECK-LE: [[T1:%.+]] = xor <4 x i32> {{%.+}}, <i32 -1, i32 -1, i32 -1, i32 -1>
|
||||
// CHECK-LE: or <4 x i32> {{%.+}}, [[T1]]
|
||||
|
||||
res_vui = vec_orc(vbi, vui);
|
||||
// CHECK: [[T1:%.+]] = xor <4 x i32> {{%.+}}, <i32 -1, i32 -1, i32 -1, i32 -1>
|
||||
// CHECK: or <4 x i32> {{%.+}}, [[T1]]
|
||||
// CHECK-LE: [[T1:%.+]] = xor <4 x i32> {{%.+}}, <i32 -1, i32 -1, i32 -1, i32 -1>
|
||||
// CHECK-LE: or <4 x i32> {{%.+}}, [[T1]]
|
||||
|
||||
res_vsll = vec_orc(vsll, vsll);
|
||||
// CHECK: [[T1:%.+]] = xor <2 x i64> {{%.+}}, <i64 -1, i64 -1>
|
||||
// CHECK: or <2 x i64> {{%.+}}, [[T1]]
|
||||
// CHECK-LE: [[T1:%.+]] = xor <2 x i64> {{%.+}}, <i64 -1, i64 -1>
|
||||
// CHECK-LE: or <2 x i64> {{%.+}}, [[T1]]
|
||||
|
||||
res_vsll = vec_orc(vsll, vbll);
|
||||
// CHECK: [[T1:%.+]] = xor <2 x i64> {{%.+}}, <i64 -1, i64 -1>
|
||||
// CHECK: or <2 x i64> {{%.+}}, [[T1]]
|
||||
// CHECK-LE: [[T1:%.+]] = xor <2 x i64> {{%.+}}, <i64 -1, i64 -1>
|
||||
// CHECK-LE: or <2 x i64> {{%.+}}, [[T1]]
|
||||
|
||||
res_vsll = vec_orc(vbll, vsll);
|
||||
// CHECK: [[T1:%.+]] = xor <2 x i64> {{%.+}}, <i64 -1, i64 -1>
|
||||
// CHECK: or <2 x i64> {{%.+}}, [[T1]]
|
||||
// CHECK-LE: [[T1:%.+]] = xor <2 x i64> {{%.+}}, <i64 -1, i64 -1>
|
||||
// CHECK-LE: or <2 x i64> {{%.+}}, [[T1]]
|
||||
|
||||
res_vull = vec_orc(vull, vull);
|
||||
// CHECK: [[T1:%.+]] = xor <2 x i64> {{%.+}}, <i64 -1, i64 -1>
|
||||
// CHECK: or <2 x i64> {{%.+}}, [[T1]]
|
||||
// CHECK-LE: [[T1:%.+]] = xor <2 x i64> {{%.+}}, <i64 -1, i64 -1>
|
||||
// CHECK-LE: or <2 x i64> {{%.+}}, [[T1]]
|
||||
|
||||
res_vull = vec_orc(vull, vbll);
|
||||
// CHECK: [[T1:%.+]] = xor <2 x i64> {{%.+}}, <i64 -1, i64 -1>
|
||||
// CHECK: or <2 x i64> {{%.+}}, [[T1]]
|
||||
// CHECK-LE: [[T1:%.+]] = xor <2 x i64> {{%.+}}, <i64 -1, i64 -1>
|
||||
// CHECK-LE: or <2 x i64> {{%.+}}, [[T1]]
|
||||
|
||||
res_vull = vec_orc(vbll, vull);
|
||||
// CHECK: [[T1:%.+]] = xor <2 x i64> {{%.+}}, <i64 -1, i64 -1>
|
||||
// CHECK: or <2 x i64> {{%.+}}, [[T1]]
|
||||
// CHECK-LE: [[T1:%.+]] = xor <2 x i64> {{%.+}}, <i64 -1, i64 -1>
|
||||
// CHECK-LE: or <2 x i64> {{%.+}}, [[T1]]
|
||||
|
||||
/* vec_vbpermq */
|
||||
res_vsll = vec_vbpermq(vsc, vsc);
|
||||
// CHECK: llvm.ppc.altivec.vbpermq
|
||||
|
|
|
@ -140,14 +140,31 @@ void test1() {
|
|||
// CHECK: call <4 x i32> @llvm.ppc.vsx.xvcmpgtsp(<4 x float> %{{[0-9]*}}, <4 x float> %{{[0-9]*}})
|
||||
// CHECK-LE: call <4 x i32> @llvm.ppc.vsx.xvcmpgtsp(<4 x float> %{{[0-9]*}}, <4 x float> %{{[0-9]*}})
|
||||
|
||||
/* vec_cpsgn */
|
||||
res_vf = vec_cpsgn(vf, vf);
|
||||
// CHECK: call <4 x float> @llvm.copysign.v4f32(<4 x float> %{{.+}}, <4 x float> %{{.+}})
|
||||
// CHECK-LE: call <4 x float> @llvm.copysign.v4f32(<4 x float> %{{.+}}, <4 x float> %{{.+}})
|
||||
|
||||
res_vd = vec_cpsgn(vd, vd);
|
||||
// CHECK: call <2 x double> @llvm.copysign.v2f64(<2 x double> %{{.+}}, <2 x double> %{{.+}})
|
||||
// CHECK-LE: call <2 x double> @llvm.copysign.v2f64(<2 x double> %{{.+}}, <2 x double> %{{.+}})
|
||||
|
||||
/* vec_div */
|
||||
res_vsll = vec_div(vsll, vsll);
|
||||
// CHECK: sdiv <2 x i64>
|
||||
// CHECK-LE: sdiv <2 x i64>
|
||||
|
||||
res_vull = vec_div(vull, vull);
|
||||
// CHECK: udiv <2 x i64>
|
||||
// CHECK-LE: udiv <2 x i64>
|
||||
|
||||
res_vf = vec_div(vf, vf);
|
||||
// CHECK: @llvm.ppc.vsx.xvdivsp
|
||||
// CHECK-LE: @llvm.ppc.vsx.xvdivsp
|
||||
// CHECK: fdiv <4 x float>
|
||||
// CHECK-LE: fdiv <4 x float>
|
||||
|
||||
res_vd = vec_div(vd, vd);
|
||||
// CHECK: @llvm.ppc.vsx.xvdivdp
|
||||
// CHECK-LE: @llvm.ppc.vsx.xvdivdp
|
||||
// CHECK: fdiv <2 x double>
|
||||
// CHECK-LE: fdiv <2 x double>
|
||||
|
||||
/* vec_max */
|
||||
res_vf = vec_max(vf, vf);
|
||||
|
@ -249,6 +266,18 @@ void test1() {
|
|||
// CHECK-LE: [[T2:%.+]] = bitcast <2 x i64> {{.+}} to <4 x i32>
|
||||
// CHECK-LE: call <4 x i32> @llvm.ppc.altivec.vperm(<4 x i32> [[T1]], <4 x i32> [[T2]], <16 x i8>
|
||||
|
||||
res_vsi = vec_pack(vsll, vsll);
|
||||
// CHECK: @llvm.ppc.altivec.vperm
|
||||
// CHECK-LE: @llvm.ppc.altivec.vperm
|
||||
|
||||
res_vui = vec_pack(vull, vull);
|
||||
// CHECK: @llvm.ppc.altivec.vperm
|
||||
// CHECK-LE: @llvm.ppc.altivec.vperm
|
||||
|
||||
res_vbi = vec_pack(vbll, vbll);
|
||||
// CHECK: @llvm.ppc.altivec.vperm
|
||||
// CHECK-LE: @llvm.ppc.altivec.vperm
|
||||
|
||||
res_vsll = vec_vperm(vsll, vsll, vuc);
|
||||
// CHECK: @llvm.ppc.altivec.vperm
|
||||
// CHECK-LE: @llvm.ppc.altivec.vperm
|
||||
|
@ -430,6 +459,57 @@ void test1() {
|
|||
// CHECK: call <2 x double> @llvm.fma.v2f64(<2 x double> %{{[0-9]+}}, <2 x double> %{{[0-9]+}}, <2 x double> %{{[0-9]+}})
|
||||
// CHECK-LE: call <2 x double> @llvm.fma.v2f64(<2 x double> %{{[0-9]+}}, <2 x double> %{{[0-9]+}}, <2 x double> %{{[0-9]+}})
|
||||
|
||||
/* vec_mergeh */
|
||||
res_vsll = vec_mergeh(vsll, vsll);
|
||||
// CHECK: @llvm.ppc.altivec.vperm
|
||||
// CHECK-LE: @llvm.ppc.altivec.vperm
|
||||
|
||||
res_vsll = vec_mergeh(vsll, vbll);
|
||||
// CHECK: @llvm.ppc.altivec.vperm
|
||||
// CHECK-LE: @llvm.ppc.altivec.vperm
|
||||
|
||||
res_vsll = vec_mergeh(vbll, vsll);
|
||||
// CHECK: @llvm.ppc.altivec.vperm
|
||||
// CHECK-LE: @llvm.ppc.altivec.vperm
|
||||
|
||||
res_vull = vec_mergeh(vull, vull);
|
||||
// CHECK: @llvm.ppc.altivec.vperm
|
||||
// CHECK-LE: @llvm.ppc.altivec.vperm
|
||||
|
||||
res_vull = vec_mergeh(vull, vbll);
|
||||
// CHECK: @llvm.ppc.altivec.vperm
|
||||
// CHECK-LE: @llvm.ppc.altivec.vperm
|
||||
|
||||
res_vull = vec_mergeh(vbll, vull);
|
||||
// CHECK: @llvm.ppc.altivec.vperm
|
||||
// CHECK-LE: @llvm.ppc.altivec.vperm
|
||||
|
||||
/* vec_mergel */
|
||||
res_vsll = vec_mergel(vsll, vsll);
|
||||
// CHECK: @llvm.ppc.altivec.vperm
|
||||
// CHECK-LE: @llvm.ppc.altivec.vperm
|
||||
|
||||
res_vsll = vec_mergel(vsll, vbll);
|
||||
// CHECK: @llvm.ppc.altivec.vperm
|
||||
// CHECK-LE: @llvm.ppc.altivec.vperm
|
||||
|
||||
res_vsll = vec_mergel(vbll, vsll);
|
||||
// CHECK: @llvm.ppc.altivec.vperm
|
||||
// CHECK-LE: @llvm.ppc.altivec.vperm
|
||||
|
||||
res_vull = vec_mergel(vull, vull);
|
||||
// CHECK: @llvm.ppc.altivec.vperm
|
||||
// CHECK-LE: @llvm.ppc.altivec.vperm
|
||||
|
||||
res_vull = vec_mergel(vull, vbll);
|
||||
// CHECK: @llvm.ppc.altivec.vperm
|
||||
// CHECK-LE: @llvm.ppc.altivec.vperm
|
||||
|
||||
res_vull = vec_mergel(vbll, vull);
|
||||
// CHECK: @llvm.ppc.altivec.vperm
|
||||
// CHECK-LE: @llvm.ppc.altivec.vperm
|
||||
|
||||
/* vec_msub */
|
||||
res_vf = vec_msub(vf, vf, vf);
|
||||
// CHECK: fsub <4 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, %{{[0-9]+}}
|
||||
// CHECK-NEXT: call <4 x float> @llvm.fma.v4f32(<4 x float> %{{[0-9]+}}, <4 x float> %{{[0-9]+}}, <4 x float>
|
||||
|
@ -442,6 +522,14 @@ void test1() {
|
|||
// CHECK-LE: fsub <2 x double> <double -0.000000e+00, double -0.000000e+00>, %{{[0-9]+}}
|
||||
// CHECK-LE-NEXT: call <2 x double> @llvm.fma.v2f64(<2 x double> %{{[0-9]+}}, <2 x double> %{{[0-9]+}}, <2 x double>
|
||||
|
||||
res_vsll = vec_mul(vsll, vsll);
|
||||
// CHECK: mul <2 x i64>
|
||||
// CHECK-LE: mul <2 x i64>
|
||||
|
||||
res_vull = vec_mul(vull, vull);
|
||||
// CHECK: mul <2 x i64>
|
||||
// CHECK-LE: mul <2 x i64>
|
||||
|
||||
res_vf = vec_mul(vf, vf);
|
||||
// CHECK: fmul <4 x float> %{{[0-9]+}}, %{{[0-9]+}}
|
||||
// CHECK-LE: fmul <4 x float> %{{[0-9]+}}, %{{[0-9]+}}
|
||||
|
@ -548,6 +636,30 @@ void test1() {
|
|||
// CHECK-LE: bitcast <2 x double> %{{[0-9]+}} to <2 x i64>
|
||||
// CHECK-LE: or <2 x i64> %{{[0-9]+}}, %{{[0-9]+}}
|
||||
|
||||
res_vd = vec_or(vbll, vd);
|
||||
// CHECK: [[T1:%.+]] = bitcast <2 x double> %{{[0-9]+}} to <2 x i64>
|
||||
// CHECK: [[T2:%.+]] = or <2 x i64> %{{[0-9]+}}, [[T1]]
|
||||
// CHECK: bitcast <2 x i64> [[T2]] to <2 x double>
|
||||
// CHECK-LE: [[T1:%.+]] = bitcast <2 x double> %{{[0-9]+}} to <2 x i64>
|
||||
// CHECK-LE: [[T2:%.+]] = or <2 x i64> %{{[0-9]+}}, [[T1]]
|
||||
// CHECK-LE: bitcast <2 x i64> [[T2]] to <2 x double>
|
||||
|
||||
res_vd = vec_or(vd, vbll);
|
||||
// CHECK: [[T1:%.+]] = bitcast <2 x double> %{{[0-9]+}} to <2 x i64>
|
||||
// CHECK: [[T2:%.+]] = or <2 x i64> [[T1]], %{{[0-9]+}}
|
||||
// CHECK: bitcast <2 x i64> [[T2]] to <2 x double>
|
||||
// CHECK-LE: [[T1:%.+]] = bitcast <2 x double> %{{[0-9]+}} to <2 x i64>
|
||||
// CHECK-LE: [[T2:%.+]] = or <2 x i64> [[T1]], %{{[0-9]+}}
|
||||
// CHECK-LE: bitcast <2 x i64> [[T2]] to <2 x double>
|
||||
|
||||
res_vf = vec_re(vf);
|
||||
// CHECK: call <4 x float> @llvm.ppc.vsx.xvresp(<4 x float>
|
||||
// CHECK-LE: call <4 x float> @llvm.ppc.vsx.xvresp(<4 x float>
|
||||
|
||||
res_vd = vec_re(vd);
|
||||
// CHECK: call <2 x double> @llvm.ppc.vsx.xvredp(<2 x double>
|
||||
// CHECK-LE: call <2 x double> @llvm.ppc.vsx.xvredp(<2 x double>
|
||||
|
||||
res_vf = vec_rint(vf);
|
||||
// CHECK: call <4 x float> @llvm.nearbyint.v4f32(<4 x float> %{{[0-9]+}})
|
||||
// CHECK-LE: call <4 x float> @llvm.nearbyint.v4f32(<4 x float> %{{[0-9]+}})
|
||||
|
|