[PowerPC] Add vector conversion builtins to altivec.h - clang portion

This patch corresponds to review:
https://reviews.llvm.org/D26308

It adds a number of vector type conversion builtins to altivec.h.

llvm-svn: 286627
This commit is contained in:
Nemanja Ivanovic 2016-11-11 19:56:17 +00:00
parent f1a12fe0f5
commit 4079fc8188
5 changed files with 665 additions and 10 deletions

View File

@ -380,6 +380,16 @@ BUILTIN(__builtin_vsx_xvabsdp, "V2dV2d", "")
BUILTIN(__builtin_vsx_xviexpdp, "V2dV2ULLiV2ULLi", "")
BUILTIN(__builtin_vsx_xviexpsp, "V4fV4UiV4Ui", "")
// Conversion builtins
// Prototype-string key: V<N><T> is a vector of N elements of type T
// (Si = signed int, Ui = unsigned int, SLLi/ULLi = signed/unsigned
// long long, f = float, d = double).
BUILTIN(__builtin_vsx_xvcvdpsxws, "V4SiV2d", "")   // 2 x double -> 4 x signed int
BUILTIN(__builtin_vsx_xvcvdpuxws, "V4UiV2d", "")   // 2 x double -> 4 x unsigned int
BUILTIN(__builtin_vsx_xvcvsxwdp, "V2dV4Si", "")    // 4 x signed int -> 2 x double
BUILTIN(__builtin_vsx_xvcvuxwdp, "V2dV4Ui", "")    // 4 x unsigned int -> 2 x double
BUILTIN(__builtin_vsx_xvcvspdp, "V2dV4f", "")      // 4 x float -> 2 x double
BUILTIN(__builtin_vsx_xvcvsxdsp, "V4fV2SLLi", "")  // 2 x signed long long -> 4 x float
BUILTIN(__builtin_vsx_xvcvuxdsp, "V4fV2ULLi", "")  // 2 x unsigned long long -> 4 x float
BUILTIN(__builtin_vsx_xvcvdpsp, "V4fV2d", "")      // 2 x double -> 4 x float
// HTM builtins
BUILTIN(__builtin_tbegin, "UiUIi", "")
BUILTIN(__builtin_tend, "UiUIi", "")

View File

@ -2732,20 +2732,284 @@ vec_vctuxs(vector float __a, int __b) {
return __builtin_altivec_vctuxs(__a, __b);
}
/* vec_signed */

/* Forward declaration of vec_sld (defined later in this header) so the
   endian-corrective rotations in vec_signede/vec_signedo can call it. */
static __inline__ vector signed int __ATTRS_o_ai
vec_sld(vector signed int, vector signed int, unsigned const int __c);

/* Convert each 32-bit float element of __a to a signed 32-bit integer;
   lowers to a vector fptosi (C truncation semantics). */
static __inline__ vector signed int __ATTRS_o_ai
vec_signed(vector float __a) {
  return __builtin_convertvector(__a, vector signed int);
}
#ifdef __VSX__
/* Convert each 64-bit double element of __a to a signed 64-bit integer;
   lowers to a vector fptosi (C truncation semantics). */
static __inline__ vector signed long long __ATTRS_o_ai
vec_signed(vector double __a) {
  return __builtin_convertvector(__a, vector signed long long);
}
/* Pack two 2-element double vectors into one 4-element signed-int vector:
   __a supplies elements 0-1 and __b supplies elements 2-3, each element
   converted double -> signed int by the initializer. */
static __inline__ vector signed int __attribute__((__always_inline__))
vec_signed2(vector double __a, vector double __b) {
  vector signed int __packed = {__a[0], __a[1], __b[0], __b[1]};
  return __packed;
}
/* Convert each double element to a signed word via xvcvdpsxws; the 'e'
   variant leaves the results in the even word elements of the result. */
static __inline__ vector signed int __ATTRS_o_ai
vec_signede(vector double __a) {
  vector signed int __conv = __builtin_vsx_xvcvdpsxws(__a);
#ifdef __LITTLE_ENDIAN__
  /* Rotate left by 12 bytes to compensate for the LE element layout. */
  __conv = vec_sld(__conv, __conv, 12);
#endif
  return __conv;
}
/* Convert each double element to a signed word via xvcvdpsxws; the 'o'
   variant leaves the results in the odd word elements of the result. */
static __inline__ vector signed int __ATTRS_o_ai
vec_signedo(vector double __a) {
  vector signed int __conv = __builtin_vsx_xvcvdpsxws(__a);
#ifndef __LITTLE_ENDIAN__
  /* Rotate left by 12 bytes to compensate for the BE element layout. */
  __conv = vec_sld(__conv, __conv, 12);
#endif
  return __conv;
}
#endif
/* vec_unsigned */

/* Forward declaration of vec_sld (defined later in this header) so the
   endian-corrective rotations in vec_unsignede/vec_unsignedo can call it. */
static __inline__ vector unsigned int __ATTRS_o_ai
vec_sld(vector unsigned int, vector unsigned int, unsigned const int __c);

/* Convert each 32-bit float element of __a to an unsigned 32-bit integer;
   lowers to a vector fptoui (C truncation semantics). */
static __inline__ vector unsigned int __ATTRS_o_ai
vec_unsigned(vector float __a) {
  return __builtin_convertvector(__a, vector unsigned int);
}
#ifdef __VSX__
/* Convert each 64-bit double element of __a to an unsigned 64-bit integer;
   lowers to a vector fptoui (C truncation semantics). */
static __inline__ vector unsigned long long __ATTRS_o_ai
vec_unsigned(vector double __a) {
  return __builtin_convertvector(__a, vector unsigned long long);
}
/* Pack two 2-element double vectors into one 4-element unsigned-int vector:
   __a supplies elements 0-1 and __b supplies elements 2-3, each element
   converted double -> unsigned int by the initializer. */
static __inline__ vector unsigned int __attribute__((__always_inline__))
vec_unsigned2(vector double __a, vector double __b) {
  vector unsigned int __packed = {__a[0], __a[1], __b[0], __b[1]};
  return __packed;
}
/* Convert each double element to an unsigned word via xvcvdpuxws; the 'e'
   variant leaves the results in the even word elements of the result. */
static __inline__ vector unsigned int __ATTRS_o_ai
vec_unsignede(vector double __a) {
  vector unsigned int __conv = __builtin_vsx_xvcvdpuxws(__a);
#ifdef __LITTLE_ENDIAN__
  /* Rotate left by 12 bytes to compensate for the LE element layout. */
  __conv = vec_sld(__conv, __conv, 12);
#endif
  return __conv;
}
/* Convert each double element to an unsigned word via xvcvdpuxws; the 'o'
   variant leaves the results in the odd word elements of the result. */
static __inline__ vector unsigned int __ATTRS_o_ai
vec_unsignedo(vector double __a) {
  vector unsigned int __conv = __builtin_vsx_xvcvdpuxws(__a);
#ifndef __LITTLE_ENDIAN__
  /* Rotate left by 12 bytes to compensate for the BE element layout. */
  __conv = vec_sld(__conv, __conv, 12);
#endif
  return __conv;
}
#endif
/* vec_float */

/* Forward declaration of vec_sld (defined later in this header) so the
   endian-corrective rotations in vec_floate/vec_floato can call it. */
static __inline__ vector float __ATTRS_o_ai
vec_sld(vector float, vector float, unsigned const int __c);

/* Convert each signed 32-bit integer element of __a to float;
   lowers to a vector sitofp. */
static __inline__ vector float __ATTRS_o_ai
vec_float(vector signed int __a) {
  return __builtin_convertvector(__a, vector float);
}

/* Convert each unsigned 32-bit integer element of __a to float;
   lowers to a vector uitofp. */
static __inline__ vector float __ATTRS_o_ai
vec_float(vector unsigned int __a) {
  return __builtin_convertvector(__a, vector float);
}
#ifdef __VSX__
/* Pack two 2-element vectors into one 4-element float vector: __a supplies
   elements 0-1 and __b supplies elements 2-3, each element converted to
   float by the initializer (sitofp for signed long long inputs). */
static __inline__ vector float __ATTRS_o_ai
vec_float2(vector signed long long __a, vector signed long long __b) {
  vector float __packed = {__a[0], __a[1], __b[0], __b[1]};
  return __packed;
}

/* As above, with unsigned long long inputs (uitofp per element). */
static __inline__ vector float __ATTRS_o_ai
vec_float2(vector unsigned long long __a, vector unsigned long long __b) {
  vector float __packed = {__a[0], __a[1], __b[0], __b[1]};
  return __packed;
}

/* As above, with double inputs (fptrunc per element). */
static __inline__ vector float __ATTRS_o_ai
vec_float2(vector double __a, vector double __b) {
  vector float __packed = {__a[0], __a[1], __b[0], __b[1]};
  return __packed;
}
/* Convert each signed doubleword element to float via xvcvsxdsp; the 'e'
   variant leaves the results in the even word elements of the result. */
static __inline__ vector float __ATTRS_o_ai
vec_floate(vector signed long long __a) {
  vector float __conv = __builtin_vsx_xvcvsxdsp(__a);
#ifdef __LITTLE_ENDIAN__
  /* Rotate left by 12 bytes to compensate for the LE element layout. */
  __conv = vec_sld(__conv, __conv, 12);
#endif
  return __conv;
}

/* As above, for unsigned doublewords (xvcvuxdsp). */
static __inline__ vector float __ATTRS_o_ai
vec_floate(vector unsigned long long __a) {
  vector float __conv = __builtin_vsx_xvcvuxdsp(__a);
#ifdef __LITTLE_ENDIAN__
  __conv = vec_sld(__conv, __conv, 12);
#endif
  return __conv;
}

/* As above, for doubles (xvcvdpsp). */
static __inline__ vector float __ATTRS_o_ai
vec_floate(vector double __a) {
  vector float __conv = __builtin_vsx_xvcvdpsp(__a);
#ifdef __LITTLE_ENDIAN__
  __conv = vec_sld(__conv, __conv, 12);
#endif
  return __conv;
}
/* Convert each signed doubleword element to float via xvcvsxdsp; the 'o'
   variant leaves the results in the odd word elements of the result. */
static __inline__ vector float __ATTRS_o_ai
vec_floato(vector signed long long __a) {
  vector float __conv = __builtin_vsx_xvcvsxdsp(__a);
#ifndef __LITTLE_ENDIAN__
  /* Rotate left by 12 bytes to compensate for the BE element layout. */
  __conv = vec_sld(__conv, __conv, 12);
#endif
  return __conv;
}

/* As above, for unsigned doublewords (xvcvuxdsp). */
static __inline__ vector float __ATTRS_o_ai
vec_floato(vector unsigned long long __a) {
  vector float __conv = __builtin_vsx_xvcvuxdsp(__a);
#ifndef __LITTLE_ENDIAN__
  __conv = vec_sld(__conv, __conv, 12);
#endif
  return __conv;
}

/* As above, for doubles (xvcvdpsp). */
static __inline__ vector float __ATTRS_o_ai
vec_floato(vector double __a) {
  vector float __conv = __builtin_vsx_xvcvdpsp(__a);
#ifndef __LITTLE_ENDIAN__
  __conv = vec_sld(__conv, __conv, 12);
#endif
  return __conv;
}
#endif
/* vec_double */
#ifdef __VSX__
/* Convert each signed 64-bit integer element of __a to double;
   lowers to a vector sitofp. */
static __inline__ vector double __ATTRS_o_ai
vec_double(vector signed long long __a) {
  return __builtin_convertvector(__a, vector double);
}

/* Convert each unsigned 64-bit integer element of __a to double;
   lowers to a vector uitofp. */
static __inline__ vector double __ATTRS_o_ai
vec_double(vector unsigned long long __a) {
  return __builtin_convertvector(__a, vector double);
}
/* Convert the even word elements of __a to doubles via xvcvsxwdp. */
static __inline__ vector double __ATTRS_o_ai
vec_doublee(vector signed int __a) {
#ifdef __LITTLE_ENDIAN__
  /* Rotate left by 4 bytes first so the even elements line up for the
     conversion under the LE element layout. */
  __a = vec_sld(__a, __a, 4);
#endif
  return __builtin_vsx_xvcvsxwdp(__a);
}

/* As above, for unsigned words (xvcvuxwdp). */
static __inline__ vector double __ATTRS_o_ai
vec_doublee(vector unsigned int __a) {
#ifdef __LITTLE_ENDIAN__
  __a = vec_sld(__a, __a, 4);
#endif
  return __builtin_vsx_xvcvuxwdp(__a);
}

/* As above, for floats (xvcvspdp). */
static __inline__ vector double __ATTRS_o_ai
vec_doublee(vector float __a) {
#ifdef __LITTLE_ENDIAN__
  __a = vec_sld(__a, __a, 4);
#endif
  return __builtin_vsx_xvcvspdp(__a);
}
/* Convert elements 0 and 1 (the "high" half) of __a to doubles;
   each element converts via sitofp in the initializer. */
static __inline__ vector double __ATTRS_o_ai
vec_doubleh(vector signed int __a) {
  vector double __ret = {__a[0], __a[1]};
  return __ret;
}
static __inline__ vector double __ATTRS_o_ai
vec_double(vector unsigned long long __a) {
vec_doubleh(vector unsigned int __a) {
vector double __ret = {__a[0], __a[1]};
return __ret;
}
/* Convert elements 0 and 1 (the "high" half) of __a to doubles;
   each element converts via fpext in the initializer. */
static __inline__ vector double __ATTRS_o_ai
vec_doubleh(vector float __a) {
  vector double __ret = {__a[0], __a[1]};
  return __ret;
}
/* Convert elements 2 and 3 (the "low" half) of __a to doubles
   (sitofp per element). */
static __inline__ vector double __ATTRS_o_ai
vec_doublel(vector signed int __a) {
  return (vector double){__a[2], __a[3]};
}

/* As above, for unsigned words (uitofp per element). */
static __inline__ vector double __ATTRS_o_ai
vec_doublel(vector unsigned int __a) {
  return (vector double){__a[2], __a[3]};
}

/* As above, for floats (fpext per element). */
static __inline__ vector double __ATTRS_o_ai
vec_doublel(vector float __a) {
  return (vector double){__a[2], __a[3]};
}
/* Convert the odd word elements of __a to doubles via xvcvsxwdp. */
static __inline__ vector double __ATTRS_o_ai
vec_doubleo(vector signed int __a) {
#ifndef __LITTLE_ENDIAN__
  /* Rotate left by 4 bytes first so the odd elements line up for the
     conversion under the BE element layout. */
  __a = vec_sld(__a, __a, 4);
#endif
  return __builtin_vsx_xvcvsxwdp(__a);
}

/* As above, for unsigned words (xvcvuxwdp). */
static __inline__ vector double __ATTRS_o_ai
vec_doubleo(vector unsigned int __a) {
#ifndef __LITTLE_ENDIAN__
  __a = vec_sld(__a, __a, 4);
#endif
  return __builtin_vsx_xvcvuxwdp(__a);
}

/* As above, for floats (xvcvspdp). */
static __inline__ vector double __ATTRS_o_ai
vec_doubleo(vector float __a) {
#ifndef __LITTLE_ENDIAN__
  __a = vec_sld(__a, __a, 4);
#endif
  return __builtin_vsx_xvcvspdp(__a);
}
#endif
/* vec_div */

View File

@ -1173,6 +1173,22 @@ void test6() {
// CHECK: @llvm.ppc.altivec.vctuxs
// CHECK-LE: @llvm.ppc.altivec.vctuxs
res_vi = vec_signed(vf);
// CHECK: fptosi <4 x float>
// CHECK-LE: fptosi <4 x float>
res_vui = vec_unsigned(vf);
// CHECK: fptoui <4 x float>
// CHECK-LE: fptoui <4 x float>
res_vf = vec_float(vi);
// CHECK: sitofp <4 x i32>
// CHECK-LE: sitofp <4 x i32>
res_vf = vec_float(vui);
// CHECK: uitofp <4 x i32>
// CHECK-LE: uitofp <4 x i32>
/* vec_div */
res_vsc = vec_div(vsc, vsc);
// CHECK: sdiv <16 x i8>

View File

@ -210,15 +210,6 @@ void test1() {
// CHECK-LE: call <2 x i64> @llvm.ppc.altivec.vcmpgtud(<2 x i64> %{{[0-9]*}}, <2 x i64> %{{[0-9]*}})
// CHECK-PPC: error: call to 'vec_cmplt' is ambiguous
/* vec_double */
res_vd = vec_double(vsll);
// CHECK: sitofp i64 {{.+}} to double
// CHECK-BE: sitofp i64 {{.+}} to double
res_vd = vec_double(vull);
// CHECK: uitofp i64 {{.+}} to double
// CHECK-BE: uitofp i64 {{.+}} to double
/* vec_eqv */
res_vsc = vec_eqv(vsc, vsc);
// CHECK: [[T1:%.+]] = bitcast <16 x i8> {{.+}} to <4 x i32>

View File

@ -1082,6 +1082,380 @@ void test1() {
// CHECK-LE: uitofp <2 x i64> %{{.*}} to <2 x double>
// CHECK-LE: fmul <2 x double>
res_vsll = vec_signed(vd);
// CHECK: fptosi <2 x double>
// CHECK-LE: fptosi <2 x double>
res_vsi = vec_signed2(vd, vd);
// CHECK: extractelement <2 x double>
// CHECK: fptosi double
// CHECK: insertelement <4 x i32>
// CHECK: extractelement <2 x double>
// CHECK: fptosi double
// CHECK: insertelement <4 x i32>
// CHECK: extractelement <2 x double>
// CHECK: fptosi double
// CHECK: insertelement <4 x i32>
// CHECK: extractelement <2 x double>
// CHECK: fptosi double
// CHECK: insertelement <4 x i32>
// CHECK-LE: extractelement <2 x double>
// CHECK-LE: fptosi double
// CHECK-LE: insertelement <4 x i32>
// CHECK-LE: extractelement <2 x double>
// CHECK-LE: fptosi double
// CHECK-LE: insertelement <4 x i32>
// CHECK-LE: extractelement <2 x double>
// CHECK-LE: fptosi double
// CHECK-LE: insertelement <4 x i32>
// CHECK-LE: extractelement <2 x double>
// CHECK-LE: fptosi double
// CHECK-LE: insertelement <4 x i32>
res_vsi = vec_signede(vd);
// CHECK: @llvm.ppc.vsx.xvcvdpsxws(<2 x double>
// CHECK-LE: @llvm.ppc.vsx.xvcvdpsxws(<2 x double>
// CHECK-LE: sub nsw i32 16
// CHECK-LE: sub nsw i32 17
// CHECK-LE: sub nsw i32 18
// CHECK-LE: sub nsw i32 31
// CHECK-LE: @llvm.ppc.altivec.vperm
res_vsi = vec_signedo(vd);
// CHECK: @llvm.ppc.vsx.xvcvdpsxws(<2 x double>
// CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 1
// CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 2
// CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 3
// CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 15
// CHECK: @llvm.ppc.altivec.vperm
// CHECK-LE: @llvm.ppc.vsx.xvcvdpsxws(<2 x double>
res_vull = vec_unsigned(vd);
// CHECK: fptoui <2 x double>
// CHECK-LE: fptoui <2 x double>
res_vui = vec_unsigned2(vd, vd);
// CHECK: extractelement <2 x double>
// CHECK: fptoui double
// CHECK: insertelement <4 x i32>
// CHECK: extractelement <2 x double>
// CHECK: fptoui double
// CHECK: insertelement <4 x i32>
// CHECK: extractelement <2 x double>
// CHECK: fptoui double
// CHECK: insertelement <4 x i32>
// CHECK: extractelement <2 x double>
// CHECK: fptoui double
// CHECK: insertelement <4 x i32>
// CHECK-LE: extractelement <2 x double>
// CHECK-LE: fptoui double
// CHECK-LE: insertelement <4 x i32>
// CHECK-LE: extractelement <2 x double>
// CHECK-LE: fptoui double
// CHECK-LE: insertelement <4 x i32>
// CHECK-LE: extractelement <2 x double>
// CHECK-LE: fptoui double
// CHECK-LE: insertelement <4 x i32>
// CHECK-LE: extractelement <2 x double>
// CHECK-LE: fptoui double
// CHECK-LE: insertelement <4 x i32>
res_vui = vec_unsignede(vd);
// CHECK: @llvm.ppc.vsx.xvcvdpuxws(<2 x double>
// CHECK-LE: @llvm.ppc.vsx.xvcvdpuxws(<2 x double>
// CHECK-LE: sub nsw i32 16
// CHECK-LE: sub nsw i32 17
// CHECK-LE: sub nsw i32 18
// CHECK-LE: sub nsw i32 31
// CHECK-LE: @llvm.ppc.altivec.vperm
res_vui = vec_unsignedo(vd);
// CHECK: @llvm.ppc.vsx.xvcvdpuxws(<2 x double>
// CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 1
// CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 2
// CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 3
// CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 15
// CHECK: @llvm.ppc.altivec.vperm
// CHECK-LE: @llvm.ppc.vsx.xvcvdpuxws(<2 x double>
res_vf = vec_float2(vsll, vsll);
// CHECK: extractelement <2 x i64>
// CHECK: sitofp i64
// CHECK: insertelement <4 x float>
// CHECK: extractelement <2 x i64>
// CHECK: sitofp i64
// CHECK: insertelement <4 x float>
// CHECK: extractelement <2 x i64>
// CHECK: sitofp i64
// CHECK: insertelement <4 x float>
// CHECK: extractelement <2 x i64>
// CHECK: sitofp i64
// CHECK: insertelement <4 x float>
// CHECK-LE: extractelement <2 x i64>
// CHECK-LE: sitofp i64
// CHECK-LE: insertelement <4 x float>
// CHECK-LE: extractelement <2 x i64>
// CHECK-LE: sitofp i64
// CHECK-LE: insertelement <4 x float>
// CHECK-LE: extractelement <2 x i64>
// CHECK-LE: sitofp i64
// CHECK-LE: insertelement <4 x float>
// CHECK-LE: extractelement <2 x i64>
// CHECK-LE: sitofp i64
// CHECK-LE: insertelement <4 x float>
res_vf = vec_float2(vull, vull);
// CHECK: extractelement <2 x i64>
// CHECK: uitofp i64
// CHECK: insertelement <4 x float>
// CHECK: extractelement <2 x i64>
// CHECK: uitofp i64
// CHECK: insertelement <4 x float>
// CHECK: extractelement <2 x i64>
// CHECK: uitofp i64
// CHECK: insertelement <4 x float>
// CHECK: extractelement <2 x i64>
// CHECK: uitofp i64
// CHECK: insertelement <4 x float>
// CHECK-LE: extractelement <2 x i64>
// CHECK-LE: uitofp i64
// CHECK-LE: insertelement <4 x float>
// CHECK-LE: extractelement <2 x i64>
// CHECK-LE: uitofp i64
// CHECK-LE: insertelement <4 x float>
// CHECK-LE: extractelement <2 x i64>
// CHECK-LE: uitofp i64
// CHECK-LE: insertelement <4 x float>
// CHECK-LE: extractelement <2 x i64>
// CHECK-LE: uitofp i64
// CHECK-LE: insertelement <4 x float>
res_vf = vec_float2(vd, vd);
// CHECK: extractelement <2 x double>
// CHECK: fptrunc double
// CHECK: insertelement <4 x float>
// CHECK: extractelement <2 x double>
// CHECK: fptrunc double
// CHECK: insertelement <4 x float>
// CHECK: extractelement <2 x double>
// CHECK: fptrunc double
// CHECK: insertelement <4 x float>
// CHECK: extractelement <2 x double>
// CHECK: fptrunc double
// CHECK: insertelement <4 x float>
// CHECK-LE: extractelement <2 x double>
// CHECK-LE: fptrunc double
// CHECK-LE: insertelement <4 x float>
// CHECK-LE: extractelement <2 x double>
// CHECK-LE: fptrunc double
// CHECK-LE: insertelement <4 x float>
// CHECK-LE: extractelement <2 x double>
// CHECK-LE: fptrunc double
// CHECK-LE: insertelement <4 x float>
// CHECK-LE: extractelement <2 x double>
// CHECK-LE: fptrunc double
// CHECK-LE: insertelement <4 x float>
res_vf = vec_floate(vsll);
// CHECK: @llvm.ppc.vsx.xvcvsxdsp
// CHECK-LE: @llvm.ppc.vsx.xvcvsxdsp
// CHECK-LE: sub nsw i32 16
// CHECK-LE: sub nsw i32 17
// CHECK-LE: sub nsw i32 18
// CHECK-LE: sub nsw i32 31
// CHECK-LE: @llvm.ppc.altivec.vperm
res_vf = vec_floate(vull);
// CHECK: @llvm.ppc.vsx.xvcvuxdsp
// CHECK-LE: @llvm.ppc.vsx.xvcvuxdsp
// CHECK-LE: sub nsw i32 16
// CHECK-LE: sub nsw i32 17
// CHECK-LE: sub nsw i32 18
// CHECK-LE: sub nsw i32 31
// CHECK-LE: @llvm.ppc.altivec.vperm
res_vf = vec_floate(vd);
// CHECK: @llvm.ppc.vsx.xvcvdpsp
// CHECK-LE: @llvm.ppc.vsx.xvcvdpsp
// CHECK-LE: sub nsw i32 16
// CHECK-LE: sub nsw i32 17
// CHECK-LE: sub nsw i32 18
// CHECK-LE: sub nsw i32 31
// CHECK-LE: @llvm.ppc.altivec.vperm
res_vf = vec_floato(vsll);
// CHECK: @llvm.ppc.vsx.xvcvsxdsp
// CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 1
// CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 2
// CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 3
// CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 15
// CHECK: @llvm.ppc.altivec.vperm
// CHECK-LE: @llvm.ppc.vsx.xvcvsxdsp
res_vf = vec_floato(vull);
// CHECK: @llvm.ppc.vsx.xvcvuxdsp
// CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 1
// CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 2
// CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 3
// CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 15
// CHECK: @llvm.ppc.altivec.vperm
// CHECK-LE: @llvm.ppc.vsx.xvcvuxdsp
res_vf = vec_floato(vd);
// CHECK: @llvm.ppc.vsx.xvcvdpsp
// CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 1
// CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 2
// CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 3
// CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 15
// CHECK: @llvm.ppc.altivec.vperm
// CHECK-LE: @llvm.ppc.vsx.xvcvdpsp
res_vd = vec_double(vsll);
// CHECK: sitofp <2 x i64>
// CHECK-LE: sitofp <2 x i64>
res_vd = vec_double(vull);
// CHECK: uitofp <2 x i64>
// CHECK-LE: uitofp <2 x i64>
res_vd = vec_doublee(vsi);
// CHECK: @llvm.ppc.vsx.xvcvsxwdp(<4 x i32
// CHECK-LE: sub nsw i32 16
// CHECK-LE: sub nsw i32 17
// CHECK-LE: sub nsw i32 18
// CHECK-LE: sub nsw i32 31
// CHECK-LE: @llvm.ppc.altivec.vperm
// CHECK-LE: @llvm.ppc.vsx.xvcvsxwdp(<4 x i32
res_vd = vec_doublee(vui);
// CHECK: @llvm.ppc.vsx.xvcvuxwdp(<4 x i32
// CHECK-LE: sub nsw i32 16
// CHECK-LE: sub nsw i32 17
// CHECK-LE: sub nsw i32 18
// CHECK-LE: sub nsw i32 31
// CHECK-LE: @llvm.ppc.altivec.vperm
// CHECK-LE: @llvm.ppc.vsx.xvcvuxwdp(<4 x i32
res_vd = vec_doublee(vf);
// CHECK: @llvm.ppc.vsx.xvcvspdp(<4 x float
// CHECK-LE: sub nsw i32 16
// CHECK-LE: sub nsw i32 17
// CHECK-LE: sub nsw i32 18
// CHECK-LE: sub nsw i32 31
// CHECK-LE: @llvm.ppc.altivec.vperm
// CHECK-LE: @llvm.ppc.vsx.xvcvspdp(<4 x float
res_vd = vec_doubleh(vsi);
// CHECK: extractelement <4 x i32>
// CHECK: sitofp i32
// CHECK: insertelement <2 x double>
// CHECK: extractelement <4 x i32>
// CHECK: sitofp i32
// CHECK: insertelement <2 x double>
// CHECK-LE: extractelement <4 x i32>
// CHECK-LE: sitofp i32
// CHECK-LE: insertelement <2 x double>
// CHECK-LE: extractelement <4 x i32>
// CHECK-LE: sitofp i32
// CHECK-LE: insertelement <2 x double>
res_vd = vec_doubleh(vui);
// CHECK: extractelement <4 x i32>
// CHECK: uitofp i32
// CHECK: insertelement <2 x double>
// CHECK: extractelement <4 x i32>
// CHECK: uitofp i32
// CHECK: insertelement <2 x double>
// CHECK-LE: extractelement <4 x i32>
// CHECK-LE: uitofp i32
// CHECK-LE: insertelement <2 x double>
// CHECK-LE: extractelement <4 x i32>
// CHECK-LE: uitofp i32
// CHECK-LE: insertelement <2 x double>
res_vd = vec_doubleh(vf);
// CHECK: extractelement <4 x float>
// CHECK: fpext float
// CHECK: insertelement <2 x double>
// CHECK: extractelement <4 x float>
// CHECK: fpext float
// CHECK: insertelement <2 x double>
// CHECK-LE: extractelement <4 x float>
// CHECK-LE: fpext float
// CHECK-LE: insertelement <2 x double>
// CHECK-LE: extractelement <4 x float>
// CHECK-LE: fpext float
// CHECK-LE: insertelement <2 x double>
res_vd = vec_doublel(vsi);
// CHECK: extractelement <4 x i32>
// CHECK: sitofp i32
// CHECK: insertelement <2 x double>
// CHECK: extractelement <4 x i32>
// CHECK: sitofp i32
// CHECK: insertelement <2 x double>
// CHECK-LE: extractelement <4 x i32>
// CHECK-LE: sitofp i32
// CHECK-LE: insertelement <2 x double>
// CHECK-LE: extractelement <4 x i32>
// CHECK-LE: sitofp i32
// CHECK-LE: insertelement <2 x double>
res_vd = vec_doublel(vui);
// CHECK: extractelement <4 x i32>
// CHECK: uitofp i32
// CHECK: insertelement <2 x double>
// CHECK: extractelement <4 x i32>
// CHECK: uitofp i32
// CHECK: insertelement <2 x double>
// CHECK-LE: extractelement <4 x i32>
// CHECK-LE: uitofp i32
// CHECK-LE: insertelement <2 x double>
// CHECK-LE: extractelement <4 x i32>
// CHECK-LE: uitofp i32
// CHECK-LE: insertelement <2 x double>
res_vd = vec_doublel(vf);
// CHECK: extractelement <4 x float>
// CHECK: fpext float
// CHECK: insertelement <2 x double>
// CHECK: extractelement <4 x float>
// CHECK: fpext float
// CHECK: insertelement <2 x double>
// CHECK-LE: extractelement <4 x float>
// CHECK-LE: fpext float
// CHECK-LE: insertelement <2 x double>
// CHECK-LE: extractelement <4 x float>
// CHECK-LE: fpext float
// CHECK-LE: insertelement <2 x double>
res_vd = vec_doubleo(vsi);
// CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 1
// CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 2
// CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 3
// CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 15
// CHECK: @llvm.ppc.altivec.vperm
// CHECK: @llvm.ppc.vsx.xvcvsxwdp(<4 x i32>
// CHECK-LE: @llvm.ppc.vsx.xvcvsxwdp(<4 x i32>
res_vd = vec_doubleo(vui);
// CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 1
// CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 2
// CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 3
// CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 15
// CHECK: @llvm.ppc.altivec.vperm
// CHECK: @llvm.ppc.vsx.xvcvuxwdp(<4 x i32>
// CHECK-LE: @llvm.ppc.vsx.xvcvuxwdp(<4 x i32>
res_vd = vec_doubleo(vf);
// CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 1
// CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 2
// CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 3
// CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 15
// CHECK: @llvm.ppc.altivec.vperm
// CHECK: @llvm.ppc.vsx.xvcvspdp(<4 x float>
// CHECK-LE: @llvm.ppc.vsx.xvcvspdp(<4 x float>
res_vbll = vec_reve(vbll);
// CHECK: shufflevector <2 x i64> %{{[0-9]+}}, <2 x i64> %{{[0-9]+}}, <2 x i32> <i32 1, i32 0>
// CHECK-LE: shufflevector <2 x i64> %{{[0-9]+}}, <2 x i64> %{{[0-9]+}}, <2 x i32> <i32 1, i32 0>