ARM: Improve codegen for vget_low_* and vget_high_* intrinsics.
These intrinsics use the __builtin_shufflevector() builtin to extract the
low and high half, respectively, of a 128-bit NEON vector. Currently,
they're defined to use bitcasts to simplify the emitter, so we get code
like:
uint16x4_t vget_low_u16(uint16x8_t __a) {
  return (uint16x4_t) __builtin_shufflevector((int64x2_t) __a,
                                              (int64x2_t) __a,
                                              0);
}
While this works, it results in those bitcasts going all the way through
to the IR, resulting in code like:
%1 = bitcast <8 x i16> %in to <2 x i64>
%2 = shufflevector <2 x i64> %1, <2 x i64> undef, <1 x i32> zeroinitializer
%3 = bitcast <1 x i64> %2 to <4 x i16>
We can instead easily perform the operation directly on the input vector
like:
uint16x4_t vget_low_u16(uint16x8_t __a) {
  return __builtin_shufflevector(__a, __a, 0, 1, 2, 3);
}
Not only is that much easier to read on its own, it also results in
cleaner IR like:
%1 = shufflevector <8 x i16> %in, <8 x i16> undef,
                   <4 x i32> <i32 0, i32 1, i32 2, i32 3>
This is both easier to read and easier for the back end to reason
about, since the operation is no longer obscured by bitcasts.
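The vget_high_* intrinsics follow the same pattern, selecting the upper
lanes instead. A minimal sketch for the u16 case (the lane indices 4-7
are assumed from the 8 x u16 layout, matching the shuffles checked in
the test below):
uint16x4_t vget_high_u16(uint16x8_t __a) {
  return __builtin_shufflevector(__a, __a, 4, 5, 6, 7);
}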
rdar://13894163
llvm-svn: 181865

// RUN: %clang_cc1 -triple thumbv7-apple-darwin \
// RUN: -target-abi apcs-gnu \
// RUN: -target-cpu cortex-a8 \
// RUN: -mfloat-abi soft \
// RUN: -target-feature +soft-float-abi \
// RUN: -ffreestanding \
// RUN: -emit-llvm -w -o - %s | opt -S -mem2reg | FileCheck %s
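//
// The pipeline above emits LLVM IR, runs mem2reg so the shuffles are not
// hidden behind parameter allocas, and then checks the IR against the
// CHECK lines below with FileCheck.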
#include <arm_neon.h>

// Check that the vget_low/vget_high intrinsics generate a single shuffle
// without any bitcasting.
int8x8_t low_s8(int8x16_t a) {
// CHECK: shufflevector <16 x i8> %a, <16 x i8> %a, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
  return vget_low_s8(a);
}

uint8x8_t low_u8 (uint8x16_t a) {
// CHECK: shufflevector <16 x i8> %a, <16 x i8> %a, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
  return vget_low_u8(a);
}

int16x4_t low_s16( int16x8_t a) {
// CHECK: shufflevector <8 x i16> %a, <8 x i16> %a, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
  return vget_low_s16(a);
}

uint16x4_t low_u16(uint16x8_t a) {
// CHECK: shufflevector <8 x i16> %a, <8 x i16> %a, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
  return vget_low_u16(a);
}

int32x2_t low_s32( int32x4_t a) {
// CHECK: shufflevector <4 x i32> %a, <4 x i32> %a, <2 x i32> <i32 0, i32 1>
  return vget_low_s32(a);
}

uint32x2_t low_u32(uint32x4_t a) {
// CHECK: shufflevector <4 x i32> %a, <4 x i32> %a, <2 x i32> <i32 0, i32 1>
  return vget_low_u32(a);
}

int64x1_t low_s64( int64x2_t a) {
// CHECK: shufflevector <2 x i64> %a, <2 x i64> %a, <1 x i32> zeroinitializer
  return vget_low_s64(a);
}

uint64x1_t low_u64(uint64x2_t a) {
// CHECK: shufflevector <2 x i64> %a, <2 x i64> %a, <1 x i32> zeroinitializer
  return vget_low_u64(a);
}

poly8x8_t low_p8 (poly8x16_t a) {
// CHECK: shufflevector <16 x i8> %a, <16 x i8> %a, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
  return vget_low_p8(a);
}

poly16x4_t low_p16(poly16x8_t a) {
// CHECK: shufflevector <8 x i16> %a, <8 x i16> %a, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
  return vget_low_p16(a);
}

float32x2_t low_f32(float32x4_t a) {
// CHECK: shufflevector <4 x float> %a, <4 x float> %a, <2 x i32> <i32 0, i32 1>
  return vget_low_f32(a);
}

int8x8_t high_s8(int8x16_t a) {
// CHECK: shufflevector <16 x i8> %a, <16 x i8> %a, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
  return vget_high_s8(a);
}

uint8x8_t high_u8 (uint8x16_t a) {
// CHECK: shufflevector <16 x i8> %a, <16 x i8> %a, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
  return vget_high_u8(a);
}

int16x4_t high_s16( int16x8_t a) {
// CHECK: shufflevector <8 x i16> %a, <8 x i16> %a, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
  return vget_high_s16(a);
}

uint16x4_t high_u16(uint16x8_t a) {
// CHECK: shufflevector <8 x i16> %a, <8 x i16> %a, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
  return vget_high_u16(a);
}

int32x2_t high_s32( int32x4_t a) {
// CHECK: shufflevector <4 x i32> %a, <4 x i32> %a, <2 x i32> <i32 2, i32 3>
  return vget_high_s32(a);
}

uint32x2_t high_u32(uint32x4_t a) {
// CHECK: shufflevector <4 x i32> %a, <4 x i32> %a, <2 x i32> <i32 2, i32 3>
  return vget_high_u32(a);
}

int64x1_t high_s64( int64x2_t a) {
// CHECK: shufflevector <2 x i64> %a, <2 x i64> %a, <1 x i32> <i32 1>
  return vget_high_s64(a);
}

uint64x1_t high_u64(uint64x2_t a) {
// CHECK: shufflevector <2 x i64> %a, <2 x i64> %a, <1 x i32> <i32 1>
  return vget_high_u64(a);
}

poly8x8_t high_p8 (poly8x16_t a) {
// CHECK: shufflevector <16 x i8> %a, <16 x i8> %a, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
  return vget_high_p8(a);
}

poly16x4_t high_p16(poly16x8_t a) {
// CHECK: shufflevector <8 x i16> %a, <8 x i16> %a, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
  return vget_high_p16(a);
}

float32x2_t high_f32(float32x4_t a) {
// CHECK: shufflevector <4 x float> %a, <4 x float> %a, <2 x i32> <i32 2, i32 3>
  return vget_high_f32(a);
}
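
// A hedged usage sketch (illustrative only, not matched by any CHECK
// line): splitting a q-register value into halves and recombining them
// with vadd_u16, the standard NEON 64-bit vector add.
uint16x4_t fold_halves(uint16x8_t v) {
  return vadd_u16(vget_low_u16(v), vget_high_u16(v));
}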