[X86, AVX] Replace vinsertf128 intrinsics with generic shuffles.

We want to replace as much custom x86 shuffling via intrinsics
as possible because pushing the code down the generic shuffle
optimization path allows for better codegen and less complexity
in LLVM.
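
For example (a sketch, not part of this patch), a high-half insert now
reaches the backend as an ordinary shufflevector rather than an opaque
llvm.x86.avx.vinsertf128.* call:

#include <immintrin.h>

// Compiled with AVX enabled, the return expands via
// __builtin_shufflevector to a generic 8 x float shuffle with
// mask <0, 1, 2, 3, 8, 9, 10, 11> (see the tests below).
__m256 insert_high(__m256 a, __m128 b) {
  return _mm256_insertf128_ps(a, b, 1);
}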

This is the sibling patch for the LLVM half of this change:
http://reviews.llvm.org/D8086

Differential Revision: http://reviews.llvm.org/D8088

llvm-svn: 231792
Sanjay Patel 2015-03-10 15:19:26 +00:00
parent 4683395808
commit 7f6aa52e93
5 changed files with 75 additions and 25 deletions

clang/include/clang/Basic/BuiltinsX86.def

@@ -450,9 +450,6 @@ BUILTIN(__builtin_ia32_cvttps2dq256, "V8iV8f", "")
 BUILTIN(__builtin_ia32_vperm2f128_pd256, "V4dV4dV4dIc", "")
 BUILTIN(__builtin_ia32_vperm2f128_ps256, "V8fV8fV8fIc", "")
 BUILTIN(__builtin_ia32_vperm2f128_si256, "V8iV8iV8iIc", "")
-BUILTIN(__builtin_ia32_vinsertf128_pd256, "V4dV4dV2dIc", "")
-BUILTIN(__builtin_ia32_vinsertf128_ps256, "V8fV8fV4fIc", "")
-BUILTIN(__builtin_ia32_vinsertf128_si256, "V8iV8iV4iIc", "")
 BUILTIN(__builtin_ia32_sqrtpd256, "V4dV4d", "")
 BUILTIN(__builtin_ia32_sqrtps256, "V8fV8f", "")
 BUILTIN(__builtin_ia32_rsqrtps256, "V8fV8f", "")

clang/lib/Headers/avxintrin.h

@@ -472,22 +472,6 @@ _mm256_extract_epi64(__m256i __a, const int __imm)
 }
 #endif
 
-/* Vector insert */
-#define _mm256_insertf128_pd(V1, V2, O) __extension__ ({ \
-  __m256d __V1 = (V1); \
-  __m128d __V2 = (V2); \
-  (__m256d)__builtin_ia32_vinsertf128_pd256((__v4df)__V1, (__v2df)__V2, (O)); })
-
-#define _mm256_insertf128_ps(V1, V2, O) __extension__ ({ \
-  __m256 __V1 = (V1); \
-  __m128 __V2 = (V2); \
-  (__m256)__builtin_ia32_vinsertf128_ps256((__v8sf)__V1, (__v4sf)__V2, (O)); })
-
-#define _mm256_insertf128_si256(V1, V2, O) __extension__ ({ \
-  __m256i __V1 = (V1); \
-  __m128i __V2 = (V2); \
-  (__m256i)__builtin_ia32_vinsertf128_si256((__v8si)__V1, (__v4si)__V2, (O)); })
-
 static __inline __m256i __attribute__((__always_inline__, __nodebug__))
 _mm256_insert_epi32(__m256i __a, int __b, int const __imm)
 {
@@ -1166,6 +1150,42 @@ _mm256_castsi128_si256(__m128i __a)
   return __builtin_shufflevector(__a, __a, 0, 1, -1, -1);
 }
 
+/*
+   Vector insert.
+   We use macros rather than inlines because we only want to accept
+   invocations where the immediate M is a constant expression.
+*/
+#define _mm256_insertf128_ps(V1, V2, M) __extension__ ({ \
+  (__m256)__builtin_shufflevector( \
+    (__v8sf)(V1), \
+    (__v8sf)_mm256_castps128_ps256((__m128)(V2)), \
+    (((M) & 1) ? 0 : 8), \
+    (((M) & 1) ? 1 : 9), \
+    (((M) & 1) ? 2 : 10), \
+    (((M) & 1) ? 3 : 11), \
+    (((M) & 1) ? 8 : 4), \
+    (((M) & 1) ? 9 : 5), \
+    (((M) & 1) ? 10 : 6), \
+    (((M) & 1) ? 11 : 7) );})
+
+#define _mm256_insertf128_pd(V1, V2, M) __extension__ ({ \
+  (__m256d)__builtin_shufflevector( \
+    (__v4df)(V1), \
+    (__v4df)_mm256_castpd128_pd256((__m128d)(V2)), \
+    (((M) & 1) ? 0 : 4), \
+    (((M) & 1) ? 1 : 5), \
+    (((M) & 1) ? 4 : 2), \
+    (((M) & 1) ? 5 : 3) );})
+
+#define _mm256_insertf128_si256(V1, V2, M) __extension__ ({ \
+  (__m256i)__builtin_shufflevector( \
+    (__v4di)(V1), \
+    (__v4di)_mm256_castsi128_si256((__m128i)(V2)), \
+    (((M) & 1) ? 0 : 4), \
+    (((M) & 1) ? 1 : 5), \
+    (((M) & 1) ? 4 : 2), \
+    (((M) & 1) ? 5 : 3) );})
+
 /* SIMD load ops (unaligned) */
 static __inline __m256 __attribute__((__always_inline__, __nodebug__))
 _mm256_loadu2_m128(float const *__addr_hi, float const *__addr_lo)
clang/lib/Sema/SemaChecking.cpp

@@ -882,9 +882,6 @@ bool Sema::CheckX86BuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall) {
   case X86::BI__builtin_ia32_vextractf128_ps256:
   case X86::BI__builtin_ia32_vextractf128_si256:
   case X86::BI__builtin_ia32_extract128i256: i = 1, l = 0, u = 1; break;
-  case X86::BI__builtin_ia32_vinsertf128_pd256:
-  case X86::BI__builtin_ia32_vinsertf128_ps256:
-  case X86::BI__builtin_ia32_vinsertf128_si256:
   case X86::BI__builtin_ia32_insert128i256: i = 2, l = 0; u = 1; break;
   case X86::BI__builtin_ia32_sha1rnds4: i = 2, l = 0; u = 3; break;
   case X86::BI__builtin_ia32_vpermil2pd:
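
With the builtins gone, Sema no longer needs to range-check their
immediate operands (the removed entries required operand 2 to be a
constant in [0, 1]). The macros still reject non-constant immediates,
because __builtin_shufflevector requires constant indices, and
out-of-range constants are simply reduced by the (M) & 1 masking.
A sketch of the constraint:

#include <immintrin.h>

__m256 insert(__m256 a, __m128 b, int m) {
  /* Error if uncommented: m is not a constant expression, so the
     __builtin_shufflevector indices would not be constants. */
  /* return _mm256_insertf128_ps(a, b, m); */
  return _mm256_insertf128_ps(a, b, 1); /* OK: constant immediate */
}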

clang/test/CodeGen/avx-shuffle-builtins.c

@@ -97,3 +97,42 @@ test_mm256_broadcast_ss(float const *__a) {
   // CHECK: insertelement <8 x float> {{.*}}, i32 7
   return _mm256_broadcast_ss(__a);
 }
+
+// Make sure we have the correct mask for each insertf128 case.
+
+__m256 test_mm256_insertf128_ps_0(__m256 a, __m128 b) {
+  // CHECK-LABEL: @test_mm256_insertf128_ps_0
+  // CHECK: shufflevector{{.*}}<i32 8, i32 9, i32 10, i32 11, i32 4, i32 5, i32 6, i32 7>
+  return _mm256_insertf128_ps(a, b, 0);
+}
+
+__m256d test_mm256_insertf128_pd_0(__m256d a, __m128d b) {
+  // CHECK-LABEL: @test_mm256_insertf128_pd_0
+  // CHECK: shufflevector{{.*}}<i32 4, i32 5, i32 2, i32 3>
+  return _mm256_insertf128_pd(a, b, 0);
+}
+
+__m256i test_mm256_insertf128_si256_0(__m256i a, __m128i b) {
+  // CHECK-LABEL: @test_mm256_insertf128_si256_0
+  // CHECK: shufflevector{{.*}}<i32 4, i32 5, i32 2, i32 3>
+  return _mm256_insertf128_si256(a, b, 0);
+}
+
+__m256 test_mm256_insertf128_ps_1(__m256 a, __m128 b) {
+  // CHECK-LABEL: @test_mm256_insertf128_ps_1
+  // CHECK: shufflevector{{.*}}<i32 0, i32 1, i32 2, i32 3, i32 8, i32 9, i32 10, i32 11>
+  return _mm256_insertf128_ps(a, b, 1);
+}
+
+__m256d test_mm256_insertf128_pd_1(__m256d a, __m128d b) {
+  // CHECK-LABEL: @test_mm256_insertf128_pd_1
+  // CHECK: shufflevector{{.*}}<i32 0, i32 1, i32 4, i32 5>
+  return _mm256_insertf128_pd(a, b, 1);
+}
+
+__m256i test_mm256_insertf128_si256_1(__m256i a, __m128i b) {
+  // CHECK-LABEL: @test_mm256_insertf128_si256_1
+  // CHECK: shufflevector{{.*}}<i32 0, i32 1, i32 4, i32 5>
+  return _mm256_insertf128_si256(a, b, 1);
+}
+
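
For reference, the masks above are checked by piping clang's IR output
through FileCheck; a typical RUN line for this kind of test (the exact
flags in the committed file are an assumption here) looks like:

// RUN: %clang_cc1 %s -O3 -triple=x86_64-apple-darwin -target-feature +avx -emit-llvm -o - | FileCheck %s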

clang/test/CodeGen/builtins-x86.c

@@ -419,9 +419,6 @@ void f0() {
   tmp_V4d = __builtin_ia32_vperm2f128_pd256(tmp_V4d, tmp_V4d, 0x7);
   tmp_V8f = __builtin_ia32_vperm2f128_ps256(tmp_V8f, tmp_V8f, 0x7);
   tmp_V8i = __builtin_ia32_vperm2f128_si256(tmp_V8i, tmp_V8i, 0x7);
-  tmp_V4d = __builtin_ia32_vinsertf128_pd256(tmp_V4d, tmp_V2d, 0x1);
-  tmp_V8f = __builtin_ia32_vinsertf128_ps256(tmp_V8f, tmp_V4f, 0x1);
-  tmp_V8i = __builtin_ia32_vinsertf128_si256(tmp_V8i, tmp_V4i, 0x1);
   tmp_V4d = __builtin_ia32_sqrtpd256(tmp_V4d);
   tmp_V8f = __builtin_ia32_sqrtps256(tmp_V8f);
   tmp_V8f = __builtin_ia32_rsqrtps256(tmp_V8f);