[X86] Use __builtin_convertvector to replace some of the avx512 truncate builtins.

As long as the destination type is a 256-bit or 128-bit vector with the same number of elements, we can use __builtin_convertvector to directly generate a trunc IR instruction, which will be handled natively by the backend.

Differential Revision: https://reviews.llvm.org/D46742

llvm-svn: 332266
Craig Topper 2018-05-14 17:50:40 +00:00
parent 64a2ea41ea
commit 25de41cfbc
9 changed files with 96 additions and 98 deletions
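
As a minimal standalone sketch of the narrowing pattern the headers now use (not part of this commit; the typedefs v8i64/v8i32 and the function name are local stand-ins for the headers' internal __v8di/__v8si types):

typedef long long v8i64 __attribute__((vector_size(64)));  /* 8 x i64 */
typedef int       v8i32 __attribute__((vector_size(32)));  /* 8 x i32 */

static v8i32 truncate_epi64_to_epi32(v8i64 a) {
  /* Clang lowers this call to "trunc <8 x i64> %a to <8 x i32>", which the
     X86 backend can select directly (e.g. VPMOVQD when AVX-512F is available). */
  return __builtin_convertvector(a, v8i32);
}

The masked intrinsic variants in the diffs below layer a __builtin_ia32_select* builtin on top of this truncation so the mask chooses between the truncated result and the pass-through or zero vector.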

include/clang/Basic/BuiltinsX86.def

@@ -1355,7 +1355,6 @@ TARGET_BUILTIN(__builtin_ia32_vpshrdw512_mask, "V32sV32sV32sIiV32sUi", "nc", "av
TARGET_BUILTIN(__builtin_ia32_pmovswb512_mask, "V32cV32sV32cUi", "nc", "avx512bw")
TARGET_BUILTIN(__builtin_ia32_pmovuswb512_mask, "V32cV32sV32cUi", "nc", "avx512bw")
TARGET_BUILTIN(__builtin_ia32_pmovwb512_mask, "V32cV32sV32cUi", "nc", "avx512bw")
TARGET_BUILTIN(__builtin_ia32_cvtpd2qq128_mask, "V2LLiV2dV2LLiUc", "nc", "avx512vl,avx512dq")
TARGET_BUILTIN(__builtin_ia32_cvtpd2qq256_mask, "V4LLiV4dV4LLiUc", "nc", "avx512vl,avx512dq")
TARGET_BUILTIN(__builtin_ia32_cvtpd2uqq128_mask, "V2LLiV2dV2LLiUc", "nc", "avx512vl,avx512dq")
@@ -1397,7 +1396,6 @@ TARGET_BUILTIN(__builtin_ia32_pmovswb256_mask, "V16cV16sV16cUs", "nc", "avx512vl
TARGET_BUILTIN(__builtin_ia32_pmovuswb128_mask, "V16cV8sV16cUc", "nc", "avx512vl,avx512bw")
TARGET_BUILTIN(__builtin_ia32_pmovuswb256_mask, "V16cV16sV16cUs", "nc", "avx512vl,avx512bw")
TARGET_BUILTIN(__builtin_ia32_pmovwb128_mask, "V16cV8sV16cUc", "nc", "avx512vl,avx512bw")
TARGET_BUILTIN(__builtin_ia32_pmovwb256_mask, "V16cV16sV16cUs", "nc", "avx512vl,avx512bw")
TARGET_BUILTIN(__builtin_ia32_cvtpd2qq512_mask, "V8LLiV8dV8LLiUcIi", "nc", "avx512dq")
TARGET_BUILTIN(__builtin_ia32_cvtpd2uqq512_mask, "V8LLiV8dV8LLiUcIi", "nc", "avx512dq")
TARGET_BUILTIN(__builtin_ia32_cvtps2qq512_mask, "V8LLiV8fV8LLiUcIi", "nc", "avx512dq")
@@ -1719,16 +1717,12 @@ TARGET_BUILTIN(__builtin_ia32_pmovusqw128_mask, "V8sV2LLiV8sUc", "nc", "avx512vl
TARGET_BUILTIN(__builtin_ia32_pmovusqw128mem_mask, "vV8s*V2LLiUc", "n", "avx512vl")
TARGET_BUILTIN(__builtin_ia32_pmovusqw256_mask, "V8sV4LLiV8sUc", "nc", "avx512vl")
TARGET_BUILTIN(__builtin_ia32_pmovusqw256mem_mask, "vV8s*V4LLiUc", "n", "avx512vl")
TARGET_BUILTIN(__builtin_ia32_pmovdb512_mask, "V16cV16iV16cUs", "nc", "avx512f")
TARGET_BUILTIN(__builtin_ia32_pmovdb512mem_mask, "vV16c*V16iUs", "n", "avx512f")
TARGET_BUILTIN(__builtin_ia32_pmovwb512mem_mask, "vV32c*V32sUi", "n", "avx512bw")
TARGET_BUILTIN(__builtin_ia32_pmovdw512_mask, "V16sV16iV16sUs", "nc", "avx512f")
TARGET_BUILTIN(__builtin_ia32_pmovdw512mem_mask, "vV16s*V16iUs", "n", "avx512f")
TARGET_BUILTIN(__builtin_ia32_pmovqb512_mask, "V16cV8LLiV16cUc", "nc", "avx512f")
TARGET_BUILTIN(__builtin_ia32_pmovqb512mem_mask, "vV16c*V8LLiUc", "n", "avx512f")
TARGET_BUILTIN(__builtin_ia32_pmovqd512_mask, "V8iV8LLiV8iUc", "nc", "avx512f")
TARGET_BUILTIN(__builtin_ia32_pmovqd512mem_mask, "vV8i*V8LLiUc", "n", "avx512f")
TARGET_BUILTIN(__builtin_ia32_pmovqw512_mask, "V8sV8LLiV8sUc", "nc", "avx512f")
TARGET_BUILTIN(__builtin_ia32_pmovqw512mem_mask, "vV8s*V8LLiUc", "n", "avx512f")
TARGET_BUILTIN(__builtin_ia32_pmovdb128_mask, "V16cV4iV16cUc", "nc", "avx512vl")
TARGET_BUILTIN(__builtin_ia32_pmovwb128mem_mask, "vV16c*V8sUc", "n", "avx512vl,avx512bw")
@@ -1738,7 +1732,6 @@ TARGET_BUILTIN(__builtin_ia32_pmovdb256mem_mask, "vV16c*V8iUc", "n", "avx512vl")
TARGET_BUILTIN(__builtin_ia32_pmovwb256mem_mask, "vV16c*V16sUs", "n", "avx512vl,avx512bw")
TARGET_BUILTIN(__builtin_ia32_pmovdw128_mask, "V8sV4iV8sUc", "nc", "avx512vl")
TARGET_BUILTIN(__builtin_ia32_pmovdw128mem_mask, "vV8s*V4iUc", "n", "avx512vl")
TARGET_BUILTIN(__builtin_ia32_pmovdw256_mask, "V8sV8iV8sUc", "nc", "avx512vl")
TARGET_BUILTIN(__builtin_ia32_pmovdw256mem_mask, "vV8s*V8iUc", "n", "avx512vl")
TARGET_BUILTIN(__builtin_ia32_pmovqb128_mask, "V16cV2LLiV16cUc", "nc", "avx512vl")
TARGET_BUILTIN(__builtin_ia32_pmovqb128mem_mask, "vV16c*V2LLiUc", "n", "avx512vl")
@@ -1746,7 +1739,6 @@ TARGET_BUILTIN(__builtin_ia32_pmovqb256_mask, "V16cV4LLiV16cUc", "nc", "avx512vl
TARGET_BUILTIN(__builtin_ia32_pmovqb256mem_mask, "vV16c*V4LLiUc", "n", "avx512vl")
TARGET_BUILTIN(__builtin_ia32_pmovqd128_mask, "V4iV2LLiV4iUc", "nc", "avx512vl")
TARGET_BUILTIN(__builtin_ia32_pmovqd128mem_mask, "vV4i*V2LLiUc", "n", "avx512vl")
TARGET_BUILTIN(__builtin_ia32_pmovqd256_mask, "V4iV4LLiV4iUc", "nc", "avx512vl")
TARGET_BUILTIN(__builtin_ia32_pmovqd256mem_mask, "vV4i*V4LLiUc", "n", "avx512vl")
TARGET_BUILTIN(__builtin_ia32_pmovqw128_mask, "V8sV2LLiV8sUc", "nc", "avx512vl")
TARGET_BUILTIN(__builtin_ia32_pmovqw128mem_mask, "vV8s*V2LLiUc", "n", "avx512vl")

lib/Headers/avx512bwintrin.h

@@ -1157,23 +1157,21 @@ _mm512_maskz_cvtusepi16_epi8 (__mmask32 __M, __m512i __A) {
static __inline__ __m256i __DEFAULT_FN_ATTRS
_mm512_cvtepi16_epi8 (__m512i __A) {
return (__m256i) __builtin_ia32_pmovwb512_mask ((__v32hi) __A,
(__v32qi) _mm256_setzero_si256(),
(__mmask32) -1);
return (__m256i)__builtin_convertvector((__v32hi)__A, __v32qi);
}
static __inline__ __m256i __DEFAULT_FN_ATTRS
_mm512_mask_cvtepi16_epi8 (__m256i __O, __mmask32 __M, __m512i __A) {
return (__m256i) __builtin_ia32_pmovwb512_mask ((__v32hi) __A,
(__v32qi) __O,
__M);
return (__m256i)__builtin_ia32_selectb_256((__mmask32)__M,
(__v32qi)_mm512_cvtepi16_epi8(__A),
(__v32qi)__O);
}
static __inline__ __m256i __DEFAULT_FN_ATTRS
_mm512_maskz_cvtepi16_epi8 (__mmask32 __M, __m512i __A) {
return (__m256i) __builtin_ia32_pmovwb512_mask ((__v32hi) __A,
(__v32qi) _mm256_setzero_si256(),
__M);
return (__m256i)__builtin_ia32_selectb_256((__mmask32)__M,
(__v32qi)_mm512_cvtepi16_epi8(__A),
(__v32qi)_mm256_setzero_si256());
}
static __inline__ void __DEFAULT_FN_ATTRS

lib/Headers/avx512fintrin.h

@@ -7601,24 +7601,23 @@ _mm512_mask_cvtusepi64_storeu_epi16 (void *__P, __mmask8 __M, __m512i __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS
_mm512_cvtepi32_epi8 (__m512i __A)
{
return (__m128i) __builtin_ia32_pmovdb512_mask ((__v16si) __A,
(__v16qi) _mm_undefined_si128 (),
(__mmask16) -1);
return (__m128i)__builtin_convertvector((__v16si)__A, __v16qi);
}
static __inline__ __m128i __DEFAULT_FN_ATTRS
_mm512_mask_cvtepi32_epi8 (__m128i __O, __mmask16 __M, __m512i __A)
{
return (__m128i) __builtin_ia32_pmovdb512_mask ((__v16si) __A,
(__v16qi) __O, __M);
return (__m128i)__builtin_ia32_selectb_128((__mmask16)__M,
(__v16qi)_mm512_cvtepi32_epi8(__A),
(__v16qi)__O);
}
static __inline__ __m128i __DEFAULT_FN_ATTRS
_mm512_maskz_cvtepi32_epi8 (__mmask16 __M, __m512i __A)
{
return (__m128i) __builtin_ia32_pmovdb512_mask ((__v16si) __A,
(__v16qi) _mm_setzero_si128 (),
__M);
return (__m128i)__builtin_ia32_selectb_128((__mmask16)__M,
(__v16qi)_mm512_cvtepi32_epi8(__A),
(__v16qi)_mm_setzero_si128());
}
static __inline__ void __DEFAULT_FN_ATTRS
@@ -7630,24 +7629,23 @@ _mm512_mask_cvtepi32_storeu_epi8 (void * __P, __mmask16 __M, __m512i __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS
_mm512_cvtepi32_epi16 (__m512i __A)
{
return (__m256i) __builtin_ia32_pmovdw512_mask ((__v16si) __A,
(__v16hi) _mm256_undefined_si256 (),
(__mmask16) -1);
return (__m256i)__builtin_convertvector((__v16si)__A, __v16hi);
}
static __inline__ __m256i __DEFAULT_FN_ATTRS
_mm512_mask_cvtepi32_epi16 (__m256i __O, __mmask16 __M, __m512i __A)
{
return (__m256i) __builtin_ia32_pmovdw512_mask ((__v16si) __A,
(__v16hi) __O, __M);
return (__m256i)__builtin_ia32_selectw_256((__mmask16)__M,
(__v16hi)_mm512_cvtepi32_epi16(__A),
(__v16hi)__O);
}
static __inline__ __m256i __DEFAULT_FN_ATTRS
_mm512_maskz_cvtepi32_epi16 (__mmask16 __M, __m512i __A)
{
return (__m256i) __builtin_ia32_pmovdw512_mask ((__v16si) __A,
(__v16hi) _mm256_setzero_si256 (),
__M);
return (__m256i)__builtin_ia32_selectw_256((__mmask16)__M,
(__v16hi)_mm512_cvtepi32_epi16(__A),
(__v16hi)_mm256_setzero_si256());
}
static __inline__ void __DEFAULT_FN_ATTRS
@@ -7688,24 +7686,23 @@ _mm512_mask_cvtepi64_storeu_epi8 (void * __P, __mmask8 __M, __m512i __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS
_mm512_cvtepi64_epi32 (__m512i __A)
{
return (__m256i) __builtin_ia32_pmovqd512_mask ((__v8di) __A,
(__v8si) _mm256_undefined_si256 (),
(__mmask8) -1);
return (__m256i)__builtin_convertvector((__v8di) __A, __v8si);
}
static __inline__ __m256i __DEFAULT_FN_ATTRS
_mm512_mask_cvtepi64_epi32 (__m256i __O, __mmask8 __M, __m512i __A)
{
return (__m256i) __builtin_ia32_pmovqd512_mask ((__v8di) __A,
(__v8si) __O, __M);
return (__m256i)__builtin_ia32_selectd_256((__mmask8)__M,
(__v8si)_mm512_cvtepi64_epi32(__A),
(__v8si)__O);
}
static __inline__ __m256i __DEFAULT_FN_ATTRS
_mm512_maskz_cvtepi64_epi32 (__mmask8 __M, __m512i __A)
{
return (__m256i) __builtin_ia32_pmovqd512_mask ((__v8di) __A,
(__v8si) _mm256_setzero_si256 (),
__M);
return (__m256i)__builtin_ia32_selectd_256((__mmask8)__M,
(__v8si)_mm512_cvtepi64_epi32(__A),
(__v8si)_mm256_setzero_si256());
}
static __inline__ void __DEFAULT_FN_ATTRS
@@ -7717,24 +7714,23 @@ _mm512_mask_cvtepi64_storeu_epi32 (void* __P, __mmask8 __M, __m512i __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS
_mm512_cvtepi64_epi16 (__m512i __A)
{
return (__m128i) __builtin_ia32_pmovqw512_mask ((__v8di) __A,
(__v8hi) _mm_undefined_si128 (),
(__mmask8) -1);
return (__m128i)__builtin_convertvector((__v8di)__A, __v8hi);
}
static __inline__ __m128i __DEFAULT_FN_ATTRS
_mm512_mask_cvtepi64_epi16 (__m128i __O, __mmask8 __M, __m512i __A)
{
return (__m128i) __builtin_ia32_pmovqw512_mask ((__v8di) __A,
(__v8hi) __O, __M);
return (__m128i)__builtin_ia32_selectw_128((__mmask8)__M,
(__v8hi)_mm512_cvtepi64_epi16(__A),
(__v8hi)__O);
}
static __inline__ __m128i __DEFAULT_FN_ATTRS
_mm512_maskz_cvtepi64_epi16 (__mmask8 __M, __m512i __A)
{
return (__m128i) __builtin_ia32_pmovqw512_mask ((__v8di) __A,
(__v8hi) _mm_setzero_si128 (),
__M);
return (__m128i)__builtin_ia32_selectw_128((__mmask8)__M,
(__v8hi)_mm512_cvtepi64_epi16(__A),
(__v8hi)_mm_setzero_si128());
}
static __inline__ void __DEFAULT_FN_ATTRS

lib/Headers/avx512vlbwintrin.h

@@ -1551,23 +1551,21 @@ _mm_mask_cvtusepi16_storeu_epi8 (void * __P, __mmask8 __M, __m128i __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS
_mm256_cvtepi16_epi8 (__m256i __A) {
return (__m128i) __builtin_ia32_pmovwb256_mask ((__v16hi) __A,
(__v16qi) _mm_setzero_si128(),
(__mmask16) -1);
return (__m128i)__builtin_convertvector((__v16hi) __A, __v16qi);
}
static __inline__ __m128i __DEFAULT_FN_ATTRS
_mm256_mask_cvtepi16_epi8 (__m128i __O, __mmask16 __M, __m256i __A) {
return (__m128i) __builtin_ia32_pmovwb256_mask ((__v16hi) __A,
(__v16qi) __O,
__M);
return (__m128i)__builtin_ia32_selectb_128((__mmask16)__M,
(__v16qi)_mm256_cvtepi16_epi8(__A),
(__v16qi)__O);
}
static __inline__ __m128i __DEFAULT_FN_ATTRS
_mm256_maskz_cvtepi16_epi8 (__mmask16 __M, __m256i __A) {
return (__m128i) __builtin_ia32_pmovwb256_mask ((__v16hi) __A,
(__v16qi) _mm_setzero_si128(),
__M);
return (__m128i)__builtin_ia32_selectb_128((__mmask16)__M,
(__v16qi)_mm256_cvtepi16_epi8(__A),
(__v16qi)_mm_setzero_si128());
}
static __inline__ void __DEFAULT_FN_ATTRS

lib/Headers/avx512vlintrin.h

@@ -7627,24 +7627,23 @@ _mm_mask_cvtepi32_storeu_epi16 (void * __P, __mmask8 __M, __m128i __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS
_mm256_cvtepi32_epi16 (__m256i __A)
{
return (__m128i) __builtin_ia32_pmovdw256_mask ((__v8si) __A,
(__v8hi)_mm_setzero_si128 (),
(__mmask8) -1);
return (__m128i)__builtin_convertvector((__v8si)__A, __v8hi);
}
static __inline__ __m128i __DEFAULT_FN_ATTRS
_mm256_mask_cvtepi32_epi16 (__m128i __O, __mmask8 __M, __m256i __A)
{
return (__m128i) __builtin_ia32_pmovdw256_mask ((__v8si) __A,
(__v8hi) __O, __M);
return (__m128i)__builtin_ia32_selectw_128((__mmask8)__M,
(__v8hi)_mm256_cvtepi32_epi16(__A),
(__v8hi)__O);
}
static __inline__ __m128i __DEFAULT_FN_ATTRS
_mm256_maskz_cvtepi32_epi16 (__mmask8 __M, __m256i __A)
{
return (__m128i) __builtin_ia32_pmovdw256_mask ((__v8si) __A,
(__v8hi) _mm_setzero_si128 (),
__M);
return (__m128i)__builtin_ia32_selectw_128((__mmask8)__M,
(__v8hi)_mm256_cvtepi32_epi16(__A),
(__v8hi)_mm_setzero_si128());
}
static __inline__ void __DEFAULT_FN_ATTRS
@@ -7743,24 +7742,23 @@ _mm_mask_cvtepi64_storeu_epi32 (void * __P, __mmask8 __M, __m128i __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS
_mm256_cvtepi64_epi32 (__m256i __A)
{
return (__m128i) __builtin_ia32_pmovqd256_mask ((__v4di) __A,
(__v4si) _mm_undefined_si128(),
(__mmask8) -1);
return (__m128i)__builtin_convertvector((__v4di)__A, __v4si);
}
static __inline__ __m128i __DEFAULT_FN_ATTRS
_mm256_mask_cvtepi64_epi32 (__m128i __O, __mmask8 __M, __m256i __A)
{
return (__m128i) __builtin_ia32_pmovqd256_mask ((__v4di) __A,
(__v4si) __O, __M);
return (__m128i)__builtin_ia32_selectd_128((__mmask8)__M,
(__v4si)_mm256_cvtepi64_epi32(__A),
(__v4si)__O);
}
static __inline__ __m128i __DEFAULT_FN_ATTRS
_mm256_maskz_cvtepi64_epi32 (__mmask8 __M, __m256i __A)
{
return (__m128i) __builtin_ia32_pmovqd256_mask ((__v4di) __A,
(__v4si) _mm_setzero_si128 (),
__M);
return (__m128i)__builtin_ia32_selectd_128((__mmask8)__M,
(__v4si)_mm256_cvtepi64_epi32(__A),
(__v4si)_mm_setzero_si128());
}
static __inline__ void __DEFAULT_FN_ATTRS

test/CodeGen/avx512bw-builtins.c

@@ -1107,19 +1107,21 @@ __m256i test_mm512_maskz_cvtusepi16_epi8(__mmask32 __M, __m512i __A) {
__m256i test_mm512_cvtepi16_epi8(__m512i __A) {
// CHECK-LABEL: @test_mm512_cvtepi16_epi8
// CHECK: @llvm.x86.avx512.mask.pmov.wb.512
// CHECK: trunc <32 x i16> %{{.*}} to <32 x i8>
return _mm512_cvtepi16_epi8(__A);
}
__m256i test_mm512_mask_cvtepi16_epi8(__m256i __O, __mmask32 __M, __m512i __A) {
// CHECK-LABEL: @test_mm512_mask_cvtepi16_epi8
// CHECK: @llvm.x86.avx512.mask.pmov.wb.512
// CHECK: trunc <32 x i16> %{{.*}} to <32 x i8>
// CHECK: select <32 x i1> %{{.*}}, <32 x i8> %{{.*}}, <32 x i8> %{{.*}}
return _mm512_mask_cvtepi16_epi8(__O, __M, __A);
}
__m256i test_mm512_maskz_cvtepi16_epi8(__mmask32 __M, __m512i __A) {
// CHECK-LABEL: @test_mm512_maskz_cvtepi16_epi8
// CHECK: @llvm.x86.avx512.mask.pmov.wb.512
// CHECK: trunc <32 x i16> %{{.*}} to <32 x i8>
// CHECK: select <32 x i1> %{{.*}}, <32 x i8> %{{.*}}, <32 x i8> %{{.*}}
return _mm512_maskz_cvtepi16_epi8(__M, __A);
}

test/CodeGen/avx512f-builtins.c

@@ -5102,19 +5102,21 @@ void test_mm512_mask_cvtusepi64_storeu_epi16(void *__P, __mmask8 __M, __m512i __
__m128i test_mm512_cvtepi32_epi8(__m512i __A) {
// CHECK-LABEL: @test_mm512_cvtepi32_epi8
// CHECK: @llvm.x86.avx512.mask.pmov.db.512
// CHECK: trunc <16 x i32> %{{.*}} to <16 x i8>
return _mm512_cvtepi32_epi8(__A);
}
__m128i test_mm512_mask_cvtepi32_epi8(__m128i __O, __mmask16 __M, __m512i __A) {
// CHECK-LABEL: @test_mm512_mask_cvtepi32_epi8
// CHECK: @llvm.x86.avx512.mask.pmov.db.512
// CHECK: trunc <16 x i32> %{{.*}} to <16 x i8>
// CHECK: select <16 x i1> %{{.*}}, <16 x i8> %{{.*}}, <16 x i8> %{{.*}}
return _mm512_mask_cvtepi32_epi8(__O, __M, __A);
}
__m128i test_mm512_maskz_cvtepi32_epi8(__mmask16 __M, __m512i __A) {
// CHECK-LABEL: @test_mm512_maskz_cvtepi32_epi8
// CHECK: @llvm.x86.avx512.mask.pmov.db.512
// CHECK: trunc <16 x i32> %{{.*}} to <16 x i8>
// CHECK: select <16 x i1> %{{.*}}, <16 x i8> %{{.*}}, <16 x i8> %{{.*}}
return _mm512_maskz_cvtepi32_epi8(__M, __A);
}
@@ -5126,19 +5128,21 @@ void test_mm512_mask_cvtepi32_storeu_epi8(void * __P, __mmask16 __M, __m512i __A
__m256i test_mm512_cvtepi32_epi16(__m512i __A) {
// CHECK-LABEL: @test_mm512_cvtepi32_epi16
// CHECK: @llvm.x86.avx512.mask.pmov.dw.512
// CHECK: trunc <16 x i32> %{{.*}} to <16 x i16>
return _mm512_cvtepi32_epi16(__A);
}
__m256i test_mm512_mask_cvtepi32_epi16(__m256i __O, __mmask16 __M, __m512i __A) {
// CHECK-LABEL: @test_mm512_mask_cvtepi32_epi16
// CHECK: @llvm.x86.avx512.mask.pmov.dw.512
// CHECK: trunc <16 x i32> %{{.*}} to <16 x i16>
// CHECK: select <16 x i1> %{{.*}}, <16 x i16> %{{.*}}, <16 x i16> %{{.*}}
return _mm512_mask_cvtepi32_epi16(__O, __M, __A);
}
__m256i test_mm512_maskz_cvtepi32_epi16(__mmask16 __M, __m512i __A) {
// CHECK-LABEL: @test_mm512_maskz_cvtepi32_epi16
// CHECK: @llvm.x86.avx512.mask.pmov.dw.512
// CHECK: trunc <16 x i32> %{{.*}} to <16 x i16>
// CHECK: select <16 x i1> %{{.*}}, <16 x i16> %{{.*}}, <16 x i16> %{{.*}}
return _mm512_maskz_cvtepi32_epi16(__M, __A);
}
@@ -5174,19 +5178,21 @@ void test_mm512_mask_cvtepi64_storeu_epi8(void * __P, __mmask8 __M, __m512i __A)
__m256i test_mm512_cvtepi64_epi32(__m512i __A) {
// CHECK-LABEL: @test_mm512_cvtepi64_epi32
// CHECK: @llvm.x86.avx512.mask.pmov.qd.512
// CHECK: trunc <8 x i64> %{{.*}} to <8 x i32>
return _mm512_cvtepi64_epi32(__A);
}
__m256i test_mm512_mask_cvtepi64_epi32(__m256i __O, __mmask8 __M, __m512i __A) {
// CHECK-LABEL: @test_mm512_mask_cvtepi64_epi32
// CHECK: @llvm.x86.avx512.mask.pmov.qd.512
// CHECK: trunc <8 x i64> %{{.*}} to <8 x i32>
// CHECK: select <8 x i1> %{{.*}}, <8 x i32> %{{.*}}, <8 x i32> %{{.*}}
return _mm512_mask_cvtepi64_epi32(__O, __M, __A);
}
__m256i test_mm512_maskz_cvtepi64_epi32(__mmask8 __M, __m512i __A) {
// CHECK-LABEL: @test_mm512_maskz_cvtepi64_epi32
// CHECK: @llvm.x86.avx512.mask.pmov.qd.512
// CHECK: trunc <8 x i64> %{{.*}} to <8 x i32>
// CHECK: select <8 x i1> %{{.*}}, <8 x i32> %{{.*}}, <8 x i32> %{{.*}}
return _mm512_maskz_cvtepi64_epi32(__M, __A);
}
@@ -5198,19 +5204,21 @@ void test_mm512_mask_cvtepi64_storeu_epi32(void* __P, __mmask8 __M, __m512i __A)
__m128i test_mm512_cvtepi64_epi16(__m512i __A) {
// CHECK-LABEL: @test_mm512_cvtepi64_epi16
// CHECK: @llvm.x86.avx512.mask.pmov.qw.512
// CHECK: trunc <8 x i64> %{{.*}} to <8 x i16>
return _mm512_cvtepi64_epi16(__A);
}
__m128i test_mm512_mask_cvtepi64_epi16(__m128i __O, __mmask8 __M, __m512i __A) {
// CHECK-LABEL: @test_mm512_mask_cvtepi64_epi16
// CHECK: @llvm.x86.avx512.mask.pmov.qw.512
// CHECK: trunc <8 x i64> %{{.*}} to <8 x i16>
// CHECK: select <8 x i1> %{{.*}}, <8 x i16> %{{.*}}, <8 x i16> %{{.*}}
return _mm512_mask_cvtepi64_epi16(__O, __M, __A);
}
__m128i test_mm512_maskz_cvtepi64_epi16(__mmask8 __M, __m512i __A) {
// CHECK-LABEL: @test_mm512_maskz_cvtepi64_epi16
// CHECK: @llvm.x86.avx512.mask.pmov.qw.512
// CHECK: trunc <8 x i64> %{{.*}} to <8 x i16>
// CHECK: select <8 x i1> %{{.*}}, <8 x i16> %{{.*}}, <8 x i16> %{{.*}}
return _mm512_maskz_cvtepi64_epi16(__M, __A);
}

test/CodeGen/avx512vl-builtins.c

@@ -6577,19 +6577,21 @@ void test_mm_mask_cvtepi32_storeu_epi16(void * __P, __mmask8 __M, __m128i __A) {
__m128i test_mm256_cvtepi32_epi16(__m256i __A) {
// CHECK-LABEL: @test_mm256_cvtepi32_epi16
// CHECK: @llvm.x86.avx512.mask.pmov.dw.256
// CHECK: trunc <8 x i32> %{{.*}} to <8 x i16>
return _mm256_cvtepi32_epi16(__A);
}
__m128i test_mm256_mask_cvtepi32_epi16(__m128i __O, __mmask8 __M, __m256i __A) {
// CHECK-LABEL: @test_mm256_mask_cvtepi32_epi16
// CHECK: @llvm.x86.avx512.mask.pmov.dw.256
// CHECK: trunc <8 x i32> %{{.*}} to <8 x i16>
// CHECK: select <8 x i1> %{{.*}}, <8 x i16> %{{.*}}, <8 x i16> %{{.*}}
return _mm256_mask_cvtepi32_epi16(__O, __M, __A);
}
__m128i test_mm256_maskz_cvtepi32_epi16(__mmask8 __M, __m256i __A) {
// CHECK-LABEL: @test_mm256_maskz_cvtepi32_epi16
// CHECK: @llvm.x86.avx512.mask.pmov.dw.256
// CHECK: trunc <8 x i32> %{{.*}} to <8 x i16>
// CHECK: select <8 x i1> %{{.*}}, <8 x i16> %{{.*}}, <8 x i16> %{{.*}}
return _mm256_maskz_cvtepi32_epi16(__M, __A);
}
@@ -6673,19 +6675,21 @@ void test_mm_mask_cvtepi64_storeu_epi32(void * __P, __mmask8 __M, __m128i __A) {
__m128i test_mm256_cvtepi64_epi32(__m256i __A) {
// CHECK-LABEL: @test_mm256_cvtepi64_epi32
// CHECK: @llvm.x86.avx512.mask.pmov.qd.256
// CHECK: trunc <4 x i64> %{{.*}} to <4 x i32>
return _mm256_cvtepi64_epi32(__A);
}
__m128i test_mm256_mask_cvtepi64_epi32(__m128i __O, __mmask8 __M, __m256i __A) {
// CHECK-LABEL: @test_mm256_mask_cvtepi64_epi32
// CHECK: @llvm.x86.avx512.mask.pmov.qd.256
// CHECK: trunc <4 x i64> %{{.*}} to <4 x i32>
// CHECK: select <4 x i1> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> %{{.*}}
return _mm256_mask_cvtepi64_epi32(__O, __M, __A);
}
__m128i test_mm256_maskz_cvtepi64_epi32(__mmask8 __M, __m256i __A) {
// CHECK-LABEL: @test_mm256_maskz_cvtepi64_epi32
// CHECK: @llvm.x86.avx512.mask.pmov.qd.256
// CHECK: trunc <4 x i64> %{{.*}} to <4 x i32>
// CHECK: select <4 x i1> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> %{{.*}}
return _mm256_maskz_cvtepi64_epi32(__M, __A);
}

test/CodeGen/avx512vlbw-builtins.c

@@ -1804,19 +1804,21 @@ __m128i test_mm_maskz_cvtepi16_epi8(__mmask8 __M, __m128i __A) {
__m128i test_mm256_cvtepi16_epi8(__m256i __A) {
// CHECK-LABEL: @test_mm256_cvtepi16_epi8
// CHECK: @llvm.x86.avx512.mask.pmov.wb.256
// CHECK: trunc <16 x i16> %{{.*}} to <16 x i8>
return _mm256_cvtepi16_epi8(__A);
}
__m128i test_mm256_mask_cvtepi16_epi8(__m128i __O, __mmask16 __M, __m256i __A) {
// CHECK-LABEL: @test_mm256_mask_cvtepi16_epi8
// CHECK: @llvm.x86.avx512.mask.pmov.wb.256
// CHECK: trunc <16 x i16> %{{.*}} to <16 x i8>
// CHECK: select <16 x i1> %{{.*}}, <16 x i8> %{{.*}}, <16 x i8> %{{.*}}
return _mm256_mask_cvtepi16_epi8(__O, __M, __A);
}
__m128i test_mm256_maskz_cvtepi16_epi8(__mmask16 __M, __m256i __A) {
// CHECK-LABEL: @test_mm256_maskz_cvtepi16_epi8
// CHECK: @llvm.x86.avx512.mask.pmov.wb.256
// CHECK: trunc <16 x i16> %{{.*}} to <16 x i8>
// CHECK: select <16 x i1> %{{.*}}, <16 x i8> %{{.*}}, <16 x i8> %{{.*}}
return _mm256_maskz_cvtepi16_epi8(__M, __A);
}