llvm-project/clang/lib/Headers/xopintrin.h

/*===---- xopintrin.h - XOP intrinsics -------------------------------------===
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*
*===-----------------------------------------------------------------------===
*/
#ifndef __X86INTRIN_H
#error "Never use <xopintrin.h> directly; include <x86intrin.h> instead."
#endif
#ifndef __XOPINTRIN_H
#define __XOPINTRIN_H
#include <fma4intrin.h>
/* Define the default attributes for the functions in this file. */
#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("xop"), __min_vector_width__(128)))
#define __DEFAULT_FN_ATTRS256 __attribute__((__always_inline__, __nodebug__, __target__("xop"), __min_vector_width__(256)))
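
/* Integer multiply-accumulate: multiply elements of __A and __B and add the
   products to __C.  The 's' variants saturate the result, and the widening
   forms accumulate into wider destination elements. */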
static __inline__ __m128i __DEFAULT_FN_ATTRS
_mm_maccs_epi16(__m128i __A, __m128i __B, __m128i __C)
{
return (__m128i)__builtin_ia32_vpmacssww((__v8hi)__A, (__v8hi)__B, (__v8hi)__C);
}
static __inline__ __m128i __DEFAULT_FN_ATTRS
_mm_macc_epi16(__m128i __A, __m128i __B, __m128i __C)
{
return (__m128i)__builtin_ia32_vpmacsww((__v8hi)__A, (__v8hi)__B, (__v8hi)__C);
}
static __inline__ __m128i __DEFAULT_FN_ATTRS
_mm_maccsd_epi16(__m128i __A, __m128i __B, __m128i __C)
{
return (__m128i)__builtin_ia32_vpmacsswd((__v8hi)__A, (__v8hi)__B, (__v4si)__C);
}
static __inline__ __m128i __DEFAULT_FN_ATTRS
_mm_maccd_epi16(__m128i __A, __m128i __B, __m128i __C)
{
return (__m128i)__builtin_ia32_vpmacswd((__v8hi)__A, (__v8hi)__B, (__v4si)__C);
}
static __inline__ __m128i __DEFAULT_FN_ATTRS
_mm_maccs_epi32(__m128i __A, __m128i __B, __m128i __C)
{
return (__m128i)__builtin_ia32_vpmacssdd((__v4si)__A, (__v4si)__B, (__v4si)__C);
}
static __inline__ __m128i __DEFAULT_FN_ATTRS
_mm_macc_epi32(__m128i __A, __m128i __B, __m128i __C)
{
return (__m128i)__builtin_ia32_vpmacsdd((__v4si)__A, (__v4si)__B, (__v4si)__C);
}
static __inline__ __m128i __DEFAULT_FN_ATTRS
_mm_maccslo_epi32(__m128i __A, __m128i __B, __m128i __C)
{
return (__m128i)__builtin_ia32_vpmacssdql((__v4si)__A, (__v4si)__B, (__v2di)__C);
}
static __inline__ __m128i __DEFAULT_FN_ATTRS
_mm_macclo_epi32(__m128i __A, __m128i __B, __m128i __C)
{
return (__m128i)__builtin_ia32_vpmacsdql((__v4si)__A, (__v4si)__B, (__v2di)__C);
}
static __inline__ __m128i __DEFAULT_FN_ATTRS
_mm_maccshi_epi32(__m128i __A, __m128i __B, __m128i __C)
{
return (__m128i)__builtin_ia32_vpmacssdqh((__v4si)__A, (__v4si)__B, (__v2di)__C);
}
static __inline__ __m128i __DEFAULT_FN_ATTRS
_mm_macchi_epi32(__m128i __A, __m128i __B, __m128i __C)
{
return (__m128i)__builtin_ia32_vpmacsdqh((__v4si)__A, (__v4si)__B, (__v2di)__C);
}
static __inline__ __m128i __DEFAULT_FN_ATTRS
_mm_maddsd_epi16(__m128i __A, __m128i __B, __m128i __C)
{
return (__m128i)__builtin_ia32_vpmadcsswd((__v8hi)__A, (__v8hi)__B, (__v4si)__C);
}
static __inline__ __m128i __DEFAULT_FN_ATTRS
_mm_maddd_epi16(__m128i __A, __m128i __B, __m128i __C)
{
return (__m128i)__builtin_ia32_vpmadcswd((__v8hi)__A, (__v8hi)__B, (__v4si)__C);
}
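
/* Horizontal widening adds: sum adjacent elements of __A into wider result
   elements.  The _epu forms treat the source elements as unsigned. */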
static __inline__ __m128i __DEFAULT_FN_ATTRS
_mm_haddw_epi8(__m128i __A)
{
return (__m128i)__builtin_ia32_vphaddbw((__v16qi)__A);
}
static __inline__ __m128i __DEFAULT_FN_ATTRS
_mm_haddd_epi8(__m128i __A)
{
return (__m128i)__builtin_ia32_vphaddbd((__v16qi)__A);
}
static __inline__ __m128i __DEFAULT_FN_ATTRS
_mm_haddq_epi8(__m128i __A)
{
return (__m128i)__builtin_ia32_vphaddbq((__v16qi)__A);
}
static __inline__ __m128i __DEFAULT_FN_ATTRS
_mm_haddd_epi16(__m128i __A)
{
return (__m128i)__builtin_ia32_vphaddwd((__v8hi)__A);
}
static __inline__ __m128i __DEFAULT_FN_ATTRS
_mm_haddq_epi16(__m128i __A)
{
return (__m128i)__builtin_ia32_vphaddwq((__v8hi)__A);
}
static __inline__ __m128i __DEFAULT_FN_ATTRS
_mm_haddq_epi32(__m128i __A)
{
return (__m128i)__builtin_ia32_vphadddq((__v4si)__A);
}
static __inline__ __m128i __DEFAULT_FN_ATTRS
_mm_haddw_epu8(__m128i __A)
{
return (__m128i)__builtin_ia32_vphaddubw((__v16qi)__A);
}
static __inline__ __m128i __DEFAULT_FN_ATTRS
_mm_haddd_epu8(__m128i __A)
{
return (__m128i)__builtin_ia32_vphaddubd((__v16qi)__A);
}
static __inline__ __m128i __DEFAULT_FN_ATTRS
_mm_haddq_epu8(__m128i __A)
{
return (__m128i)__builtin_ia32_vphaddubq((__v16qi)__A);
}
static __inline__ __m128i __DEFAULT_FN_ATTRS
_mm_haddd_epu16(__m128i __A)
{
return (__m128i)__builtin_ia32_vphadduwd((__v8hi)__A);
}
static __inline__ __m128i __DEFAULT_FN_ATTRS
_mm_haddq_epu16(__m128i __A)
{
return (__m128i)__builtin_ia32_vphadduwq((__v8hi)__A);
}
static __inline__ __m128i __DEFAULT_FN_ATTRS
_mm_haddq_epu32(__m128i __A)
{
return (__m128i)__builtin_ia32_vphaddudq((__v4si)__A);
}
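
/* Horizontal widening subtracts: subtract adjacent elements of __A, producing
   wider result elements. */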
static __inline__ __m128i __DEFAULT_FN_ATTRS
_mm_hsubw_epi8(__m128i __A)
{
return (__m128i)__builtin_ia32_vphsubbw((__v16qi)__A);
}
static __inline__ __m128i __DEFAULT_FN_ATTRS
_mm_hsubd_epi16(__m128i __A)
{
return (__m128i)__builtin_ia32_vphsubwd((__v8hi)__A);
}
static __inline__ __m128i __DEFAULT_FN_ATTRS
_mm_hsubq_epi32(__m128i __A)
{
return (__m128i)__builtin_ia32_vphsubdq((__v4si)__A);
}
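
/* Bitwise conditional move: select bits from __A where __C is 1 and bits from
   __B where __C is 0. */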
static __inline__ __m128i __DEFAULT_FN_ATTRS
_mm_cmov_si128(__m128i __A, __m128i __B, __m128i __C)
{
return (__m128i)(((__v2du)__A & (__v2du)__C) | ((__v2du)__B & ~(__v2du)__C));
}
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_cmov_si256(__m256i __A, __m256i __B, __m256i __C)
{
return (__m256i)(((__v4du)__A & (__v4du)__C) | ((__v4du)__B & ~(__v4du)__C));
}
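
/* Byte permute: select each result byte from the 32-byte concatenation of __A
   and __B as directed by the selector __C. */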
static __inline__ __m128i __DEFAULT_FN_ATTRS
_mm_perm_epi8(__m128i __A, __m128i __B, __m128i __C)
{
return (__m128i)__builtin_ia32_vpperm((__v16qi)__A, (__v16qi)__B, (__v16qi)__C);
}
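
/* Per-element rotates: rotate each element of __A by the signed count in the
   corresponding element of __B (negative counts rotate right). */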
static __inline__ __m128i __DEFAULT_FN_ATTRS
_mm_rot_epi8(__m128i __A, __m128i __B)
{
return (__m128i)__builtin_ia32_vprotb((__v16qi)__A, (__v16qi)__B);
}
static __inline__ __m128i __DEFAULT_FN_ATTRS
_mm_rot_epi16(__m128i __A, __m128i __B)
{
return (__m128i)__builtin_ia32_vprotw((__v8hi)__A, (__v8hi)__B);
}
static __inline__ __m128i __DEFAULT_FN_ATTRS
_mm_rot_epi32(__m128i __A, __m128i __B)
{
return (__m128i)__builtin_ia32_vprotd((__v4si)__A, (__v4si)__B);
}
static __inline__ __m128i __DEFAULT_FN_ATTRS
_mm_rot_epi64(__m128i __A, __m128i __B)
{
return (__m128i)__builtin_ia32_vprotq((__v2di)__A, (__v2di)__B);
}
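
/* Rotate each element of A by the immediate count N. */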
#define _mm_roti_epi8(A, N) \
  (__m128i)__builtin_ia32_vprotbi((__v16qi)(__m128i)(A), (N))
#define _mm_roti_epi16(A, N) \
  (__m128i)__builtin_ia32_vprotwi((__v8hi)(__m128i)(A), (N))
#define _mm_roti_epi32(A, N) \
  (__m128i)__builtin_ia32_vprotdi((__v4si)(__m128i)(A), (N))
#define _mm_roti_epi64(A, N) \
  (__m128i)__builtin_ia32_vprotqi((__v2di)(__m128i)(A), (N))
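
/* Per-element logical shifts: shift each element of __A by the signed count in
   the corresponding element of __B (left for positive counts, right for
   negative counts). */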
static __inline__ __m128i __DEFAULT_FN_ATTRS
_mm_shl_epi8(__m128i __A, __m128i __B)
{
return (__m128i)__builtin_ia32_vpshlb((__v16qi)__A, (__v16qi)__B);
}
static __inline__ __m128i __DEFAULT_FN_ATTRS
_mm_shl_epi16(__m128i __A, __m128i __B)
{
return (__m128i)__builtin_ia32_vpshlw((__v8hi)__A, (__v8hi)__B);
}
static __inline__ __m128i __DEFAULT_FN_ATTRS
_mm_shl_epi32(__m128i __A, __m128i __B)
{
return (__m128i)__builtin_ia32_vpshld((__v4si)__A, (__v4si)__B);
}
static __inline__ __m128i __DEFAULT_FN_ATTRS
_mm_shl_epi64(__m128i __A, __m128i __B)
{
return (__m128i)__builtin_ia32_vpshlq((__v2di)__A, (__v2di)__B);
}
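
/* Per-element arithmetic shifts: like the _mm_shl_* forms, but negative counts
   perform arithmetic right shifts. */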
static __inline__ __m128i __DEFAULT_FN_ATTRS
_mm_sha_epi8(__m128i __A, __m128i __B)
{
return (__m128i)__builtin_ia32_vpshab((__v16qi)__A, (__v16qi)__B);
}
static __inline__ __m128i __DEFAULT_FN_ATTRS
_mm_sha_epi16(__m128i __A, __m128i __B)
{
return (__m128i)__builtin_ia32_vpshaw((__v8hi)__A, (__v8hi)__B);
}
static __inline__ __m128i __DEFAULT_FN_ATTRS
_mm_sha_epi32(__m128i __A, __m128i __B)
{
return (__m128i)__builtin_ia32_vpshad((__v4si)__A, (__v4si)__B);
}
static __inline__ __m128i __DEFAULT_FN_ATTRS
_mm_sha_epi64(__m128i __A, __m128i __B)
{
return (__m128i)__builtin_ia32_vpshaq((__v2di)__A, (__v2di)__B);
}
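
/* Compare elements of A and B using the predicate N (one of the
   _MM_PCOMCTRL_* codes below); each result element is all ones when the
   predicate holds and all zeros otherwise. */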
#define _mm_com_epu8(A, B, N) \
  (__m128i)__builtin_ia32_vpcomub((__v16qi)(__m128i)(A), \
                                  (__v16qi)(__m128i)(B), (N))
#define _mm_com_epu16(A, B, N) \
  (__m128i)__builtin_ia32_vpcomuw((__v8hi)(__m128i)(A), \
                                  (__v8hi)(__m128i)(B), (N))
#define _mm_com_epu32(A, B, N) \
  (__m128i)__builtin_ia32_vpcomud((__v4si)(__m128i)(A), \
                                  (__v4si)(__m128i)(B), (N))
#define _mm_com_epu64(A, B, N) \
  (__m128i)__builtin_ia32_vpcomuq((__v2di)(__m128i)(A), \
                                  (__v2di)(__m128i)(B), (N))
#define _mm_com_epi8(A, B, N) \
  (__m128i)__builtin_ia32_vpcomb((__v16qi)(__m128i)(A), \
                                 (__v16qi)(__m128i)(B), (N))
#define _mm_com_epi16(A, B, N) \
  (__m128i)__builtin_ia32_vpcomw((__v8hi)(__m128i)(A), \
                                 (__v8hi)(__m128i)(B), (N))
#define _mm_com_epi32(A, B, N) \
  (__m128i)__builtin_ia32_vpcomd((__v4si)(__m128i)(A), \
                                 (__v4si)(__m128i)(B), (N))
#define _mm_com_epi64(A, B, N) \
  (__m128i)__builtin_ia32_vpcomq((__v2di)(__m128i)(A), \
                                 (__v2di)(__m128i)(B), (N))
#define _MM_PCOMCTRL_LT 0
#define _MM_PCOMCTRL_LE 1
#define _MM_PCOMCTRL_GT 2
#define _MM_PCOMCTRL_GE 3
#define _MM_PCOMCTRL_EQ 4
#define _MM_PCOMCTRL_NEQ 5
#define _MM_PCOMCTRL_FALSE 6
#define _MM_PCOMCTRL_TRUE 7
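
/* Convenience wrappers that invoke _mm_com_* with each of the predicate codes
   above, for every element type. */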
static __inline__ __m128i __DEFAULT_FN_ATTRS
_mm_comlt_epu8(__m128i __A, __m128i __B)
{
return _mm_com_epu8(__A, __B, _MM_PCOMCTRL_LT);
}
static __inline__ __m128i __DEFAULT_FN_ATTRS
_mm_comle_epu8(__m128i __A, __m128i __B)
{
return _mm_com_epu8(__A, __B, _MM_PCOMCTRL_LE);
}
static __inline__ __m128i __DEFAULT_FN_ATTRS
_mm_comgt_epu8(__m128i __A, __m128i __B)
{
return _mm_com_epu8(__A, __B, _MM_PCOMCTRL_GT);
}
static __inline__ __m128i __DEFAULT_FN_ATTRS
_mm_comge_epu8(__m128i __A, __m128i __B)
{
return _mm_com_epu8(__A, __B, _MM_PCOMCTRL_GE);
}
static __inline__ __m128i __DEFAULT_FN_ATTRS
_mm_comeq_epu8(__m128i __A, __m128i __B)
{
return _mm_com_epu8(__A, __B, _MM_PCOMCTRL_EQ);
}
static __inline__ __m128i __DEFAULT_FN_ATTRS
_mm_comneq_epu8(__m128i __A, __m128i __B)
{
return _mm_com_epu8(__A, __B, _MM_PCOMCTRL_NEQ);
}
static __inline__ __m128i __DEFAULT_FN_ATTRS
_mm_comfalse_epu8(__m128i __A, __m128i __B)
{
return _mm_com_epu8(__A, __B, _MM_PCOMCTRL_FALSE);
}
static __inline__ __m128i __DEFAULT_FN_ATTRS
_mm_comtrue_epu8(__m128i __A, __m128i __B)
{
return _mm_com_epu8(__A, __B, _MM_PCOMCTRL_TRUE);
}
static __inline__ __m128i __DEFAULT_FN_ATTRS
_mm_comlt_epu16(__m128i __A, __m128i __B)
{
return _mm_com_epu16(__A, __B, _MM_PCOMCTRL_LT);
}
static __inline__ __m128i __DEFAULT_FN_ATTRS
_mm_comle_epu16(__m128i __A, __m128i __B)
{
return _mm_com_epu16(__A, __B, _MM_PCOMCTRL_LE);
}
static __inline__ __m128i __DEFAULT_FN_ATTRS
_mm_comgt_epu16(__m128i __A, __m128i __B)
{
return _mm_com_epu16(__A, __B, _MM_PCOMCTRL_GT);
}
static __inline__ __m128i __DEFAULT_FN_ATTRS
_mm_comge_epu16(__m128i __A, __m128i __B)
{
return _mm_com_epu16(__A, __B, _MM_PCOMCTRL_GE);
}
static __inline__ __m128i __DEFAULT_FN_ATTRS
_mm_comeq_epu16(__m128i __A, __m128i __B)
{
return _mm_com_epu16(__A, __B, _MM_PCOMCTRL_EQ);
}
static __inline__ __m128i __DEFAULT_FN_ATTRS
_mm_comneq_epu16(__m128i __A, __m128i __B)
{
return _mm_com_epu16(__A, __B, _MM_PCOMCTRL_NEQ);
}
static __inline__ __m128i __DEFAULT_FN_ATTRS
_mm_comfalse_epu16(__m128i __A, __m128i __B)
{
return _mm_com_epu16(__A, __B, _MM_PCOMCTRL_FALSE);
}
static __inline__ __m128i __DEFAULT_FN_ATTRS
_mm_comtrue_epu16(__m128i __A, __m128i __B)
{
return _mm_com_epu16(__A, __B, _MM_PCOMCTRL_TRUE);
}
static __inline__ __m128i __DEFAULT_FN_ATTRS
_mm_comlt_epu32(__m128i __A, __m128i __B)
{
return _mm_com_epu32(__A, __B, _MM_PCOMCTRL_LT);
}
static __inline__ __m128i __DEFAULT_FN_ATTRS
_mm_comle_epu32(__m128i __A, __m128i __B)
{
return _mm_com_epu32(__A, __B, _MM_PCOMCTRL_LE);
}
static __inline__ __m128i __DEFAULT_FN_ATTRS
_mm_comgt_epu32(__m128i __A, __m128i __B)
{
return _mm_com_epu32(__A, __B, _MM_PCOMCTRL_GT);
}
static __inline__ __m128i __DEFAULT_FN_ATTRS
_mm_comge_epu32(__m128i __A, __m128i __B)
{
return _mm_com_epu32(__A, __B, _MM_PCOMCTRL_GE);
}
static __inline__ __m128i __DEFAULT_FN_ATTRS
_mm_comeq_epu32(__m128i __A, __m128i __B)
{
return _mm_com_epu32(__A, __B, _MM_PCOMCTRL_EQ);
}
static __inline__ __m128i __DEFAULT_FN_ATTRS
_mm_comneq_epu32(__m128i __A, __m128i __B)
{
return _mm_com_epu32(__A, __B, _MM_PCOMCTRL_NEQ);
}
static __inline__ __m128i __DEFAULT_FN_ATTRS
_mm_comfalse_epu32(__m128i __A, __m128i __B)
{
return _mm_com_epu32(__A, __B, _MM_PCOMCTRL_FALSE);
}
static __inline__ __m128i __DEFAULT_FN_ATTRS
_mm_comtrue_epu32(__m128i __A, __m128i __B)
{
return _mm_com_epu32(__A, __B, _MM_PCOMCTRL_TRUE);
}
static __inline__ __m128i __DEFAULT_FN_ATTRS
_mm_comlt_epu64(__m128i __A, __m128i __B)
{
return _mm_com_epu64(__A, __B, _MM_PCOMCTRL_LT);
}
static __inline__ __m128i __DEFAULT_FN_ATTRS
_mm_comle_epu64(__m128i __A, __m128i __B)
{
return _mm_com_epu64(__A, __B, _MM_PCOMCTRL_LE);
}
static __inline__ __m128i __DEFAULT_FN_ATTRS
_mm_comgt_epu64(__m128i __A, __m128i __B)
{
return _mm_com_epu64(__A, __B, _MM_PCOMCTRL_GT);
}
static __inline__ __m128i __DEFAULT_FN_ATTRS
_mm_comge_epu64(__m128i __A, __m128i __B)
{
return _mm_com_epu64(__A, __B, _MM_PCOMCTRL_GE);
}
static __inline__ __m128i __DEFAULT_FN_ATTRS
_mm_comeq_epu64(__m128i __A, __m128i __B)
{
return _mm_com_epu64(__A, __B, _MM_PCOMCTRL_EQ);
}
static __inline__ __m128i __DEFAULT_FN_ATTRS
_mm_comneq_epu64(__m128i __A, __m128i __B)
{
return _mm_com_epu64(__A, __B, _MM_PCOMCTRL_NEQ);
}
static __inline__ __m128i __DEFAULT_FN_ATTRS
_mm_comfalse_epu64(__m128i __A, __m128i __B)
{
return _mm_com_epu64(__A, __B, _MM_PCOMCTRL_FALSE);
}
static __inline__ __m128i __DEFAULT_FN_ATTRS
_mm_comtrue_epu64(__m128i __A, __m128i __B)
{
return _mm_com_epu64(__A, __B, _MM_PCOMCTRL_TRUE);
}
static __inline__ __m128i __DEFAULT_FN_ATTRS
_mm_comlt_epi8(__m128i __A, __m128i __B)
{
return _mm_com_epi8(__A, __B, _MM_PCOMCTRL_LT);
}
static __inline__ __m128i __DEFAULT_FN_ATTRS
_mm_comle_epi8(__m128i __A, __m128i __B)
{
return _mm_com_epi8(__A, __B, _MM_PCOMCTRL_LE);
}
static __inline__ __m128i __DEFAULT_FN_ATTRS
_mm_comgt_epi8(__m128i __A, __m128i __B)
{
return _mm_com_epi8(__A, __B, _MM_PCOMCTRL_GT);
}
static __inline__ __m128i __DEFAULT_FN_ATTRS
_mm_comge_epi8(__m128i __A, __m128i __B)
{
return _mm_com_epi8(__A, __B, _MM_PCOMCTRL_GE);
}
static __inline__ __m128i __DEFAULT_FN_ATTRS
_mm_comeq_epi8(__m128i __A, __m128i __B)
{
return _mm_com_epi8(__A, __B, _MM_PCOMCTRL_EQ);
}
static __inline__ __m128i __DEFAULT_FN_ATTRS
_mm_comneq_epi8(__m128i __A, __m128i __B)
{
return _mm_com_epi8(__A, __B, _MM_PCOMCTRL_NEQ);
}
static __inline__ __m128i __DEFAULT_FN_ATTRS
_mm_comfalse_epi8(__m128i __A, __m128i __B)
{
return _mm_com_epi8(__A, __B, _MM_PCOMCTRL_FALSE);
}
static __inline__ __m128i __DEFAULT_FN_ATTRS
_mm_comtrue_epi8(__m128i __A, __m128i __B)
{
return _mm_com_epi8(__A, __B, _MM_PCOMCTRL_TRUE);
}
static __inline__ __m128i __DEFAULT_FN_ATTRS
_mm_comlt_epi16(__m128i __A, __m128i __B)
{
return _mm_com_epi16(__A, __B, _MM_PCOMCTRL_LT);
}
static __inline__ __m128i __DEFAULT_FN_ATTRS
_mm_comle_epi16(__m128i __A, __m128i __B)
{
return _mm_com_epi16(__A, __B, _MM_PCOMCTRL_LE);
}
static __inline__ __m128i __DEFAULT_FN_ATTRS
_mm_comgt_epi16(__m128i __A, __m128i __B)
{
return _mm_com_epi16(__A, __B, _MM_PCOMCTRL_GT);
}
static __inline__ __m128i __DEFAULT_FN_ATTRS
_mm_comge_epi16(__m128i __A, __m128i __B)
{
return _mm_com_epi16(__A, __B, _MM_PCOMCTRL_GE);
}
static __inline__ __m128i __DEFAULT_FN_ATTRS
_mm_comeq_epi16(__m128i __A, __m128i __B)
{
return _mm_com_epi16(__A, __B, _MM_PCOMCTRL_EQ);
}
static __inline__ __m128i __DEFAULT_FN_ATTRS
_mm_comneq_epi16(__m128i __A, __m128i __B)
{
return _mm_com_epi16(__A, __B, _MM_PCOMCTRL_NEQ);
}
static __inline__ __m128i __DEFAULT_FN_ATTRS
_mm_comfalse_epi16(__m128i __A, __m128i __B)
{
return _mm_com_epi16(__A, __B, _MM_PCOMCTRL_FALSE);
}
static __inline__ __m128i __DEFAULT_FN_ATTRS
_mm_comtrue_epi16(__m128i __A, __m128i __B)
{
return _mm_com_epi16(__A, __B, _MM_PCOMCTRL_TRUE);
}
static __inline__ __m128i __DEFAULT_FN_ATTRS
_mm_comlt_epi32(__m128i __A, __m128i __B)
{
return _mm_com_epi32(__A, __B, _MM_PCOMCTRL_LT);
}
static __inline__ __m128i __DEFAULT_FN_ATTRS
_mm_comle_epi32(__m128i __A, __m128i __B)
{
return _mm_com_epi32(__A, __B, _MM_PCOMCTRL_LE);
}
static __inline__ __m128i __DEFAULT_FN_ATTRS
_mm_comgt_epi32(__m128i __A, __m128i __B)
{
return _mm_com_epi32(__A, __B, _MM_PCOMCTRL_GT);
}
static __inline__ __m128i __DEFAULT_FN_ATTRS
_mm_comge_epi32(__m128i __A, __m128i __B)
{
return _mm_com_epi32(__A, __B, _MM_PCOMCTRL_GE);
}
static __inline__ __m128i __DEFAULT_FN_ATTRS
_mm_comeq_epi32(__m128i __A, __m128i __B)
{
return _mm_com_epi32(__A, __B, _MM_PCOMCTRL_EQ);
}
static __inline__ __m128i __DEFAULT_FN_ATTRS
_mm_comneq_epi32(__m128i __A, __m128i __B)
{
return _mm_com_epi32(__A, __B, _MM_PCOMCTRL_NEQ);
}
static __inline__ __m128i __DEFAULT_FN_ATTRS
_mm_comfalse_epi32(__m128i __A, __m128i __B)
{
return _mm_com_epi32(__A, __B, _MM_PCOMCTRL_FALSE);
}
static __inline__ __m128i __DEFAULT_FN_ATTRS
_mm_comtrue_epi32(__m128i __A, __m128i __B)
{
return _mm_com_epi32(__A, __B, _MM_PCOMCTRL_TRUE);
}
static __inline__ __m128i __DEFAULT_FN_ATTRS
_mm_comlt_epi64(__m128i __A, __m128i __B)
{
return _mm_com_epi64(__A, __B, _MM_PCOMCTRL_LT);
}
static __inline__ __m128i __DEFAULT_FN_ATTRS
_mm_comle_epi64(__m128i __A, __m128i __B)
{
return _mm_com_epi64(__A, __B, _MM_PCOMCTRL_LE);
}
static __inline__ __m128i __DEFAULT_FN_ATTRS
_mm_comgt_epi64(__m128i __A, __m128i __B)
{
return _mm_com_epi64(__A, __B, _MM_PCOMCTRL_GT);
}
static __inline__ __m128i __DEFAULT_FN_ATTRS
_mm_comge_epi64(__m128i __A, __m128i __B)
{
return _mm_com_epi64(__A, __B, _MM_PCOMCTRL_GE);
}
static __inline__ __m128i __DEFAULT_FN_ATTRS
_mm_comeq_epi64(__m128i __A, __m128i __B)
{
return _mm_com_epi64(__A, __B, _MM_PCOMCTRL_EQ);
}
static __inline__ __m128i __DEFAULT_FN_ATTRS
_mm_comneq_epi64(__m128i __A, __m128i __B)
{
return _mm_com_epi64(__A, __B, _MM_PCOMCTRL_NEQ);
}
static __inline__ __m128i __DEFAULT_FN_ATTRS
_mm_comfalse_epi64(__m128i __A, __m128i __B)
{
return _mm_com_epi64(__A, __B, _MM_PCOMCTRL_FALSE);
}
static __inline__ __m128i __DEFAULT_FN_ATTRS
_mm_comtrue_epi64(__m128i __A, __m128i __B)
{
return _mm_com_epi64(__A, __B, _MM_PCOMCTRL_TRUE);
}
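
/* Two-source permutes: select elements from the concatenation of X and Y under
   control of the selector vector C and the immediate I. */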
#define _mm_permute2_pd(X, Y, C, I) \
  (__m128d)__builtin_ia32_vpermil2pd((__v2df)(__m128d)(X), \
                                     (__v2df)(__m128d)(Y), \
                                     (__v2di)(__m128i)(C), (I))
#define _mm256_permute2_pd(X, Y, C, I) \
  (__m256d)__builtin_ia32_vpermil2pd256((__v4df)(__m256d)(X), \
                                        (__v4df)(__m256d)(Y), \
                                        (__v4di)(__m256i)(C), (I))
#define _mm_permute2_ps(X, Y, C, I) \
  (__m128)__builtin_ia32_vpermil2ps((__v4sf)(__m128)(X), (__v4sf)(__m128)(Y), \
                                    (__v4si)(__m128i)(C), (I))
#define _mm256_permute2_ps(X, Y, C, I) \
  (__m256)__builtin_ia32_vpermil2ps256((__v8sf)(__m256)(X), \
                                       (__v8sf)(__m256)(Y), \
                                       (__v8si)(__m256i)(C), (I))
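
/* Fraction extraction: return the fractional part of each floating-point
   element. */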
static __inline__ __m128 __DEFAULT_FN_ATTRS
_mm_frcz_ss(__m128 __A)
{
return (__m128)__builtin_ia32_vfrczss((__v4sf)__A);
}
static __inline__ __m128d __DEFAULT_FN_ATTRS
_mm_frcz_sd(__m128d __A)
{
return (__m128d)__builtin_ia32_vfrczsd((__v2df)__A);
}
static __inline__ __m128 __DEFAULT_FN_ATTRS
_mm_frcz_ps(__m128 __A)
{
return (__m128)__builtin_ia32_vfrczps((__v4sf)__A);
}
static __inline__ __m128d __DEFAULT_FN_ATTRS
_mm_frcz_pd(__m128d __A)
{
return (__m128d)__builtin_ia32_vfrczpd((__v2df)__A);
}
static __inline__ __m256 __DEFAULT_FN_ATTRS256
_mm256_frcz_ps(__m256 __A)
{
return (__m256)__builtin_ia32_vfrczps256((__v8sf)__A);
}
static __inline__ __m256d __DEFAULT_FN_ATTRS256
_mm256_frcz_pd(__m256d __A)
{
return (__m256d)__builtin_ia32_vfrczpd256((__v4df)__A);
}
#undef __DEFAULT_FN_ATTRS
#undef __DEFAULT_FN_ATTRS256
#endif /* __XOPINTRIN_H */