[Clang Format] emmintrin.h smmintrin.h (NFC)

This commit is contained in:
Xiang1 Zhang 2022-04-22 09:18:41 +08:00
parent bb8c8751cf
commit 6454ff35e0
2 changed files with 729 additions and 988 deletions

File diff suppressed because it is too large Load Diff

View File

@ -17,23 +17,25 @@
#include <tmmintrin.h>
/* Define the default attributes for the functions in this file. */
/* Default attributes for SSE4.1 intrinsics: force-inline, no debug info,
   require the sse4.1 target feature, and cap vector width at 128 bits. */
#define __DEFAULT_FN_ATTRS                                                     \
  __attribute__((__always_inline__, __nodebug__, __target__("sse4.1"),         \
                 __min_vector_width__(128)))
/* SSE4 Rounding macros. */
/* Rounding-mode selectors (low 3 bits of the immediate). */
#define _MM_FROUND_TO_NEAREST_INT 0x00
#define _MM_FROUND_TO_NEG_INF 0x01
#define _MM_FROUND_TO_POS_INF 0x02
#define _MM_FROUND_TO_ZERO 0x03
#define _MM_FROUND_CUR_DIRECTION 0x04

/* Exception behavior: raise precision exceptions, or suppress them. */
#define _MM_FROUND_RAISE_EXC 0x00
#define _MM_FROUND_NO_EXC 0x08

/* Convenience combinations of rounding mode and exception behavior. */
#define _MM_FROUND_NINT (_MM_FROUND_RAISE_EXC | _MM_FROUND_TO_NEAREST_INT)
#define _MM_FROUND_FLOOR (_MM_FROUND_RAISE_EXC | _MM_FROUND_TO_NEG_INF)
#define _MM_FROUND_CEIL (_MM_FROUND_RAISE_EXC | _MM_FROUND_TO_POS_INF)
#define _MM_FROUND_TRUNC (_MM_FROUND_RAISE_EXC | _MM_FROUND_TO_ZERO)
#define _MM_FROUND_RINT (_MM_FROUND_RAISE_EXC | _MM_FROUND_CUR_DIRECTION)
#define _MM_FROUND_NEARBYINT (_MM_FROUND_NO_EXC | _MM_FROUND_CUR_DIRECTION)
/// Rounds up each element of the 128-bit vector of [4 x float] to an
@ -51,7 +53,7 @@
/// \param X
/// A 128-bit vector of [4 x float] values to be rounded up.
/// \returns A 128-bit vector of [4 x float] containing the rounded values.
#define _mm_ceil_ps(X) _mm_round_ps((X), _MM_FROUND_CEIL)
/// Rounds up each element of the 128-bit vector of [2 x double] to an
/// integer and returns the rounded values in a 128-bit vector of
@ -68,7 +70,7 @@
/// \param X
/// A 128-bit vector of [2 x double] values to be rounded up.
/// \returns A 128-bit vector of [2 x double] containing the rounded values.
#define _mm_ceil_pd(X) _mm_round_pd((X), _MM_FROUND_CEIL)
/// Copies three upper elements of the first 128-bit vector operand to
/// the corresponding three upper elements of the 128-bit result vector of
@ -93,7 +95,7 @@
/// of the result.
/// \returns A 128-bit vector of [4 x float] containing the copied and rounded
/// values.
#define _mm_ceil_ss(X, Y) _mm_round_ss((X), (Y), _MM_FROUND_CEIL)
/// Copies the upper element of the first 128-bit vector operand to the
/// corresponding upper element of the 128-bit result vector of [2 x double].
@ -118,7 +120,7 @@
/// of the result.
/// \returns A 128-bit vector of [2 x double] containing the copied and rounded
/// values.
#define _mm_ceil_sd(X, Y) _mm_round_sd((X), (Y), _MM_FROUND_CEIL)
/// Rounds down each element of the 128-bit vector of [4 x float] to an
/// integer and returns the rounded values in a 128-bit vector of
@ -135,7 +137,7 @@
/// \param X
/// A 128-bit vector of [4 x float] values to be rounded down.
/// \returns A 128-bit vector of [4 x float] containing the rounded values.
#define _mm_floor_ps(X) _mm_round_ps((X), _MM_FROUND_FLOOR)
/// Rounds down each element of the 128-bit vector of [2 x double] to an
/// integer and returns the rounded values in a 128-bit vector of
@ -152,7 +154,7 @@
/// \param X
/// A 128-bit vector of [2 x double].
/// \returns A 128-bit vector of [2 x double] containing the rounded values.
#define _mm_floor_pd(X) _mm_round_pd((X), _MM_FROUND_FLOOR)
/// Copies three upper elements of the first 128-bit vector operand to
/// the corresponding three upper elements of the 128-bit result vector of
@ -177,7 +179,7 @@
/// of the result.
/// \returns A 128-bit vector of [4 x float] containing the copied and rounded
/// values.
#define _mm_floor_ss(X, Y) _mm_round_ss((X), (Y), _MM_FROUND_FLOOR)
/// Copies the upper element of the first 128-bit vector operand to the
/// corresponding upper element of the 128-bit result vector of [2 x double].
@ -202,7 +204,7 @@
/// of the result.
/// \returns A 128-bit vector of [2 x double] containing the copied and rounded
/// values.
#define _mm_floor_sd(X, Y) _mm_round_sd((X), (Y), _MM_FROUND_FLOOR)
/// Rounds each element of the 128-bit vector of [4 x float] to an
/// integer value according to the rounding control specified by the second
@ -234,7 +236,7 @@
/// 10: Upward (toward positive infinity) \n
/// 11: Truncated
/// \returns A 128-bit vector of [4 x float] containing the rounded values.
#define _mm_round_ps(X, M)                                                     \
  ((__m128)__builtin_ia32_roundps((__v4sf)(__m128)(X), (M)))
/// Copies three upper elements of the first 128-bit vector operand to
@ -275,9 +277,9 @@
/// 11: Truncated
/// \returns A 128-bit vector of [4 x float] containing the copied and rounded
/// values.
#define _mm_round_ss(X, Y, M)                                                  \
  ((__m128)__builtin_ia32_roundss((__v4sf)(__m128)(X), (__v4sf)(__m128)(Y),    \
                                  (M)))
/// Rounds each element of the 128-bit vector of [2 x double] to an
/// integer value according to the rounding control specified by the second
@ -309,7 +311,7 @@
/// 10: Upward (toward positive infinity) \n
/// 11: Truncated
/// \returns A 128-bit vector of [2 x double] containing the rounded values.
#define _mm_round_pd(X, M)                                                     \
  ((__m128d)__builtin_ia32_roundpd((__v2df)(__m128d)(X), (M)))
/// Copies the upper element of the first 128-bit vector operand to the
@ -350,9 +352,9 @@
/// 11: Truncated
/// \returns A 128-bit vector of [2 x double] containing the copied and rounded
/// values.
#define _mm_round_sd(X, Y, M)                                                  \
  ((__m128d)__builtin_ia32_roundsd((__v2df)(__m128d)(X), (__v2df)(__m128d)(Y), \
                                   (M)))
/* SSE4 Packed Blending Intrinsics. */
/// Returns a 128-bit vector of [2 x double] where the values are
@ -379,9 +381,9 @@
/// When a mask bit is 1, the corresponding 64-bit element in operand \a V2
/// is copied to the same position in the result.
/// \returns A 128-bit vector of [2 x double] containing the copied values.
#define _mm_blend_pd(V1, V2, M)                                                \
  ((__m128d)__builtin_ia32_blendpd((__v2df)(__m128d)(V1),                      \
                                   (__v2df)(__m128d)(V2), (int)(M)))
/// Returns a 128-bit vector of [4 x float] where the values are selected
/// from either the first or second operand as specified by the third
@ -407,9 +409,9 @@
/// When a mask bit is 1, the corresponding 32-bit element in operand \a V2
/// is copied to the same position in the result.
/// \returns A 128-bit vector of [4 x float] containing the copied values.
#define _mm_blend_ps(V1, V2, M)                                                \
  ((__m128)__builtin_ia32_blendps((__v4sf)(__m128)(V1), (__v4sf)(__m128)(V2),  \
                                  (int)(M)))
/// Returns a 128-bit vector of [2 x double] where the values are
/// selected from either the first or second operand as specified by the
@ -431,11 +433,11 @@
/// position in the result. When a mask bit is 1, the corresponding 64-bit
/// element in operand \a __V2 is copied to the same position in the result.
/// \returns A 128-bit vector of [2 x double] containing the copied values.
static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_blendv_pd(__m128d __V1,
                                                           __m128d __V2,
                                                           __m128d __M) {
  return (__m128d)__builtin_ia32_blendvpd((__v2df)__V1, (__v2df)__V2,
                                          (__v2df)__M);
}
/// Returns a 128-bit vector of [4 x float] where the values are
@ -458,11 +460,11 @@ _mm_blendv_pd (__m128d __V1, __m128d __V2, __m128d __M)
/// position in the result. When a mask bit is 1, the corresponding 32-bit
/// element in operand \a __V2 is copied to the same position in the result.
/// \returns A 128-bit vector of [4 x float] containing the copied values.
static __inline__ __m128 __DEFAULT_FN_ATTRS _mm_blendv_ps(__m128 __V1,
                                                          __m128 __V2,
                                                          __m128 __M) {
  return (__m128)__builtin_ia32_blendvps((__v4sf)__V1, (__v4sf)__V2,
                                         (__v4sf)__M);
}
/// Returns a 128-bit vector of [16 x i8] where the values are selected
@ -485,11 +487,11 @@ _mm_blendv_ps (__m128 __V1, __m128 __V2, __m128 __M)
/// position in the result. When a mask bit is 1, the corresponding 8-bit
/// element in operand \a __V2 is copied to the same position in the result.
/// \returns A 128-bit vector of [16 x i8] containing the copied values.
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_blendv_epi8(__m128i __V1,
                                                             __m128i __V2,
                                                             __m128i __M) {
  return (__m128i)__builtin_ia32_pblendvb128((__v16qi)__V1, (__v16qi)__V2,
                                             (__v16qi)__M);
}
/// Returns a 128-bit vector of [8 x i16] where the values are selected
@ -516,9 +518,9 @@ _mm_blendv_epi8 (__m128i __V1, __m128i __V2, __m128i __M)
/// When a mask bit is 1, the corresponding 16-bit element in operand \a V2
/// is copied to the same position in the result.
/// \returns A 128-bit vector of [8 x i16] containing the copied values.
#define _mm_blend_epi16(V1, V2, M)                                             \
  ((__m128i)__builtin_ia32_pblendw128((__v8hi)(__m128i)(V1),                   \
                                      (__v8hi)(__m128i)(V2), (int)(M)))
/* SSE4 Dword Multiply Instructions. */
/// Multiplies corresponding elements of two 128-bit vectors of [4 x i32]
@ -534,10 +536,9 @@ _mm_blendv_epi8 (__m128i __V1, __m128i __V2, __m128i __M)
/// \param __V2
/// A 128-bit integer vector.
/// \returns A 128-bit integer vector containing the products of both operands.
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_mullo_epi32(__m128i __V1,
                                                             __m128i __V2) {
  /* Unsigned lane type avoids UB on signed overflow; low 32 bits of the
     product are identical either way. */
  return (__m128i)((__v4su)__V1 * (__v4su)__V2);
}
/// Multiplies corresponding even-indexed elements of two 128-bit
@ -554,10 +555,9 @@ _mm_mullo_epi32 (__m128i __V1, __m128i __V2)
/// A 128-bit vector of [4 x i32].
/// \returns A 128-bit vector of [2 x i64] containing the products of both
/// operands.
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_mul_epi32(__m128i __V1,
                                                           __m128i __V2) {
  return (__m128i)__builtin_ia32_pmuldq128((__v4si)__V1, (__v4si)__V2);
}
/* SSE4 Floating Point Dot Product Instructions. */
@ -593,9 +593,8 @@ _mm_mul_epi32 (__m128i __V1, __m128i __V2)
/// each [4 x float] subvector. If a bit is set, the dot product is returned
/// in the corresponding element; otherwise that element is set to zero.
/// \returns A 128-bit vector of [4 x float] containing the dot product.
#define _mm_dp_ps(X, Y, M)                                                     \
  ((__m128)__builtin_ia32_dpps((__v4sf)(__m128)(X), (__v4sf)(__m128)(Y), (M)))
/// Computes the dot product of the two 128-bit vectors of [2 x double]
/// and returns it in the elements of the 128-bit result vector of
@ -628,9 +627,9 @@ _mm_mul_epi32 (__m128i __V1, __m128i __V2)
/// to the lowest element and bit [1] corresponding to the highest element of
/// each [2 x double] vector. If a bit is set, the dot product is returned in
/// the corresponding element; otherwise that element is set to zero.
#define _mm_dp_pd(X, Y, M)                                                     \
  ((__m128d)__builtin_ia32_dppd((__v2df)(__m128d)(X), (__v2df)(__m128d)(Y),    \
                                (M)))
/* SSE4 Streaming Load Hint Instruction. */
/// Loads integer values from a 128-bit aligned memory location to a
@ -645,10 +644,9 @@ _mm_mul_epi32 (__m128i __V1, __m128i __V2)
/// values.
/// \returns A 128-bit integer vector containing the data stored at the
/// specified memory location.
static __inline__ __m128i __DEFAULT_FN_ATTRS
_mm_stream_load_si128(__m128i const *__V) {
  return (__m128i)__builtin_nontemporal_load((const __v2di *)__V);
}
/* SSE4 Packed Integer Min/Max Instructions. */
@ -665,10 +663,9 @@ _mm_stream_load_si128 (__m128i const *__V)
/// \param __V2
/// A 128-bit vector of [16 x i8]
/// \returns A 128-bit vector of [16 x i8] containing the lesser values.
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_min_epi8(__m128i __V1,
                                                          __m128i __V2) {
  return (__m128i)__builtin_elementwise_min((__v16qs)__V1, (__v16qs)__V2);
}
/// Compares the corresponding elements of two 128-bit vectors of
@ -684,10 +681,9 @@ _mm_min_epi8 (__m128i __V1, __m128i __V2)
/// \param __V2
/// A 128-bit vector of [16 x i8].
/// \returns A 128-bit vector of [16 x i8] containing the greater values.
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_max_epi8(__m128i __V1,
                                                          __m128i __V2) {
  return (__m128i)__builtin_elementwise_max((__v16qs)__V1, (__v16qs)__V2);
}
/// Compares the corresponding elements of two 128-bit vectors of
@ -703,10 +699,9 @@ _mm_max_epi8 (__m128i __V1, __m128i __V2)
/// \param __V2
/// A 128-bit vector of [8 x u16].
/// \returns A 128-bit vector of [8 x u16] containing the lesser values.
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_min_epu16(__m128i __V1,
                                                           __m128i __V2) {
  return (__m128i)__builtin_elementwise_min((__v8hu)__V1, (__v8hu)__V2);
}
/// Compares the corresponding elements of two 128-bit vectors of
@ -722,10 +717,9 @@ _mm_min_epu16 (__m128i __V1, __m128i __V2)
/// \param __V2
/// A 128-bit vector of [8 x u16].
/// \returns A 128-bit vector of [8 x u16] containing the greater values.
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_max_epu16(__m128i __V1,
                                                           __m128i __V2) {
  return (__m128i)__builtin_elementwise_max((__v8hu)__V1, (__v8hu)__V2);
}
/// Compares the corresponding elements of two 128-bit vectors of
@ -741,10 +735,9 @@ _mm_max_epu16 (__m128i __V1, __m128i __V2)
/// \param __V2
/// A 128-bit vector of [4 x i32].
/// \returns A 128-bit vector of [4 x i32] containing the lesser values.
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_min_epi32(__m128i __V1,
                                                           __m128i __V2) {
  return (__m128i)__builtin_elementwise_min((__v4si)__V1, (__v4si)__V2);
}
/// Compares the corresponding elements of two 128-bit vectors of
@ -760,10 +753,9 @@ _mm_min_epi32 (__m128i __V1, __m128i __V2)
/// \param __V2
/// A 128-bit vector of [4 x i32].
/// \returns A 128-bit vector of [4 x i32] containing the greater values.
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_max_epi32(__m128i __V1,
                                                           __m128i __V2) {
  return (__m128i)__builtin_elementwise_max((__v4si)__V1, (__v4si)__V2);
}
/// Compares the corresponding elements of two 128-bit vectors of
@ -779,10 +771,9 @@ _mm_max_epi32 (__m128i __V1, __m128i __V2)
/// \param __V2
/// A 128-bit vector of [4 x u32].
/// \returns A 128-bit vector of [4 x u32] containing the lesser values.
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_min_epu32(__m128i __V1,
                                                           __m128i __V2) {
  return (__m128i)__builtin_elementwise_min((__v4su)__V1, (__v4su)__V2);
}
/// Compares the corresponding elements of two 128-bit vectors of
@ -798,10 +789,9 @@ _mm_min_epu32 (__m128i __V1, __m128i __V2)
/// \param __V2
/// A 128-bit vector of [4 x u32].
/// \returns A 128-bit vector of [4 x u32] containing the greater values.
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_max_epu32(__m128i __V1,
                                                           __m128i __V2) {
  return (__m128i)__builtin_elementwise_max((__v4su)__V1, (__v4su)__V2);
}
/* SSE4 Insertion and Extraction from XMM Register Instructions. */
@ -869,21 +859,24 @@ _mm_max_epu32 (__m128i __V1, __m128i __V2)
/// 10: Bits [95:64] of parameter \a X are returned. \n
/// 11: Bits [127:96] of parameter \a X are returned.
/// \returns A 32-bit integer containing the extracted 32 bits of float data.
#define _mm_extract_ps(X, N)                                                   \
  __builtin_bit_cast(                                                          \
      int, __builtin_ia32_vec_ext_v4sf((__v4sf)(__m128)(X), (int)(N)))
/* Miscellaneous insert and extract macros. */
/* Extract a single-precision float from X at index N into D. */
#define _MM_EXTRACT_FLOAT(D, X, N)                                             \
  do {                                                                         \
    (D) = __builtin_ia32_vec_ext_v4sf((__v4sf)(__m128)(X), (int)(N));          \
  } while (0)
/* Or together 2 sets of indexes (X and Y) with the zeroing bits (Z) to create
an index suitable for _mm_insert_ps. */
#define _MM_MK_INSERTPS_NDX(X, Y, Z) (((X) << 6) | ((Y) << 4) | (Z))

/* Extract a float from X at index N into the first index of the return. */
#define _MM_PICK_OUT_PS(X, N)                                                  \
  _mm_insert_ps(_mm_setzero_ps(), (X), _MM_MK_INSERTPS_NDX((N), 0, 0x0e))
/* Insert int into packed integer array at index. */
/// Constructs a 128-bit vector of [16 x i8] by first making a copy of
@ -926,9 +919,9 @@ _mm_max_epu32 (__m128i __V1, __m128i __V2)
/// 1110: Bits [119:112] of the result are used for insertion. \n
/// 1111: Bits [127:120] of the result are used for insertion.
/// \returns A 128-bit integer vector containing the constructed values.
#define _mm_insert_epi8(X, I, N)                                               \
  ((__m128i)__builtin_ia32_vec_set_v16qi((__v16qi)(__m128i)(X), (int)(I),      \
                                         (int)(N)))
/// Constructs a 128-bit vector of [4 x i32] by first making a copy of
/// the 128-bit integer vector parameter, and then inserting the 32-bit
@ -958,9 +951,9 @@ _mm_max_epu32 (__m128i __V1, __m128i __V2)
/// 10: Bits [95:64] of the result are used for insertion. \n
/// 11: Bits [127:96] of the result are used for insertion.
/// \returns A 128-bit integer vector containing the constructed values.
#define _mm_insert_epi32(X, I, N)                                              \
  ((__m128i)__builtin_ia32_vec_set_v4si((__v4si)(__m128i)(X), (int)(I),        \
                                        (int)(N)))
#ifdef __x86_64__
/// Constructs a 128-bit vector of [2 x i64] by first making a copy of
@ -989,9 +982,9 @@ _mm_max_epu32 (__m128i __V1, __m128i __V2)
/// 0: Bits [63:0] of the result are used for insertion. \n
/// 1: Bits [127:64] of the result are used for insertion. \n
/// \returns A 128-bit integer vector containing the constructed values.
#define _mm_insert_epi64(X, I, N)                                              \
  ((__m128i)__builtin_ia32_vec_set_v2di((__v2di)(__m128i)(X), (long long)(I),  \
                                        (int)(N)))
#endif /* __x86_64__ */
/* Extract int from packed integer array at index. This returns the element
@ -1032,8 +1025,8 @@ _mm_max_epu32 (__m128i __V1, __m128i __V2)
/// \returns An unsigned integer, whose lower 8 bits are selected from the
/// 128-bit integer vector parameter and the remaining bits are assigned
/// zeros.
#define _mm_extract_epi8(X, N)                                                 \
  ((int)(unsigned char)__builtin_ia32_vec_ext_v16qi((__v16qi)(__m128i)(X),     \
                                                    (int)(N)))
/// Extracts a 32-bit element from the 128-bit integer vector of
@ -1058,7 +1051,7 @@ _mm_max_epu32 (__m128i __V1, __m128i __V2)
/// 11: Bits [127:96] of the parameter \a X are extracted.
/// \returns An integer, whose lower 32 bits are selected from the 128-bit
/// integer vector parameter and the remaining bits are assigned zeros.
#define _mm_extract_epi32(X, N)                                                \
  ((int)__builtin_ia32_vec_ext_v4si((__v4si)(__m128i)(X), (int)(N)))
#ifdef __x86_64__
@ -1081,7 +1074,7 @@ _mm_max_epu32 (__m128i __V1, __m128i __V2)
/// 0: Bits [63:0] are returned. \n
/// 1: Bits [127:64] are returned. \n
/// \returns A 64-bit integer.
#define _mm_extract_epi64(X, N)                                                \
  ((long long)__builtin_ia32_vec_ext_v2di((__v2di)(__m128i)(X), (int)(N)))
#endif /* __x86_64 */
@ -1098,9 +1091,8 @@ _mm_max_epu32 (__m128i __V1, __m128i __V2)
/// \param __V
/// A 128-bit integer vector selecting which bits to test in operand \a __M.
/// \returns TRUE if the specified bits are all zeros; FALSE otherwise.
static __inline__ int __DEFAULT_FN_ATTRS _mm_testz_si128(__m128i __M,
                                                         __m128i __V) {
  return __builtin_ia32_ptestz128((__v2di)__M, (__v2di)__V);
}
@ -1116,9 +1108,8 @@ _mm_testz_si128(__m128i __M, __m128i __V)
/// \param __V
/// A 128-bit integer vector selecting which bits to test in operand \a __M.
/// \returns TRUE if the specified bits are all ones; FALSE otherwise.
static __inline__ int __DEFAULT_FN_ATTRS _mm_testc_si128(__m128i __M,
                                                         __m128i __V) {
  return __builtin_ia32_ptestc128((__v2di)__M, (__v2di)__V);
}
@ -1135,9 +1126,8 @@ _mm_testc_si128(__m128i __M, __m128i __V)
/// A 128-bit integer vector selecting which bits to test in operand \a __M.
/// \returns TRUE if the specified bits are neither all zeros nor all ones;
/// FALSE otherwise.
static __inline__ int __DEFAULT_FN_ATTRS _mm_testnzc_si128(__m128i __M,
                                                           __m128i __V) {
  return __builtin_ia32_ptestnzc128((__v2di)__M, (__v2di)__V);
}
@ -1193,7 +1183,7 @@ _mm_testnzc_si128(__m128i __M, __m128i __V)
/// \param V
/// A 128-bit integer vector selecting which bits to test in operand \a M.
/// \returns TRUE if the specified bits are all zeros; FALSE otherwise.
#define _mm_test_all_zeros(M, V) _mm_testz_si128((M), (V))
/* SSE4 64-bit Packed Integer Comparisons. */
/// Compares each of the corresponding 64-bit values of the 128-bit
@ -1208,9 +1198,8 @@ _mm_testnzc_si128(__m128i __M, __m128i __V)
/// \param __V2
/// A 128-bit integer vector.
/// \returns A 128-bit integer vector containing the comparison results.
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cmpeq_epi64(__m128i __V1,
                                                             __m128i __V2) {
  return (__m128i)((__v2di)__V1 == (__v2di)__V2);
}
@ -1228,12 +1217,13 @@ _mm_cmpeq_epi64(__m128i __V1, __m128i __V2)
/// A 128-bit vector of [16 x i8]. The lower eight 8-bit elements are sign-
/// extended to 16-bit values.
/// \returns A 128-bit vector of [8 x i16] containing the sign-extended values.
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cvtepi8_epi16(__m128i __V) {
  /* This function always performs a signed extension, but __v16qi is a char
     which may be signed or unsigned, so use __v16qs. */
  return (__m128i)__builtin_convertvector(
      __builtin_shufflevector((__v16qs)__V, (__v16qs)__V, 0, 1, 2, 3, 4, 5, 6,
                              7),
      __v8hi);
}
/// Sign-extends each of the lower four 8-bit integer elements of a
@ -1249,12 +1239,11 @@ _mm_cvtepi8_epi16(__m128i __V)
/// A 128-bit vector of [16 x i8]. The lower four 8-bit elements are
/// sign-extended to 32-bit values.
/// \returns A 128-bit vector of [4 x i32] containing the sign-extended values.
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cvtepi8_epi32(__m128i __V) {
  /* This function always performs a signed extension, but __v16qi is a char
     which may be signed or unsigned, so use __v16qs. */
  return (__m128i)__builtin_convertvector(
      __builtin_shufflevector((__v16qs)__V, (__v16qs)__V, 0, 1, 2, 3), __v4si);
}
/// Sign-extends each of the lower two 8-bit integer elements of a
@ -1270,12 +1259,11 @@ _mm_cvtepi8_epi32(__m128i __V)
/// A 128-bit vector of [16 x i8]. The lower two 8-bit elements are
/// sign-extended to 64-bit values.
/// \returns A 128-bit vector of [2 x i64] containing the sign-extended values.
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cvtepi8_epi64(__m128i __V) {
  /* This function always performs a signed extension, but __v16qi is a char
     which may be signed or unsigned, so use __v16qs. */
  return (__m128i)__builtin_convertvector(
      __builtin_shufflevector((__v16qs)__V, (__v16qs)__V, 0, 1), __v2di);
}
/// Sign-extends each of the lower four 16-bit integer elements of a
@ -1291,10 +1279,9 @@ _mm_cvtepi8_epi64(__m128i __V)
/// A 128-bit vector of [8 x i16]. The lower four 16-bit elements are
/// sign-extended to 32-bit values.
/// \returns A 128-bit vector of [4 x i32] containing the sign-extended values.
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cvtepi16_epi32(__m128i __V) {
  return (__m128i)__builtin_convertvector(
      __builtin_shufflevector((__v8hi)__V, (__v8hi)__V, 0, 1, 2, 3), __v4si);
}
/// Sign-extends each of the lower two 16-bit integer elements of a
@ -1310,10 +1297,9 @@ _mm_cvtepi16_epi32(__m128i __V)
/// A 128-bit vector of [8 x i16]. The lower two 16-bit elements are
/// sign-extended to 64-bit values.
/// \returns A 128-bit vector of [2 x i64] containing the sign-extended values.
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cvtepi16_epi64(__m128i __V) {
  return (__m128i)__builtin_convertvector(
      __builtin_shufflevector((__v8hi)__V, (__v8hi)__V, 0, 1), __v2di);
}
/// Sign-extends each of the lower two 32-bit integer elements of a
@ -1329,10 +1315,9 @@ _mm_cvtepi16_epi64(__m128i __V)
/// A 128-bit vector of [4 x i32]. The lower two 32-bit elements are
/// sign-extended to 64-bit values.
/// \returns A 128-bit vector of [2 x i64] containing the sign-extended values.
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cvtepi32_epi64(__m128i __V) {
  return (__m128i)__builtin_convertvector(
      __builtin_shufflevector((__v4si)__V, (__v4si)__V, 0, 1), __v2di);
}
/* SSE4 Packed Integer Zero-Extension. */
@ -1349,10 +1334,11 @@ _mm_cvtepi32_epi64(__m128i __V)
/// A 128-bit vector of [16 x i8]. The lower eight 8-bit elements are
/// zero-extended to 16-bit values.
/// \returns A 128-bit vector of [8 x i16] containing the zero-extended values.
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cvtepu8_epi16(__m128i __V) {
  return (__m128i)__builtin_convertvector(
      __builtin_shufflevector((__v16qu)__V, (__v16qu)__V, 0, 1, 2, 3, 4, 5, 6,
                              7),
      __v8hi);
}
/// Zero-extends each of the lower four 8-bit integer elements of a
@ -1368,10 +1354,9 @@ _mm_cvtepu8_epi16(__m128i __V)
/// A 128-bit vector of [16 x i8]. The lower four 8-bit elements are
/// zero-extended to 32-bit values.
/// \returns A 128-bit vector of [4 x i32] containing the zero-extended values.
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cvtepu8_epi32(__m128i __V) {
  return (__m128i)__builtin_convertvector(
      __builtin_shufflevector((__v16qu)__V, (__v16qu)__V, 0, 1, 2, 3), __v4si);
}
/// Zero-extends each of the lower two 8-bit integer elements of a
@ -1387,10 +1372,9 @@ _mm_cvtepu8_epi32(__m128i __V)
/// A 128-bit vector of [16 x i8]. The lower two 8-bit elements are
/// zero-extended to 64-bit values.
/// \returns A 128-bit vector of [2 x i64] containing the zero-extended values.
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cvtepu8_epi64(__m128i __V) {
  return (__m128i)__builtin_convertvector(
      __builtin_shufflevector((__v16qu)__V, (__v16qu)__V, 0, 1), __v2di);
}
/// Zero-extends each of the lower four 16-bit integer elements of a
@ -1406,10 +1390,9 @@ _mm_cvtepu8_epi64(__m128i __V)
/// A 128-bit vector of [8 x i16]. The lower four 16-bit elements are
/// zero-extended to 32-bit values.
/// \returns A 128-bit vector of [4 x i32] containing the zero-extended values.
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cvtepu16_epi32(__m128i __V) {
  /* Select the low four 16-bit elements; unsigned source => zero-extension. */
  return (__m128i)__builtin_convertvector(
      __builtin_shufflevector((__v8hu)__V, (__v8hu)__V, 0, 1, 2, 3), __v4si);
}
/// Zero-extends each of the lower two 16-bit integer elements of a
@ -1425,10 +1408,9 @@ _mm_cvtepu16_epi32(__m128i __V)
/// A 128-bit vector of [8 x i16]. The lower two 16-bit elements are
/// zero-extended to 64-bit values.
/// \returns A 128-bit vector of [2 x i64] containing the zero-extended values.
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cvtepu16_epi64(__m128i __V) {
  /* Select the low two 16-bit elements; unsigned source => zero-extension. */
  return (__m128i)__builtin_convertvector(
      __builtin_shufflevector((__v8hu)__V, (__v8hu)__V, 0, 1), __v2di);
}
/// Zero-extends each of the lower two 32-bit integer elements of a
@ -1444,10 +1426,9 @@ _mm_cvtepu16_epi64(__m128i __V)
/// A 128-bit vector of [4 x i32]. The lower two 32-bit elements are
/// zero-extended to 64-bit values.
/// \returns A 128-bit vector of [2 x i64] containing the zero-extended values.
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cvtepu32_epi64(__m128i __V) {
  /* Select the low two 32-bit elements; unsigned source => zero-extension. */
  return (__m128i)__builtin_convertvector(
      __builtin_shufflevector((__v4su)__V, (__v4su)__V, 0, 1), __v2di);
}
/* SSE4 Pack with Unsigned Saturation. */
@ -1473,10 +1454,9 @@ _mm_cvtepu32_epi64(__m128i __V)
/// less than 0x0000 are saturated to 0x0000. The converted [4 x i16] values
/// are written to the higher 64 bits of the result.
/// \returns A 128-bit vector of [8 x i16] containing the converted values.
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_packus_epi32(__m128i __V1,
                                                              __m128i __V2) {
  return (__m128i)__builtin_ia32_packusdw128((__v4si)__V1, (__v4si)__V2);
}
/* SSE4 Multiple Packed Sums of Absolute Difference. */
@ -1515,9 +1495,9 @@ _mm_packus_epi32(__m128i __V1, __m128i __V2)
/// \endcode
/// \returns A 128-bit integer vector containing the sums of the sets of
/// absolute differences between both operands.
#define _mm_mpsadbw_epu8(X, Y, M)                                              \
  ((__m128i)__builtin_ia32_mpsadbw128((__v16qi)(__m128i)(X),                   \
                                      (__v16qi)(__m128i)(Y), (M)))
/// Finds the minimum unsigned 16-bit element in the input 128-bit
/// vector of [8 x u16] and returns it and along with its index.
@ -1532,10 +1512,8 @@ _mm_packus_epi32(__m128i __V1, __m128i __V2)
/// \returns A 128-bit value where bits [15:0] contain the minimum value found
/// in parameter \a __V, bits [18:16] contain the index of the minimum value
/// and the remaining bits are set to 0.
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_minpos_epu16(__m128i __V) {
  return (__m128i)__builtin_ia32_phminposuw128((__v8hi)__V);
}
/* Handle the sse4.2 definitions here. */
@ -1544,33 +1522,34 @@ _mm_minpos_epu16(__m128i __V)
so we'll do the same. */
#undef __DEFAULT_FN_ATTRS
#define __DEFAULT_FN_ATTRS                                                     \
  __attribute__((__always_inline__, __nodebug__, __target__("sse4.2")))
/* These specify the type of data that we're comparing. */
#define _SIDD_UBYTE_OPS 0x00
#define _SIDD_UWORD_OPS 0x01
#define _SIDD_SBYTE_OPS 0x02
#define _SIDD_SWORD_OPS 0x03
/* These specify the type of comparison operation. */
#define _SIDD_CMP_EQUAL_ANY 0x00
#define _SIDD_CMP_RANGES 0x04
#define _SIDD_CMP_EQUAL_EACH 0x08
#define _SIDD_CMP_EQUAL_ORDERED 0x0c
/* These macros specify the polarity of the operation. */
#define _SIDD_POSITIVE_POLARITY 0x00
#define _SIDD_NEGATIVE_POLARITY 0x10
#define _SIDD_MASKED_POSITIVE_POLARITY 0x20
#define _SIDD_MASKED_NEGATIVE_POLARITY 0x30
/* These macros are used in _mm_cmpXstri() to specify the return. */
#define _SIDD_LEAST_SIGNIFICANT 0x00
#define _SIDD_MOST_SIGNIFICANT 0x40
/* These macros are used in _mm_cmpXstri() to specify the return. */
#define _SIDD_BIT_MASK 0x00
#define _SIDD_UNIT_MASK 0x40
/* SSE4.2 Packed Comparison Intrinsics. */
/// Uses the immediate operand \a M to perform a comparison of string
@ -1625,8 +1604,8 @@ _mm_minpos_epu16(__m128i __V)
/// repeating each bit 8 or 16 times).
/// \returns Returns a 128-bit integer vector representing the result mask of
/// the comparison.
#define _mm_cmpistrm(A, B, M)                                                  \
  ((__m128i)__builtin_ia32_pcmpistrm128((__v16qi)(__m128i)(A),                 \
                                        (__v16qi)(__m128i)(B), (int)(M)))
/// Uses the immediate operand \a M to perform a comparison of string
@ -1679,9 +1658,9 @@ _mm_minpos_epu16(__m128i __V)
/// 0: The index of the least significant set bit. \n
/// 1: The index of the most significant set bit. \n
/// \returns Returns an integer representing the result index of the comparison.
#define _mm_cmpistri(A, B, M)                                                  \
  ((int)__builtin_ia32_pcmpistri128((__v16qi)(__m128i)(A),                     \
                                    (__v16qi)(__m128i)(B), (int)(M)))
/// Uses the immediate operand \a M to perform a comparison of string
/// data with explicitly defined lengths that is contained in source operands
@ -1739,9 +1718,9 @@ _mm_minpos_epu16(__m128i __V)
/// repeating each bit 8 or 16 times). \n
/// \returns Returns a 128-bit integer vector representing the result mask of
/// the comparison.
#define _mm_cmpestrm(A, LA, B, LB, M)                                          \
  ((__m128i)__builtin_ia32_pcmpestrm128((__v16qi)(__m128i)(A), (int)(LA),      \
                                        (__v16qi)(__m128i)(B), (int)(LB),      \
                                        (int)(M)))
/// Uses the immediate operand \a M to perform a comparison of string
@ -1798,9 +1777,9 @@ _mm_minpos_epu16(__m128i __V)
/// 0: The index of the least significant set bit. \n
/// 1: The index of the most significant set bit. \n
/// \returns Returns an integer representing the result index of the comparison.
#define _mm_cmpestri(A, LA, B, LB, M)                                          \
  ((int)__builtin_ia32_pcmpestri128((__v16qi)(__m128i)(A), (int)(LA),          \
                                    (__v16qi)(__m128i)(B), (int)(LB),          \
                                    (int)(M)))
/* SSE4.2 Packed Comparison Intrinsics and EFlag Reading. */
@ -1850,8 +1829,8 @@ _mm_minpos_epu16(__m128i __V)
/// to the size of \a A or \a B. \n
/// \returns Returns 1 if the bit mask is zero and the length of the string in
/// \a B is the maximum; otherwise, returns 0.
#define _mm_cmpistra(A, B, M)                                                  \
  ((int)__builtin_ia32_pcmpistria128((__v16qi)(__m128i)(A),                    \
                                     (__v16qi)(__m128i)(B), (int)(M)))
/// Uses the immediate operand \a M to perform a comparison of string
@ -1899,8 +1878,8 @@ _mm_minpos_epu16(__m128i __V)
/// 11: Negate the bit mask only for bits with an index less than or equal
/// to the size of \a A or \a B.
/// \returns Returns 1 if the bit mask is non-zero, otherwise, returns 0.
#define _mm_cmpistrc(A, B, M)                                                  \
  ((int)__builtin_ia32_pcmpistric128((__v16qi)(__m128i)(A),                    \
                                     (__v16qi)(__m128i)(B), (int)(M)))
/// Uses the immediate operand \a M to perform a comparison of string
@ -1947,8 +1926,8 @@ _mm_minpos_epu16(__m128i __V)
/// 11: Negate the bit mask only for bits with an index less than or equal
/// to the size of \a A or \a B. \n
/// \returns Returns bit 0 of the resulting bit mask.
#define _mm_cmpistro(A, B, M)                                                  \
  ((int)__builtin_ia32_pcmpistrio128((__v16qi)(__m128i)(A),                    \
                                     (__v16qi)(__m128i)(B), (int)(M)))
/// Uses the immediate operand \a M to perform a comparison of string
@ -1997,8 +1976,8 @@ _mm_minpos_epu16(__m128i __V)
/// to the size of \a A or \a B. \n
/// \returns Returns 1 if the length of the string in \a A is less than the
/// maximum, otherwise, returns 0.
#define _mm_cmpistrs(A, B, M)                                                  \
  ((int)__builtin_ia32_pcmpistris128((__v16qi)(__m128i)(A),                    \
                                     (__v16qi)(__m128i)(B), (int)(M)))
/// Uses the immediate operand \a M to perform a comparison of string
@ -2047,8 +2026,8 @@ _mm_minpos_epu16(__m128i __V)
/// to the size of \a A or \a B.
/// \returns Returns 1 if the length of the string in \a B is less than the
/// maximum, otherwise, returns 0.
#define _mm_cmpistrz(A, B, M)                                                  \
  ((int)__builtin_ia32_pcmpistriz128((__v16qi)(__m128i)(A),                    \
                                     (__v16qi)(__m128i)(B), (int)(M)))
/// Uses the immediate operand \a M to perform a comparison of string
@ -2101,9 +2080,9 @@ _mm_minpos_epu16(__m128i __V)
/// to the size of \a A or \a B.
/// \returns Returns 1 if the bit mask is zero and the length of the string in
/// \a B is the maximum, otherwise, returns 0.
#define _mm_cmpestra(A, LA, B, LB, M)                                          \
  ((int)__builtin_ia32_pcmpestria128((__v16qi)(__m128i)(A), (int)(LA),         \
                                     (__v16qi)(__m128i)(B), (int)(LB),         \
                                     (int)(M)))
/// Uses the immediate operand \a M to perform a comparison of string
@ -2155,9 +2134,9 @@ _mm_minpos_epu16(__m128i __V)
/// 11: Negate the bit mask only for bits with an index less than or equal
/// to the size of \a A or \a B. \n
/// \returns Returns 1 if the resulting mask is non-zero, otherwise, returns 0.
#define _mm_cmpestrc(A, LA, B, LB, M)                                          \
  ((int)__builtin_ia32_pcmpestric128((__v16qi)(__m128i)(A), (int)(LA),         \
                                     (__v16qi)(__m128i)(B), (int)(LB),         \
                                     (int)(M)))
/// Uses the immediate operand \a M to perform a comparison of string
@ -2208,9 +2187,9 @@ _mm_minpos_epu16(__m128i __V)
/// 11: Negate the bit mask only for bits with an index less than or equal
/// to the size of \a A or \a B.
/// \returns Returns bit 0 of the resulting bit mask.
#define _mm_cmpestro(A, LA, B, LB, M)                                          \
  ((int)__builtin_ia32_pcmpestrio128((__v16qi)(__m128i)(A), (int)(LA),         \
                                     (__v16qi)(__m128i)(B), (int)(LB),         \
                                     (int)(M)))
/// Uses the immediate operand \a M to perform a comparison of string
@ -2263,9 +2242,9 @@ _mm_minpos_epu16(__m128i __V)
/// to the size of \a A or \a B. \n
/// \returns Returns 1 if the length of the string in \a A is less than the
/// maximum, otherwise, returns 0.
#define _mm_cmpestrs(A, LA, B, LB, M)                                          \
  ((int)__builtin_ia32_pcmpestris128((__v16qi)(__m128i)(A), (int)(LA),         \
                                     (__v16qi)(__m128i)(B), (int)(LB),         \
                                     (int)(M)))
/// Uses the immediate operand \a M to perform a comparison of string
@ -2317,9 +2296,9 @@ _mm_minpos_epu16(__m128i __V)
/// to the size of \a A or \a B.
/// \returns Returns 1 if the length of the string in \a B is less than the
/// maximum, otherwise, returns 0.
#define _mm_cmpestrz(A, LA, B, LB, M)                                          \
  ((int)__builtin_ia32_pcmpestriz128((__v16qi)(__m128i)(A), (int)(LA),         \
                                     (__v16qi)(__m128i)(B), (int)(LB),         \
                                     (int)(M)))
/* SSE4.2 Compare Packed Data -- Greater Than. */
@ -2336,9 +2315,8 @@ _mm_minpos_epu16(__m128i __V)
/// \param __V2
/// A 128-bit integer vector.
/// \returns A 128-bit integer vector containing the comparison results.
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cmpgt_epi64(__m128i __V1,
                                                             __m128i __V2) {
  /* Lane-wise signed 64-bit compare; vector > yields all-ones per true lane. */
  return (__m128i)((__v2di)__V1 > (__v2di)__V2);
}