/*===---- emmintrin.h - SSE2 intrinsics ------------------------------------===
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 *
 *===-----------------------------------------------------------------------===
 */

#ifndef __EMMINTRIN_H
#define __EMMINTRIN_H

#ifndef __SSE2__
#error "SSE2 instruction set not enabled"
#else

#include <xmmintrin.h>

typedef double __m128d __attribute__((__vector_size__(16)));
typedef long long __m128i __attribute__((__vector_size__(16)));

/* Type defines. */
typedef double __v2df __attribute__ ((__vector_size__ (16)));
typedef long long __v2di __attribute__ ((__vector_size__ (16)));
typedef short __v8hi __attribute__((__vector_size__(16)));
typedef char __v16qi __attribute__((__vector_size__(16)));

static __inline__ __m128d __attribute__((__always_inline__, __nodebug__))
_mm_add_sd(__m128d __a, __m128d __b)
{
  __a[0] += __b[0];
  return __a;
}

static __inline__ __m128d __attribute__((__always_inline__, __nodebug__))
_mm_add_pd(__m128d __a, __m128d __b)
{
  return __a + __b;
}

static __inline__ __m128d __attribute__((__always_inline__, __nodebug__))
_mm_sub_sd(__m128d __a, __m128d __b)
{
  __a[0] -= __b[0];
  return __a;
}

static __inline__ __m128d __attribute__((__always_inline__, __nodebug__))
_mm_sub_pd(__m128d __a, __m128d __b)
{
  return __a - __b;
}

static __inline__ __m128d __attribute__((__always_inline__, __nodebug__))
_mm_mul_sd(__m128d __a, __m128d __b)
{
  __a[0] *= __b[0];
  return __a;
}

static __inline__ __m128d __attribute__((__always_inline__, __nodebug__))
_mm_mul_pd(__m128d __a, __m128d __b)
{
  return __a * __b;
}

static __inline__ __m128d __attribute__((__always_inline__, __nodebug__))
_mm_div_sd(__m128d __a, __m128d __b)
{
  __a[0] /= __b[0];
  return __a;
}

static __inline__ __m128d __attribute__((__always_inline__, __nodebug__))
_mm_div_pd(__m128d __a, __m128d __b)
{
  return __a / __b;
}

static __inline__ __m128d __attribute__((__always_inline__, __nodebug__))
_mm_sqrt_sd(__m128d __a, __m128d __b)
{
  __m128d __c = __builtin_ia32_sqrtsd(__b);
  return (__m128d) { __c[0], __a[1] };
}

static __inline__ __m128d __attribute__((__always_inline__, __nodebug__))
_mm_sqrt_pd(__m128d __a)
{
  return __builtin_ia32_sqrtpd(__a);
}

static __inline__ __m128d __attribute__((__always_inline__, __nodebug__))
_mm_min_sd(__m128d __a, __m128d __b)
{
  return __builtin_ia32_minsd(__a, __b);
}

static __inline__ __m128d __attribute__((__always_inline__, __nodebug__))
_mm_min_pd(__m128d __a, __m128d __b)
{
  return __builtin_ia32_minpd(__a, __b);
}

static __inline__ __m128d __attribute__((__always_inline__, __nodebug__))
_mm_max_sd(__m128d __a, __m128d __b)
{
  return __builtin_ia32_maxsd(__a, __b);
}

static __inline__ __m128d __attribute__((__always_inline__, __nodebug__))
_mm_max_pd(__m128d __a, __m128d __b)
{
  return __builtin_ia32_maxpd(__a, __b);
}

static __inline__ __m128d __attribute__((__always_inline__, __nodebug__))
_mm_and_pd(__m128d __a, __m128d __b)
{
  return (__m128d)((__v4si)__a & (__v4si)__b);
}

static __inline__ __m128d __attribute__((__always_inline__, __nodebug__))
_mm_andnot_pd(__m128d __a, __m128d __b)
{
  return (__m128d)(~(__v4si)__a & (__v4si)__b);
}

static __inline__ __m128d __attribute__((__always_inline__, __nodebug__))
_mm_or_pd(__m128d __a, __m128d __b)
{
  return (__m128d)((__v4si)__a | (__v4si)__b);
}

static __inline__ __m128d __attribute__((__always_inline__, __nodebug__))
_mm_xor_pd(__m128d __a, __m128d __b)
{
  return (__m128d)((__v4si)__a ^ (__v4si)__b);
}

static __inline__ __m128d __attribute__((__always_inline__, __nodebug__))
_mm_cmpeq_pd(__m128d __a, __m128d __b)
{
  return (__m128d)__builtin_ia32_cmppd(__a, __b, 0);
}

static __inline__ __m128d __attribute__((__always_inline__, __nodebug__))
_mm_cmplt_pd(__m128d __a, __m128d __b)
{
  return (__m128d)__builtin_ia32_cmppd(__a, __b, 1);
}

static __inline__ __m128d __attribute__((__always_inline__, __nodebug__))
_mm_cmple_pd(__m128d __a, __m128d __b)
{
  return (__m128d)__builtin_ia32_cmppd(__a, __b, 2);
}

static __inline__ __m128d __attribute__((__always_inline__, __nodebug__))
_mm_cmpgt_pd(__m128d __a, __m128d __b)
{
  return (__m128d)__builtin_ia32_cmppd(__b, __a, 1);
}

static __inline__ __m128d __attribute__((__always_inline__, __nodebug__))
_mm_cmpge_pd(__m128d __a, __m128d __b)
{
  return (__m128d)__builtin_ia32_cmppd(__b, __a, 2);
}

static __inline__ __m128d __attribute__((__always_inline__, __nodebug__))
_mm_cmpord_pd(__m128d __a, __m128d __b)
{
  return (__m128d)__builtin_ia32_cmppd(__a, __b, 7);
}

static __inline__ __m128d __attribute__((__always_inline__, __nodebug__))
_mm_cmpunord_pd(__m128d __a, __m128d __b)
{
  return (__m128d)__builtin_ia32_cmppd(__a, __b, 3);
}

static __inline__ __m128d __attribute__((__always_inline__, __nodebug__))
_mm_cmpneq_pd(__m128d __a, __m128d __b)
{
  return (__m128d)__builtin_ia32_cmppd(__a, __b, 4);
}

static __inline__ __m128d __attribute__((__always_inline__, __nodebug__))
_mm_cmpnlt_pd(__m128d __a, __m128d __b)
{
  return (__m128d)__builtin_ia32_cmppd(__a, __b, 5);
}

static __inline__ __m128d __attribute__((__always_inline__, __nodebug__))
_mm_cmpnle_pd(__m128d __a, __m128d __b)
{
  return (__m128d)__builtin_ia32_cmppd(__a, __b, 6);
}

static __inline__ __m128d __attribute__((__always_inline__, __nodebug__))
_mm_cmpngt_pd(__m128d __a, __m128d __b)
{
  return (__m128d)__builtin_ia32_cmppd(__b, __a, 5);
}

static __inline__ __m128d __attribute__((__always_inline__, __nodebug__))
_mm_cmpnge_pd(__m128d __a, __m128d __b)
{
  return (__m128d)__builtin_ia32_cmppd(__b, __a, 6);
}

static __inline__ __m128d __attribute__((__always_inline__, __nodebug__))
_mm_cmpeq_sd(__m128d __a, __m128d __b)
{
  return (__m128d)__builtin_ia32_cmpsd(__a, __b, 0);
}

static __inline__ __m128d __attribute__((__always_inline__, __nodebug__))
_mm_cmplt_sd(__m128d __a, __m128d __b)
{
  return (__m128d)__builtin_ia32_cmpsd(__a, __b, 1);
}

static __inline__ __m128d __attribute__((__always_inline__, __nodebug__))
_mm_cmple_sd(__m128d __a, __m128d __b)
{
  return (__m128d)__builtin_ia32_cmpsd(__a, __b, 2);
}

static __inline__ __m128d __attribute__((__always_inline__, __nodebug__))
_mm_cmpgt_sd(__m128d __a, __m128d __b)
{
  __m128d __c = __builtin_ia32_cmpsd(__b, __a, 1);
  return (__m128d) { __c[0], __a[1] };
}

static __inline__ __m128d __attribute__((__always_inline__, __nodebug__))
_mm_cmpge_sd(__m128d __a, __m128d __b)
{
  __m128d __c = __builtin_ia32_cmpsd(__b, __a, 2);
  return (__m128d) { __c[0], __a[1] };
}

static __inline__ __m128d __attribute__((__always_inline__, __nodebug__))
_mm_cmpord_sd(__m128d __a, __m128d __b)
{
  return (__m128d)__builtin_ia32_cmpsd(__a, __b, 7);
}

static __inline__ __m128d __attribute__((__always_inline__, __nodebug__))
_mm_cmpunord_sd(__m128d __a, __m128d __b)
{
  return (__m128d)__builtin_ia32_cmpsd(__a, __b, 3);
}

static __inline__ __m128d __attribute__((__always_inline__, __nodebug__))
_mm_cmpneq_sd(__m128d __a, __m128d __b)
{
  return (__m128d)__builtin_ia32_cmpsd(__a, __b, 4);
}

static __inline__ __m128d __attribute__((__always_inline__, __nodebug__))
_mm_cmpnlt_sd(__m128d __a, __m128d __b)
{
  return (__m128d)__builtin_ia32_cmpsd(__a, __b, 5);
}

static __inline__ __m128d __attribute__((__always_inline__, __nodebug__))
_mm_cmpnle_sd(__m128d __a, __m128d __b)
{
  return (__m128d)__builtin_ia32_cmpsd(__a, __b, 6);
}

static __inline__ __m128d __attribute__((__always_inline__, __nodebug__))
_mm_cmpngt_sd(__m128d __a, __m128d __b)
{
  __m128d __c = __builtin_ia32_cmpsd(__b, __a, 5);
  return (__m128d) { __c[0], __a[1] };
}

static __inline__ __m128d __attribute__((__always_inline__, __nodebug__))
_mm_cmpnge_sd(__m128d __a, __m128d __b)
{
  __m128d __c = __builtin_ia32_cmpsd(__b, __a, 6);
  return (__m128d) { __c[0], __a[1] };
}

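/* Editorial note (not part of the original header): the third argument to
 * __builtin_ia32_cmppd/__builtin_ia32_cmpsd above is the SSE2 comparison
 * predicate immediate: 0 = eq, 1 = lt, 2 = le, 3 = unord, 4 = neq, 5 = nlt,
 * 6 = nle, 7 = ord.  The gt/ge and ngt/nge variants are formed by swapping
 * the operands and reusing the lt/le/nlt/nle predicates, which is why the
 * scalar (_sd) forms of those re-insert __a[1] into the upper lane. */
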
static __inline__ int __attribute__((__always_inline__, __nodebug__))
_mm_comieq_sd(__m128d __a, __m128d __b)
{
  return __builtin_ia32_comisdeq(__a, __b);
}

static __inline__ int __attribute__((__always_inline__, __nodebug__))
_mm_comilt_sd(__m128d __a, __m128d __b)
{
  return __builtin_ia32_comisdlt(__a, __b);
}

static __inline__ int __attribute__((__always_inline__, __nodebug__))
_mm_comile_sd(__m128d __a, __m128d __b)
{
  return __builtin_ia32_comisdle(__a, __b);
}

static __inline__ int __attribute__((__always_inline__, __nodebug__))
_mm_comigt_sd(__m128d __a, __m128d __b)
{
  return __builtin_ia32_comisdgt(__a, __b);
}

static __inline__ int __attribute__((__always_inline__, __nodebug__))
_mm_comige_sd(__m128d __a, __m128d __b)
{
  return __builtin_ia32_comisdge(__a, __b);
}

static __inline__ int __attribute__((__always_inline__, __nodebug__))
_mm_comineq_sd(__m128d __a, __m128d __b)
{
  return __builtin_ia32_comisdneq(__a, __b);
}

static __inline__ int __attribute__((__always_inline__, __nodebug__))
_mm_ucomieq_sd(__m128d __a, __m128d __b)
{
  return __builtin_ia32_ucomisdeq(__a, __b);
}

static __inline__ int __attribute__((__always_inline__, __nodebug__))
_mm_ucomilt_sd(__m128d __a, __m128d __b)
{
  return __builtin_ia32_ucomisdlt(__a, __b);
}

static __inline__ int __attribute__((__always_inline__, __nodebug__))
_mm_ucomile_sd(__m128d __a, __m128d __b)
{
  return __builtin_ia32_ucomisdle(__a, __b);
}

static __inline__ int __attribute__((__always_inline__, __nodebug__))
_mm_ucomigt_sd(__m128d __a, __m128d __b)
{
  return __builtin_ia32_ucomisdgt(__a, __b);
}

static __inline__ int __attribute__((__always_inline__, __nodebug__))
_mm_ucomige_sd(__m128d __a, __m128d __b)
{
  return __builtin_ia32_ucomisdge(__a, __b);
}

static __inline__ int __attribute__((__always_inline__, __nodebug__))
_mm_ucomineq_sd(__m128d __a, __m128d __b)
{
  return __builtin_ia32_ucomisdneq(__a, __b);
}

static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
_mm_cvtpd_ps(__m128d __a)
{
  return __builtin_ia32_cvtpd2ps(__a);
}

static __inline__ __m128d __attribute__((__always_inline__, __nodebug__))
_mm_cvtps_pd(__m128 __a)
{
  return __builtin_ia32_cvtps2pd(__a);
}

static __inline__ __m128d __attribute__((__always_inline__, __nodebug__))
_mm_cvtepi32_pd(__m128i __a)
{
  return __builtin_ia32_cvtdq2pd((__v4si)__a);
}

static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_cvtpd_epi32(__m128d __a)
{
  return __builtin_ia32_cvtpd2dq(__a);
}

static __inline__ int __attribute__((__always_inline__, __nodebug__))
_mm_cvtsd_si32(__m128d __a)
{
  return __builtin_ia32_cvtsd2si(__a);
}

static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
_mm_cvtsd_ss(__m128 __a, __m128d __b)
{
  __a[0] = __b[0];
  return __a;
}

static __inline__ __m128d __attribute__((__always_inline__, __nodebug__))
_mm_cvtsi32_sd(__m128d __a, int __b)
{
  __a[0] = __b;
  return __a;
}

static __inline__ __m128d __attribute__((__always_inline__, __nodebug__))
_mm_cvtss_sd(__m128d __a, __m128 __b)
{
  __a[0] = __b[0];
  return __a;
}

static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_cvttpd_epi32(__m128d __a)
{
  return (__m128i)__builtin_ia32_cvttpd2dq(__a);
}

static __inline__ int __attribute__((__always_inline__, __nodebug__))
_mm_cvttsd_si32(__m128d __a)
{
  return __a[0];
}

static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
_mm_cvtpd_pi32(__m128d __a)
{
  return (__m64)__builtin_ia32_cvtpd2pi(__a);
}

static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
_mm_cvttpd_pi32(__m128d __a)
{
  return (__m64)__builtin_ia32_cvttpd2pi(__a);
}

static __inline__ __m128d __attribute__((__always_inline__, __nodebug__))
_mm_cvtpi32_pd(__m64 __a)
{
  return __builtin_ia32_cvtpi2pd((__v2si)__a);
}

static __inline__ double __attribute__((__always_inline__, __nodebug__))
_mm_cvtsd_f64(__m128d __a)
{
  return __a[0];
}

static __inline__ __m128d __attribute__((__always_inline__, __nodebug__))
_mm_load_pd(double const *__dp)
{
  return *(__m128d*)__dp;
}

static __inline__ __m128d __attribute__((__always_inline__, __nodebug__))
_mm_load1_pd(double const *__dp)
{
  struct __mm_load1_pd_struct {
    double __u;
  } __attribute__((__packed__, __may_alias__));
  double __u = ((struct __mm_load1_pd_struct*)__dp)->__u;
  return (__m128d){ __u, __u };
}

#define _mm_load_pd1(dp) _mm_load1_pd(dp)

static __inline__ __m128d __attribute__((__always_inline__, __nodebug__))
_mm_loadr_pd(double const *__dp)
{
  __m128d __u = *(__m128d*)__dp;
  return __builtin_shufflevector(__u, __u, 1, 0);
}

static __inline__ __m128d __attribute__((__always_inline__, __nodebug__))
_mm_loadu_pd(double const *__dp)
{
  struct __loadu_pd {
    __m128d __v;
  } __attribute__((packed, may_alias));
  return ((struct __loadu_pd*)__dp)->__v;
}

static __inline__ __m128d __attribute__((__always_inline__, __nodebug__))
_mm_load_sd(double const *__dp)
{
  struct __mm_load_sd_struct {
    double __u;
  } __attribute__((__packed__, __may_alias__));
  double __u = ((struct __mm_load_sd_struct*)__dp)->__u;
  return (__m128d){ __u, 0 };
}

static __inline__ __m128d __attribute__((__always_inline__, __nodebug__))
_mm_loadh_pd(__m128d __a, double const *__dp)
{
  struct __mm_loadh_pd_struct {
    double __u;
  } __attribute__((__packed__, __may_alias__));
  double __u = ((struct __mm_loadh_pd_struct*)__dp)->__u;
  return (__m128d){ __a[0], __u };
}

static __inline__ __m128d __attribute__((__always_inline__, __nodebug__))
_mm_loadl_pd(__m128d __a, double const *__dp)
{
  struct __mm_loadl_pd_struct {
    double __u;
  } __attribute__((__packed__, __may_alias__));
  double __u = ((struct __mm_loadl_pd_struct*)__dp)->__u;
  return (__m128d){ __u, __a[1] };
}

static __inline__ __m128d __attribute__((__always_inline__, __nodebug__))
_mm_set_sd(double __w)
{
  return (__m128d){ __w, 0 };
}

static __inline__ __m128d __attribute__((__always_inline__, __nodebug__))
_mm_set1_pd(double __w)
{
  return (__m128d){ __w, __w };
}

static __inline__ __m128d __attribute__((__always_inline__, __nodebug__))
_mm_set_pd(double __w, double __x)
{
  return (__m128d){ __x, __w };
}

static __inline__ __m128d __attribute__((__always_inline__, __nodebug__))
_mm_setr_pd(double __w, double __x)
{
  return (__m128d){ __w, __x };
}

static __inline__ __m128d __attribute__((__always_inline__, __nodebug__))
_mm_setzero_pd(void)
{
  return (__m128d){ 0, 0 };
}

static __inline__ __m128d __attribute__((__always_inline__, __nodebug__))
_mm_move_sd(__m128d __a, __m128d __b)
{
  return (__m128d){ __b[0], __a[1] };
}

static __inline__ void __attribute__((__always_inline__, __nodebug__))
_mm_store_sd(double *__dp, __m128d __a)
{
  struct __mm_store_sd_struct {
    double __u;
  } __attribute__((__packed__, __may_alias__));
  ((struct __mm_store_sd_struct*)__dp)->__u = __a[0];
}

static __inline__ void __attribute__((__always_inline__, __nodebug__))
_mm_store1_pd(double *__dp, __m128d __a)
{
  struct __mm_store1_pd_struct {
    double __u[2];
  } __attribute__((__packed__, __may_alias__));
  ((struct __mm_store1_pd_struct*)__dp)->__u[0] = __a[0];
  ((struct __mm_store1_pd_struct*)__dp)->__u[1] = __a[0];
}

static __inline__ void __attribute__((__always_inline__, __nodebug__))
_mm_store_pd(double *__dp, __m128d __a)
{
  *(__m128d *)__dp = __a;
}

static __inline__ void __attribute__((__always_inline__, __nodebug__))
_mm_storeu_pd(double *__dp, __m128d __a)
{
  __builtin_ia32_storeupd(__dp, __a);
}

static __inline__ void __attribute__((__always_inline__, __nodebug__))
_mm_storer_pd(double *__dp, __m128d __a)
{
  __a = __builtin_shufflevector(__a, __a, 1, 0);
  *(__m128d *)__dp = __a;
}

static __inline__ void __attribute__((__always_inline__, __nodebug__))
_mm_storeh_pd(double *__dp, __m128d __a)
{
  struct __mm_storeh_pd_struct {
    double __u;
  } __attribute__((__packed__, __may_alias__));
  ((struct __mm_storeh_pd_struct*)__dp)->__u = __a[1];
}

static __inline__ void __attribute__((__always_inline__, __nodebug__))
_mm_storel_pd(double *__dp, __m128d __a)
{
  struct __mm_storeh_pd_struct {
    double __u;
  } __attribute__((__packed__, __may_alias__));
  ((struct __mm_storeh_pd_struct*)__dp)->__u = __a[0];
}

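/* Editorial note (not part of the original header): _mm_load_pd/_mm_store_pd
 * dereference the pointer as a __m128d and therefore assume 16-byte aligned
 * data, while _mm_loadu_pd/_mm_storeu_pd (and the single-element forms, which
 * go through __packed__, __may_alias__ structs) tolerate arbitrary alignment.
 * A minimal usage sketch, assuming a caller-provided double buf[2]:
 *
 *   __m128d v = _mm_loadu_pd(buf);          // unaligned load of buf[0..1]
 *   v = _mm_add_pd(v, _mm_set1_pd(1.0));    // add 1.0 to both lanes
 *   _mm_storeu_pd(buf, v);                  // unaligned store back
 */
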
static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_add_epi8(__m128i __a, __m128i __b)
{
  return (__m128i)((__v16qi)__a + (__v16qi)__b);
}

static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_add_epi16(__m128i __a, __m128i __b)
{
  return (__m128i)((__v8hi)__a + (__v8hi)__b);
}

static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_add_epi32(__m128i __a, __m128i __b)
{
  return (__m128i)((__v4si)__a + (__v4si)__b);
}

static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
_mm_add_si64(__m64 __a, __m64 __b)
{
  return __a + __b;
}

static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_add_epi64(__m128i __a, __m128i __b)
{
  return __a + __b;
}

static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_adds_epi8(__m128i __a, __m128i __b)
{
  return (__m128i)__builtin_ia32_paddsb128((__v16qi)__a, (__v16qi)__b);
}

static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_adds_epi16(__m128i __a, __m128i __b)
{
  return (__m128i)__builtin_ia32_paddsw128((__v8hi)__a, (__v8hi)__b);
}

static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_adds_epu8(__m128i __a, __m128i __b)
{
  return (__m128i)__builtin_ia32_paddusb128((__v16qi)__a, (__v16qi)__b);
}

static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_adds_epu16(__m128i __a, __m128i __b)
{
  return (__m128i)__builtin_ia32_paddusw128((__v8hi)__a, (__v8hi)__b);
}

static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_avg_epu8(__m128i __a, __m128i __b)
{
  return (__m128i)__builtin_ia32_pavgb128((__v16qi)__a, (__v16qi)__b);
}

static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_avg_epu16(__m128i __a, __m128i __b)
{
  return (__m128i)__builtin_ia32_pavgw128((__v8hi)__a, (__v8hi)__b);
}

static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_madd_epi16(__m128i __a, __m128i __b)
{
  return (__m128i)__builtin_ia32_pmaddwd128((__v8hi)__a, (__v8hi)__b);
}

static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_max_epi16(__m128i __a, __m128i __b)
{
  return (__m128i)__builtin_ia32_pmaxsw128((__v8hi)__a, (__v8hi)__b);
}

static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_max_epu8(__m128i __a, __m128i __b)
{
  return (__m128i)__builtin_ia32_pmaxub128((__v16qi)__a, (__v16qi)__b);
}

static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_min_epi16(__m128i __a, __m128i __b)
{
  return (__m128i)__builtin_ia32_pminsw128((__v8hi)__a, (__v8hi)__b);
}

static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_min_epu8(__m128i __a, __m128i __b)
{
  return (__m128i)__builtin_ia32_pminub128((__v16qi)__a, (__v16qi)__b);
}

static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_mulhi_epi16(__m128i __a, __m128i __b)
{
  return (__m128i)__builtin_ia32_pmulhw128((__v8hi)__a, (__v8hi)__b);
}

static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_mulhi_epu16(__m128i __a, __m128i __b)
{
  return (__m128i)__builtin_ia32_pmulhuw128((__v8hi)__a, (__v8hi)__b);
}

static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_mullo_epi16(__m128i __a, __m128i __b)
{
  return (__m128i)((__v8hi)__a * (__v8hi)__b);
}

static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
_mm_mul_su32(__m64 __a, __m64 __b)
{
  return __builtin_ia32_pmuludq((__v2si)__a, (__v2si)__b);
}

static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_mul_epu32(__m128i __a, __m128i __b)
{
  return __builtin_ia32_pmuludq128((__v4si)__a, (__v4si)__b);
}

static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_sad_epu8(__m128i __a, __m128i __b)
{
  return __builtin_ia32_psadbw128((__v16qi)__a, (__v16qi)__b);
}

static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_sub_epi8(__m128i __a, __m128i __b)
{
  return (__m128i)((__v16qi)__a - (__v16qi)__b);
}

static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_sub_epi16(__m128i __a, __m128i __b)
{
  return (__m128i)((__v8hi)__a - (__v8hi)__b);
}

static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_sub_epi32(__m128i __a, __m128i __b)
{
  return (__m128i)((__v4si)__a - (__v4si)__b);
}

static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
_mm_sub_si64(__m64 __a, __m64 __b)
{
  return __a - __b;
}

static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_sub_epi64(__m128i __a, __m128i __b)
{
  return __a - __b;
}

static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_subs_epi8(__m128i __a, __m128i __b)
{
  return (__m128i)__builtin_ia32_psubsb128((__v16qi)__a, (__v16qi)__b);
}

static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_subs_epi16(__m128i __a, __m128i __b)
{
  return (__m128i)__builtin_ia32_psubsw128((__v8hi)__a, (__v8hi)__b);
}

static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_subs_epu8(__m128i __a, __m128i __b)
{
  return (__m128i)__builtin_ia32_psubusb128((__v16qi)__a, (__v16qi)__b);
}

static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_subs_epu16(__m128i __a, __m128i __b)
{
  return (__m128i)__builtin_ia32_psubusw128((__v8hi)__a, (__v8hi)__b);
}

static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_and_si128(__m128i __a, __m128i __b)
{
  return __a & __b;
}

static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_andnot_si128(__m128i __a, __m128i __b)
{
  return ~__a & __b;
}

static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_or_si128(__m128i __a, __m128i __b)
{
  return __a | __b;
}

static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_xor_si128(__m128i __a, __m128i __b)
{
  return __a ^ __b;
}

#define _mm_slli_si128(a, count) __extension__ ({ \
  __m128i __a = (a); \
  (__m128i)__builtin_ia32_pslldqi128(__a, (count)*8); })

static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_slli_epi16(__m128i __a, int __count)
{
  return (__m128i)__builtin_ia32_psllwi128((__v8hi)__a, __count);
}

static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_sll_epi16(__m128i __a, __m128i __count)
{
  return (__m128i)__builtin_ia32_psllw128((__v8hi)__a, (__v8hi)__count);
}

static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_slli_epi32(__m128i __a, int __count)
{
  return (__m128i)__builtin_ia32_pslldi128((__v4si)__a, __count);
}

static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_sll_epi32(__m128i __a, __m128i __count)
{
  return (__m128i)__builtin_ia32_pslld128((__v4si)__a, (__v4si)__count);
}

static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_slli_epi64(__m128i __a, int __count)
{
  return __builtin_ia32_psllqi128(__a, __count);
}

static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_sll_epi64(__m128i __a, __m128i __count)
{
  return __builtin_ia32_psllq128(__a, __count);
}

static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_srai_epi16(__m128i __a, int __count)
{
  return (__m128i)__builtin_ia32_psrawi128((__v8hi)__a, __count);
}

static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_sra_epi16(__m128i __a, __m128i __count)
{
  return (__m128i)__builtin_ia32_psraw128((__v8hi)__a, (__v8hi)__count);
}

static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_srai_epi32(__m128i __a, int __count)
{
  return (__m128i)__builtin_ia32_psradi128((__v4si)__a, __count);
}

static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_sra_epi32(__m128i __a, __m128i __count)
{
  return (__m128i)__builtin_ia32_psrad128((__v4si)__a, (__v4si)__count);
}

#define _mm_srli_si128(a, count) __extension__ ({ \
  __m128i __a = (a); \
  (__m128i)__builtin_ia32_psrldqi128(__a, (count)*8); })

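/* Editorial note (not part of the original header): the count argument of the
 * _mm_slli_si128/_mm_srli_si128 macros above is a byte count (it is scaled by
 * 8 before being passed to the pslldqi128/psrldqi128 builtins), whereas the
 * element-wise shifts such as _mm_slli_epi16 above or _mm_srli_epi32 below
 * take bit counts. */
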
static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_srli_epi16(__m128i __a, int __count)
{
  return (__m128i)__builtin_ia32_psrlwi128((__v8hi)__a, __count);
}

static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_srl_epi16(__m128i __a, __m128i __count)
{
  return (__m128i)__builtin_ia32_psrlw128((__v8hi)__a, (__v8hi)__count);
}

static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_srli_epi32(__m128i __a, int __count)
{
  return (__m128i)__builtin_ia32_psrldi128((__v4si)__a, __count);
}

static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_srl_epi32(__m128i __a, __m128i __count)
{
  return (__m128i)__builtin_ia32_psrld128((__v4si)__a, (__v4si)__count);
}

static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_srli_epi64(__m128i __a, int __count)
{
  return __builtin_ia32_psrlqi128(__a, __count);
}

static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_srl_epi64(__m128i __a, __m128i __count)
{
  return __builtin_ia32_psrlq128(__a, __count);
}

static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_cmpeq_epi8(__m128i __a, __m128i __b)
{
  return (__m128i)((__v16qi)__a == (__v16qi)__b);
}

static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_cmpeq_epi16(__m128i __a, __m128i __b)
{
  return (__m128i)((__v8hi)__a == (__v8hi)__b);
}

static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_cmpeq_epi32(__m128i __a, __m128i __b)
{
  return (__m128i)((__v4si)__a == (__v4si)__b);
}

static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_cmpgt_epi8(__m128i __a, __m128i __b)
{
  /* This function always performs a signed comparison, but __v16qi is a char
     which may be signed or unsigned. */
  typedef signed char __v16qs __attribute__((__vector_size__(16)));
  return (__m128i)((__v16qs)__a > (__v16qs)__b);
}

static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_cmpgt_epi16(__m128i __a, __m128i __b)
{
  return (__m128i)((__v8hi)__a > (__v8hi)__b);
}

static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_cmpgt_epi32(__m128i __a, __m128i __b)
{
  return (__m128i)((__v4si)__a > (__v4si)__b);
}

static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_cmplt_epi8(__m128i __a, __m128i __b)
{
  return _mm_cmpgt_epi8(__b, __a);
}

static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_cmplt_epi16(__m128i __a, __m128i __b)
{
  return _mm_cmpgt_epi16(__b, __a);
}

static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_cmplt_epi32(__m128i __a, __m128i __b)
{
  return _mm_cmpgt_epi32(__b, __a);
}

#ifdef __x86_64__
static __inline__ __m128d __attribute__((__always_inline__, __nodebug__))
_mm_cvtsi64_sd(__m128d __a, long long __b)
{
  __a[0] = __b;
  return __a;
}

static __inline__ long long __attribute__((__always_inline__, __nodebug__))
_mm_cvtsd_si64(__m128d __a)
{
  return __builtin_ia32_cvtsd2si64(__a);
}

static __inline__ long long __attribute__((__always_inline__, __nodebug__))
_mm_cvttsd_si64(__m128d __a)
{
  return __a[0];
}
#endif

static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
_mm_cvtepi32_ps(__m128i __a)
{
  return __builtin_ia32_cvtdq2ps((__v4si)__a);
}

static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_cvtps_epi32(__m128 __a)
{
  return (__m128i)__builtin_ia32_cvtps2dq(__a);
}

static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_cvttps_epi32(__m128 __a)
{
  return (__m128i)__builtin_ia32_cvttps2dq(__a);
}

static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_cvtsi32_si128(int __a)
{
  return (__m128i)(__v4si){ __a, 0, 0, 0 };
}

#ifdef __x86_64__
static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_cvtsi64_si128(long long __a)
{
  return (__m128i){ __a, 0 };
}
#endif

static __inline__ int __attribute__((__always_inline__, __nodebug__))
_mm_cvtsi128_si32(__m128i __a)
{
  __v4si __b = (__v4si)__a;
  return __b[0];
}

#ifdef __x86_64__
static __inline__ long long __attribute__((__always_inline__, __nodebug__))
_mm_cvtsi128_si64(__m128i __a)
{
  return __a[0];
}
#endif

static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_load_si128(__m128i const *__p)
{
  return *__p;
}

static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_loadu_si128(__m128i const *__p)
{
  struct __loadu_si128 {
    __m128i __v;
  } __attribute__((packed, may_alias));
  return ((struct __loadu_si128*)__p)->__v;
}

static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_loadl_epi64(__m128i const *__p)
{
  struct __mm_loadl_epi64_struct {
    long long __u;
  } __attribute__((__packed__, __may_alias__));
  return (__m128i) { ((struct __mm_loadl_epi64_struct*)__p)->__u, 0};
}

static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_set_epi64x(long long q1, long long q0)
{
  return (__m128i){ q0, q1 };
}

static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_set_epi64(__m64 q1, __m64 q0)
{
  return (__m128i){ (long long)q0, (long long)q1 };
}

static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_set_epi32(int i3, int i2, int i1, int i0)
{
  return (__m128i)(__v4si){ i0, i1, i2, i3};
}

static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_set_epi16(short w7, short w6, short w5, short w4, short w3, short w2, short w1, short w0)
{
  return (__m128i)(__v8hi){ w0, w1, w2, w3, w4, w5, w6, w7 };
}

static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_set_epi8(char b15, char b14, char b13, char b12, char b11, char b10, char b9, char b8, char b7, char b6, char b5, char b4, char b3, char b2, char b1, char b0)
{
  return (__m128i)(__v16qi){ b0, b1, b2, b3, b4, b5, b6, b7, b8, b9, b10, b11, b12, b13, b14, b15 };
}

static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_set1_epi64x(long long __q)
{
  return (__m128i){ __q, __q };
}

static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_set1_epi64(__m64 __q)
{
  return (__m128i){ (long long)__q, (long long)__q };
}

static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_set1_epi32(int __i)
{
  return (__m128i)(__v4si){ __i, __i, __i, __i };
}

static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_set1_epi16(short __w)
{
  return (__m128i)(__v8hi){ __w, __w, __w, __w, __w, __w, __w, __w };
}

static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_set1_epi8(char __b)
{
  return (__m128i)(__v16qi){ __b, __b, __b, __b, __b, __b, __b, __b, __b, __b, __b, __b, __b, __b, __b, __b };
}

static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_setr_epi64(__m64 q0, __m64 q1)
{
  return (__m128i){ (long long)q0, (long long)q1 };
}

static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_setr_epi32(int i0, int i1, int i2, int i3)
{
  return (__m128i)(__v4si){ i0, i1, i2, i3};
}

static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_setr_epi16(short w0, short w1, short w2, short w3, short w4, short w5, short w6, short w7)
{
  return (__m128i)(__v8hi){ w0, w1, w2, w3, w4, w5, w6, w7 };
}

static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_setr_epi8(char b0, char b1, char b2, char b3, char b4, char b5, char b6, char b7, char b8, char b9, char b10, char b11, char b12, char b13, char b14, char b15)
{
  return (__m128i)(__v16qi){ b0, b1, b2, b3, b4, b5, b6, b7, b8, b9, b10, b11, b12, b13, b14, b15 };
}

static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_setzero_si128(void)
{
  return (__m128i){ 0LL, 0LL };
}

static __inline__ void __attribute__((__always_inline__, __nodebug__))
_mm_store_si128(__m128i *__p, __m128i __b)
{
  *__p = __b;
}

static __inline__ void __attribute__((__always_inline__, __nodebug__))
_mm_storeu_si128(__m128i *__p, __m128i __b)
{
  __builtin_ia32_storedqu((char *)__p, (__v16qi)__b);
}

static __inline__ void __attribute__((__always_inline__, __nodebug__))
_mm_maskmoveu_si128(__m128i __d, __m128i __n, char *__p)
{
  __builtin_ia32_maskmovdqu((__v16qi)__d, (__v16qi)__n, __p);
}

static __inline__ void __attribute__((__always_inline__, __nodebug__))
_mm_storel_epi64(__m128i *__p, __m128i __a)
{
  struct __mm_storel_epi64_struct {
    long long __u;
  } __attribute__((__packed__, __may_alias__));
  ((struct __mm_storel_epi64_struct*)__p)->__u = __a[0];
}

static __inline__ void __attribute__((__always_inline__, __nodebug__))
_mm_stream_pd(double *__p, __m128d __a)
{
  __builtin_ia32_movntpd(__p, __a);
}

static __inline__ void __attribute__((__always_inline__, __nodebug__))
_mm_stream_si128(__m128i *__p, __m128i __a)
{
  __builtin_ia32_movntdq(__p, __a);
}

static __inline__ void __attribute__((__always_inline__, __nodebug__))
_mm_stream_si32(int *__p, int __a)
{
  __builtin_ia32_movnti(__p, __a);
}

static __inline__ void __attribute__((__always_inline__, __nodebug__))
_mm_clflush(void const *__p)
{
  __builtin_ia32_clflush(__p);
}

static __inline__ void __attribute__((__always_inline__, __nodebug__))
_mm_lfence(void)
{
  __builtin_ia32_lfence();
}

static __inline__ void __attribute__((__always_inline__, __nodebug__))
_mm_mfence(void)
{
  __builtin_ia32_mfence();
}

static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_packs_epi16(__m128i __a, __m128i __b)
{
  return (__m128i)__builtin_ia32_packsswb128((__v8hi)__a, (__v8hi)__b);
}

static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_packs_epi32(__m128i __a, __m128i __b)
{
  return (__m128i)__builtin_ia32_packssdw128((__v4si)__a, (__v4si)__b);
}

static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_packus_epi16(__m128i __a, __m128i __b)
{
  return (__m128i)__builtin_ia32_packuswb128((__v8hi)__a, (__v8hi)__b);
}

static __inline__ int __attribute__((__always_inline__, __nodebug__))
_mm_extract_epi16(__m128i __a, int __imm)
{
  __v8hi __b = (__v8hi)__a;
  return (unsigned short)__b[__imm];
}

static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_insert_epi16(__m128i __a, int __b, int __imm)
{
  __v8hi __c = (__v8hi)__a;
  __c[__imm & 7] = __b;
  return (__m128i)__c;
}

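/* Editorial note (not part of the original header): _mm_insert_epi16 masks the
 * lane index with "& 7", while _mm_extract_epi16 above indexes __b[__imm]
 * directly, so only the insert form tolerates an out-of-range index here. */
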
static __inline__ int __attribute__((__always_inline__, __nodebug__))
_mm_movemask_epi8(__m128i __a)
{
  return __builtin_ia32_pmovmskb128((__v16qi)__a);
}

#define _mm_shuffle_epi32(a, imm) __extension__ ({ \
  __m128i __a = (a); \
  (__m128i)__builtin_shufflevector((__v4si)__a, (__v4si) _mm_set1_epi32(0), \
                                   (imm) & 0x3, ((imm) & 0xc) >> 2, \
                                   ((imm) & 0x30) >> 4, ((imm) & 0xc0) >> 6); })

#define _mm_shufflelo_epi16(a, imm) __extension__ ({ \
  __m128i __a = (a); \
  (__m128i)__builtin_shufflevector((__v8hi)__a, (__v8hi) _mm_set1_epi16(0), \
                                   (imm) & 0x3, ((imm) & 0xc) >> 2, \
                                   ((imm) & 0x30) >> 4, ((imm) & 0xc0) >> 6, \
                                   4, 5, 6, 7); })

#define _mm_shufflehi_epi16(a, imm) __extension__ ({ \
  __m128i __a = (a); \
  (__m128i)__builtin_shufflevector((__v8hi)__a, (__v8hi) _mm_set1_epi16(0), \
                                   0, 1, 2, 3, \
                                   4 + (((imm) & 0x03) >> 0), \
                                   4 + (((imm) & 0x0c) >> 2), \
                                   4 + (((imm) & 0x30) >> 4), \
                                   4 + (((imm) & 0xc0) >> 6)); })

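/* Editorial note (not part of the original header): in the three shuffle
 * macros above, each two-bit field of imm selects a source element, starting
 * from the low bits.  An illustrative use, assuming __m128i v holds the
 * 32-bit lanes {0, 1, 2, 3}:
 *
 *   __m128i r = _mm_shuffle_epi32(v, 0x1B);  // 0x1B = 00 01 10 11b -> {3,2,1,0}
 */
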
static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_unpackhi_epi8(__m128i __a, __m128i __b)
{
  return (__m128i)__builtin_shufflevector((__v16qi)__a, (__v16qi)__b, 8, 16+8, 9, 16+9, 10, 16+10, 11, 16+11, 12, 16+12, 13, 16+13, 14, 16+14, 15, 16+15);
}

static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_unpackhi_epi16(__m128i __a, __m128i __b)
{
  return (__m128i)__builtin_shufflevector((__v8hi)__a, (__v8hi)__b, 4, 8+4, 5, 8+5, 6, 8+6, 7, 8+7);
}

static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_unpackhi_epi32(__m128i __a, __m128i __b)
{
  return (__m128i)__builtin_shufflevector((__v4si)__a, (__v4si)__b, 2, 4+2, 3, 4+3);
}

static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_unpackhi_epi64(__m128i __a, __m128i __b)
{
  return (__m128i)__builtin_shufflevector(__a, __b, 1, 2+1);
}

static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_unpacklo_epi8(__m128i __a, __m128i __b)
{
  return (__m128i)__builtin_shufflevector((__v16qi)__a, (__v16qi)__b, 0, 16+0, 1, 16+1, 2, 16+2, 3, 16+3, 4, 16+4, 5, 16+5, 6, 16+6, 7, 16+7);
}

static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_unpacklo_epi16(__m128i __a, __m128i __b)
{
  return (__m128i)__builtin_shufflevector((__v8hi)__a, (__v8hi)__b, 0, 8+0, 1, 8+1, 2, 8+2, 3, 8+3);
}

static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_unpacklo_epi32(__m128i __a, __m128i __b)
{
  return (__m128i)__builtin_shufflevector((__v4si)__a, (__v4si)__b, 0, 4+0, 1, 4+1);
}

static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_unpacklo_epi64(__m128i __a, __m128i __b)
{
  return (__m128i)__builtin_shufflevector(__a, __b, 0, 2+0);
}

static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
_mm_movepi64_pi64(__m128i __a)
{
  return (__m64)__a[0];
}

static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_movpi64_pi64(__m64 __a)
{
  return (__m128i){ (long long)__a, 0 };
}

static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_move_epi64(__m128i __a)
{
  return __builtin_shufflevector(__a, (__m128i){ 0 }, 0, 2);
}

static __inline__ __m128d __attribute__((__always_inline__, __nodebug__))
_mm_unpackhi_pd(__m128d __a, __m128d __b)
{
  return __builtin_shufflevector(__a, __b, 1, 2+1);
}

static __inline__ __m128d __attribute__((__always_inline__, __nodebug__))
_mm_unpacklo_pd(__m128d __a, __m128d __b)
{
  return __builtin_shufflevector(__a, __b, 0, 2+0);
}

static __inline__ int __attribute__((__always_inline__, __nodebug__))
_mm_movemask_pd(__m128d __a)
{
  return __builtin_ia32_movmskpd(__a);
}

#define _mm_shuffle_pd(a, b, i) __extension__ ({ \
  __m128d __a = (a); \
  __m128d __b = (b); \
  __builtin_shufflevector(__a, __b, (i) & 1, (((i) & 2) >> 1) + 2); })

static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
_mm_castpd_ps(__m128d __a)
{
  return (__m128)__a;
}

static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_castpd_si128(__m128d __a)
{
  return (__m128i)__a;
}

static __inline__ __m128d __attribute__((__always_inline__, __nodebug__))
_mm_castps_pd(__m128 __a)
{
  return (__m128d)__a;
}

static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_castps_si128(__m128 __a)
{
  return (__m128i)__a;
}

static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
_mm_castsi128_ps(__m128i __a)
{
  return (__m128)__a;
}

static __inline__ __m128d __attribute__((__always_inline__, __nodebug__))
_mm_castsi128_pd(__m128i __a)
{
  return (__m128d)__a;
}

static __inline__ void __attribute__((__always_inline__, __nodebug__))
_mm_pause(void)
{
  __asm__ volatile ("pause");
}

#define _MM_SHUFFLE2(x, y) (((x) << 1) | (y))

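/* Editorial note (not part of the original header): _MM_SHUFFLE2 packs two
 * lane selectors into the immediate expected by _mm_shuffle_pd, e.g.
 * _mm_shuffle_pd(a, b, _MM_SHUFFLE2(0, 1)) yields { a[1], b[0] }. */
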
#endif /* __SSE2__ */

#endif /* __EMMINTRIN_H */