[WebAssembly] Finalize wasm_simd128.h intrinsics

Adds new intrinsics for instructions that are in the final SIMD spec but did
not previously have intrinsics. Also updates the names of existing intrinsics
to reflect the final names of the underlying instructions in the spec. Keeps
the old names as deprecated functions to ease the transition to the new names.

Differential Revision: https://reviews.llvm.org/D101112
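For illustration, a minimal sketch of what the renames mean for user code (hypothetical example, not part of this commit; function names are invented):

// example.c -- migrating from the old intrinsic names to the final spec names
#include <wasm_simd128.h>

v128_t saturating_sum(v128_t a, v128_t b) {
  // Old name (now deprecated): wasm_i8x16_add_saturate(a, b)
  return wasm_i8x16_add_sat(a, b); // final spec name: i8x16.add_sat_s
}

v128_t widen_low(v128_t a) {
  // Old name (now deprecated): wasm_i16x8_widen_low_i8x16(a)
  return wasm_i16x8_extend_low_i8x16(a); // final spec name: i16x8.extend_low_i8x16_s
}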
parent 2f6405ec80
commit 502f54049d
clang/include/clang/Basic/BuiltinsWebAssembly.def
@@ -66,7 +66,7 @@ TARGET_BUILTIN(__builtin_wasm_trunc_saturate_s_i64_f64, "LLid", "nc", "nontrappi
 TARGET_BUILTIN(__builtin_wasm_trunc_saturate_u_i64_f64, "LLid", "nc", "nontrapping-fptoint")
 
 // SIMD builtins
-TARGET_BUILTIN(__builtin_wasm_swizzle_v8x16, "V16ScV16ScV16Sc", "nc", "simd128")
+TARGET_BUILTIN(__builtin_wasm_swizzle_i8x16, "V16ScV16ScV16Sc", "nc", "simd128")
 
 TARGET_BUILTIN(__builtin_wasm_extract_lane_s_i8x16, "iV16ScIi", "nc", "simd128")
 TARGET_BUILTIN(__builtin_wasm_extract_lane_u_i8x16, "iV16UcIUi", "nc", "simd128")
@@ -97,6 +97,7 @@ TARGET_BUILTIN(__builtin_wasm_sub_sat_u_i16x8, "V8UsV8UsV8Us", "nc", "simd128")
 TARGET_BUILTIN(__builtin_wasm_abs_i8x16, "V16ScV16Sc", "nc", "simd128")
 TARGET_BUILTIN(__builtin_wasm_abs_i16x8, "V8sV8s", "nc", "simd128")
 TARGET_BUILTIN(__builtin_wasm_abs_i32x4, "V4iV4i", "nc", "simd128")
+TARGET_BUILTIN(__builtin_wasm_abs_i64x2, "V2LLiV2LLi", "nc", "simd128")
 
 TARGET_BUILTIN(__builtin_wasm_min_s_i8x16, "V16ScV16ScV16Sc", "nc", "simd128")
 TARGET_BUILTIN(__builtin_wasm_min_u_i8x16, "V16UcV16UcV16Uc", "nc", "simd128")
@@ -141,12 +142,9 @@ TARGET_BUILTIN(__builtin_wasm_extadd_pairwise_i16x8_u_i32x4, "V4UiV8Us", "nc", "
 
 TARGET_BUILTIN(__builtin_wasm_bitselect, "V4iV4iV4iV4i", "nc", "simd128")
 
-TARGET_BUILTIN(__builtin_wasm_shuffle_v8x16, "V16ScV16ScV16ScIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIi", "nc", "simd128")
+TARGET_BUILTIN(__builtin_wasm_shuffle_i8x16, "V16ScV16ScV16ScIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIi", "nc", "simd128")
 
-TARGET_BUILTIN(__builtin_wasm_any_true_i8x16, "iV16Sc", "nc", "simd128")
-TARGET_BUILTIN(__builtin_wasm_any_true_i16x8, "iV8s", "nc", "simd128")
-TARGET_BUILTIN(__builtin_wasm_any_true_i32x4, "iV4i", "nc", "simd128")
-TARGET_BUILTIN(__builtin_wasm_any_true_i64x2, "iV2LLi", "nc", "simd128")
+TARGET_BUILTIN(__builtin_wasm_any_true_v128, "iV16Sc", "nc", "simd128")
 TARGET_BUILTIN(__builtin_wasm_all_true_i8x16, "iV16Sc", "nc", "simd128")
 TARGET_BUILTIN(__builtin_wasm_all_true_i16x8, "iV8s", "nc", "simd128")
 TARGET_BUILTIN(__builtin_wasm_all_true_i32x4, "iV4i", "nc", "simd128")
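For readers unfamiliar with Builtins.def, the quoted strings encode the builtin's type signature and attributes. A hedged reading of one entry added above (informal, based on Clang's Builtins.def type-string conventions):

// TARGET_BUILTIN(__builtin_wasm_abs_i64x2, "V2LLiV2LLi", "nc", "simd128")
//   "V2LLi"  -> vector of 2 long long (i.e. a v2i64 result and operand)
//   "nc"     -> nothrow, const (no side effects)
//   "simd128" -> only available when the simd128 target feature is enabled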

clang/lib/CodeGen/CGBuiltin.cpp
@@ -17157,7 +17157,7 @@ Value *CodeGenFunction::EmitWebAssemblyBuiltinExpr(unsigned BuiltinID,
     Function *Callee = CGM.getIntrinsic(IntNo, ConvertType(E->getType()));
     return Builder.CreateCall(Callee, Value);
   }
-  case WebAssembly::BI__builtin_wasm_swizzle_v8x16: {
+  case WebAssembly::BI__builtin_wasm_swizzle_i8x16: {
     Value *Src = EmitScalarExpr(E->getArg(0));
     Value *Indices = EmitScalarExpr(E->getArg(1));
     Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_swizzle);
@@ -17256,7 +17256,8 @@ Value *CodeGenFunction::EmitWebAssemblyBuiltinExpr(unsigned BuiltinID,
   }
   case WebAssembly::BI__builtin_wasm_abs_i8x16:
   case WebAssembly::BI__builtin_wasm_abs_i16x8:
-  case WebAssembly::BI__builtin_wasm_abs_i32x4: {
+  case WebAssembly::BI__builtin_wasm_abs_i32x4:
+  case WebAssembly::BI__builtin_wasm_abs_i64x2: {
     Value *Vec = EmitScalarExpr(E->getArg(0));
     Value *Neg = Builder.CreateNeg(Vec, "neg");
     Constant *Zero = llvm::Constant::getNullValue(Vec->getType());
@@ -17402,20 +17403,14 @@ Value *CodeGenFunction::EmitWebAssemblyBuiltinExpr(unsigned BuiltinID,
     Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_popcnt);
     return Builder.CreateCall(Callee, {Vec});
   }
-  case WebAssembly::BI__builtin_wasm_any_true_i8x16:
-  case WebAssembly::BI__builtin_wasm_any_true_i16x8:
-  case WebAssembly::BI__builtin_wasm_any_true_i32x4:
-  case WebAssembly::BI__builtin_wasm_any_true_i64x2:
+  case WebAssembly::BI__builtin_wasm_any_true_v128:
   case WebAssembly::BI__builtin_wasm_all_true_i8x16:
   case WebAssembly::BI__builtin_wasm_all_true_i16x8:
   case WebAssembly::BI__builtin_wasm_all_true_i32x4:
   case WebAssembly::BI__builtin_wasm_all_true_i64x2: {
     unsigned IntNo;
     switch (BuiltinID) {
-    case WebAssembly::BI__builtin_wasm_any_true_i8x16:
-    case WebAssembly::BI__builtin_wasm_any_true_i16x8:
-    case WebAssembly::BI__builtin_wasm_any_true_i32x4:
-    case WebAssembly::BI__builtin_wasm_any_true_i64x2:
+    case WebAssembly::BI__builtin_wasm_any_true_v128:
       IntNo = Intrinsic::wasm_anytrue;
       break;
     case WebAssembly::BI__builtin_wasm_all_true_i8x16:
@@ -17566,7 +17561,7 @@ Value *CodeGenFunction::EmitWebAssemblyBuiltinExpr(unsigned BuiltinID,
     Function *Callee = CGM.getIntrinsic(IntNo);
     return Builder.CreateCall(Callee, {Ptr, Vec, LaneIdx});
   }
-  case WebAssembly::BI__builtin_wasm_shuffle_v8x16: {
+  case WebAssembly::BI__builtin_wasm_shuffle_i8x16: {
     Value *Ops[18];
     size_t OpIdx = 0;
     Ops[OpIdx++] = EmitScalarExpr(E->getArg(0));
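As context for the any_true change above: all of the old per-width any_true builtins funneled into the single @llvm.wasm.anytrue intrinsic, so collapsing them to one v128-level builtin loses nothing. A sketch of the expected lowering (illustrative; the vector typedef is an assumption mirroring the tests later in this commit):

// Illustrative only:
typedef signed char i8x16 __attribute__((vector_size(16)));
int any(i8x16 v) { return __builtin_wasm_any_true_v128(v); }
// expected IR: call i32 @llvm.wasm.anytrue.v16i8(<16 x i8> %v)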

clang/lib/Headers/wasm_simd128.h
@@ -40,6 +40,9 @@ typedef unsigned char __u8x8
 typedef short __i16x4 __attribute__((__vector_size__(8), __aligned__(8)));
+typedef unsigned short __u16x4
+    __attribute__((__vector_size__(8), __aligned__(8)));
 typedef int __i32x2 __attribute__((__vector_size__(8), __aligned__(8)));
+typedef unsigned int __u32x2
+    __attribute__((__vector_size__(8), __aligned__(8)));
 
 #define __DEFAULT_FN_ATTRS \
   __attribute__((__always_inline__, __nodebug__, __target__("simd128"), \
@@ -57,102 +60,129 @@ static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_v128_load(const void *__mem) {
 }
 
 static __inline__ v128_t __DEFAULT_FN_ATTRS
-wasm_v8x16_load_splat(const void *__mem) {
-  struct __wasm_v8x16_load_splat_struct {
+wasm_v128_load8_splat(const void *__mem) {
+  struct __wasm_v128_load8_splat_struct {
     uint8_t __v;
   } __attribute__((__packed__, __may_alias__));
-  uint8_t __v = ((const struct __wasm_v8x16_load_splat_struct *)__mem)->__v;
+  uint8_t __v = ((const struct __wasm_v128_load8_splat_struct *)__mem)->__v;
   return (v128_t)(__u8x16){__v, __v, __v, __v, __v, __v, __v, __v,
                            __v, __v, __v, __v, __v, __v, __v, __v};
 }
 
 static __inline__ v128_t __DEFAULT_FN_ATTRS
-wasm_v16x8_load_splat(const void *__mem) {
-  struct __wasm_v16x8_load_splat_struct {
+wasm_v128_load16_splat(const void *__mem) {
+  struct __wasm_v128_load16_splat_struct {
     uint16_t __v;
   } __attribute__((__packed__, __may_alias__));
-  uint16_t __v = ((const struct __wasm_v16x8_load_splat_struct *)__mem)->__v;
+  uint16_t __v = ((const struct __wasm_v128_load16_splat_struct *)__mem)->__v;
   return (v128_t)(__u16x8){__v, __v, __v, __v, __v, __v, __v, __v};
 }
 
 static __inline__ v128_t __DEFAULT_FN_ATTRS
-wasm_v32x4_load_splat(const void *__mem) {
-  struct __wasm_v32x4_load_splat_struct {
+wasm_v128_load32_splat(const void *__mem) {
+  struct __wasm_v128_load32_splat_struct {
     uint32_t __v;
   } __attribute__((__packed__, __may_alias__));
-  uint32_t __v = ((const struct __wasm_v32x4_load_splat_struct *)__mem)->__v;
+  uint32_t __v = ((const struct __wasm_v128_load32_splat_struct *)__mem)->__v;
   return (v128_t)(__u32x4){__v, __v, __v, __v};
 }
 
 static __inline__ v128_t __DEFAULT_FN_ATTRS
-wasm_v64x2_load_splat(const void *__mem) {
-  struct __wasm_v64x2_load_splat_struct {
+wasm_v128_load64_splat(const void *__mem) {
+  struct __wasm_v128_load64_splat_struct {
     uint64_t __v;
   } __attribute__((__packed__, __may_alias__));
-  uint64_t __v = ((const struct __wasm_v64x2_load_splat_struct *)__mem)->__v;
+  uint64_t __v = ((const struct __wasm_v128_load64_splat_struct *)__mem)->__v;
   return (v128_t)(__u64x2){__v, __v};
 }
 
 static __inline__ v128_t __DEFAULT_FN_ATTRS
-wasm_i16x8_load_8x8(const void *__mem) {
-  typedef int8_t __i8x8 __attribute__((__vector_size__(8), __aligned__(8)));
-  struct __wasm_i16x8_load_8x8_struct {
+wasm_i16x8_load8x8(const void *__mem) {
+  struct __wasm_i16x8_load8x8_struct {
     __i8x8 __v;
   } __attribute__((__packed__, __may_alias__));
-  __i8x8 __v = ((const struct __wasm_i16x8_load_8x8_struct *)__mem)->__v;
+  __i8x8 __v = ((const struct __wasm_i16x8_load8x8_struct *)__mem)->__v;
   return (v128_t) __builtin_convertvector(__v, __i16x8);
 }
 
 static __inline__ v128_t __DEFAULT_FN_ATTRS
-wasm_u16x8_load_8x8(const void *__mem) {
-  typedef uint8_t __u8x8 __attribute__((__vector_size__(8), __aligned__(8)));
-  struct __wasm_u16x8_load_8x8_struct {
+wasm_u16x8_load8x8(const void *__mem) {
+  struct __wasm_u16x8_load8x8_struct {
     __u8x8 __v;
   } __attribute__((__packed__, __may_alias__));
-  __u8x8 __v = ((const struct __wasm_u16x8_load_8x8_struct *)__mem)->__v;
+  __u8x8 __v = ((const struct __wasm_u16x8_load8x8_struct *)__mem)->__v;
   return (v128_t) __builtin_convertvector(__v, __u16x8);
 }
 
 static __inline__ v128_t __DEFAULT_FN_ATTRS
-wasm_i32x4_load_16x4(const void *__mem) {
-  typedef int16_t __i16x4 __attribute__((__vector_size__(8), __aligned__(8)));
-  struct __wasm_i32x4_load_16x4_struct {
+wasm_i32x4_load16x4(const void *__mem) {
+  struct __wasm_i32x4_load16x4_struct {
     __i16x4 __v;
   } __attribute__((__packed__, __may_alias__));
-  __i16x4 __v = ((const struct __wasm_i32x4_load_16x4_struct *)__mem)->__v;
+  __i16x4 __v = ((const struct __wasm_i32x4_load16x4_struct *)__mem)->__v;
   return (v128_t) __builtin_convertvector(__v, __i32x4);
 }
 
 static __inline__ v128_t __DEFAULT_FN_ATTRS
-wasm_u32x4_load_16x4(const void *__mem) {
-  typedef uint16_t __u16x4 __attribute__((__vector_size__(8), __aligned__(8)));
-  struct __wasm_u32x4_load_16x4_struct {
+wasm_u32x4_load16x4(const void *__mem) {
+  struct __wasm_u32x4_load16x4_struct {
     __u16x4 __v;
   } __attribute__((__packed__, __may_alias__));
-  __u16x4 __v = ((const struct __wasm_u32x4_load_16x4_struct *)__mem)->__v;
+  __u16x4 __v = ((const struct __wasm_u32x4_load16x4_struct *)__mem)->__v;
   return (v128_t) __builtin_convertvector(__v, __u32x4);
 }
 
 static __inline__ v128_t __DEFAULT_FN_ATTRS
-wasm_i64x2_load_32x2(const void *__mem) {
-  typedef int32_t __i32x2 __attribute__((__vector_size__(8), __aligned__(8)));
-  struct __wasm_i64x2_load_32x2_struct {
+wasm_i64x2_load32x2(const void *__mem) {
+  struct __wasm_i64x2_load32x2_struct {
     __i32x2 __v;
   } __attribute__((__packed__, __may_alias__));
-  __i32x2 __v = ((const struct __wasm_i64x2_load_32x2_struct *)__mem)->__v;
+  __i32x2 __v = ((const struct __wasm_i64x2_load32x2_struct *)__mem)->__v;
   return (v128_t) __builtin_convertvector(__v, __i64x2);
 }
 
 static __inline__ v128_t __DEFAULT_FN_ATTRS
-wasm_u64x2_load_32x2(const void *__mem) {
-  typedef uint32_t __u32x2 __attribute__((__vector_size__(8), __aligned__(8)));
-  struct __wasm_u64x2_load_32x2_struct {
+wasm_u64x2_load32x2(const void *__mem) {
+  struct __wasm_u64x2_load32x2_struct {
     __u32x2 __v;
   } __attribute__((__packed__, __may_alias__));
-  __u32x2 __v = ((const struct __wasm_u64x2_load_32x2_struct *)__mem)->__v;
+  __u32x2 __v = ((const struct __wasm_u64x2_load32x2_struct *)__mem)->__v;
   return (v128_t) __builtin_convertvector(__v, __u64x2);
 }
 
+static __inline__ v128_t __DEFAULT_FN_ATTRS
+wasm_v128_load32_zero(const void *__mem) {
+  struct __wasm_v128_load32_zero_struct {
+    int32_t __v;
+  } __attribute__((__packed__, __may_alias__));
+  int32_t __v = ((const struct __wasm_v128_load32_zero_struct *)__mem)->__v;
+  return (v128_t)(__i32x4){__v, 0, 0, 0};
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS
+wasm_v128_load64_zero(const void *__mem) {
+  struct __wasm_v128_load64_zero_struct {
+    int64_t __v;
+  } __attribute__((__packed__, __may_alias__));
+  int64_t __v = ((const struct __wasm_v128_load64_zero_struct *)__mem)->__v;
+  return (v128_t)(__i64x2){__v, 0};
+}
+
+#define wasm_v128_load8_lane(__ptr, __vec, __i) \
+  ((v128_t)__builtin_wasm_load8_lane((signed char *)(__ptr), (__i8x16)(__vec), \
+                                     (__i)))
+
+#define wasm_v128_load16_lane(__ptr, __vec, __i) \
+  ((v128_t)__builtin_wasm_load16_lane((short *)(__ptr), (__i16x8)(__vec), \
+                                      (__i)))
+
+#define wasm_v128_load32_lane(__ptr, __vec, __i) \
+  ((v128_t)__builtin_wasm_load32_lane((int *)(__ptr), (__i32x4)(__vec), (__i)))
+
+#define wasm_v128_load64_lane(__ptr, __vec, __i) \
+  ((v128_t)__builtin_wasm_load64_lane((long long int *)(__ptr), \
+                                      (__i64x2)(__vec), (__i)))
+
 static __inline__ void __DEFAULT_FN_ATTRS wasm_v128_store(void *__mem,
                                                           v128_t __a) {
   // UB-free unaligned access copied from xmmintrin.h
@@ -162,6 +192,19 @@ static __inline__ void __DEFAULT_FN_ATTRS wasm_v128_store(void *__mem,
   ((struct __wasm_v128_store_struct *)__mem)->__v = __a;
 }
 
+#define wasm_v128_store8_lane(__ptr, __vec, __i) \
+  (__builtin_wasm_store8_lane((signed char *)(__ptr), (__i8x16)(__vec), (__i)))
+
+#define wasm_v128_store16_lane(__ptr, __vec, __i) \
+  (__builtin_wasm_store16_lane((short *)(__ptr), (__i16x8)(__vec), (__i)))
+
+#define wasm_v128_store32_lane(__ptr, __vec, __i) \
+  (__builtin_wasm_store32_lane((int *)(__ptr), (__i32x4)(__vec), (__i)))
+
+#define wasm_v128_store64_lane(__ptr, __vec, __i) \
+  (__builtin_wasm_store64_lane((long long int *)(__ptr), (__i64x2)(__vec), \
+                               (__i)))
+
 static __inline__ v128_t __DEFAULT_FN_ATTRS
 wasm_i8x16_make(int8_t __c0, int8_t __c1, int8_t __c2, int8_t __c3, int8_t __c4,
                 int8_t __c5, int8_t __c6, int8_t __c7, int8_t __c8, int8_t __c9,
@@ -487,6 +530,36 @@ static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u32x4_ge(v128_t __a,
   return (v128_t)((__u32x4)__a >= (__u32x4)__b);
 }
 
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i64x2_eq(v128_t __a,
+                                                          v128_t __b) {
+  return (v128_t)((__i64x2)__a == (__i64x2)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i64x2_ne(v128_t __a,
+                                                          v128_t __b) {
+  return (v128_t)((__i64x2)__a != (__i64x2)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i64x2_lt(v128_t __a,
+                                                          v128_t __b) {
+  return (v128_t)((__i64x2)__a < (__i64x2)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i64x2_gt(v128_t __a,
+                                                          v128_t __b) {
+  return (v128_t)((__i64x2)__a > (__i64x2)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i64x2_le(v128_t __a,
+                                                          v128_t __b) {
+  return (v128_t)((__i64x2)__a <= (__i64x2)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i64x2_ge(v128_t __a,
+                                                          v128_t __b) {
+  return (v128_t)((__i64x2)__a >= (__i64x2)__b);
+}
+
 static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f32x4_eq(v128_t __a,
                                                           v128_t __b) {
   return (v128_t)((__f32x4)__a == (__f32x4)__b);
@@ -571,6 +644,10 @@ static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_v128_andnot(v128_t __a,
   return __a & ~__b;
 }
 
+static __inline__ bool __DEFAULT_FN_ATTRS wasm_v128_any_true(v128_t __a) {
+  return __builtin_wasm_any_true_v128((__i8x16)__a);
+}
+
 static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_v128_bitselect(v128_t __a,
                                                                 v128_t __b,
                                                                 v128_t __mask) {
@@ -586,14 +663,18 @@ static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i8x16_neg(v128_t __a) {
   return (v128_t)(-(__u8x16)__a);
 }
 
-static __inline__ bool __DEFAULT_FN_ATTRS wasm_i8x16_any_true(v128_t __a) {
-  return __builtin_wasm_any_true_i8x16((__i8x16)__a);
-}
-
 static __inline__ bool __DEFAULT_FN_ATTRS wasm_i8x16_all_true(v128_t __a) {
   return __builtin_wasm_all_true_i8x16((__i8x16)__a);
 }
 
+static __inline__ int32_t __DEFAULT_FN_ATTRS wasm_i8x16_bitmask(v128_t __a) {
+  return __builtin_wasm_bitmask_i8x16((__i8x16)__a);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i8x16_popcnt(v128_t __a) {
+  return (v128_t)__builtin_wasm_popcnt_i8x16((__i8x16)__a);
+}
+
 static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i8x16_shl(v128_t __a,
                                                            int32_t __b) {
   return (v128_t)((__i8x16)__a << __b);
@@ -614,13 +695,13 @@ static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i8x16_add(v128_t __a,
   return (v128_t)((__u8x16)__a + (__u8x16)__b);
 }
 
-static __inline__ v128_t __DEFAULT_FN_ATTRS
-wasm_i8x16_add_saturate(v128_t __a, v128_t __b) {
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i8x16_add_sat(v128_t __a,
+                                                               v128_t __b) {
   return (v128_t)__builtin_wasm_add_sat_s_i8x16((__i8x16)__a, (__i8x16)__b);
 }
 
-static __inline__ v128_t __DEFAULT_FN_ATTRS
-wasm_u8x16_add_saturate(v128_t __a, v128_t __b) {
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u8x16_add_sat(v128_t __a,
+                                                               v128_t __b) {
   return (v128_t)__builtin_wasm_add_sat_u_i8x16((__u8x16)__a, (__u8x16)__b);
 }
 
@@ -629,13 +710,13 @@ static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i8x16_sub(v128_t __a,
   return (v128_t)((__u8x16)__a - (__u8x16)__b);
 }
 
-static __inline__ v128_t __DEFAULT_FN_ATTRS
-wasm_i8x16_sub_saturate(v128_t __a, v128_t __b) {
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i8x16_sub_sat(v128_t __a,
+                                                               v128_t __b) {
  return (v128_t)__builtin_wasm_sub_sat_s_i8x16((__i8x16)__a, (__i8x16)__b);
 }
 
-static __inline__ v128_t __DEFAULT_FN_ATTRS
-wasm_u8x16_sub_saturate(v128_t __a, v128_t __b) {
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u8x16_sub_sat(v128_t __a,
+                                                               v128_t __b) {
   return (v128_t)__builtin_wasm_sub_sat_u_i8x16((__u8x16)__a, (__u8x16)__b);
 }
 
@@ -672,14 +753,14 @@ static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i16x8_neg(v128_t __a) {
   return (v128_t)(-(__u16x8)__a);
 }
 
-static __inline__ bool __DEFAULT_FN_ATTRS wasm_i16x8_any_true(v128_t __a) {
-  return __builtin_wasm_any_true_i16x8((__i16x8)__a);
-}
-
 static __inline__ bool __DEFAULT_FN_ATTRS wasm_i16x8_all_true(v128_t __a) {
   return __builtin_wasm_all_true_i16x8((__i16x8)__a);
 }
 
+static __inline__ int32_t __DEFAULT_FN_ATTRS wasm_i16x8_bitmask(v128_t __a) {
+  return __builtin_wasm_bitmask_i16x8((__i16x8)__a);
+}
+
 static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i16x8_shl(v128_t __a,
                                                            int32_t __b) {
   return (v128_t)((__i16x8)__a << __b);
@@ -700,13 +781,13 @@ static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i16x8_add(v128_t __a,
   return (v128_t)((__u16x8)__a + (__u16x8)__b);
 }
 
-static __inline__ v128_t __DEFAULT_FN_ATTRS
-wasm_i16x8_add_saturate(v128_t __a, v128_t __b) {
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i16x8_add_sat(v128_t __a,
+                                                               v128_t __b) {
   return (v128_t)__builtin_wasm_add_sat_s_i16x8((__i16x8)__a, (__i16x8)__b);
 }
 
-static __inline__ v128_t __DEFAULT_FN_ATTRS
-wasm_u16x8_add_saturate(v128_t __a, v128_t __b) {
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u16x8_add_sat(v128_t __a,
+                                                               v128_t __b) {
   return (v128_t)__builtin_wasm_add_sat_u_i16x8((__u16x8)__a, (__u16x8)__b);
 }
 
@@ -715,13 +796,13 @@ static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i16x8_sub(v128_t __a,
   return (v128_t)((__i16x8)__a - (__i16x8)__b);
 }
 
-static __inline__ v128_t __DEFAULT_FN_ATTRS
-wasm_i16x8_sub_saturate(v128_t __a, v128_t __b) {
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i16x8_sub_sat(v128_t __a,
+                                                               v128_t __b) {
   return (v128_t)__builtin_wasm_sub_sat_s_i16x8((__i16x8)__a, (__i16x8)__b);
 }
 
-static __inline__ v128_t __DEFAULT_FN_ATTRS
-wasm_u16x8_sub_saturate(v128_t __a, v128_t __b) {
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u16x8_sub_sat(v128_t __a,
+                                                               v128_t __b) {
   return (v128_t)__builtin_wasm_sub_sat_u_i16x8((__u16x8)__a, (__u16x8)__b);
 }
 
@@ -763,14 +844,14 @@ static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i32x4_neg(v128_t __a) {
   return (v128_t)(-(__u32x4)__a);
 }
 
-static __inline__ bool __DEFAULT_FN_ATTRS wasm_i32x4_any_true(v128_t __a) {
-  return __builtin_wasm_any_true_i32x4((__i32x4)__a);
-}
-
 static __inline__ bool __DEFAULT_FN_ATTRS wasm_i32x4_all_true(v128_t __a) {
   return __builtin_wasm_all_true_i32x4((__i32x4)__a);
 }
 
+static __inline__ int32_t __DEFAULT_FN_ATTRS wasm_i32x4_bitmask(v128_t __a) {
+  return __builtin_wasm_bitmask_i32x4((__i32x4)__a);
+}
+
 static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i32x4_shl(v128_t __a,
                                                            int32_t __b) {
   return (v128_t)((__i32x4)__a << __b);
@@ -821,10 +902,27 @@ static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u32x4_max(v128_t __a,
   return (v128_t)__builtin_wasm_max_u_i32x4((__u32x4)__a, (__u32x4)__b);
 }
 
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i32x4_dot_i16x8(v128_t __a,
+                                                                 v128_t __b) {
+  return (v128_t)__builtin_wasm_dot_s_i32x4_i16x8((__i16x8)__a, (__i16x8)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i64x2_abs(v128_t __a) {
+  return (v128_t)__builtin_wasm_abs_i64x2((__i64x2)__a);
+}
+
 static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i64x2_neg(v128_t __a) {
   return (v128_t)(-(__u64x2)__a);
 }
 
+static __inline__ bool __DEFAULT_FN_ATTRS wasm_i64x2_all_true(v128_t __a) {
+  return __builtin_wasm_all_true_i64x2((__i64x2)__a);
+}
+
+static __inline__ int32_t __DEFAULT_FN_ATTRS wasm_i64x2_bitmask(v128_t __a) {
+  return __builtin_wasm_bitmask_i64x2((__i64x2)__a);
+}
+
 static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i64x2_shl(v128_t __a,
                                                            int32_t __b) {
   return (v128_t)((__i64x2)__a << (int64_t)__b);
@@ -867,6 +965,22 @@ static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f32x4_sqrt(v128_t __a) {
   return (v128_t)__builtin_wasm_sqrt_f32x4((__f32x4)__a);
 }
 
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f32x4_ceil(v128_t __a) {
+  return (v128_t)__builtin_wasm_ceil_f32x4((__f32x4)__a);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f32x4_floor(v128_t __a) {
+  return (v128_t)__builtin_wasm_floor_f32x4((__f32x4)__a);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f32x4_trunc(v128_t __a) {
+  return (v128_t)__builtin_wasm_trunc_f32x4((__f32x4)__a);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f32x4_nearest(v128_t __a) {
+  return (v128_t)__builtin_wasm_nearest_f32x4((__f32x4)__a);
+}
+
 static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f32x4_add(v128_t __a,
                                                            v128_t __b) {
   return (v128_t)((__f32x4)__a + (__f32x4)__b);
@@ -919,6 +1033,22 @@ static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f64x2_sqrt(v128_t __a) {
   return (v128_t)__builtin_wasm_sqrt_f64x2((__f64x2)__a);
 }
 
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f64x2_ceil(v128_t __a) {
+  return (v128_t)__builtin_wasm_ceil_f64x2((__f64x2)__a);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f64x2_floor(v128_t __a) {
+  return (v128_t)__builtin_wasm_floor_f64x2((__f64x2)__a);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f64x2_trunc(v128_t __a) {
+  return (v128_t)__builtin_wasm_trunc_f64x2((__f64x2)__a);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f64x2_nearest(v128_t __a) {
+  return (v128_t)__builtin_wasm_nearest_f64x2((__f64x2)__a);
+}
+
 static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f64x2_add(v128_t __a,
                                                            v128_t __b) {
   return (v128_t)((__f64x2)__a + (__f64x2)__b);
@@ -960,12 +1090,12 @@ static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f64x2_pmax(v128_t __a,
 }
 
 static __inline__ v128_t __DEFAULT_FN_ATTRS
-wasm_i32x4_trunc_saturate_f32x4(v128_t __a) {
+wasm_i32x4_trunc_sat_f32x4(v128_t __a) {
   return (v128_t)__builtin_wasm_trunc_saturate_s_i32x4_f32x4((__f32x4)__a);
 }
 
 static __inline__ v128_t __DEFAULT_FN_ATTRS
-wasm_u32x4_trunc_saturate_f32x4(v128_t __a) {
+wasm_u32x4_trunc_sat_f32x4(v128_t __a) {
   return (v128_t)__builtin_wasm_trunc_saturate_u_i32x4_f32x4((__f32x4)__a);
 }
 
@@ -979,38 +1109,68 @@ wasm_f32x4_convert_u32x4(v128_t __a) {
   return (v128_t) __builtin_convertvector((__u32x4)__a, __f32x4);
 }
 
-#define wasm_v8x16_shuffle(__a, __b, __c0, __c1, __c2, __c3, __c4, __c5, __c6, \
+static __inline__ v128_t __DEFAULT_FN_ATTRS
+wasm_f64x2_convert_low_i32x4(v128_t __a) {
+  return (v128_t) __builtin_convertvector((__i32x2){__a[0], __a[1]}, __f64x2);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS
+wasm_f64x2_convert_low_u32x4(v128_t __a) {
+  return (v128_t) __builtin_convertvector((__u32x2){__a[0], __a[1]}, __f64x2);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS
+wasm_i32x4_trunc_sat_f64x2_zero(v128_t __a) {
+  return (v128_t)__builtin_wasm_trunc_sat_zero_s_f64x2_i32x4((__f64x2)__a);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS
+wasm_u32x4_trunc_sat_f64x2_zero(v128_t __a) {
+  return (v128_t)__builtin_wasm_trunc_sat_zero_u_f64x2_i32x4((__f64x2)__a);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS
+wasm_f32x4_demote_f64x2_zero(v128_t __a) {
+  return (v128_t)__builtin_wasm_demote_zero_f64x2_f32x4((__f64x2)__a);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS
+wasm_f64x2_promote_low_f32x4(v128_t __a) {
+  return (v128_t)__builtin_wasm_promote_low_f32x4_f64x2((__f32x4)__a);
+}
+
+#define wasm_i8x16_shuffle(__a, __b, __c0, __c1, __c2, __c3, __c4, __c5, __c6, \
                            __c7, __c8, __c9, __c10, __c11, __c12, __c13, \
                            __c14, __c15) \
-  ((v128_t)__builtin_wasm_shuffle_v8x16( \
+  ((v128_t)__builtin_wasm_shuffle_i8x16( \
       (__i8x16)(__a), (__i8x16)(__b), __c0, __c1, __c2, __c3, __c4, __c5, \
       __c6, __c7, __c8, __c9, __c10, __c11, __c12, __c13, __c14, __c15))
 
-#define wasm_v16x8_shuffle(__a, __b, __c0, __c1, __c2, __c3, __c4, __c5, __c6, \
+#define wasm_i16x8_shuffle(__a, __b, __c0, __c1, __c2, __c3, __c4, __c5, __c6, \
                            __c7) \
-  ((v128_t)__builtin_wasm_shuffle_v8x16( \
+  ((v128_t)__builtin_wasm_shuffle_i8x16( \
      (__i8x16)(__a), (__i8x16)(__b), (__c0)*2, (__c0)*2 + 1, (__c1)*2, \
      (__c1)*2 + 1, (__c2)*2, (__c2)*2 + 1, (__c3)*2, (__c3)*2 + 1, (__c4)*2, \
      (__c4)*2 + 1, (__c5)*2, (__c5)*2 + 1, (__c6)*2, (__c6)*2 + 1, (__c7)*2, \
      (__c7)*2 + 1))
 
-#define wasm_v32x4_shuffle(__a, __b, __c0, __c1, __c2, __c3) \
-  ((v128_t)__builtin_wasm_shuffle_v8x16( \
+#define wasm_i32x4_shuffle(__a, __b, __c0, __c1, __c2, __c3) \
+  ((v128_t)__builtin_wasm_shuffle_i8x16( \
      (__i8x16)(__a), (__i8x16)(__b), (__c0)*4, (__c0)*4 + 1, (__c0)*4 + 2, \
      (__c0)*4 + 3, (__c1)*4, (__c1)*4 + 1, (__c1)*4 + 2, (__c1)*4 + 3, \
      (__c2)*4, (__c2)*4 + 1, (__c2)*4 + 2, (__c2)*4 + 3, (__c3)*4, \
     (__c3)*4 + 1, (__c3)*4 + 2, (__c3)*4 + 3))
 
-#define wasm_v64x2_shuffle(__a, __b, __c0, __c1) \
-  ((v128_t)__builtin_wasm_shuffle_v8x16( \
+#define wasm_i64x2_shuffle(__a, __b, __c0, __c1) \
+  ((v128_t)__builtin_wasm_shuffle_i8x16( \
      (__i8x16)(__a), (__i8x16)(__b), (__c0)*8, (__c0)*8 + 1, (__c0)*8 + 2, \
      (__c0)*8 + 3, (__c0)*8 + 4, (__c0)*8 + 5, (__c0)*8 + 6, (__c0)*8 + 7, \
      (__c1)*8, (__c1)*8 + 1, (__c1)*8 + 2, (__c1)*8 + 3, (__c1)*8 + 4, \
      (__c1)*8 + 5, (__c1)*8 + 6, (__c1)*8 + 7))
 
-static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_v8x16_swizzle(v128_t __a,
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i8x16_swizzle(v128_t __a,
                                                                v128_t __b) {
-  return (v128_t)__builtin_wasm_swizzle_v8x16((__i8x16)__a, (__i8x16)__b);
+  return (v128_t)__builtin_wasm_swizzle_i8x16((__i8x16)__a, (__i8x16)__b);
 }
 
 static __inline__ v128_t __DEFAULT_FN_ATTRS
@@ -1038,7 +1198,7 @@ wasm_u16x8_narrow_i32x4(v128_t __a, v128_t __b) {
 }
 
 static __inline__ v128_t __DEFAULT_FN_ATTRS
-wasm_i16x8_widen_low_i8x16(v128_t __a) {
+wasm_i16x8_extend_low_i8x16(v128_t __a) {
   return (v128_t) __builtin_convertvector(
       (__i8x8){((__i8x16)__a)[0], ((__i8x16)__a)[1], ((__i8x16)__a)[2],
               ((__i8x16)__a)[3], ((__i8x16)__a)[4], ((__i8x16)__a)[5],
@@ -1047,7 +1207,7 @@ wasm_i16x8_widen_low_i8x16(v128_t __a) {
 }
 
 static __inline__ v128_t __DEFAULT_FN_ATTRS
-wasm_i16x8_widen_high_i8x16(v128_t __a) {
+wasm_i16x8_extend_high_i8x16(v128_t __a) {
   return (v128_t) __builtin_convertvector(
       (__i8x8){((__i8x16)__a)[8], ((__i8x16)__a)[9], ((__i8x16)__a)[10],
               ((__i8x16)__a)[11], ((__i8x16)__a)[12], ((__i8x16)__a)[13],
@@ -1056,7 +1216,7 @@ wasm_i16x8_widen_high_i8x16(v128_t __a) {
 }
 
 static __inline__ v128_t __DEFAULT_FN_ATTRS
-wasm_i16x8_widen_low_u8x16(v128_t __a) {
+wasm_u16x8_extend_low_u8x16(v128_t __a) {
   return (v128_t) __builtin_convertvector(
       (__u8x8){((__u8x16)__a)[0], ((__u8x16)__a)[1], ((__u8x16)__a)[2],
               ((__u8x16)__a)[3], ((__u8x16)__a)[4], ((__u8x16)__a)[5],
@@ -1065,7 +1225,7 @@ wasm_i16x8_widen_low_u8x16(v128_t __a) {
 }
 
 static __inline__ v128_t __DEFAULT_FN_ATTRS
-wasm_i16x8_widen_high_u8x16(v128_t __a) {
+wasm_u16x8_extend_high_u8x16(v128_t __a) {
   return (v128_t) __builtin_convertvector(
       (__u8x8){((__u8x16)__a)[8], ((__u8x16)__a)[9], ((__u8x16)__a)[10],
               ((__u8x16)__a)[11], ((__u8x16)__a)[12], ((__u8x16)__a)[13],
@@ -1074,7 +1234,7 @@ wasm_i16x8_widen_high_u8x16(v128_t __a) {
 }
 
 static __inline__ v128_t __DEFAULT_FN_ATTRS
-wasm_i32x4_widen_low_i16x8(v128_t __a) {
+wasm_i32x4_extend_low_i16x8(v128_t __a) {
   return (v128_t) __builtin_convertvector(
       (__i16x4){((__i16x8)__a)[0], ((__i16x8)__a)[1], ((__i16x8)__a)[2],
                ((__i16x8)__a)[3]},
@@ -1082,7 +1242,7 @@ wasm_i32x4_widen_low_i16x8(v128_t __a) {
 }
 
 static __inline__ v128_t __DEFAULT_FN_ATTRS
-wasm_i32x4_widen_high_i16x8(v128_t __a) {
+wasm_i32x4_extend_high_i16x8(v128_t __a) {
   return (v128_t) __builtin_convertvector(
       (__i16x4){((__i16x8)__a)[4], ((__i16x8)__a)[5], ((__i16x8)__a)[6],
                ((__i16x8)__a)[7]},
@@ -1090,7 +1250,7 @@ wasm_i32x4_widen_high_i16x8(v128_t __a) {
 }
 
 static __inline__ v128_t __DEFAULT_FN_ATTRS
-wasm_i32x4_widen_low_u16x8(v128_t __a) {
+wasm_u32x4_extend_low_u16x8(v128_t __a) {
   return (v128_t) __builtin_convertvector(
       (__u16x4){((__u16x8)__a)[0], ((__u16x8)__a)[1], ((__u16x8)__a)[2],
                ((__u16x8)__a)[3]},
@@ -1098,14 +1258,333 @@ wasm_i32x4_widen_low_u16x8(v128_t __a) {
 }
 
 static __inline__ v128_t __DEFAULT_FN_ATTRS
-wasm_i32x4_widen_high_u16x8(v128_t __a) {
+wasm_u32x4_extend_high_u16x8(v128_t __a) {
   return (v128_t) __builtin_convertvector(
       (__u16x4){((__u16x8)__a)[4], ((__u16x8)__a)[5], ((__u16x8)__a)[6],
                ((__u16x8)__a)[7]},
       __u32x4);
 }
 
+static __inline__ v128_t __DEFAULT_FN_ATTRS
+wasm_i64x2_extend_low_i32x4(v128_t __a) {
+  return (v128_t) __builtin_convertvector(
+      (__i32x2){((__i32x4)__a)[0], ((__i32x4)__a)[1]}, __i64x2);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS
+wasm_i64x2_extend_high_i32x4(v128_t __a) {
+  return (v128_t) __builtin_convertvector(
+      (__i32x2){((__i32x4)__a)[2], ((__i32x4)__a)[3]}, __i64x2);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS
+wasm_u64x2_extend_low_u32x4(v128_t __a) {
+  return (v128_t) __builtin_convertvector(
+      (__u32x2){((__u32x4)__a)[0], ((__u32x4)__a)[1]}, __u64x2);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS
+wasm_u64x2_extend_high_u32x4(v128_t __a) {
+  return (v128_t) __builtin_convertvector(
+      (__u32x2){((__u32x4)__a)[2], ((__u32x4)__a)[3]}, __u64x2);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS
+wasm_i16x8_extadd_pairwise_i8x16(v128_t __a) {
+  return (v128_t)__builtin_wasm_extadd_pairwise_i8x16_s_i16x8((__i8x16)__a);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS
+wasm_u16x8_extadd_pairwise_u8x16(v128_t __a) {
+  return (v128_t)__builtin_wasm_extadd_pairwise_i8x16_u_i16x8((__u8x16)__a);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS
+wasm_i32x4_extadd_pairwise_i16x8(v128_t __a) {
+  return (v128_t)__builtin_wasm_extadd_pairwise_i16x8_s_i32x4((__i16x8)__a);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS
+wasm_u32x4_extadd_pairwise_u16x8(v128_t __a) {
+  return (v128_t)__builtin_wasm_extadd_pairwise_i16x8_u_i32x4((__u16x8)__a);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS
+wasm_i16x8_extmul_low_i8x16(v128_t __a, v128_t __b) {
+  return (v128_t)__builtin_wasm_extmul_low_i8x16_s_i16x8((__i8x16)__a,
+                                                         (__i8x16)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS
+wasm_i16x8_extmul_high_i8x16(v128_t __a, v128_t __b) {
+  return (v128_t)__builtin_wasm_extmul_high_i8x16_s_i16x8((__i8x16)__a,
+                                                          (__i8x16)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS
+wasm_u16x8_extmul_low_u8x16(v128_t __a, v128_t __b) {
+  return (v128_t)__builtin_wasm_extmul_low_i8x16_u_i16x8((__u8x16)__a,
+                                                         (__u8x16)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS
+wasm_u16x8_extmul_high_u8x16(v128_t __a, v128_t __b) {
+  return (v128_t)__builtin_wasm_extmul_high_i8x16_u_i16x8((__u8x16)__a,
+                                                          (__u8x16)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS
+wasm_i32x4_extmul_low_i16x8(v128_t __a, v128_t __b) {
+  return (v128_t)__builtin_wasm_extmul_low_i16x8_s_i32x4((__i16x8)__a,
+                                                         (__i16x8)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS
+wasm_i32x4_extmul_high_i16x8(v128_t __a, v128_t __b) {
+  return (v128_t)__builtin_wasm_extmul_high_i16x8_s_i32x4((__i16x8)__a,
+                                                          (__i16x8)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS
+wasm_u32x4_extmul_low_u16x8(v128_t __a, v128_t __b) {
+  return (v128_t)__builtin_wasm_extmul_low_i16x8_u_i32x4((__u16x8)__a,
+                                                         (__u16x8)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS
+wasm_u32x4_extmul_high_u16x8(v128_t __a, v128_t __b) {
+  return (v128_t)__builtin_wasm_extmul_high_i16x8_u_i32x4((__u16x8)__a,
+                                                          (__u16x8)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS
+wasm_i64x2_extmul_low_i32x4(v128_t __a, v128_t __b) {
+  return (v128_t)__builtin_wasm_extmul_low_i32x4_s_i64x2((__i32x4)__a,
+                                                         (__i32x4)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS
+wasm_i64x2_extmul_high_i32x4(v128_t __a, v128_t __b) {
+  return (v128_t)__builtin_wasm_extmul_high_i32x4_s_i64x2((__i32x4)__a,
+                                                          (__i32x4)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS
+wasm_u64x2_extmul_low_u32x4(v128_t __a, v128_t __b) {
+  return (v128_t)__builtin_wasm_extmul_low_i32x4_u_i64x2((__u32x4)__a,
+                                                         (__u32x4)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS
+wasm_u64x2_extmul_high_u32x4(v128_t __a, v128_t __b) {
+  return (v128_t)__builtin_wasm_extmul_high_i32x4_u_i64x2((__u32x4)__a,
+                                                          (__u32x4)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i16x8_q15mulr_sat(v128_t __a,
+                                                                   v128_t __b) {
+  return (v128_t)__builtin_wasm_q15mulr_sat_s_i16x8((__i16x8)__a, (__i16x8)__b);
+}
+
+// Old intrinsic names supported to ease transitioning to the standard names. Do
+// not use these; they will be removed in the near future.
+
+#define __DEPRECATED_FN_ATTRS(__replacement) \
+  __DEFAULT_FN_ATTRS __attribute__( \
+      (deprecated("use " __replacement " instead", __replacement)))
+
+#define __WASM_STR(X) #X
+
+#ifdef __DEPRECATED
+#define __DEPRECATED_WASM_MACRO(__name, __replacement) \
+  _Pragma(__WASM_STR(GCC warning( \
+      "'" __name "' is deprecated: use '" __replacement "' instead")))
+#else
+#define __DEPRECATED_WASM_MACRO(__name, __replacement)
+#endif
+
+static __inline__ v128_t __DEPRECATED_FN_ATTRS("wasm_v128_load8_splat")
+wasm_v8x16_load_splat(const void *__mem) {
+  return wasm_v128_load8_splat(__mem);
+}
+
+static __inline__ v128_t __DEPRECATED_FN_ATTRS("wasm_v128_load16_splat")
+wasm_v16x8_load_splat(const void *__mem) {
+  return wasm_v128_load16_splat(__mem);
+}
+
+static __inline__ v128_t __DEPRECATED_FN_ATTRS("wasm_v128_load32_splat")
+wasm_v32x4_load_splat(const void *__mem) {
+  return wasm_v128_load32_splat(__mem);
+}
+
+static __inline__ v128_t __DEPRECATED_FN_ATTRS("wasm_v128_load64_splat")
+wasm_v64x2_load_splat(const void *__mem) {
+  return wasm_v128_load64_splat(__mem);
+}
+
+static __inline__ v128_t __DEPRECATED_FN_ATTRS("wasm_i16x8_load8x8")
+wasm_i16x8_load_8x8(const void *__mem) {
+  return wasm_i16x8_load8x8(__mem);
+}
+
+static __inline__ v128_t __DEPRECATED_FN_ATTRS("wasm_u16x8_load8x8")
+wasm_u16x8_load_8x8(const void *__mem) {
+  return wasm_u16x8_load8x8(__mem);
+}
+
+static __inline__ v128_t __DEPRECATED_FN_ATTRS("wasm_i32x4_load16x4")
+wasm_i32x4_load_16x4(const void *__mem) {
+  return wasm_i32x4_load16x4(__mem);
+}
+
+static __inline__ v128_t __DEPRECATED_FN_ATTRS("wasm_u32x4_load16x4")
+wasm_u32x4_load_16x4(const void *__mem) {
+  return wasm_u32x4_load16x4(__mem);
+}
+
+static __inline__ v128_t __DEPRECATED_FN_ATTRS("wasm_i64x2_load32x2")
+wasm_i64x2_load_32x2(const void *__mem) {
+  return wasm_i64x2_load32x2(__mem);
+}
+
+static __inline__ v128_t __DEPRECATED_FN_ATTRS("wasm_u64x2_load32x2")
+wasm_u64x2_load_32x2(const void *__mem) {
+  return wasm_u64x2_load32x2(__mem);
+}
+
+#define wasm_v8x16_shuffle(__a, __b, __c0, __c1, __c2, __c3, __c4, __c5, __c6, \
+                           __c7, __c8, __c9, __c10, __c11, __c12, __c13, \
+                           __c14, __c15) \
+  __DEPRECATED_WASM_MACRO("wasm_v8x16_shuffle", "wasm_i8x16_shuffle") \
+  wasm_i8x16_shuffle(__a, __b, __c0, __c1, __c2, __c3, __c4, __c5, __c6, __c7, \
+                     __c8, __c9, __c10, __c11, __c12, __c13, __c14, __c15)
+
+#define wasm_v16x8_shuffle(__a, __b, __c0, __c1, __c2, __c3, __c4, __c5, __c6, \
+                           __c7) \
+  __DEPRECATED_WASM_MACRO("wasm_v16x8_shuffle", "wasm_i16x8_shuffle") \
+  wasm_i16x8_shuffle(__a, __b, __c0, __c1, __c2, __c3, __c4, __c5, __c6, __c7)
+
+#define wasm_v32x4_shuffle(__a, __b, __c0, __c1, __c2, __c3) \
+  __DEPRECATED_WASM_MACRO("wasm_v32x4_shuffle", "wasm_i32x4_shuffle") \
+  wasm_i32x4_shuffle(__a, __b, __c0, __c1, __c2, __c3)
+
+#define wasm_v64x2_shuffle(__a, __b, __c0, __c1) \
+  __DEPRECATED_WASM_MACRO("wasm_v64x2_shuffle", "wasm_i64x2_shuffle") \
+  wasm_i64x2_shuffle(__a, __b, __c0, __c1)
+
+static __inline__ v128_t __DEPRECATED_FN_ATTRS("wasm_i8x16_swizzle")
+wasm_v8x16_swizzle(v128_t __a, v128_t __b) {
+  return wasm_i8x16_swizzle(__a, __b);
+}
+
+static __inline__ bool __DEPRECATED_FN_ATTRS("wasm_v128_any_true")
+wasm_i8x16_any_true(v128_t __a) {
+  return wasm_v128_any_true(__a);
+}
+
+static __inline__ bool __DEPRECATED_FN_ATTRS("wasm_v128_any_true")
+wasm_i16x8_any_true(v128_t __a) {
+  return wasm_v128_any_true(__a);
+}
+
+static __inline__ bool __DEPRECATED_FN_ATTRS("wasm_v128_any_true")
+wasm_i32x4_any_true(v128_t __a) {
+  return wasm_v128_any_true(__a);
+}
+
+static __inline__ v128_t __DEPRECATED_FN_ATTRS("wasm_i8x16_add_sat")
+wasm_i8x16_add_saturate(v128_t __a, v128_t __b) {
+  return wasm_i8x16_add_sat(__a, __b);
+}
+
+static __inline__ v128_t __DEPRECATED_FN_ATTRS("wasm_u8x16_add_sat")
+wasm_u8x16_add_saturate(v128_t __a, v128_t __b) {
+  return wasm_u8x16_add_sat(__a, __b);
+}
+
+static __inline__ v128_t __DEPRECATED_FN_ATTRS("wasm_i8x16_sub_sat")
+wasm_i8x16_sub_saturate(v128_t __a, v128_t __b) {
+  return wasm_i8x16_sub_sat(__a, __b);
+}
+
+static __inline__ v128_t __DEPRECATED_FN_ATTRS("wasm_u8x16_sub_sat")
+wasm_u8x16_sub_saturate(v128_t __a, v128_t __b) {
+  return wasm_u8x16_sub_sat(__a, __b);
+}
+
+static __inline__ v128_t __DEPRECATED_FN_ATTRS("wasm_i16x8_add_sat")
+wasm_i16x8_add_saturate(v128_t __a, v128_t __b) {
+  return wasm_i16x8_add_sat(__a, __b);
+}
+
+static __inline__ v128_t __DEPRECATED_FN_ATTRS("wasm_u16x8_add_sat")
+wasm_u16x8_add_saturate(v128_t __a, v128_t __b) {
+  return wasm_u16x8_add_sat(__a, __b);
+}
+
+static __inline__ v128_t __DEPRECATED_FN_ATTRS("wasm_i16x8_sub_sat")
+wasm_i16x8_sub_saturate(v128_t __a, v128_t __b) {
+  return wasm_i16x8_sub_sat(__a, __b);
+}
+
+static __inline__ v128_t __DEPRECATED_FN_ATTRS("wasm_u16x8_sub_sat")
+wasm_u16x8_sub_saturate(v128_t __a, v128_t __b) {
+  return wasm_u16x8_sub_sat(__a, __b);
+}
+
+static __inline__ v128_t __DEPRECATED_FN_ATTRS("wasm_i16x8_extend_low_i8x16")
+wasm_i16x8_widen_low_i8x16(v128_t __a) {
+  return wasm_i16x8_extend_low_i8x16(__a);
+}
+
+static __inline__ v128_t __DEPRECATED_FN_ATTRS("wasm_i16x8_extend_high_i8x16")
+wasm_i16x8_widen_high_i8x16(v128_t __a) {
+  return wasm_i16x8_extend_high_i8x16(__a);
+}
+
+static __inline__ v128_t __DEPRECATED_FN_ATTRS("wasm_u16x8_extend_low_u8x16")
+wasm_i16x8_widen_low_u8x16(v128_t __a) {
+  return wasm_u16x8_extend_low_u8x16(__a);
+}
+
+static __inline__ v128_t __DEPRECATED_FN_ATTRS("wasm_u16x8_extend_high_u8x16")
+wasm_i16x8_widen_high_u8x16(v128_t __a) {
+  return wasm_u16x8_extend_high_u8x16(__a);
+}
+
+static __inline__ v128_t __DEPRECATED_FN_ATTRS("wasm_i32x4_extend_low_i16x8")
+wasm_i32x4_widen_low_i16x8(v128_t __a) {
+  return wasm_i32x4_extend_low_i16x8(__a);
+}
+
+static __inline__ v128_t __DEPRECATED_FN_ATTRS("wasm_i32x4_extend_high_i16x8")
+wasm_i32x4_widen_high_i16x8(v128_t __a) {
+  return wasm_i32x4_extend_high_i16x8(__a);
+}
+
+static __inline__ v128_t __DEPRECATED_FN_ATTRS("wasm_u32x4_extend_low_u16x8")
+wasm_i32x4_widen_low_u16x8(v128_t __a) {
+  return wasm_u32x4_extend_low_u16x8(__a);
+}
+
+static __inline__ v128_t __DEPRECATED_FN_ATTRS("wasm_u32x4_extend_high_u16x8")
+wasm_i32x4_widen_high_u16x8(v128_t __a) {
+  return wasm_u32x4_extend_high_u16x8(__a);
+}
+
+static __inline__ v128_t __DEPRECATED_FN_ATTRS("wasm_i32x4_trunc_sat_f32x4")
+wasm_i32x4_trunc_saturate_f32x4(v128_t __a) {
+  return wasm_i32x4_trunc_sat_f32x4(__a);
+}
+
+static __inline__ v128_t __DEPRECATED_FN_ATTRS("wasm_u32x4_trunc_sat_f32x4")
+wasm_u32x4_trunc_saturate_f32x4(v128_t __a) {
+  return wasm_u32x4_trunc_sat_f32x4(__a);
+}
+
 // Undefine helper macros
 #undef __DEFAULT_FN_ATTRS
+#undef __DEPRECATED_FN_ATTRS
 
 #endif // __WASM_SIMD128_H
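As a sketch of the transition behavior added above (hypothetical user code, not part of this commit): calling an old name still compiles, but the deprecated attribute now points at the replacement.

// deprecated.c -- old names forward to the new ones and warn
#include <wasm_simd128.h>

v128_t f(v128_t a, v128_t b) {
  // expected diagnostic (wording approximate):
  //   warning: 'wasm_i8x16_add_saturate' is deprecated: use wasm_i8x16_add_sat
  //   instead [-Wdeprecated-declarations]
  return wasm_i8x16_add_saturate(a, b);
}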

clang/test/CodeGen/builtins-wasm.c
@@ -400,6 +400,12 @@ i32x4 abs_i32x4(i32x4 v) {
   // WEBASSEMBLY-NEXT: ret
 }
 
+i64x2 abs_i64x2(i64x2 v) {
+  return __builtin_wasm_abs_i64x2(v);
+  // WEBASSEMBLY: call <2 x i64> @llvm.abs.v2i64(<2 x i64> %v, i1 false)
+  // WEBASSEMBLY-NEXT: ret
+}
+
 i8x16 min_s_i8x16(i8x16 x, i8x16 y) {
   return __builtin_wasm_min_s_i8x16(x, y);
   // WEBASSEMBLY: %0 = icmp slt <16 x i8> %x, %y
@@ -650,30 +656,12 @@ i8x16 popcnt(i8x16 x) {
   // WEBASSEMBLY-NEXT: ret
 }
 
-int any_true_i8x16(i8x16 x) {
-  return __builtin_wasm_any_true_i8x16(x);
+int any_true_v128(i8x16 x) {
+  return __builtin_wasm_any_true_v128(x);
   // WEBASSEMBLY: call i32 @llvm.wasm.anytrue.v16i8(<16 x i8> %x)
   // WEBASSEMBLY: ret
 }
 
-int any_true_i16x8(i16x8 x) {
-  return __builtin_wasm_any_true_i16x8(x);
-  // WEBASSEMBLY: call i32 @llvm.wasm.anytrue.v8i16(<8 x i16> %x)
-  // WEBASSEMBLY: ret
-}
-
-int any_true_i32x4(i32x4 x) {
-  return __builtin_wasm_any_true_i32x4(x);
-  // WEBASSEMBLY: call i32 @llvm.wasm.anytrue.v4i32(<4 x i32> %x)
-  // WEBASSEMBLY: ret
-}
-
-int any_true_i64x2(i64x2 x) {
-  return __builtin_wasm_any_true_i64x2(x);
-  // WEBASSEMBLY: call i32 @llvm.wasm.anytrue.v2i64(<2 x i64> %x)
-  // WEBASSEMBLY: ret
-}
-
 int all_true_i8x16(i8x16 x) {
   return __builtin_wasm_all_true_i8x16(x);
   // WEBASSEMBLY: call i32 @llvm.wasm.alltrue.v16i8(<16 x i8> %x)
@@ -928,13 +916,13 @@ i64x2 load64_zero(long long *p) {
   // WEBASSEMBLY: ret
 }
 
-i8x16 swizzle_v8x16(i8x16 x, i8x16 y) {
-  return __builtin_wasm_swizzle_v8x16(x, y);
+i8x16 swizzle_i8x16(i8x16 x, i8x16 y) {
+  return __builtin_wasm_swizzle_i8x16(x, y);
   // WEBASSEMBLY: call <16 x i8> @llvm.wasm.swizzle(<16 x i8> %x, <16 x i8> %y)
 }
 
 i8x16 shuffle(i8x16 x, i8x16 y) {
-  return __builtin_wasm_shuffle_v8x16(x, y, 0, 1, 2, 3, 4, 5, 6, 7,
+  return __builtin_wasm_shuffle_i8x16(x, y, 0, 1, 2, 3, 4, 5, 6, 7,
                                       8, 9, 10, 11, 12, 13, 14, 15);
   // WEBASSEMBLY: call <16 x i8> @llvm.wasm.shuffle(<16 x i8> %x, <16 x i8> %y,
   // WEBASSEMBLY-SAME: i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7,

clang/test/Headers/wasm.c
@@ -42,6 +42,19 @@ int main() {
   wasm_v32x4_shuffle(v, v, 0, 1, 2, 3);
   wasm_v64x2_shuffle(v, v, 0, 1);
 
+  uint8_t lane8 = 0;
+  uint16_t lane16 = 0;
+  uint32_t lane32 = 0;
+  uint64_t lane64 = 0;
+  v = wasm_v128_load8_lane(&lane8, v, 0);
+  v = wasm_v128_load16_lane(&lane16, v, 0);
+  v = wasm_v128_load32_lane(&lane32, v, 0);
+  v = wasm_v128_load64_lane(&lane64, v, 0);
+  wasm_v128_store8_lane(&lane8, v, 15);
+  wasm_v128_store16_lane(&lane16, v, 7);
+  wasm_v128_store32_lane(&lane32, v, 3);
+  wasm_v128_store64_lane(&lane64, v, 1);
+
   return 0;
 }
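To try the updated header, a typical invocation (illustrative; assumes a clang new enough to ship this version of wasm_simd128.h):

clang --target=wasm32 -msimd128 -O2 -c example.c -o example.o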