[WebAssembly] Finalize SIMD names and opcodes
Updates the names (e.g. widen => extend, saturate => sat) and opcodes of all SIMD instructions to match the finalized SIMD spec. Deliberately does not change the public interface in wasm_simd128.h yet; that will require more care.

Depends on D98466.

Differential Revision: https://reviews.llvm.org/D98676
Parent: 2f2ae08da9
Commit: f5764a8654
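As a rough illustration of what the rename means at the source level (this sketch is not part of the diff; the function names, the build flags, and the use of the header's __i8x16/__i32x4 helper typedefs are assumptions), the Clang builtins now use the _sat/extend spellings while the wasm_simd128.h wrappers keep their old names for now:

// Hedged sketch, not from the commit: calling the renamed builtins directly.
// Assumed build line: clang --target=wasm32 -msimd128 -O2 -c simd_rename_sketch.c
#include <wasm_simd128.h>

// i8x16.add_sat_s: previously __builtin_wasm_add_saturate_s_i8x16.
v128_t add_sat_sketch(v128_t a, v128_t b) {
  return (v128_t)__builtin_wasm_add_sat_s_i8x16((__i8x16)a, (__i8x16)b);
}

// i64x2.extend_low_i32x4_s: previously __builtin_wasm_widen_low_s_i32x4_i64x2.
// __i32x4 is assumed to be the header's internal vector typedef.
v128_t extend_low_sketch(v128_t a) {
  return (v128_t)__builtin_wasm_extend_low_s_i32x4_i64x2((__i32x4)a);
}

The header-level names (wasm_i8x16_add_saturate and friends) are deliberately left alone by this commit, as the message notes.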
@@ -84,15 +84,15 @@ TARGET_BUILTIN(__builtin_wasm_replace_lane_i64x2, "V2LLiV2LLiIiLLi", "nc", "simd
TARGET_BUILTIN(__builtin_wasm_replace_lane_f32x4, "V4fV4fIif", "nc", "simd128")
TARGET_BUILTIN(__builtin_wasm_replace_lane_f64x2, "V2dV2dIid", "nc", "simd128")

TARGET_BUILTIN(__builtin_wasm_add_saturate_s_i8x16, "V16ScV16ScV16Sc", "nc", "simd128")
TARGET_BUILTIN(__builtin_wasm_add_saturate_u_i8x16, "V16UcV16UcV16Uc", "nc", "simd128")
TARGET_BUILTIN(__builtin_wasm_add_saturate_s_i16x8, "V8sV8sV8s", "nc", "simd128")
TARGET_BUILTIN(__builtin_wasm_add_saturate_u_i16x8, "V8UsV8UsV8Us", "nc", "simd128")
TARGET_BUILTIN(__builtin_wasm_add_sat_s_i8x16, "V16ScV16ScV16Sc", "nc", "simd128")
TARGET_BUILTIN(__builtin_wasm_add_sat_u_i8x16, "V16UcV16UcV16Uc", "nc", "simd128")
TARGET_BUILTIN(__builtin_wasm_add_sat_s_i16x8, "V8sV8sV8s", "nc", "simd128")
TARGET_BUILTIN(__builtin_wasm_add_sat_u_i16x8, "V8UsV8UsV8Us", "nc", "simd128")

TARGET_BUILTIN(__builtin_wasm_sub_saturate_s_i8x16, "V16ScV16ScV16Sc", "nc", "simd128")
TARGET_BUILTIN(__builtin_wasm_sub_saturate_u_i8x16, "V16UcV16UcV16Uc", "nc", "simd128")
TARGET_BUILTIN(__builtin_wasm_sub_saturate_s_i16x8, "V8sV8sV8s", "nc", "simd128")
TARGET_BUILTIN(__builtin_wasm_sub_saturate_u_i16x8, "V8UsV8UsV8Us", "nc", "simd128")
TARGET_BUILTIN(__builtin_wasm_sub_sat_s_i8x16, "V16ScV16ScV16Sc", "nc", "simd128")
TARGET_BUILTIN(__builtin_wasm_sub_sat_u_i8x16, "V16UcV16UcV16Uc", "nc", "simd128")
TARGET_BUILTIN(__builtin_wasm_sub_sat_s_i16x8, "V8sV8sV8s", "nc", "simd128")
TARGET_BUILTIN(__builtin_wasm_sub_sat_u_i16x8, "V8UsV8UsV8Us", "nc", "simd128")

TARGET_BUILTIN(__builtin_wasm_abs_i8x16, "V16ScV16Sc", "nc", "simd128")
TARGET_BUILTIN(__builtin_wasm_abs_i16x8, "V8sV8s", "nc", "simd128")

@@ -116,7 +116,7 @@ TARGET_BUILTIN(__builtin_wasm_avgr_u_i16x8, "V8UsV8UsV8Us", "nc", "simd128")

TARGET_BUILTIN(__builtin_wasm_popcnt_i8x16, "V16ScV16Sc", "nc", "simd128")

TARGET_BUILTIN(__builtin_wasm_q15mulr_saturate_s_i16x8, "V8sV8sV8s", "nc", "simd128")
TARGET_BUILTIN(__builtin_wasm_q15mulr_sat_s_i16x8, "V8sV8sV8s", "nc", "simd128")

TARGET_BUILTIN(__builtin_wasm_extmul_low_i8x16_s_i16x8, "V8sV16ScV16Sc", "nc", "simd128")
TARGET_BUILTIN(__builtin_wasm_extmul_high_i8x16_s_i16x8, "V8sV16ScV16Sc", "nc", "simd128")

@@ -191,15 +191,15 @@ TARGET_BUILTIN(__builtin_wasm_narrow_u_i8x16_i16x8, "V16UcV8UsV8Us", "nc", "simd
TARGET_BUILTIN(__builtin_wasm_narrow_s_i16x8_i32x4, "V8sV4iV4i", "nc", "simd128")
TARGET_BUILTIN(__builtin_wasm_narrow_u_i16x8_i32x4, "V8UsV4UiV4Ui", "nc", "simd128")

TARGET_BUILTIN(__builtin_wasm_widen_low_s_i32x4_i64x2, "V2LLiV4i", "nc", "simd128")
TARGET_BUILTIN(__builtin_wasm_widen_high_s_i32x4_i64x2, "V2LLiV4i", "nc", "simd128")
TARGET_BUILTIN(__builtin_wasm_widen_low_u_i32x4_i64x2, "V2LLUiV4Ui", "nc", "simd128")
TARGET_BUILTIN(__builtin_wasm_widen_high_u_i32x4_i64x2, "V2LLUiV4Ui", "nc", "simd128")
TARGET_BUILTIN(__builtin_wasm_extend_low_s_i32x4_i64x2, "V2LLiV4i", "nc", "simd128")
TARGET_BUILTIN(__builtin_wasm_extend_high_s_i32x4_i64x2, "V2LLiV4i", "nc", "simd128")
TARGET_BUILTIN(__builtin_wasm_extend_low_u_i32x4_i64x2, "V2LLUiV4Ui", "nc", "simd128")
TARGET_BUILTIN(__builtin_wasm_extend_high_u_i32x4_i64x2, "V2LLUiV4Ui", "nc", "simd128")

TARGET_BUILTIN(__builtin_wasm_convert_low_s_i32x4_f64x2, "V2dV4i", "nc", "simd128")
TARGET_BUILTIN(__builtin_wasm_convert_low_u_i32x4_f64x2, "V2dV4Ui", "nc", "simd128")
TARGET_BUILTIN(__builtin_wasm_trunc_saturate_zero_s_f64x2_i32x4, "V4iV2d", "nc", "simd128")
TARGET_BUILTIN(__builtin_wasm_trunc_saturate_zero_u_f64x2_i32x4, "V4UiV2d", "nc", "simd128")
TARGET_BUILTIN(__builtin_wasm_trunc_sat_zero_s_f64x2_i32x4, "V4iV2d", "nc", "simd128")
TARGET_BUILTIN(__builtin_wasm_trunc_sat_zero_u_f64x2_i32x4, "V4UiV2d", "nc", "simd128")
TARGET_BUILTIN(__builtin_wasm_demote_zero_f64x2_f32x4, "V4fV2d", "nc", "simd128")
TARGET_BUILTIN(__builtin_wasm_promote_low_f32x4_f64x2, "V2dV4f", "nc", "simd128")

@@ -17194,31 +17194,31 @@ Value *CodeGenFunction::EmitWebAssemblyBuiltinExpr(unsigned BuiltinID,
llvm_unreachable("unexpected builtin ID");
}
}
case WebAssembly::BI__builtin_wasm_add_saturate_s_i8x16:
case WebAssembly::BI__builtin_wasm_add_saturate_u_i8x16:
case WebAssembly::BI__builtin_wasm_add_saturate_s_i16x8:
case WebAssembly::BI__builtin_wasm_add_saturate_u_i16x8:
case WebAssembly::BI__builtin_wasm_sub_saturate_s_i8x16:
case WebAssembly::BI__builtin_wasm_sub_saturate_u_i8x16:
case WebAssembly::BI__builtin_wasm_sub_saturate_s_i16x8:
case WebAssembly::BI__builtin_wasm_sub_saturate_u_i16x8: {
case WebAssembly::BI__builtin_wasm_add_sat_s_i8x16:
case WebAssembly::BI__builtin_wasm_add_sat_u_i8x16:
case WebAssembly::BI__builtin_wasm_add_sat_s_i16x8:
case WebAssembly::BI__builtin_wasm_add_sat_u_i16x8:
case WebAssembly::BI__builtin_wasm_sub_sat_s_i8x16:
case WebAssembly::BI__builtin_wasm_sub_sat_u_i8x16:
case WebAssembly::BI__builtin_wasm_sub_sat_s_i16x8:
case WebAssembly::BI__builtin_wasm_sub_sat_u_i16x8: {
unsigned IntNo;
switch (BuiltinID) {
case WebAssembly::BI__builtin_wasm_add_saturate_s_i8x16:
case WebAssembly::BI__builtin_wasm_add_saturate_s_i16x8:
case WebAssembly::BI__builtin_wasm_add_sat_s_i8x16:
case WebAssembly::BI__builtin_wasm_add_sat_s_i16x8:
IntNo = Intrinsic::sadd_sat;
break;
case WebAssembly::BI__builtin_wasm_add_saturate_u_i8x16:
case WebAssembly::BI__builtin_wasm_add_saturate_u_i16x8:
case WebAssembly::BI__builtin_wasm_add_sat_u_i8x16:
case WebAssembly::BI__builtin_wasm_add_sat_u_i16x8:
IntNo = Intrinsic::uadd_sat;
break;
case WebAssembly::BI__builtin_wasm_sub_saturate_s_i8x16:
case WebAssembly::BI__builtin_wasm_sub_saturate_s_i16x8:
IntNo = Intrinsic::wasm_sub_saturate_signed;
case WebAssembly::BI__builtin_wasm_sub_sat_s_i8x16:
case WebAssembly::BI__builtin_wasm_sub_sat_s_i16x8:
IntNo = Intrinsic::wasm_sub_sat_signed;
break;
case WebAssembly::BI__builtin_wasm_sub_saturate_u_i8x16:
case WebAssembly::BI__builtin_wasm_sub_saturate_u_i16x8:
IntNo = Intrinsic::wasm_sub_saturate_unsigned;
case WebAssembly::BI__builtin_wasm_sub_sat_u_i8x16:
case WebAssembly::BI__builtin_wasm_sub_sat_u_i16x8:
IntNo = Intrinsic::wasm_sub_sat_unsigned;
break;
default:
llvm_unreachable("unexpected builtin ID");

@@ -17286,11 +17286,10 @@ Value *CodeGenFunction::EmitWebAssemblyBuiltinExpr(unsigned BuiltinID,
ConvertType(E->getType()));
return Builder.CreateCall(Callee, {LHS, RHS});
}
case WebAssembly::BI__builtin_wasm_q15mulr_saturate_s_i16x8: {
case WebAssembly::BI__builtin_wasm_q15mulr_sat_s_i16x8: {
Value *LHS = EmitScalarExpr(E->getArg(0));
Value *RHS = EmitScalarExpr(E->getArg(1));
Function *Callee =
CGM.getIntrinsic(Intrinsic::wasm_q15mulr_saturate_signed);
Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_q15mulr_sat_signed);
return Builder.CreateCall(Callee, {LHS, RHS});
}
case WebAssembly::BI__builtin_wasm_extmul_low_i8x16_s_i16x8:

@@ -17456,24 +17455,24 @@ Value *CodeGenFunction::EmitWebAssemblyBuiltinExpr(unsigned BuiltinID,
CGM.getIntrinsic(IntNo, {ConvertType(E->getType()), Low->getType()});
return Builder.CreateCall(Callee, {Low, High});
}
case WebAssembly::BI__builtin_wasm_widen_low_s_i32x4_i64x2:
case WebAssembly::BI__builtin_wasm_widen_high_s_i32x4_i64x2:
case WebAssembly::BI__builtin_wasm_widen_low_u_i32x4_i64x2:
case WebAssembly::BI__builtin_wasm_widen_high_u_i32x4_i64x2: {
case WebAssembly::BI__builtin_wasm_extend_low_s_i32x4_i64x2:
case WebAssembly::BI__builtin_wasm_extend_high_s_i32x4_i64x2:
case WebAssembly::BI__builtin_wasm_extend_low_u_i32x4_i64x2:
case WebAssembly::BI__builtin_wasm_extend_high_u_i32x4_i64x2: {
Value *Vec = EmitScalarExpr(E->getArg(0));
unsigned IntNo;
switch (BuiltinID) {
case WebAssembly::BI__builtin_wasm_widen_low_s_i32x4_i64x2:
IntNo = Intrinsic::wasm_widen_low_signed;
case WebAssembly::BI__builtin_wasm_extend_low_s_i32x4_i64x2:
IntNo = Intrinsic::wasm_extend_low_signed;
break;
case WebAssembly::BI__builtin_wasm_widen_high_s_i32x4_i64x2:
IntNo = Intrinsic::wasm_widen_high_signed;
case WebAssembly::BI__builtin_wasm_extend_high_s_i32x4_i64x2:
IntNo = Intrinsic::wasm_extend_high_signed;
break;
case WebAssembly::BI__builtin_wasm_widen_low_u_i32x4_i64x2:
IntNo = Intrinsic::wasm_widen_low_unsigned;
case WebAssembly::BI__builtin_wasm_extend_low_u_i32x4_i64x2:
IntNo = Intrinsic::wasm_extend_low_unsigned;
break;
case WebAssembly::BI__builtin_wasm_widen_high_u_i32x4_i64x2:
IntNo = Intrinsic::wasm_widen_high_unsigned;
case WebAssembly::BI__builtin_wasm_extend_high_u_i32x4_i64x2:
IntNo = Intrinsic::wasm_extend_high_unsigned;
break;
default:
llvm_unreachable("unexpected builtin ID");

@@ -17498,16 +17497,16 @@ Value *CodeGenFunction::EmitWebAssemblyBuiltinExpr(unsigned BuiltinID,
Function *Callee = CGM.getIntrinsic(IntNo);
return Builder.CreateCall(Callee, Vec);
}
case WebAssembly::BI__builtin_wasm_trunc_saturate_zero_s_f64x2_i32x4:
case WebAssembly::BI__builtin_wasm_trunc_saturate_zero_u_f64x2_i32x4: {
case WebAssembly::BI__builtin_wasm_trunc_sat_zero_s_f64x2_i32x4:
case WebAssembly::BI__builtin_wasm_trunc_sat_zero_u_f64x2_i32x4: {
Value *Vec = EmitScalarExpr(E->getArg(0));
unsigned IntNo;
switch (BuiltinID) {
case WebAssembly::BI__builtin_wasm_trunc_saturate_zero_s_f64x2_i32x4:
IntNo = Intrinsic::wasm_trunc_saturate_zero_signed;
case WebAssembly::BI__builtin_wasm_trunc_sat_zero_s_f64x2_i32x4:
IntNo = Intrinsic::wasm_trunc_sat_zero_signed;
break;
case WebAssembly::BI__builtin_wasm_trunc_saturate_zero_u_f64x2_i32x4:
IntNo = Intrinsic::wasm_trunc_saturate_zero_unsigned;
case WebAssembly::BI__builtin_wasm_trunc_sat_zero_u_f64x2_i32x4:
IntNo = Intrinsic::wasm_trunc_sat_zero_unsigned;
break;
default:
llvm_unreachable("unexpected builtin ID");

@@ -616,14 +616,12 @@ static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i8x16_add(v128_t __a,

static __inline__ v128_t __DEFAULT_FN_ATTRS
wasm_i8x16_add_saturate(v128_t __a, v128_t __b) {
return (v128_t)__builtin_wasm_add_saturate_s_i8x16((__i8x16)__a,
(__i8x16)__b);
return (v128_t)__builtin_wasm_add_sat_s_i8x16((__i8x16)__a, (__i8x16)__b);
}

static __inline__ v128_t __DEFAULT_FN_ATTRS
wasm_u8x16_add_saturate(v128_t __a, v128_t __b) {
return (v128_t)__builtin_wasm_add_saturate_u_i8x16((__u8x16)__a,
(__u8x16)__b);
return (v128_t)__builtin_wasm_add_sat_u_i8x16((__u8x16)__a, (__u8x16)__b);
}

static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i8x16_sub(v128_t __a,

@@ -633,14 +631,12 @@ static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i8x16_sub(v128_t __a,

static __inline__ v128_t __DEFAULT_FN_ATTRS
wasm_i8x16_sub_saturate(v128_t __a, v128_t __b) {
return (v128_t)__builtin_wasm_sub_saturate_s_i8x16((__i8x16)__a,
(__i8x16)__b);
return (v128_t)__builtin_wasm_sub_sat_s_i8x16((__i8x16)__a, (__i8x16)__b);
}

static __inline__ v128_t __DEFAULT_FN_ATTRS
wasm_u8x16_sub_saturate(v128_t __a, v128_t __b) {
return (v128_t)__builtin_wasm_sub_saturate_u_i8x16((__u8x16)__a,
(__u8x16)__b);
return (v128_t)__builtin_wasm_sub_sat_u_i8x16((__u8x16)__a, (__u8x16)__b);
}

static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i8x16_min(v128_t __a,

@@ -706,14 +702,12 @@ static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i16x8_add(v128_t __a,

static __inline__ v128_t __DEFAULT_FN_ATTRS
wasm_i16x8_add_saturate(v128_t __a, v128_t __b) {
return (v128_t)__builtin_wasm_add_saturate_s_i16x8((__i16x8)__a,
(__i16x8)__b);
return (v128_t)__builtin_wasm_add_sat_s_i16x8((__i16x8)__a, (__i16x8)__b);
}

static __inline__ v128_t __DEFAULT_FN_ATTRS
wasm_u16x8_add_saturate(v128_t __a, v128_t __b) {
return (v128_t)__builtin_wasm_add_saturate_u_i16x8((__u16x8)__a,
(__u16x8)__b);
return (v128_t)__builtin_wasm_add_sat_u_i16x8((__u16x8)__a, (__u16x8)__b);
}

static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i16x8_sub(v128_t __a,

@@ -723,14 +717,12 @@ static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i16x8_sub(v128_t __a,

static __inline__ v128_t __DEFAULT_FN_ATTRS
wasm_i16x8_sub_saturate(v128_t __a, v128_t __b) {
return (v128_t)__builtin_wasm_sub_saturate_s_i16x8((__i16x8)__a,
(__i16x8)__b);
return (v128_t)__builtin_wasm_sub_sat_s_i16x8((__i16x8)__a, (__i16x8)__b);
}

static __inline__ v128_t __DEFAULT_FN_ATTRS
wasm_u16x8_sub_saturate(v128_t __a, v128_t __b) {
return (v128_t)__builtin_wasm_sub_saturate_u_i16x8((__u16x8)__a,
(__u16x8)__b);
return (v128_t)__builtin_wasm_sub_sat_u_i16x8((__u16x8)__a, (__u16x8)__b);
}

static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i16x8_mul(v128_t __a,

@@ -340,44 +340,44 @@ void store64_lane(long long *p, i64x2 v) {
// WEBASSEMBLY-NEXT: ret
}

i8x16 add_saturate_s_i8x16(i8x16 x, i8x16 y) {
return __builtin_wasm_add_saturate_s_i8x16(x, y);
i8x16 add_sat_s_i8x16(i8x16 x, i8x16 y) {
return __builtin_wasm_add_sat_s_i8x16(x, y);
// WEBASSEMBLY: call <16 x i8> @llvm.sadd.sat.v16i8(
// WEBASSEMBLY-SAME: <16 x i8> %x, <16 x i8> %y)
// WEBASSEMBLY-NEXT: ret
}

u8x16 add_saturate_u_i8x16(u8x16 x, u8x16 y) {
return __builtin_wasm_add_saturate_u_i8x16(x, y);
u8x16 add_sat_u_i8x16(u8x16 x, u8x16 y) {
return __builtin_wasm_add_sat_u_i8x16(x, y);
// WEBASSEMBLY: call <16 x i8> @llvm.uadd.sat.v16i8(
// WEBASSEMBLY-SAME: <16 x i8> %x, <16 x i8> %y)
// WEBASSEMBLY-NEXT: ret
}

i16x8 add_saturate_s_i16x8(i16x8 x, i16x8 y) {
return __builtin_wasm_add_saturate_s_i16x8(x, y);
i16x8 add_sat_s_i16x8(i16x8 x, i16x8 y) {
return __builtin_wasm_add_sat_s_i16x8(x, y);
// WEBASSEMBLY: call <8 x i16> @llvm.sadd.sat.v8i16(
// WEBASSEMBLY-SAME: <8 x i16> %x, <8 x i16> %y)
// WEBASSEMBLY-NEXT: ret
}

u16x8 add_saturate_u_i16x8(u16x8 x, u16x8 y) {
return __builtin_wasm_add_saturate_u_i16x8(x, y);
u16x8 add_sat_u_i16x8(u16x8 x, u16x8 y) {
return __builtin_wasm_add_sat_u_i16x8(x, y);
// WEBASSEMBLY: call <8 x i16> @llvm.uadd.sat.v8i16(
// WEBASSEMBLY-SAME: <8 x i16> %x, <8 x i16> %y)
// WEBASSEMBLY-NEXT: ret
}

i8x16 sub_saturate_s_i8x16(i8x16 x, i8x16 y) {
return __builtin_wasm_sub_saturate_s_i8x16(x, y);
// WEBASSEMBLY: call <16 x i8> @llvm.wasm.sub.saturate.signed.v16i8(
i8x16 sub_sat_s_i8x16(i8x16 x, i8x16 y) {
return __builtin_wasm_sub_sat_s_i8x16(x, y);
// WEBASSEMBLY: call <16 x i8> @llvm.wasm.sub.sat.signed.v16i8(
// WEBASSEMBLY-SAME: <16 x i8> %x, <16 x i8> %y)
// WEBASSEMBLY-NEXT: ret
}

u8x16 sub_saturate_u_i8x16(u8x16 x, u8x16 y) {
return __builtin_wasm_sub_saturate_u_i8x16(x, y);
// WEBASSEMBLY: call <16 x i8> @llvm.wasm.sub.saturate.unsigned.v16i8(
u8x16 sub_sat_u_i8x16(u8x16 x, u8x16 y) {
return __builtin_wasm_sub_sat_u_i8x16(x, y);
// WEBASSEMBLY: call <16 x i8> @llvm.wasm.sub.sat.unsigned.v16i8(
// WEBASSEMBLY-SAME: <16 x i8> %x, <16 x i8> %y)
// WEBASSEMBLY-NEXT: ret
}

@@ -484,16 +484,16 @@ u32x4 max_u_i32x4(u32x4 x, u32x4 y) {
// WEBASSEMBLY-NEXT: ret <4 x i32> %1
}

i16x8 sub_saturate_s_i16x8(i16x8 x, i16x8 y) {
return __builtin_wasm_sub_saturate_s_i16x8(x, y);
// WEBASSEMBLY: call <8 x i16> @llvm.wasm.sub.saturate.signed.v8i16(
i16x8 sub_sat_s_i16x8(i16x8 x, i16x8 y) {
return __builtin_wasm_sub_sat_s_i16x8(x, y);
// WEBASSEMBLY: call <8 x i16> @llvm.wasm.sub.sat.signed.v8i16(
// WEBASSEMBLY-SAME: <8 x i16> %x, <8 x i16> %y)
// WEBASSEMBLY-NEXT: ret
}

u16x8 sub_saturate_u_i16x8(u16x8 x, u16x8 y) {
return __builtin_wasm_sub_saturate_u_i16x8(x, y);
// WEBASSEMBLY: call <8 x i16> @llvm.wasm.sub.saturate.unsigned.v8i16(
u16x8 sub_sat_u_i16x8(u16x8 x, u16x8 y) {
return __builtin_wasm_sub_sat_u_i16x8(x, y);
// WEBASSEMBLY: call <8 x i16> @llvm.wasm.sub.sat.unsigned.v8i16(
// WEBASSEMBLY-SAME: <8 x i16> %x, <8 x i16> %y)
// WEBASSEMBLY-NEXT: ret
}

@@ -512,9 +512,9 @@ u16x8 avgr_u_i16x8(u16x8 x, u16x8 y) {
// WEBASSEMBLY-NEXT: ret
}

i16x8 q15mulr_saturate_s_i16x8(i16x8 x, i16x8 y) {
return __builtin_wasm_q15mulr_saturate_s_i16x8(x, y);
// WEBASSEMBLY: call <8 x i16> @llvm.wasm.q15mulr.saturate.signed(
i16x8 q15mulr_sat_s_i16x8(i16x8 x, i16x8 y) {
return __builtin_wasm_q15mulr_sat_s_i16x8(x, y);
// WEBASSEMBLY: call <8 x i16> @llvm.wasm.q15mulr.sat.signed(
// WEBASSEMBLY-SAME: <8 x i16> %x, <8 x i16> %y)
// WEBASSEMBLY-NEXT: ret
}

@@ -896,27 +896,27 @@ u16x8 narrow_u_i16x8_i32x4(u32x4 low, u32x4 high) {
// WEBASSEMBLY: ret
}

i64x2 widen_low_s_i32x4_i64x2(i32x4 x) {
return __builtin_wasm_widen_low_s_i32x4_i64x2(x);
// WEBASSEMBLY: call <2 x i64> @llvm.wasm.widen.low.signed(<4 x i32> %x)
i64x2 extend_low_s_i32x4_i64x2(i32x4 x) {
return __builtin_wasm_extend_low_s_i32x4_i64x2(x);
// WEBASSEMBLY: call <2 x i64> @llvm.wasm.extend.low.signed(<4 x i32> %x)
// WEBASSEMBLY: ret
}

i64x2 widen_high_s_i32x4_i64x2(i32x4 x) {
return __builtin_wasm_widen_high_s_i32x4_i64x2(x);
// WEBASSEMBLY: call <2 x i64> @llvm.wasm.widen.high.signed(<4 x i32> %x)
i64x2 extend_high_s_i32x4_i64x2(i32x4 x) {
return __builtin_wasm_extend_high_s_i32x4_i64x2(x);
// WEBASSEMBLY: call <2 x i64> @llvm.wasm.extend.high.signed(<4 x i32> %x)
// WEBASSEMBLY: ret
}

u64x2 widen_low_u_i32x4_i64x2(u32x4 x) {
return __builtin_wasm_widen_low_u_i32x4_i64x2(x);
// WEBASSEMBLY: call <2 x i64> @llvm.wasm.widen.low.unsigned(<4 x i32> %x)
u64x2 extend_low_u_i32x4_i64x2(u32x4 x) {
return __builtin_wasm_extend_low_u_i32x4_i64x2(x);
// WEBASSEMBLY: call <2 x i64> @llvm.wasm.extend.low.unsigned(<4 x i32> %x)
// WEBASSEMBLY: ret
}

u64x2 widen_high_u_i32x4_i64x2(u32x4 x) {
return __builtin_wasm_widen_high_u_i32x4_i64x2(x);
// WEBASSEMBLY: call <2 x i64> @llvm.wasm.widen.high.unsigned(<4 x i32> %x)
u64x2 extend_high_u_i32x4_i64x2(u32x4 x) {
return __builtin_wasm_extend_high_u_i32x4_i64x2(x);
// WEBASSEMBLY: call <2 x i64> @llvm.wasm.extend.high.unsigned(<4 x i32> %x)
// WEBASSEMBLY: ret
}

@@ -932,15 +932,15 @@ f64x2 convert_low_u_i32x4_f64x2(u32x4 x) {
// WEBASSEMBLY: ret
}

i32x4 trunc_saturate_zero_s_f64x2_i32x4(f64x2 x) {
return __builtin_wasm_trunc_saturate_zero_s_f64x2_i32x4(x);
// WEBASSEMBLY: call <4 x i32> @llvm.wasm.trunc.saturate.zero.signed(<2 x double> %x)
i32x4 trunc_sat_zero_s_f64x2_i32x4(f64x2 x) {
return __builtin_wasm_trunc_sat_zero_s_f64x2_i32x4(x);
// WEBASSEMBLY: call <4 x i32> @llvm.wasm.trunc.sat.zero.signed(<2 x double> %x)
// WEBASSEMBLY: ret
}

u32x4 trunc_saturate_zero_u_f64x2_i32x4(f64x2 x) {
return __builtin_wasm_trunc_saturate_zero_u_f64x2_i32x4(x);
// WEBASSEMBLY: call <4 x i32> @llvm.wasm.trunc.saturate.zero.unsigned(<2 x double> %x)
u32x4 trunc_sat_zero_u_f64x2_i32x4(f64x2 x) {
return __builtin_wasm_trunc_sat_zero_u_f64x2_i32x4(x);
// WEBASSEMBLY: call <4 x i32> @llvm.wasm.trunc.sat.zero.unsigned(<2 x double> %x)
// WEBASSEMBLY: ret
}

@@ -115,11 +115,11 @@ def int_wasm_shuffle :
llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty,
llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
[IntrNoMem, IntrSpeculatable]>;
def int_wasm_sub_saturate_signed :
def int_wasm_sub_sat_signed :
Intrinsic<[llvm_anyvector_ty],
[LLVMMatchType<0>, LLVMMatchType<0>],
[IntrNoMem, IntrSpeculatable]>;
def int_wasm_sub_saturate_unsigned :
def int_wasm_sub_sat_unsigned :
Intrinsic<[llvm_anyvector_ty],
[LLVMMatchType<0>, LLVMMatchType<0>],
[IntrNoMem, IntrSpeculatable]>;

@@ -158,17 +158,17 @@ def int_wasm_narrow_unsigned :
[IntrNoMem, IntrSpeculatable]>;

// TODO: Replace these intrinsics with normal ISel patterns once i32x4 to i64x2
// widening is merged to the proposal.
def int_wasm_widen_low_signed :
// extending is merged to the proposal.
def int_wasm_extend_low_signed :
Intrinsic<[llvm_v2i64_ty], [llvm_v4i32_ty], [IntrNoMem, IntrSpeculatable]>;
def int_wasm_widen_high_signed :
def int_wasm_extend_high_signed :
Intrinsic<[llvm_v2i64_ty], [llvm_v4i32_ty], [IntrNoMem, IntrSpeculatable]>;
def int_wasm_widen_low_unsigned :
def int_wasm_extend_low_unsigned :
Intrinsic<[llvm_v2i64_ty], [llvm_v4i32_ty], [IntrNoMem, IntrSpeculatable]>;
def int_wasm_widen_high_unsigned :
def int_wasm_extend_high_unsigned :
Intrinsic<[llvm_v2i64_ty], [llvm_v4i32_ty], [IntrNoMem, IntrSpeculatable]>;

def int_wasm_q15mulr_saturate_signed :
def int_wasm_q15mulr_sat_signed :
Intrinsic<[llvm_v8i16_ty],
[llvm_v8i16_ty, llvm_v8i16_ty],
[IntrNoMem, IntrSpeculatable]>;

@@ -308,10 +308,10 @@ def int_wasm_convert_low_signed :
def int_wasm_convert_low_unsigned :
Intrinsic<[llvm_v2f64_ty], [llvm_v4i32_ty],
[IntrNoMem, IntrSpeculatable]>;
def int_wasm_trunc_saturate_zero_signed :
def int_wasm_trunc_sat_zero_signed :
Intrinsic<[llvm_v4i32_ty], [llvm_v2f64_ty],
[IntrNoMem, IntrSpeculatable]>;
def int_wasm_trunc_saturate_zero_unsigned :
def int_wasm_trunc_sat_zero_unsigned :
Intrinsic<[llvm_v4i32_ty], [llvm_v2f64_ty],
[IntrNoMem, IntrSpeculatable]>;
def int_wasm_demote_zero :

@@ -29,10 +29,10 @@ HANDLE_NODETYPE(SWIZZLE)
HANDLE_NODETYPE(VEC_SHL)
HANDLE_NODETYPE(VEC_SHR_S)
HANDLE_NODETYPE(VEC_SHR_U)
HANDLE_NODETYPE(WIDEN_LOW_S)
HANDLE_NODETYPE(WIDEN_LOW_U)
HANDLE_NODETYPE(WIDEN_HIGH_S)
HANDLE_NODETYPE(WIDEN_HIGH_U)
HANDLE_NODETYPE(EXTEND_LOW_S)
HANDLE_NODETYPE(EXTEND_LOW_U)
HANDLE_NODETYPE(EXTEND_HIGH_S)
HANDLE_NODETYPE(EXTEND_HIGH_U)
HANDLE_NODETYPE(THROW)
HANDLE_NODETYPE(CATCH)
HANDLE_NODETYPE(MEMORY_COPY)

@@ -1898,8 +1898,8 @@ performVECTOR_SHUFFLECombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI) {
return DAG.getBitcast(DstType, NewShuffle);
}

static SDValue performVectorWidenCombine(SDNode *N,
TargetLowering::DAGCombinerInfo &DCI) {
static SDValue
performVectorExtendCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI) {
auto &DAG = DCI.DAG;
assert(N->getOpcode() == ISD::SIGN_EXTEND ||
N->getOpcode() == ISD::ZERO_EXTEND);

@@ -1933,10 +1933,10 @@ static SDValue performVectorWidenCombine(SDNode *N,
bool IsSext = N->getOpcode() == ISD::SIGN_EXTEND;
bool IsLow = Index == 0;

unsigned Op = IsSext ? (IsLow ? WebAssemblyISD::WIDEN_LOW_S
: WebAssemblyISD::WIDEN_HIGH_S)
: (IsLow ? WebAssemblyISD::WIDEN_LOW_U
: WebAssemblyISD::WIDEN_HIGH_U);
unsigned Op = IsSext ? (IsLow ? WebAssemblyISD::EXTEND_LOW_S
: WebAssemblyISD::EXTEND_HIGH_S)
: (IsLow ? WebAssemblyISD::EXTEND_LOW_U
: WebAssemblyISD::EXTEND_HIGH_U);

return DAG.getNode(Op, SDLoc(N), ResVT, Source);
}

@@ -1951,6 +1951,6 @@ WebAssemblyTargetLowering::PerformDAGCombine(SDNode *N,
return performVECTOR_SHUFFLECombine(N, DCI);
case ISD::SIGN_EXTEND:
case ISD::ZERO_EXTEND:
return performVectorWidenCombine(N, DCI);
return performVectorExtendCombine(N, DCI);
}
}

@@ -266,8 +266,8 @@ multiclass SIMDLoadZero<Vec vec, bits<32> simdop> {

// TODO: Also support v4f32 and v2f64 once the instructions are merged
// to the proposal
defm "" : SIMDLoadZero<I32x4, 252>;
defm "" : SIMDLoadZero<I64x2, 253>;
defm "" : SIMDLoadZero<I32x4, 0x5c>;
defm "" : SIMDLoadZero<I64x2, 0x5d>;

foreach vec = [I32x4, I64x2] in {
defvar loadpat = !cast<Intrinsic>("int_wasm_load"#vec.lane_bits#"_zero");

@@ -302,10 +302,10 @@ multiclass SIMDLoadLane<Vec vec, bits<32> simdop> {

// TODO: Also support v4f32 and v2f64 once the instructions are merged
// to the proposal
defm "" : SIMDLoadLane<I8x16, 88>;
defm "" : SIMDLoadLane<I16x8, 89>;
defm "" : SIMDLoadLane<I32x4, 90>;
defm "" : SIMDLoadLane<I64x2, 91>;
defm "" : SIMDLoadLane<I8x16, 0x54>;
defm "" : SIMDLoadLane<I16x8, 0x55>;
defm "" : SIMDLoadLane<I32x4, 0x56>;
defm "" : SIMDLoadLane<I64x2, 0x57>;

// Select loads with no constant offset.
multiclass LoadLanePatNoOffset<Vec vec, SDPatternOperator kind> {

@@ -375,10 +375,10 @@ multiclass SIMDStoreLane<Vec vec, bits<32> simdop> {

// TODO: Also support v4f32 and v2f64 once the instructions are merged
// to the proposal
defm "" : SIMDStoreLane<I8x16, 92>;
defm "" : SIMDStoreLane<I16x8, 93>;
defm "" : SIMDStoreLane<I32x4, 94>;
defm "" : SIMDStoreLane<I64x2, 95>;
defm "" : SIMDStoreLane<I8x16, 0x58>;
defm "" : SIMDStoreLane<I16x8, 0x59>;
defm "" : SIMDStoreLane<I32x4, 0x5a>;
defm "" : SIMDStoreLane<I64x2, 0x5b>;

// Select stores with no constant offset.
multiclass StoreLanePatNoOffset<Vec vec, Intrinsic kind> {

@@ -917,19 +917,19 @@ multiclass SIMDBinaryInt<SDPatternOperator node, string name, bits<32> baseInst>
defm "" : SIMDBinary<I64x2, node, name, !add(baseInst, 96)>;
}

// Integer addition: add / add_saturate_s / add_saturate_u
// Integer addition: add / add_sat_s / add_sat_u
let isCommutable = 1 in {
defm ADD : SIMDBinaryInt<add, "add", 110>;
defm ADD_SAT_S : SIMDBinaryIntSmall<saddsat, "add_saturate_s", 111>;
defm ADD_SAT_U : SIMDBinaryIntSmall<uaddsat, "add_saturate_u", 112>;
defm ADD_SAT_S : SIMDBinaryIntSmall<saddsat, "add_sat_s", 111>;
defm ADD_SAT_U : SIMDBinaryIntSmall<uaddsat, "add_sat_u", 112>;
} // isCommutable = 1

// Integer subtraction: sub / sub_saturate_s / sub_saturate_u
// Integer subtraction: sub / sub_sat_s / sub_sat_u
defm SUB : SIMDBinaryInt<sub, "sub", 113>;
defm SUB_SAT_S :
SIMDBinaryIntSmall<int_wasm_sub_saturate_signed, "sub_saturate_s", 114>;
SIMDBinaryIntSmall<int_wasm_sub_sat_signed, "sub_sat_s", 114>;
defm SUB_SAT_U :
SIMDBinaryIntSmall<int_wasm_sub_saturate_unsigned, "sub_saturate_u", 115>;
SIMDBinaryIntSmall<int_wasm_sub_sat_unsigned, "sub_sat_u", 115>;

// Integer multiplication: mul
let isCommutable = 1 in

@@ -980,31 +980,31 @@ multiclass SIMDExtBinary<Vec vec, Intrinsic node, string name, bits<32> simdop>
}

defm EXTMUL_LOW_S :
SIMDExtBinary<I16x8, int_wasm_extmul_low_signed, "extmul_low_i8x16_s", 154>;
SIMDExtBinary<I16x8, int_wasm_extmul_low_signed, "extmul_low_i8x16_s", 0x9c>;
defm EXTMUL_HIGH_S :
SIMDExtBinary<I16x8, int_wasm_extmul_high_signed, "extmul_high_i8x16_s", 157>;
SIMDExtBinary<I16x8, int_wasm_extmul_high_signed, "extmul_high_i8x16_s", 0x9d>;
defm EXTMUL_LOW_U :
SIMDExtBinary<I16x8, int_wasm_extmul_low_unsigned, "extmul_low_i8x16_u", 158>;
SIMDExtBinary<I16x8, int_wasm_extmul_low_unsigned, "extmul_low_i8x16_u", 0x9e>;
defm EXTMUL_HIGH_U :
SIMDExtBinary<I16x8, int_wasm_extmul_high_unsigned, "extmul_high_i8x16_u", 159>;
SIMDExtBinary<I16x8, int_wasm_extmul_high_unsigned, "extmul_high_i8x16_u", 0x9f>;

defm EXTMUL_LOW_S :
SIMDExtBinary<I32x4, int_wasm_extmul_low_signed, "extmul_low_i16x8_s", 187>;
SIMDExtBinary<I32x4, int_wasm_extmul_low_signed, "extmul_low_i16x8_s", 0xbc>;
defm EXTMUL_HIGH_S :
SIMDExtBinary<I32x4, int_wasm_extmul_high_signed, "extmul_high_i16x8_s", 189>;
SIMDExtBinary<I32x4, int_wasm_extmul_high_signed, "extmul_high_i16x8_s", 0xbd>;
defm EXTMUL_LOW_U :
SIMDExtBinary<I32x4, int_wasm_extmul_low_unsigned, "extmul_low_i16x8_u", 190>;
SIMDExtBinary<I32x4, int_wasm_extmul_low_unsigned, "extmul_low_i16x8_u", 0xbe>;
defm EXTMUL_HIGH_U :
SIMDExtBinary<I32x4, int_wasm_extmul_high_unsigned, "extmul_high_i16x8_u", 191>;
SIMDExtBinary<I32x4, int_wasm_extmul_high_unsigned, "extmul_high_i16x8_u", 0xbf>;

defm EXTMUL_LOW_S :
SIMDExtBinary<I64x2, int_wasm_extmul_low_signed, "extmul_low_i32x4_s", 210>;
SIMDExtBinary<I64x2, int_wasm_extmul_low_signed, "extmul_low_i32x4_s", 0xdc>;
defm EXTMUL_HIGH_S :
SIMDExtBinary<I64x2, int_wasm_extmul_high_signed, "extmul_high_i32x4_s", 211>;
SIMDExtBinary<I64x2, int_wasm_extmul_high_signed, "extmul_high_i32x4_s", 0xdd>;
defm EXTMUL_LOW_U :
SIMDExtBinary<I64x2, int_wasm_extmul_low_unsigned, "extmul_low_i32x4_u", 214>;
SIMDExtBinary<I64x2, int_wasm_extmul_low_unsigned, "extmul_low_i32x4_u", 0xde>;
defm EXTMUL_HIGH_U :
SIMDExtBinary<I64x2, int_wasm_extmul_high_unsigned, "extmul_high_i32x4_u", 215>;
SIMDExtBinary<I64x2, int_wasm_extmul_high_unsigned, "extmul_high_i32x4_u", 0xdf>;

//===----------------------------------------------------------------------===//
// Floating-point unary arithmetic

@@ -1025,14 +1025,14 @@ defm NEG : SIMDUnaryFP<fneg, "neg", 225>;
defm SQRT : SIMDUnaryFP<fsqrt, "sqrt", 227>;

// Rounding: ceil, floor, trunc, nearest
defm CEIL : SIMDUnary<F32x4, int_wasm_ceil, "ceil", 216>;
defm FLOOR : SIMDUnary<F32x4, int_wasm_floor, "floor", 217>;
defm TRUNC: SIMDUnary<F32x4, int_wasm_trunc, "trunc", 218>;
defm NEAREST: SIMDUnary<F32x4, int_wasm_nearest, "nearest", 219>;
defm CEIL : SIMDUnary<F64x2, int_wasm_ceil, "ceil", 220>;
defm FLOOR : SIMDUnary<F64x2, int_wasm_floor, "floor", 221>;
defm TRUNC: SIMDUnary<F64x2, int_wasm_trunc, "trunc", 222>;
defm NEAREST: SIMDUnary<F64x2, int_wasm_nearest, "nearest", 223>;
defm CEIL : SIMDUnary<F32x4, int_wasm_ceil, "ceil", 0x67>;
defm FLOOR : SIMDUnary<F32x4, int_wasm_floor, "floor", 0x68>;
defm TRUNC: SIMDUnary<F32x4, int_wasm_trunc, "trunc", 0x69>;
defm NEAREST: SIMDUnary<F32x4, int_wasm_nearest, "nearest", 0x6a>;
defm CEIL : SIMDUnary<F64x2, int_wasm_ceil, "ceil", 0x74>;
defm FLOOR : SIMDUnary<F64x2, int_wasm_floor, "floor", 0x75>;
defm TRUNC: SIMDUnary<F64x2, int_wasm_trunc, "trunc", 0x7a>;
defm NEAREST: SIMDUnary<F64x2, int_wasm_nearest, "nearest", 0x94>;

//===----------------------------------------------------------------------===//
// Floating-point binary arithmetic

@@ -1089,42 +1089,42 @@ defm "" : SIMDConvert<I32x4, F32x4, fp_to_uint, "trunc_sat_f32x4_u", 249>;
defm "" : SIMDConvert<F32x4, I32x4, sint_to_fp, "convert_i32x4_s", 250>;
defm "" : SIMDConvert<F32x4, I32x4, uint_to_fp, "convert_i32x4_u", 251>;

// Lower llvm.wasm.trunc.saturate.* to saturating instructions
// Lower llvm.wasm.trunc.sat.* to saturating instructions
def : Pat<(v4i32 (int_wasm_trunc_saturate_signed (v4f32 V128:$src))),
(fp_to_sint_I32x4 $src)>;
def : Pat<(v4i32 (int_wasm_trunc_saturate_unsigned (v4f32 V128:$src))),
(fp_to_uint_I32x4 $src)>;

// Widening operations
def widen_t : SDTypeProfile<1, 1, [SDTCisVec<0>, SDTCisVec<1>]>;
def widen_low_s : SDNode<"WebAssemblyISD::WIDEN_LOW_S", widen_t>;
def widen_high_s : SDNode<"WebAssemblyISD::WIDEN_HIGH_S", widen_t>;
def widen_low_u : SDNode<"WebAssemblyISD::WIDEN_LOW_U", widen_t>;
def widen_high_u : SDNode<"WebAssemblyISD::WIDEN_HIGH_U", widen_t>;
// Extending operations
def extend_t : SDTypeProfile<1, 1, [SDTCisVec<0>, SDTCisVec<1>]>;
def extend_low_s : SDNode<"WebAssemblyISD::EXTEND_LOW_S", extend_t>;
def extend_high_s : SDNode<"WebAssemblyISD::EXTEND_HIGH_S", extend_t>;
def extend_low_u : SDNode<"WebAssemblyISD::EXTEND_LOW_U", extend_t>;
def extend_high_u : SDNode<"WebAssemblyISD::EXTEND_HIGH_U", extend_t>;

// TODO: refactor this to be uniform for i64x2 if the numbering is not changed.
multiclass SIMDWiden<Vec vec, bits<32> baseInst> {
defm "" : SIMDConvert<vec, vec.split, widen_low_s,
"widen_low_"#vec.split.prefix#"_s", baseInst>;
defm "" : SIMDConvert<vec, vec.split, widen_high_s,
"widen_high_"#vec.split.prefix#"_s", !add(baseInst, 1)>;
defm "" : SIMDConvert<vec, vec.split, widen_low_u,
"widen_low_"#vec.split.prefix#"_u", !add(baseInst, 2)>;
defm "" : SIMDConvert<vec, vec.split, widen_high_u,
"widen_high_"#vec.split.prefix#"_u", !add(baseInst, 3)>;
multiclass SIMDExtend<Vec vec, bits<32> baseInst> {
defm "" : SIMDConvert<vec, vec.split, extend_low_s,
"extend_low_"#vec.split.prefix#"_s", baseInst>;
defm "" : SIMDConvert<vec, vec.split, extend_high_s,
"extend_high_"#vec.split.prefix#"_s", !add(baseInst, 1)>;
defm "" : SIMDConvert<vec, vec.split, extend_low_u,
"extend_low_"#vec.split.prefix#"_u", !add(baseInst, 2)>;
defm "" : SIMDConvert<vec, vec.split, extend_high_u,
"extend_high_"#vec.split.prefix#"_u", !add(baseInst, 3)>;
}

defm "" : SIMDWiden<I16x8, 135>;
defm "" : SIMDWiden<I32x4, 167>;
defm "" : SIMDExtend<I16x8, 135>;
defm "" : SIMDExtend<I32x4, 167>;

defm "" : SIMDConvert<I64x2, I32x4, int_wasm_widen_low_signed,
"widen_low_i32x4_s", 199>;
defm "" : SIMDConvert<I64x2, I32x4, int_wasm_widen_high_signed,
"widen_high_i32x4_s", 200>;
defm "" : SIMDConvert<I64x2, I32x4, int_wasm_widen_low_unsigned,
"widen_low_i32x4_u", 201>;
defm "" : SIMDConvert<I64x2, I32x4, int_wasm_widen_high_unsigned,
"widen_high_i32x4_u", 202>;
defm "" : SIMDConvert<I64x2, I32x4, int_wasm_extend_low_signed,
"extend_low_i32x4_s", 199>;
defm "" : SIMDConvert<I64x2, I32x4, int_wasm_extend_high_signed,
"extend_high_i32x4_s", 200>;
defm "" : SIMDConvert<I64x2, I32x4, int_wasm_extend_low_unsigned,
"extend_low_i32x4_u", 201>;
defm "" : SIMDConvert<I64x2, I32x4, int_wasm_extend_high_unsigned,
"extend_high_i32x4_u", 202>;

// Narrowing operations
multiclass SIMDNarrow<Vec vec, bits<32> baseInst> {

@@ -1232,31 +1232,31 @@ def : Pat<(t1 (bitconvert (t2 V128:$v))), (t1 V128:$v)>;

// Extended pairwise addition
defm "" : SIMDConvert<I16x8, I8x16, int_wasm_extadd_pairwise_signed,
"extadd_pairwise_i8x16_s", 0xc2>;
"extadd_pairwise_i8x16_s", 0x7c>;
defm "" : SIMDConvert<I16x8, I8x16, int_wasm_extadd_pairwise_unsigned,
"extadd_pairwise_i8x16_u", 0xc3>;
"extadd_pairwise_i8x16_u", 0x7d>;
defm "" : SIMDConvert<I32x4, I16x8, int_wasm_extadd_pairwise_signed,
"extadd_pairwise_i16x8_s", 0xa5>;
"extadd_pairwise_i16x8_s", 0x7e>;
defm "" : SIMDConvert<I32x4, I16x8, int_wasm_extadd_pairwise_unsigned,
"extadd_pairwise_i16x8_u", 0xa6>;
"extadd_pairwise_i16x8_u", 0x7f>;

// Prototype f64x2 conversions
defm "" : SIMDConvert<F64x2, I32x4, int_wasm_convert_low_signed,
"convert_low_i32x4_s", 0x53>;
defm "" : SIMDConvert<F64x2, I32x4, int_wasm_convert_low_unsigned,
"convert_low_i32x4_u", 0x54>;
defm "" : SIMDConvert<I32x4, F64x2, int_wasm_trunc_saturate_zero_signed,
"trunc_sat_zero_f64x2_s", 0x55>;
defm "" : SIMDConvert<I32x4, F64x2, int_wasm_trunc_saturate_zero_unsigned,
"trunc_sat_zero_f64x2_u", 0x56>;
defm "" : SIMDConvert<F32x4, F64x2, int_wasm_demote_zero,
"demote_zero_f64x2", 0x57>;
"demote_zero_f64x2", 0x5e>;
defm "" : SIMDConvert<F64x2, F32x4, int_wasm_promote_low,
"promote_low_f32x4", 0x69>;
"promote_low_f32x4", 0x5f>;
defm "" : SIMDConvert<I32x4, F64x2, int_wasm_trunc_sat_zero_signed,
"trunc_sat_zero_f64x2_s", 0xfc>;
defm "" : SIMDConvert<I32x4, F64x2, int_wasm_trunc_sat_zero_unsigned,
"trunc_sat_zero_f64x2_u", 0xfd>;
defm "" : SIMDConvert<F64x2, I32x4, int_wasm_convert_low_signed,
"convert_low_i32x4_s", 0xfe>;
defm "" : SIMDConvert<F64x2, I32x4, int_wasm_convert_low_unsigned,
"convert_low_i32x4_u", 0xff>;

//===----------------------------------------------------------------------===//
// Saturating Rounding Q-Format Multiplication
//===----------------------------------------------------------------------===//

defm Q15MULR_SAT_S :
SIMDBinary<I16x8, int_wasm_q15mulr_saturate_signed, "q15mulr_sat_s", 156>;
SIMDBinary<I16x8, int_wasm_q15mulr_sat_signed, "q15mulr_sat_s", 0x82>;

@@ -1,121 +1,121 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mattr=+simd128 | FileCheck %s

;; Test that SIMD widening operations can be successfully selected
;; Test that SIMD extending operations can be successfully selected

target datalayout = "e-m:e-p:32:32-i64:64-n32:64-S128"
target triple = "wasm32-unknown-unknown"

define <8 x i16> @widen_low_i8x16_s(<16 x i8> %v) {
; CHECK-LABEL: widen_low_i8x16_s:
; CHECK: .functype widen_low_i8x16_s (v128) -> (v128)
define <8 x i16> @extend_low_i8x16_s(<16 x i8> %v) {
; CHECK-LABEL: extend_low_i8x16_s:
; CHECK: .functype extend_low_i8x16_s (v128) -> (v128)
; CHECK-NEXT: # %bb.0:
; CHECK-NEXT: local.get 0
; CHECK-NEXT: i16x8.widen_low_i8x16_s
; CHECK-NEXT: i16x8.extend_low_i8x16_s
; CHECK-NEXT: # fallthrough-return
%low = shufflevector <16 x i8> %v, <16 x i8> undef,
<8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
%widened = sext <8 x i8> %low to <8 x i16>
ret <8 x i16> %widened
%extended = sext <8 x i8> %low to <8 x i16>
ret <8 x i16> %extended
}

define <8 x i16> @widen_low_i8x16_u(<16 x i8> %v) {
; CHECK-LABEL: widen_low_i8x16_u:
; CHECK: .functype widen_low_i8x16_u (v128) -> (v128)
define <8 x i16> @extend_low_i8x16_u(<16 x i8> %v) {
; CHECK-LABEL: extend_low_i8x16_u:
; CHECK: .functype extend_low_i8x16_u (v128) -> (v128)
; CHECK-NEXT: # %bb.0:
; CHECK-NEXT: local.get 0
; CHECK-NEXT: i16x8.widen_low_i8x16_u
; CHECK-NEXT: i16x8.extend_low_i8x16_u
; CHECK-NEXT: # fallthrough-return
%low = shufflevector <16 x i8> %v, <16 x i8> undef,
<8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
%widened = zext <8 x i8> %low to <8 x i16>
ret <8 x i16> %widened
%extended = zext <8 x i8> %low to <8 x i16>
ret <8 x i16> %extended
}

define <8 x i16> @widen_high_i8x16_s(<16 x i8> %v) {
; CHECK-LABEL: widen_high_i8x16_s:
; CHECK: .functype widen_high_i8x16_s (v128) -> (v128)
define <8 x i16> @extend_high_i8x16_s(<16 x i8> %v) {
; CHECK-LABEL: extend_high_i8x16_s:
; CHECK: .functype extend_high_i8x16_s (v128) -> (v128)
; CHECK-NEXT: # %bb.0:
; CHECK-NEXT: local.get 0
; CHECK-NEXT: i16x8.widen_high_i8x16_s
; CHECK-NEXT: i16x8.extend_high_i8x16_s
; CHECK-NEXT: # fallthrough-return
%low = shufflevector <16 x i8> %v, <16 x i8> undef,
<8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
%widened = sext <8 x i8> %low to <8 x i16>
ret <8 x i16> %widened
%extended = sext <8 x i8> %low to <8 x i16>
ret <8 x i16> %extended
}

define <8 x i16> @widen_high_i8x16_u(<16 x i8> %v) {
; CHECK-LABEL: widen_high_i8x16_u:
; CHECK: .functype widen_high_i8x16_u (v128) -> (v128)
define <8 x i16> @extend_high_i8x16_u(<16 x i8> %v) {
; CHECK-LABEL: extend_high_i8x16_u:
; CHECK: .functype extend_high_i8x16_u (v128) -> (v128)
; CHECK-NEXT: # %bb.0:
; CHECK-NEXT: local.get 0
; CHECK-NEXT: i16x8.widen_high_i8x16_u
; CHECK-NEXT: i16x8.extend_high_i8x16_u
; CHECK-NEXT: # fallthrough-return
%low = shufflevector <16 x i8> %v, <16 x i8> undef,
<8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
%widened = zext <8 x i8> %low to <8 x i16>
ret <8 x i16> %widened
%extended = zext <8 x i8> %low to <8 x i16>
ret <8 x i16> %extended
}

define <4 x i32> @widen_low_i16x8_s(<8 x i16> %v) {
; CHECK-LABEL: widen_low_i16x8_s:
; CHECK: .functype widen_low_i16x8_s (v128) -> (v128)
define <4 x i32> @extend_low_i16x8_s(<8 x i16> %v) {
; CHECK-LABEL: extend_low_i16x8_s:
; CHECK: .functype extend_low_i16x8_s (v128) -> (v128)
; CHECK-NEXT: # %bb.0:
; CHECK-NEXT: local.get 0
; CHECK-NEXT: i32x4.widen_low_i16x8_s
; CHECK-NEXT: i32x4.extend_low_i16x8_s
; CHECK-NEXT: # fallthrough-return
%low = shufflevector <8 x i16> %v, <8 x i16> undef,
<4 x i32> <i32 0, i32 1, i32 2, i32 3>
%widened = sext <4 x i16> %low to <4 x i32>
ret <4 x i32> %widened
%extended = sext <4 x i16> %low to <4 x i32>
ret <4 x i32> %extended
}

define <4 x i32> @widen_low_i16x8_u(<8 x i16> %v) {
; CHECK-LABEL: widen_low_i16x8_u:
; CHECK: .functype widen_low_i16x8_u (v128) -> (v128)
define <4 x i32> @extend_low_i16x8_u(<8 x i16> %v) {
; CHECK-LABEL: extend_low_i16x8_u:
; CHECK: .functype extend_low_i16x8_u (v128) -> (v128)
; CHECK-NEXT: # %bb.0:
; CHECK-NEXT: local.get 0
; CHECK-NEXT: i32x4.widen_low_i16x8_u
; CHECK-NEXT: i32x4.extend_low_i16x8_u
; CHECK-NEXT: # fallthrough-return
%low = shufflevector <8 x i16> %v, <8 x i16> undef,
<4 x i32> <i32 0, i32 1, i32 2, i32 3>
%widened = zext <4 x i16> %low to <4 x i32>
ret <4 x i32> %widened
%extended = zext <4 x i16> %low to <4 x i32>
ret <4 x i32> %extended
}

define <4 x i32> @widen_high_i16x8_s(<8 x i16> %v) {
; CHECK-LABEL: widen_high_i16x8_s:
; CHECK: .functype widen_high_i16x8_s (v128) -> (v128)
define <4 x i32> @extend_high_i16x8_s(<8 x i16> %v) {
; CHECK-LABEL: extend_high_i16x8_s:
; CHECK: .functype extend_high_i16x8_s (v128) -> (v128)
; CHECK-NEXT: # %bb.0:
; CHECK-NEXT: local.get 0
; CHECK-NEXT: i32x4.widen_high_i16x8_s
; CHECK-NEXT: i32x4.extend_high_i16x8_s
; CHECK-NEXT: # fallthrough-return
%low = shufflevector <8 x i16> %v, <8 x i16> undef,
<4 x i32> <i32 4, i32 5, i32 6, i32 7>
%widened = sext <4 x i16> %low to <4 x i32>
ret <4 x i32> %widened
%extended = sext <4 x i16> %low to <4 x i32>
ret <4 x i32> %extended
}

define <4 x i32> @widen_high_i16x8_u(<8 x i16> %v) {
; CHECK-LABEL: widen_high_i16x8_u:
; CHECK: .functype widen_high_i16x8_u (v128) -> (v128)
define <4 x i32> @extend_high_i16x8_u(<8 x i16> %v) {
; CHECK-LABEL: extend_high_i16x8_u:
; CHECK: .functype extend_high_i16x8_u (v128) -> (v128)
; CHECK-NEXT: # %bb.0:
; CHECK-NEXT: local.get 0
; CHECK-NEXT: i32x4.widen_high_i16x8_u
; CHECK-NEXT: i32x4.extend_high_i16x8_u
; CHECK-NEXT: # fallthrough-return
%low = shufflevector <8 x i16> %v, <8 x i16> undef,
<4 x i32> <i32 4, i32 5, i32 6, i32 7>
%widened = zext <4 x i16> %low to <4 x i32>
ret <4 x i32> %widened
%extended = zext <4 x i16> %low to <4 x i32>
ret <4 x i32> %extended
}

;; Also test that similar patterns with offsets not corresponding to
;; the low or high half are correctly expanded.

define <8 x i16> @widen_lowish_i8x16_s(<16 x i8> %v) {
; CHECK-LABEL: widen_lowish_i8x16_s:
; CHECK: .functype widen_lowish_i8x16_s (v128) -> (v128)
define <8 x i16> @extend_lowish_i8x16_s(<16 x i8> %v) {
; CHECK-LABEL: extend_lowish_i8x16_s:
; CHECK: .functype extend_lowish_i8x16_s (v128) -> (v128)
; CHECK-NEXT: # %bb.0:
; CHECK-NEXT: local.get 0
; CHECK-NEXT: i8x16.extract_lane_u 1

@@ -148,13 +148,13 @@ define <8 x i16> @widen_lowish_i8x16_s(<16 x i8> %v) {
; CHECK-NEXT: # fallthrough-return
%lowish = shufflevector <16 x i8> %v, <16 x i8> undef,
<8 x i32> <i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8>
%widened = sext <8 x i8> %lowish to <8 x i16>
ret <8 x i16> %widened
%extended = sext <8 x i8> %lowish to <8 x i16>
ret <8 x i16> %extended
}

define <4 x i32> @widen_lowish_i16x8_s(<8 x i16> %v) {
; CHECK-LABEL: widen_lowish_i16x8_s:
; CHECK: .functype widen_lowish_i16x8_s (v128) -> (v128)
define <4 x i32> @extend_lowish_i16x8_s(<8 x i16> %v) {
; CHECK-LABEL: extend_lowish_i16x8_s:
; CHECK: .functype extend_lowish_i16x8_s (v128) -> (v128)
; CHECK-NEXT: # %bb.0:
; CHECK-NEXT: local.get 0
; CHECK-NEXT: i16x8.extract_lane_u 1

@@ -175,6 +175,6 @@ define <4 x i32> @widen_lowish_i16x8_s(<8 x i16> %v) {
; CHECK-NEXT: # fallthrough-return
%lowish = shufflevector <8 x i16> %v, <8 x i16> undef,
<4 x i32> <i32 1, i32 2, i32 3, i32 4>
%widened = sext <4 x i16> %lowish to <4 x i32>
ret <4 x i32> %widened
%extended = sext <4 x i16> %lowish to <4 x i32>
ret <4 x i32> %extended
}

@ -23,7 +23,7 @@ define <16 x i8> @swizzle_v16i8(<16 x i8> %x, <16 x i8> %y) {
|
|||
|
||||
; CHECK-LABEL: add_sat_s_v16i8:
|
||||
; CHECK-NEXT: .functype add_sat_s_v16i8 (v128, v128) -> (v128){{$}}
|
||||
; CHECK-NEXT: i8x16.add_saturate_s $push[[R:[0-9]+]]=, $0, $1{{$}}
|
||||
; CHECK-NEXT: i8x16.add_sat_s $push[[R:[0-9]+]]=, $0, $1{{$}}
|
||||
; CHECK-NEXT: return $pop[[R]]{{$}}
|
||||
declare <16 x i8> @llvm.sadd.sat.v16i8(<16 x i8>, <16 x i8>)
|
||||
define <16 x i8> @add_sat_s_v16i8(<16 x i8> %x, <16 x i8> %y) {
|
||||
|
@ -33,7 +33,7 @@ define <16 x i8> @add_sat_s_v16i8(<16 x i8> %x, <16 x i8> %y) {
|
|||
|
||||
; CHECK-LABEL: add_sat_u_v16i8:
|
||||
; CHECK-NEXT: .functype add_sat_u_v16i8 (v128, v128) -> (v128){{$}}
|
||||
; CHECK-NEXT: i8x16.add_saturate_u $push[[R:[0-9]+]]=, $0, $1{{$}}
|
||||
; CHECK-NEXT: i8x16.add_sat_u $push[[R:[0-9]+]]=, $0, $1{{$}}
|
||||
; CHECK-NEXT: return $pop[[R]]{{$}}
|
||||
declare <16 x i8> @llvm.uadd.sat.v16i8(<16 x i8>, <16 x i8>)
|
||||
define <16 x i8> @add_sat_u_v16i8(<16 x i8> %x, <16 x i8> %y) {
|
||||
|
@ -43,11 +43,11 @@ define <16 x i8> @add_sat_u_v16i8(<16 x i8> %x, <16 x i8> %y) {
|
|||
|
||||
; CHECK-LABEL: sub_sat_s_v16i8:
|
||||
; CHECK-NEXT: .functype sub_sat_s_v16i8 (v128, v128) -> (v128){{$}}
|
||||
; CHECK-NEXT: i8x16.sub_saturate_s $push[[R:[0-9]+]]=, $0, $1{{$}}
|
||||
; CHECK-NEXT: i8x16.sub_sat_s $push[[R:[0-9]+]]=, $0, $1{{$}}
|
||||
; CHECK-NEXT: return $pop[[R]]{{$}}
|
||||
declare <16 x i8> @llvm.wasm.sub.saturate.signed.v16i8(<16 x i8>, <16 x i8>)
|
||||
declare <16 x i8> @llvm.wasm.sub.sat.signed.v16i8(<16 x i8>, <16 x i8>)
|
||||
define <16 x i8> @sub_sat_s_v16i8(<16 x i8> %x, <16 x i8> %y) {
|
||||
%a = call <16 x i8> @llvm.wasm.sub.saturate.signed.v16i8(
|
||||
%a = call <16 x i8> @llvm.wasm.sub.sat.signed.v16i8(
|
||||
<16 x i8> %x, <16 x i8> %y
|
||||
)
|
||||
ret <16 x i8> %a
|
||||
|
@ -55,11 +55,11 @@ define <16 x i8> @sub_sat_s_v16i8(<16 x i8> %x, <16 x i8> %y) {
|
|||
|
||||
; CHECK-LABEL: sub_sat_u_v16i8:
|
||||
; CHECK-NEXT: .functype sub_sat_u_v16i8 (v128, v128) -> (v128){{$}}
|
||||
; CHECK-NEXT: i8x16.sub_saturate_u $push[[R:[0-9]+]]=, $0, $1{{$}}
|
||||
; CHECK-NEXT: i8x16.sub_sat_u $push[[R:[0-9]+]]=, $0, $1{{$}}
|
||||
; CHECK-NEXT: return $pop[[R]]{{$}}
|
||||
declare <16 x i8> @llvm.wasm.sub.saturate.unsigned.v16i8(<16 x i8>, <16 x i8>)
|
||||
declare <16 x i8> @llvm.wasm.sub.sat.unsigned.v16i8(<16 x i8>, <16 x i8>)
|
||||
define <16 x i8> @sub_sat_u_v16i8(<16 x i8> %x, <16 x i8> %y) {
|
||||
%a = call <16 x i8> @llvm.wasm.sub.saturate.unsigned.v16i8(
|
||||
%a = call <16 x i8> @llvm.wasm.sub.sat.unsigned.v16i8(
|
||||
<16 x i8> %x, <16 x i8> %y
|
||||
)
|
||||
ret <16 x i8> %a
|
||||
|
@ -186,7 +186,7 @@ define <16 x i8> @shuffle_undef_v16i8(<16 x i8> %x, <16 x i8> %y) {
|
|||
; ==============================================================================
|
||||
; CHECK-LABEL: add_sat_s_v8i16:
|
||||
; CHECK-NEXT: .functype add_sat_s_v8i16 (v128, v128) -> (v128){{$}}
|
||||
; CHECK-NEXT: i16x8.add_saturate_s $push[[R:[0-9]+]]=, $0, $1{{$}}
|
||||
; CHECK-NEXT: i16x8.add_sat_s $push[[R:[0-9]+]]=, $0, $1{{$}}
|
||||
; CHECK-NEXT: return $pop[[R]]{{$}}
|
||||
declare <8 x i16> @llvm.sadd.sat.v8i16(<8 x i16>, <8 x i16>)
|
||||
define <8 x i16> @add_sat_s_v8i16(<8 x i16> %x, <8 x i16> %y) {
|
||||
|
@ -196,7 +196,7 @@ define <8 x i16> @add_sat_s_v8i16(<8 x i16> %x, <8 x i16> %y) {
|
|||
|
||||
; CHECK-LABEL: add_sat_u_v8i16:
|
||||
; CHECK-NEXT: .functype add_sat_u_v8i16 (v128, v128) -> (v128){{$}}
|
||||
; CHECK-NEXT: i16x8.add_saturate_u $push[[R:[0-9]+]]=, $0, $1{{$}}
|
||||
; CHECK-NEXT: i16x8.add_sat_u $push[[R:[0-9]+]]=, $0, $1{{$}}
|
||||
; CHECK-NEXT: return $pop[[R]]{{$}}
|
||||
declare <8 x i16> @llvm.uadd.sat.v8i16(<8 x i16>, <8 x i16>)
|
||||
define <8 x i16> @add_sat_u_v8i16(<8 x i16> %x, <8 x i16> %y) {
|
||||
|
@ -206,11 +206,11 @@ define <8 x i16> @add_sat_u_v8i16(<8 x i16> %x, <8 x i16> %y) {
|
|||
|
||||
; CHECK-LABEL: sub_sat_s_v8i16:
|
||||
; CHECK-NEXT: .functype sub_sat_s_v8i16 (v128, v128) -> (v128){{$}}
|
||||
; CHECK-NEXT: i16x8.sub_saturate_s $push[[R:[0-9]+]]=, $0, $1{{$}}
|
||||
; CHECK-NEXT: i16x8.sub_sat_s $push[[R:[0-9]+]]=, $0, $1{{$}}
|
||||
; CHECK-NEXT: return $pop[[R]]{{$}}
|
||||
declare <8 x i16> @llvm.wasm.sub.saturate.signed.v8i16(<8 x i16>, <8 x i16>)
|
||||
declare <8 x i16> @llvm.wasm.sub.sat.signed.v8i16(<8 x i16>, <8 x i16>)
|
||||
define <8 x i16> @sub_sat_s_v8i16(<8 x i16> %x, <8 x i16> %y) {
|
||||
%a = call <8 x i16> @llvm.wasm.sub.saturate.signed.v8i16(
|
||||
%a = call <8 x i16> @llvm.wasm.sub.sat.signed.v8i16(
|
||||
<8 x i16> %x, <8 x i16> %y
|
||||
)
|
||||
ret <8 x i16> %a
|
||||
|
@ -218,11 +218,11 @@ define <8 x i16> @sub_sat_s_v8i16(<8 x i16> %x, <8 x i16> %y) {
|
|||
|
||||
; CHECK-LABEL: sub_sat_u_v8i16:
|
||||
; CHECK-NEXT: .functype sub_sat_u_v8i16 (v128, v128) -> (v128){{$}}
|
||||
; CHECK-NEXT: i16x8.sub_saturate_u $push[[R:[0-9]+]]=, $0, $1{{$}}
|
||||
; CHECK-NEXT: i16x8.sub_sat_u $push[[R:[0-9]+]]=, $0, $1{{$}}
|
||||
; CHECK-NEXT: return $pop[[R]]{{$}}
|
||||
declare <8 x i16> @llvm.wasm.sub.saturate.unsigned.v8i16(<8 x i16>, <8 x i16>)
|
||||
declare <8 x i16> @llvm.wasm.sub.sat.unsigned.v8i16(<8 x i16>, <8 x i16>)
|
||||
define <8 x i16> @sub_sat_u_v8i16(<8 x i16> %x, <8 x i16> %y) {
|
||||
%a = call <8 x i16> @llvm.wasm.sub.saturate.unsigned.v8i16(
|
||||
%a = call <8 x i16> @llvm.wasm.sub.sat.unsigned.v8i16(
|
||||
<8 x i16> %x, <8 x i16> %y
|
||||
)
|
||||
ret <8 x i16> %a
|
||||
|
@ -242,9 +242,9 @@ define <8 x i16> @avgr_u_v8i16(<8 x i16> %x, <8 x i16> %y) {
|
|||
; CHECK-NEXT: .functype q15mulr_sat_s_v8i16 (v128, v128) -> (v128){{$}}
|
||||
; CHECK-NEXT: i16x8.q15mulr_sat_s $push[[R:[0-9]+]]=, $0, $1{{$}}
|
||||
; CHECK-NEXT: return $pop[[R]]{{$}}
|
||||
declare <8 x i16> @llvm.wasm.q15mulr.saturate.signed(<8 x i16>, <8 x i16>)
|
||||
declare <8 x i16> @llvm.wasm.q15mulr.sat.signed(<8 x i16>, <8 x i16>)
|
||||
define <8 x i16> @q15mulr_sat_s_v8i16(<8 x i16> %x, <8 x i16> %y) {
|
||||
%a = call <8 x i16> @llvm.wasm.q15mulr.saturate.signed(<8 x i16> %x,
|
||||
%a = call <8 x i16> @llvm.wasm.q15mulr.sat.signed(<8 x i16> %x,
|
||||
<8 x i16> %y)
|
||||
ret <8 x i16> %a
|
||||
}
|
||||
|
@ -534,9 +534,9 @@ define <4 x i32> @trunc_sat_u_v4i32(<4 x float> %x) {
|
|||
; CHECK-NEXT: .functype trunc_sat_zero_signed_v4i32 (v128) -> (v128){{$}}
|
||||
; CHECK-NEXT: i32x4.trunc_sat_zero_f64x2_s $push[[R:[0-9]+]]=, $0{{$}}
|
||||
; CHECK-NEXT: return $pop[[R]]{{$}}
|
||||
declare <4 x i32> @llvm.wasm.trunc.saturate.zero.signed(<2 x double>)
|
||||
declare <4 x i32> @llvm.wasm.trunc.sat.zero.signed(<2 x double>)
|
||||
define <4 x i32> @trunc_sat_zero_signed_v4i32(<2 x double> %a) {
|
||||
%v = call <4 x i32> @llvm.wasm.trunc.saturate.zero.signed(<2 x double> %a)
|
||||
%v = call <4 x i32> @llvm.wasm.trunc.sat.zero.signed(<2 x double> %a)
|
||||
ret <4 x i32> %v
|
||||
}
|
||||
|
||||
|
@ -544,9 +544,9 @@ define <4 x i32> @trunc_sat_zero_signed_v4i32(<2 x double> %a) {
|
|||
; CHECK-NEXT: .functype trunc_sat_zero_unsigned_v4i32 (v128) -> (v128){{$}}
|
||||
; CHECK-NEXT: i32x4.trunc_sat_zero_f64x2_u $push[[R:[0-9]+]]=, $0{{$}}
|
||||
; CHECK-NEXT: return $pop[[R]]{{$}}
|
||||
declare <4 x i32> @llvm.wasm.trunc.saturate.zero.unsigned(<2 x double>)
|
||||
declare <4 x i32> @llvm.wasm.trunc.sat.zero.unsigned(<2 x double>)
|
||||
define <4 x i32> @trunc_sat_zero_unsigned_v4i32(<2 x double> %a) {
|
||||
%v = call <4 x i32> @llvm.wasm.trunc.saturate.zero.unsigned(<2 x double> %a)
|
||||
%v = call <4 x i32> @llvm.wasm.trunc.sat.zero.unsigned(<2 x double> %a)
|
||||
ret <4 x i32> %v
|
||||
}
|
||||
|
||||
|
@ -563,43 +563,43 @@ define <2 x i64> @eq_v2i64(<2 x i64> %x, <2 x i64> %y) {
  ret <2 x i64> %a
}

; CHECK-LABEL: widen_low_s_v2i64:
; CHECK-NEXT: .functype widen_low_s_v2i64 (v128) -> (v128){{$}}
; CHECK-NEXT: i64x2.widen_low_i32x4_s $push[[R:[0-9]+]]=, $0{{$}}
; CHECK-LABEL: extend_low_s_v2i64:
; CHECK-NEXT: .functype extend_low_s_v2i64 (v128) -> (v128){{$}}
; CHECK-NEXT: i64x2.extend_low_i32x4_s $push[[R:[0-9]+]]=, $0{{$}}
; CHECK-NEXT: return $pop[[R]]{{$}}
declare <2 x i64> @llvm.wasm.widen.low.signed(<4 x i32>)
define <2 x i64> @widen_low_s_v2i64(<4 x i32> %x) {
  %a = call <2 x i64> @llvm.wasm.widen.low.signed(<4 x i32> %x)
declare <2 x i64> @llvm.wasm.extend.low.signed(<4 x i32>)
define <2 x i64> @extend_low_s_v2i64(<4 x i32> %x) {
  %a = call <2 x i64> @llvm.wasm.extend.low.signed(<4 x i32> %x)
  ret <2 x i64> %a
}

; CHECK-LABEL: widen_high_s_v2i64:
; CHECK-NEXT: .functype widen_high_s_v2i64 (v128) -> (v128){{$}}
; CHECK-NEXT: i64x2.widen_high_i32x4_s $push[[R:[0-9]+]]=, $0{{$}}
; CHECK-LABEL: extend_high_s_v2i64:
; CHECK-NEXT: .functype extend_high_s_v2i64 (v128) -> (v128){{$}}
; CHECK-NEXT: i64x2.extend_high_i32x4_s $push[[R:[0-9]+]]=, $0{{$}}
; CHECK-NEXT: return $pop[[R]]{{$}}
declare <2 x i64> @llvm.wasm.widen.high.signed(<4 x i32>)
define <2 x i64> @widen_high_s_v2i64(<4 x i32> %x) {
  %a = call <2 x i64> @llvm.wasm.widen.high.signed(<4 x i32> %x)
declare <2 x i64> @llvm.wasm.extend.high.signed(<4 x i32>)
define <2 x i64> @extend_high_s_v2i64(<4 x i32> %x) {
  %a = call <2 x i64> @llvm.wasm.extend.high.signed(<4 x i32> %x)
  ret <2 x i64> %a
}

; CHECK-LABEL: widen_low_u_v2i64:
; CHECK-NEXT: .functype widen_low_u_v2i64 (v128) -> (v128){{$}}
; CHECK-NEXT: i64x2.widen_low_i32x4_u $push[[R:[0-9]+]]=, $0{{$}}
; CHECK-LABEL: extend_low_u_v2i64:
; CHECK-NEXT: .functype extend_low_u_v2i64 (v128) -> (v128){{$}}
; CHECK-NEXT: i64x2.extend_low_i32x4_u $push[[R:[0-9]+]]=, $0{{$}}
; CHECK-NEXT: return $pop[[R]]{{$}}
declare <2 x i64> @llvm.wasm.widen.low.unsigned(<4 x i32>)
define <2 x i64> @widen_low_u_v2i64(<4 x i32> %x) {
  %a = call <2 x i64> @llvm.wasm.widen.low.unsigned(<4 x i32> %x)
declare <2 x i64> @llvm.wasm.extend.low.unsigned(<4 x i32>)
define <2 x i64> @extend_low_u_v2i64(<4 x i32> %x) {
  %a = call <2 x i64> @llvm.wasm.extend.low.unsigned(<4 x i32> %x)
  ret <2 x i64> %a
}

; CHECK-LABEL: widen_high_u_v2i64:
; CHECK-NEXT: .functype widen_high_u_v2i64 (v128) -> (v128){{$}}
; CHECK-NEXT: i64x2.widen_high_i32x4_u $push[[R:[0-9]+]]=, $0{{$}}
; CHECK-LABEL: extend_high_u_v2i64:
; CHECK-NEXT: .functype extend_high_u_v2i64 (v128) -> (v128){{$}}
; CHECK-NEXT: i64x2.extend_high_i32x4_u $push[[R:[0-9]+]]=, $0{{$}}
; CHECK-NEXT: return $pop[[R]]{{$}}
declare <2 x i64> @llvm.wasm.widen.high.unsigned(<4 x i32>)
define <2 x i64> @widen_high_u_v2i64(<4 x i32> %x) {
  %a = call <2 x i64> @llvm.wasm.widen.high.unsigned(<4 x i32> %x)
declare <2 x i64> @llvm.wasm.extend.high.unsigned(<4 x i32>)
define <2 x i64> @extend_high_u_v2i64(<4 x i32> %x) {
  %a = call <2 x i64> @llvm.wasm.extend.high.unsigned(<4 x i32> %x)
  ret <2 x i64> %a
}
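
The renamed i64x2 extension intrinsics can also be exercised together from one module, as in the hedged sketch below (illustrative function name; not part of this commit):

; Sketch only: combines the four renamed extension intrinsics in a single function.
declare <2 x i64> @llvm.wasm.extend.low.signed(<4 x i32>)
declare <2 x i64> @llvm.wasm.extend.high.signed(<4 x i32>)
declare <2 x i64> @llvm.wasm.extend.low.unsigned(<4 x i32>)
declare <2 x i64> @llvm.wasm.extend.high.unsigned(<4 x i32>)

; Each call should select the matching i64x2.extend_{low,high}_i32x4_{s,u} instruction.
define <2 x i64> @extend_sketch(<4 x i32> %v) {
  %lo.s = call <2 x i64> @llvm.wasm.extend.low.signed(<4 x i32> %v)
  %hi.s = call <2 x i64> @llvm.wasm.extend.high.signed(<4 x i32> %v)
  %lo.u = call <2 x i64> @llvm.wasm.extend.low.unsigned(<4 x i32> %v)
  %hi.u = call <2 x i64> @llvm.wasm.extend.high.unsigned(<4 x i32> %v)
  %s = add <2 x i64> %lo.s, %hi.s
  %u = add <2 x i64> %lo.u, %hi.u
  %r = add <2 x i64> %s, %u
  ret <2 x i64> %r
}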
@ -280,38 +280,51 @@ main:
# CHECK: v128.bitselect # encoding: [0xfd,0x52]
v128.bitselect
# CHECK: v128.load8_lane 32, 1 # encoding: [0xfd,0x58,0x00,0x20,0x01]
# TODO: v128.any_true # encoding: [0xfd,0x53]
# CHECK: v128.load8_lane 32, 1 # encoding: [0xfd,0x54,0x00,0x20,0x01]
v128.load8_lane 32, 1
# CHECK: v128.load16_lane 32, 1 # encoding: [0xfd,0x59,0x01,0x20,0x01]
# CHECK: v128.load16_lane 32, 1 # encoding: [0xfd,0x55,0x01,0x20,0x01]
v128.load16_lane 32, 1
# CHECK: v128.load32_lane 32, 1 # encoding: [0xfd,0x5a,0x02,0x20,0x01]
# CHECK: v128.load32_lane 32, 1 # encoding: [0xfd,0x56,0x02,0x20,0x01]
v128.load32_lane 32, 1
# CHECK: v128.load64_lane 32, 1 # encoding: [0xfd,0x5b,0x03,0x20,0x01]
# CHECK: v128.load64_lane 32, 1 # encoding: [0xfd,0x57,0x03,0x20,0x01]
v128.load64_lane 32, 1
# CHECK: v128.store8_lane 32, 1 # encoding: [0xfd,0x5c,0x00,0x20,0x01]
# CHECK: v128.store8_lane 32, 1 # encoding: [0xfd,0x58,0x00,0x20,0x01]
v128.store8_lane 32, 1
# CHECK: v128.store16_lane 32, 1 # encoding: [0xfd,0x5d,0x01,0x20,0x01]
# CHECK: v128.store16_lane 32, 1 # encoding: [0xfd,0x59,0x01,0x20,0x01]
v128.store16_lane 32, 1
# CHECK: v128.store32_lane 32, 1 # encoding: [0xfd,0x5e,0x02,0x20,0x01]
# CHECK: v128.store32_lane 32, 1 # encoding: [0xfd,0x5a,0x02,0x20,0x01]
v128.store32_lane 32, 1
# CHECK: v128.store64_lane 32, 1 # encoding: [0xfd,0x5f,0x03,0x20,0x01]
# CHECK: v128.store64_lane 32, 1 # encoding: [0xfd,0x5b,0x03,0x20,0x01]
v128.store64_lane 32, 1
# CHECK: v128.load32_zero 32 # encoding: [0xfd,0x5c,0x02,0x20]
v128.load32_zero 32
# CHECK: v128.load64_zero 32 # encoding: [0xfd,0x5d,0x03,0x20]
v128.load64_zero 32
# CHECK: f32x4.demote_zero_f64x2 # encoding: [0xfd,0x5e]
f32x4.demote_zero_f64x2
# CHECK: f64x2.promote_low_f32x4 # encoding: [0xfd,0x5f]
f64x2.promote_low_f32x4
# CHECK: i8x16.abs # encoding: [0xfd,0x60]
i8x16.abs
# CHECK: i8x16.neg # encoding: [0xfd,0x61]
i8x16.neg
# CHECK: i8x16.any_true # encoding: [0xfd,0x62]
i8x16.any_true
# TODO: i8x16.popcnt # encoding: [0xfd,0x62]
# CHECK: i8x16.all_true # encoding: [0xfd,0x63]
i8x16.all_true
@ -325,6 +338,18 @@ main:
# CHECK: i8x16.narrow_i16x8_u # encoding: [0xfd,0x66]
i8x16.narrow_i16x8_u
# CHECK: f32x4.ceil # encoding: [0xfd,0x67]
f32x4.ceil
# CHECK: f32x4.floor # encoding: [0xfd,0x68]
f32x4.floor
# CHECK: f32x4.trunc # encoding: [0xfd,0x69]
f32x4.trunc
# CHECK: f32x4.nearest # encoding: [0xfd,0x6a]
f32x4.nearest
# CHECK: i8x16.shl # encoding: [0xfd,0x6b]
i8x16.shl
@ -337,20 +362,26 @@ main:
# CHECK: i8x16.add # encoding: [0xfd,0x6e]
i8x16.add
# CHECK: i8x16.add_saturate_s # encoding: [0xfd,0x6f]
i8x16.add_saturate_s
# CHECK: i8x16.add_sat_s # encoding: [0xfd,0x6f]
i8x16.add_sat_s
# CHECK: i8x16.add_saturate_u # encoding: [0xfd,0x70]
i8x16.add_saturate_u
# CHECK: i8x16.add_sat_u # encoding: [0xfd,0x70]
i8x16.add_sat_u
# CHECK: i8x16.sub # encoding: [0xfd,0x71]
i8x16.sub
# CHECK: i8x16.sub_saturate_s # encoding: [0xfd,0x72]
i8x16.sub_saturate_s
# CHECK: i8x16.sub_sat_s # encoding: [0xfd,0x72]
i8x16.sub_sat_s
# CHECK: i8x16.sub_saturate_u # encoding: [0xfd,0x73]
i8x16.sub_saturate_u
# CHECK: i8x16.sub_sat_u # encoding: [0xfd,0x73]
i8x16.sub_sat_u
# CHECK: f64x2.ceil # encoding: [0xfd,0x74]
f64x2.ceil
# CHECK: f64x2.floor # encoding: [0xfd,0x75]
f64x2.floor
# CHECK: i8x16.min_s # encoding: [0xfd,0x76]
i8x16.min_s
@ -364,11 +395,23 @@ main:
# CHECK: i8x16.max_u # encoding: [0xfd,0x79]
i8x16.max_u
# CHECK: f64x2.trunc # encoding: [0xfd,0x7a]
f64x2.trunc
# CHECK: i8x16.avgr_u # encoding: [0xfd,0x7b]
i8x16.avgr_u
# CHECK: i8x16.popcnt # encoding: [0xfd,0x7c]
i8x16.popcnt
# CHECK: i16x8.extadd_pairwise_i8x16_s # encoding: [0xfd,0x7c]
i16x8.extadd_pairwise_i8x16_s
# CHECK: i16x8.extadd_pairwise_i8x16_u # encoding: [0xfd,0x7d]
i16x8.extadd_pairwise_i8x16_u
# CHECK: i32x4.extadd_pairwise_i16x8_s # encoding: [0xfd,0x7e]
i32x4.extadd_pairwise_i16x8_s
# CHECK: i32x4.extadd_pairwise_i16x8_u # encoding: [0xfd,0x7f]
i32x4.extadd_pairwise_i16x8_u
# CHECK: i16x8.abs # encoding: [0xfd,0x80,0x01]
i16x8.abs
@ -376,8 +419,8 @@ main:
# CHECK: i16x8.neg # encoding: [0xfd,0x81,0x01]
i16x8.neg
# CHECK: i16x8.any_true # encoding: [0xfd,0x82,0x01]
i16x8.any_true
# CHECK: i16x8.q15mulr_sat_s # encoding: [0xfd,0x82,0x01]
i16x8.q15mulr_sat_s
# CHECK: i16x8.all_true # encoding: [0xfd,0x83,0x01]
i16x8.all_true
@ -391,17 +434,17 @@ main:
# CHECK: i16x8.narrow_i32x4_u # encoding: [0xfd,0x86,0x01]
i16x8.narrow_i32x4_u
# CHECK: i16x8.widen_low_i8x16_s # encoding: [0xfd,0x87,0x01]
i16x8.widen_low_i8x16_s
# CHECK: i16x8.extend_low_i8x16_s # encoding: [0xfd,0x87,0x01]
i16x8.extend_low_i8x16_s
# CHECK: i16x8.widen_high_i8x16_s # encoding: [0xfd,0x88,0x01]
i16x8.widen_high_i8x16_s
# CHECK: i16x8.extend_high_i8x16_s # encoding: [0xfd,0x88,0x01]
i16x8.extend_high_i8x16_s
# CHECK: i16x8.widen_low_i8x16_u # encoding: [0xfd,0x89,0x01]
i16x8.widen_low_i8x16_u
# CHECK: i16x8.extend_low_i8x16_u # encoding: [0xfd,0x89,0x01]
i16x8.extend_low_i8x16_u
# CHECK: i16x8.widen_high_i8x16_u # encoding: [0xfd,0x8a,0x01]
i16x8.widen_high_i8x16_u
# CHECK: i16x8.extend_high_i8x16_u # encoding: [0xfd,0x8a,0x01]
i16x8.extend_high_i8x16_u
# CHECK: i16x8.shl # encoding: [0xfd,0x8b,0x01]
i16x8.shl
@ -415,20 +458,23 @@ main:
# CHECK: i16x8.add # encoding: [0xfd,0x8e,0x01]
i16x8.add
# CHECK: i16x8.add_saturate_s # encoding: [0xfd,0x8f,0x01]
i16x8.add_saturate_s
# CHECK: i16x8.add_sat_s # encoding: [0xfd,0x8f,0x01]
i16x8.add_sat_s
# CHECK: i16x8.add_saturate_u # encoding: [0xfd,0x90,0x01]
i16x8.add_saturate_u
# CHECK: i16x8.add_sat_u # encoding: [0xfd,0x90,0x01]
i16x8.add_sat_u
# CHECK: i16x8.sub # encoding: [0xfd,0x91,0x01]
i16x8.sub
# CHECK: i16x8.sub_saturate_s # encoding: [0xfd,0x92,0x01]
i16x8.sub_saturate_s
# CHECK: i16x8.sub_sat_s # encoding: [0xfd,0x92,0x01]
i16x8.sub_sat_s
# CHECK: i16x8.sub_saturate_u # encoding: [0xfd,0x93,0x01]
i16x8.sub_saturate_u
# CHECK: i16x8.sub_sat_u # encoding: [0xfd,0x93,0x01]
i16x8.sub_sat_u
# CHECK: f64x2.nearest # encoding: [0xfd,0x94,0x01]
f64x2.nearest
# CHECK: i16x8.mul # encoding: [0xfd,0x95,0x01]
i16x8.mul
@ -445,11 +491,22 @@ main:
# CHECK: i16x8.max_u # encoding: [0xfd,0x99,0x01]
i16x8.max_u
# 0x9a unused
# CHECK: i16x8.avgr_u # encoding: [0xfd,0x9b,0x01]
i16x8.avgr_u
# CHECK: i16x8.q15mulr_sat_s # encoding: [0xfd,0x9c,0x01]
i16x8.q15mulr_sat_s
# CHECK: i16x8.extmul_low_i8x16_s # encoding: [0xfd,0x9c,0x01]
i16x8.extmul_low_i8x16_s
# CHECK: i16x8.extmul_high_i8x16_s # encoding: [0xfd,0x9d,0x01]
i16x8.extmul_high_i8x16_s
# CHECK: i16x8.extmul_low_i8x16_u # encoding: [0xfd,0x9e,0x01]
i16x8.extmul_low_i8x16_u
# CHECK: i16x8.extmul_high_i8x16_u # encoding: [0xfd,0x9f,0x01]
i16x8.extmul_high_i8x16_u
# CHECK: i32x4.abs # encoding: [0xfd,0xa0,0x01]
i32x4.abs
@ -457,8 +514,7 @@ main:
# CHECK: i32x4.neg # encoding: [0xfd,0xa1,0x01]
i32x4.neg
# CHECK: i32x4.any_true # encoding: [0xfd,0xa2,0x01]
i32x4.any_true
# 0xa2 unused
# CHECK: i32x4.all_true # encoding: [0xfd,0xa3,0x01]
i32x4.all_true
@ -466,17 +522,21 @@ main:
# CHECK: i32x4.bitmask # encoding: [0xfd,0xa4,0x01]
i32x4.bitmask
# CHECK: i32x4.widen_low_i16x8_s # encoding: [0xfd,0xa7,0x01]
i32x4.widen_low_i16x8_s
# 0xa5 unused
# CHECK: i32x4.widen_high_i16x8_s # encoding: [0xfd,0xa8,0x01]
i32x4.widen_high_i16x8_s
# 0xa6 unused
# CHECK: i32x4.widen_low_i16x8_u # encoding: [0xfd,0xa9,0x01]
i32x4.widen_low_i16x8_u
# CHECK: i32x4.extend_low_i16x8_s # encoding: [0xfd,0xa7,0x01]
i32x4.extend_low_i16x8_s
# CHECK: i32x4.widen_high_i16x8_u # encoding: [0xfd,0xaa,0x01]
i32x4.widen_high_i16x8_u
# CHECK: i32x4.extend_high_i16x8_s # encoding: [0xfd,0xa8,0x01]
i32x4.extend_high_i16x8_s
# CHECK: i32x4.extend_low_i16x8_u # encoding: [0xfd,0xa9,0x01]
i32x4.extend_low_i16x8_u
# CHECK: i32x4.extend_high_i16x8_u # encoding: [0xfd,0xaa,0x01]
i32x4.extend_high_i16x8_u
# CHECK: i32x4.shl # encoding: [0xfd,0xab,0x01]
i32x4.shl
@ -490,9 +550,19 @@ main:
# CHECK: i32x4.add # encoding: [0xfd,0xae,0x01]
i32x4.add
# 0xaf unused
# 0xb0 unused
# CHECK: i32x4.sub # encoding: [0xfd,0xb1,0x01]
i32x4.sub
# 0xb2 unused
# 0xb3 unused
# 0xb4 unused
# CHECK: i32x4.mul # encoding: [0xfd,0xb5,0x01]
i32x4.mul
@ -511,14 +581,26 @@ main:
# CHECK: i32x4.dot_i16x8_s # encoding: [0xfd,0xba,0x01]
i32x4.dot_i16x8_s
# CHECK: i64x2.eq # encoding: [0xfd,0xc0,0x01]
i64x2.eq
# 0xbb unused
# CHECK: i32x4.extmul_low_i16x8_s # encoding: [0xfd,0xbc,0x01]
i32x4.extmul_low_i16x8_s
# CHECK: i32x4.extmul_high_i16x8_s # encoding: [0xfd,0xbd,0x01]
i32x4.extmul_high_i16x8_s
# CHECK: i32x4.extmul_low_i16x8_u # encoding: [0xfd,0xbe,0x01]
i32x4.extmul_low_i16x8_u
# CHECK: i32x4.extmul_high_i16x8_u # encoding: [0xfd,0xbf,0x01]
i32x4.extmul_high_i16x8_u
# TODO: i64x2.abs # encoding: [0xfd,0xc0,0x01]
# CHECK: i64x2.neg # encoding: [0xfd,0xc1,0x01]
i64x2.neg
# CHECK: i64x2.any_true # encoding: [0xfd,0xc2,0x01]
i64x2.any_true
# 0xc2 unused
# CHECK: i64x2.all_true # encoding: [0xfd,0xc3,0x01]
i64x2.all_true
@ -526,17 +608,21 @@ main:
# CHECK: i64x2.bitmask # encoding: [0xfd,0xc4,0x01]
i64x2.bitmask
# CHECK: i64x2.widen_low_i32x4_s # encoding: [0xfd,0xc7,0x01]
i64x2.widen_low_i32x4_s
# 0xc5 unused
# CHECK: i64x2.widen_high_i32x4_s # encoding: [0xfd,0xc8,0x01]
i64x2.widen_high_i32x4_s
# 0xc6 unused
# CHECK: i64x2.widen_low_i32x4_u # encoding: [0xfd,0xc9,0x01]
i64x2.widen_low_i32x4_u
# CHECK: i64x2.extend_low_i32x4_s # encoding: [0xfd,0xc7,0x01]
i64x2.extend_low_i32x4_s
# CHECK: i64x2.widen_high_i32x4_u # encoding: [0xfd,0xca,0x01]
i64x2.widen_high_i32x4_u
# CHECK: i64x2.extend_high_i32x4_s # encoding: [0xfd,0xc8,0x01]
i64x2.extend_high_i32x4_s
# CHECK: i64x2.extend_low_i32x4_u # encoding: [0xfd,0xc9,0x01]
i64x2.extend_low_i32x4_u
# CHECK: i64x2.extend_high_i32x4_u # encoding: [0xfd,0xca,0x01]
i64x2.extend_high_i32x4_u
# CHECK: i64x2.shl # encoding: [0xfd,0xcb,0x01]
i64x2.shl
@ -550,35 +636,45 @@ main:
# CHECK: i64x2.add # encoding: [0xfd,0xce,0x01]
i64x2.add
# 0xcf unused
# 0xd0 unused
# CHECK: i64x2.sub # encoding: [0xfd,0xd1,0x01]
i64x2.sub
# 0xd2 unused
# 0xd3 unused
# 0xd4 unused
# CHECK: i64x2.mul # encoding: [0xfd,0xd5,0x01]
i64x2.mul
# CHECK: f32x4.ceil # encoding: [0xfd,0xd8,0x01]
f32x4.ceil
# TODO: i64x2.eq # encoding: [0xfd,0xd6,0x01]
# CHECK: f32x4.floor # encoding: [0xfd,0xd9,0x01]
f32x4.floor
# TODO: i64x2.ne # encoding: [0xfd,0xd7,0x01]
# CHECK: f32x4.trunc # encoding: [0xfd,0xda,0x01]
f32x4.trunc
# TODO: i64x2.lt_s # encoding: [0xfd,0xd8,0x01]
# CHECK: f32x4.nearest # encoding: [0xfd,0xdb,0x01]
f32x4.nearest
# TODO: i64x2.gt_s # encoding: [0xfd,0xd9,0x01]
# CHECK: f64x2.ceil # encoding: [0xfd,0xdc,0x01]
f64x2.ceil
# TODO: i64x2.le_s # encoding: [0xfd,0xda,0x01]
# CHECK: f64x2.floor # encoding: [0xfd,0xdd,0x01]
f64x2.floor
# TODO: i64x2.ge_s # encoding: [0xfd,0xdb,0x01]
# CHECK: f64x2.trunc # encoding: [0xfd,0xde,0x01]
f64x2.trunc
# CHECK: i64x2.extmul_low_i32x4_s # encoding: [0xfd,0xdc,0x01]
i64x2.extmul_low_i32x4_s
# CHECK: f64x2.nearest # encoding: [0xfd,0xdf,0x01]
f64x2.nearest
# CHECK: i64x2.extmul_high_i32x4_s # encoding: [0xfd,0xdd,0x01]
i64x2.extmul_high_i32x4_s
# CHECK: i64x2.extmul_low_i32x4_u # encoding: [0xfd,0xde,0x01]
i64x2.extmul_low_i32x4_u
# CHECK: i64x2.extmul_high_i32x4_u # encoding: [0xfd,0xdf,0x01]
i64x2.extmul_high_i32x4_u
# CHECK: f32x4.abs # encoding: [0xfd,0xe0,0x01]
f32x4.abs
@ -586,6 +682,8 @@ main:
# CHECK: f32x4.neg # encoding: [0xfd,0xe1,0x01]
f32x4.neg
# 0xe2 unused
# CHECK: f32x4.sqrt # encoding: [0xfd,0xe3,0x01]
f32x4.sqrt
@ -619,6 +717,8 @@ main:
# CHECK: f64x2.neg # encoding: [0xfd,0xed,0x01]
f64x2.neg
# 0xee unused
# CHECK: f64x2.sqrt # encoding: [0xfd,0xef,0x01]
f64x2.sqrt
@ -658,76 +758,16 @@ main:
# CHECK: f32x4.convert_i32x4_u # encoding: [0xfd,0xfb,0x01]
f32x4.convert_i32x4_u
# CHECK: v128.load32_zero 32 # encoding: [0xfd,0xfc,0x01,0x02,0x20]
v128.load32_zero 32
# CHECK: v128.load64_zero 32 # encoding: [0xfd,0xfd,0x01,0x03,0x20]
v128.load64_zero 32
# CHECK: i16x8.extmul_low_i8x16_s # encoding: [0xfd,0x9a,0x01]
i16x8.extmul_low_i8x16_s
# CHECK: i16x8.extmul_high_i8x16_s # encoding: [0xfd,0x9d,0x01]
i16x8.extmul_high_i8x16_s
# CHECK: i16x8.extmul_low_i8x16_u # encoding: [0xfd,0x9e,0x01]
i16x8.extmul_low_i8x16_u
# CHECK: i16x8.extmul_high_i8x16_u # encoding: [0xfd,0x9f,0x01]
i16x8.extmul_high_i8x16_u
# CHECK: i32x4.extmul_low_i16x8_s # encoding: [0xfd,0xbb,0x01]
i32x4.extmul_low_i16x8_s
# CHECK: i32x4.extmul_high_i16x8_s # encoding: [0xfd,0xbd,0x01]
i32x4.extmul_high_i16x8_s
# CHECK: i32x4.extmul_low_i16x8_u # encoding: [0xfd,0xbe,0x01]
i32x4.extmul_low_i16x8_u
# CHECK: i32x4.extmul_high_i16x8_u # encoding: [0xfd,0xbf,0x01]
i32x4.extmul_high_i16x8_u
# CHECK: i64x2.extmul_low_i32x4_s # encoding: [0xfd,0xd2,0x01]
i64x2.extmul_low_i32x4_s
# CHECK: i64x2.extmul_high_i32x4_s # encoding: [0xfd,0xd3,0x01]
i64x2.extmul_high_i32x4_s
# CHECK: i64x2.extmul_low_i32x4_u # encoding: [0xfd,0xd6,0x01]
i64x2.extmul_low_i32x4_u
# CHECK: i64x2.extmul_high_i32x4_u # encoding: [0xfd,0xd7,0x01]
i64x2.extmul_high_i32x4_u
# CHECK: i16x8.extadd_pairwise_i8x16_s # encoding: [0xfd,0xc2,0x01]
i16x8.extadd_pairwise_i8x16_s
# CHECK: i16x8.extadd_pairwise_i8x16_u # encoding: [0xfd,0xc3,0x01]
i16x8.extadd_pairwise_i8x16_u
# CHECK: i32x4.extadd_pairwise_i16x8_s # encoding: [0xfd,0xa5,0x01]
i32x4.extadd_pairwise_i16x8_s
# CHECK: i32x4.extadd_pairwise_i16x8_u # encoding: [0xfd,0xa6,0x01]
i32x4.extadd_pairwise_i16x8_u
# CHECK: f64x2.convert_low_i32x4_s # encoding: [0xfd,0x53]
f64x2.convert_low_i32x4_s
# CHECK: f64x2.convert_low_i32x4_u # encoding: [0xfd,0x54]
f64x2.convert_low_i32x4_u
# CHECK: i32x4.trunc_sat_zero_f64x2_s # encoding: [0xfd,0x55]
# CHECK: i32x4.trunc_sat_zero_f64x2_s # encoding: [0xfd,0xfc,0x01]
i32x4.trunc_sat_zero_f64x2_s
# CHECK: i32x4.trunc_sat_zero_f64x2_u # encoding: [0xfd,0x56]
# CHECK: i32x4.trunc_sat_zero_f64x2_u # encoding: [0xfd,0xfd,0x01]
i32x4.trunc_sat_zero_f64x2_u
# CHECK: f32x4.demote_zero_f64x2 # encoding: [0xfd,0x57]
f32x4.demote_zero_f64x2
# CHECK: f64x2.convert_low_i32x4_s # encoding: [0xfd,0xfe,0x01]
f64x2.convert_low_i32x4_s
# CHECK: f64x2.promote_low_f32x4 # encoding: [0xfd,0x69]
f64x2.promote_low_f32x4
# CHECK: f64x2.convert_low_i32x4_u # encoding: [0xfd,0xff,0x01]
f64x2.convert_low_i32x4_u
end_function