forked from OSchip/llvm-project
[ARM][MVE] Add fixed point vector conversion intrinsics
Summary: This patch implements the following Arm ACLE MVE intrinsics: * vcvtq_n_* * vcvtq_m_n_* * vcvtq_x_n_* and two corresponding LLVM IR intrinsics: * int_arm_mve_vcvt_fix (vcvtq_n_*) * int_arm_mve_vcvt_fix_predicated (vcvtq_m_n_*, vcvtq_x_n_*) Reviewers: simon_tatham, ostannard, MarkMurrayARM, dmgreen Reviewed By: MarkMurrayARM Subscribers: kristof.beyls, hiraditya, cfe-commits, llvm-commits Tags: #clang, #llvm Differential Revision: https://reviews.llvm.org/D74134
This commit is contained in:
parent
2724ada8d2
commit
2694cc3dca
|
@ -1098,3 +1098,29 @@ let params = T.All in {
|
|||
def vsetq_lane: Intrinsic<Vector, (args unpromoted<Scalar>:$e, Vector:$v, imm_lane:$lane),
|
||||
(ielt_var $v, $e, $lane)>;
|
||||
}
|
||||
|
||||
foreach desttype = !listconcat(T.Int16, T.Int32, T.Float) in {
|
||||
defvar is_dest_float = !eq(desttype.kind, "f");
|
||||
defvar is_dest_unsigned = !eq(desttype.kind, "u");
|
||||
// First immediate operand of the LLVM intrinsic
|
||||
defvar unsigned_flag = !if(is_dest_float, (unsignedflag Scalar),
|
||||
!if(is_dest_unsigned, V.True, V.False));
|
||||
// For float->int conversions _n and _x_n intrinsics are not polymorphic
|
||||
// because the signedness of the destination type cannot be inferred.
|
||||
defvar pnt_nx = !if(is_dest_float, PNT_2Type, PNT_None);
|
||||
|
||||
let params = !if(is_dest_float,
|
||||
!if(!eq(desttype.size, 16), T.Int16, T.Int32),
|
||||
!if(!eq(desttype.size, 16), [f16], [f32])) in {
|
||||
let pnt = pnt_nx in
|
||||
def "vcvtq_n_"#desttype : Intrinsic<VecOf<desttype>,
|
||||
(args Vector:$a, imm_1toN:$b),
|
||||
(IRInt<"vcvt_fix", [VecOf<desttype>, Vector]> unsigned_flag, $a, $b)>;
|
||||
|
||||
defm "vcvtq" : IntrinsicMX<VecOf<desttype>,
|
||||
(args Vector:$a, imm_1toN:$b, Predicate:$p),
|
||||
(IRInt<"vcvt_fix_predicated", [VecOf<desttype>, Vector, Predicate]>
|
||||
unsigned_flag, $inactive, $a, $b, $p),
|
||||
1, "_n_"#desttype, PNT_2Type, pnt_nx>;
|
||||
}
|
||||
}
|
||||
|
|
|
@ -1,5 +1,6 @@
|
|||
// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
|
||||
// RUN: %clang_cc1 -triple thumbv8.1m.main-arm-none-eabi -target-feature +mve.fp -mfloat-abi hard -fallow-half-arguments-and-returns -O0 -disable-O0-optnone -S -emit-llvm -o - %s | opt -S -mem2reg | FileCheck %s
|
||||
// RUN: %clang_cc1 -DPOLYMORPHIC -triple thumbv8.1m.main-arm-none-eabi -target-feature +mve.fp -mfloat-abi hard -fallow-half-arguments-and-returns -O0 -disable-O0-optnone -S -emit-llvm -o - %s | opt -S -mem2reg | FileCheck %s
|
||||
|
||||
#include <arm_mve.h>
|
||||
|
||||
|
@ -24,3 +25,339 @@ float16x8_t test_vcvttq_m_f16_f32(float16x8_t a, float32x4_t b, mve_pred16_t p)
|
|||
{
|
||||
return vcvttq_m_f16_f32(a, b, p);
|
||||
}
|
||||
|
||||
// CHECK-LABEL: @test_vcvtq_n_f16_s16(
|
||||
// CHECK-NEXT: entry:
|
||||
// CHECK-NEXT: [[TMP0:%.*]] = call <8 x half> @llvm.arm.mve.vcvt.fix.v8f16.v8i16(i32 0, <8 x i16> [[A:%.*]], i32 1)
|
||||
// CHECK-NEXT: ret <8 x half> [[TMP0]]
|
||||
//
|
||||
float16x8_t test_vcvtq_n_f16_s16(int16x8_t a)
|
||||
{
|
||||
#ifdef POLYMORPHIC
|
||||
return vcvtq_n(a, 1);
|
||||
#else
|
||||
return vcvtq_n_f16_s16(a, 1);
|
||||
#endif
|
||||
}
|
||||
|
||||
// CHECK-LABEL: @test_vcvtq_n_f16_u16(
|
||||
// CHECK-NEXT: entry:
|
||||
// CHECK-NEXT: [[TMP0:%.*]] = call <8 x half> @llvm.arm.mve.vcvt.fix.v8f16.v8i16(i32 1, <8 x i16> [[A:%.*]], i32 2)
|
||||
// CHECK-NEXT: ret <8 x half> [[TMP0]]
|
||||
//
|
||||
float16x8_t test_vcvtq_n_f16_u16(uint16x8_t a)
|
||||
{
|
||||
#ifdef POLYMORPHIC
|
||||
return vcvtq_n(a, 2);
|
||||
#else
|
||||
return vcvtq_n_f16_u16(a, 2);
|
||||
#endif
|
||||
}
|
||||
|
||||
// CHECK-LABEL: @test_vcvtq_n_f32_s32(
|
||||
// CHECK-NEXT: entry:
|
||||
// CHECK-NEXT: [[TMP0:%.*]] = call <4 x float> @llvm.arm.mve.vcvt.fix.v4f32.v4i32(i32 0, <4 x i32> [[A:%.*]], i32 3)
|
||||
// CHECK-NEXT: ret <4 x float> [[TMP0]]
|
||||
//
|
||||
float32x4_t test_vcvtq_n_f32_s32(int32x4_t a)
|
||||
{
|
||||
#ifdef POLYMORPHIC
|
||||
return vcvtq_n(a, 3);
|
||||
#else
|
||||
return vcvtq_n_f32_s32(a, 3);
|
||||
#endif
|
||||
}
|
||||
|
||||
// CHECK-LABEL: @test_vcvtq_n_f32_u32(
|
||||
// CHECK-NEXT: entry:
|
||||
// CHECK-NEXT: [[TMP0:%.*]] = call <4 x float> @llvm.arm.mve.vcvt.fix.v4f32.v4i32(i32 1, <4 x i32> [[A:%.*]], i32 32)
|
||||
// CHECK-NEXT: ret <4 x float> [[TMP0]]
|
||||
//
|
||||
float32x4_t test_vcvtq_n_f32_u32(uint32x4_t a)
|
||||
{
|
||||
#ifdef POLYMORPHIC
|
||||
return vcvtq_n(a, 32);
|
||||
#else
|
||||
return vcvtq_n_f32_u32(a, 32);
|
||||
#endif
|
||||
}
|
||||
|
||||
// CHECK-LABEL: @test_vcvtq_n_s16_f16(
|
||||
// CHECK-NEXT: entry:
|
||||
// CHECK-NEXT: [[TMP0:%.*]] = call <8 x i16> @llvm.arm.mve.vcvt.fix.v8i16.v8f16(i32 0, <8 x half> [[A:%.*]], i32 1)
|
||||
// CHECK-NEXT: ret <8 x i16> [[TMP0]]
|
||||
//
|
||||
int16x8_t test_vcvtq_n_s16_f16(float16x8_t a)
|
||||
{
|
||||
return vcvtq_n_s16_f16(a, 1);
|
||||
}
|
||||
|
||||
// CHECK-LABEL: @test_vcvtq_n_u16_f16(
|
||||
// CHECK-NEXT: entry:
|
||||
// CHECK-NEXT: [[TMP0:%.*]] = call <8 x i16> @llvm.arm.mve.vcvt.fix.v8i16.v8f16(i32 1, <8 x half> [[A:%.*]], i32 2)
|
||||
// CHECK-NEXT: ret <8 x i16> [[TMP0]]
|
||||
//
|
||||
uint16x8_t test_vcvtq_n_u16_f16(float16x8_t a)
|
||||
{
|
||||
return vcvtq_n_u16_f16(a, 2);
|
||||
}
|
||||
|
||||
// CHECK-LABEL: @test_vcvtq_n_s32_f32(
|
||||
// CHECK-NEXT: entry:
|
||||
// CHECK-NEXT: [[TMP0:%.*]] = call <4 x i32> @llvm.arm.mve.vcvt.fix.v4i32.v4f32(i32 0, <4 x float> [[A:%.*]], i32 3)
|
||||
// CHECK-NEXT: ret <4 x i32> [[TMP0]]
|
||||
//
|
||||
int32x4_t test_vcvtq_n_s32_f32(float32x4_t a)
|
||||
{
|
||||
return vcvtq_n_s32_f32(a, 3);
|
||||
}
|
||||
|
||||
// CHECK-LABEL: @test_vcvtq_n_u32_f32(
|
||||
// CHECK-NEXT: entry:
|
||||
// CHECK-NEXT: [[TMP0:%.*]] = call <4 x i32> @llvm.arm.mve.vcvt.fix.v4i32.v4f32(i32 1, <4 x float> [[A:%.*]], i32 32)
|
||||
// CHECK-NEXT: ret <4 x i32> [[TMP0]]
|
||||
//
|
||||
uint32x4_t test_vcvtq_n_u32_f32(float32x4_t a)
|
||||
{
|
||||
return vcvtq_n_u32_f32(a, 32);
|
||||
}
|
||||
|
||||
// CHECK-LABEL: @test_vcvtq_m_n_f16_s16(
|
||||
// CHECK-NEXT: entry:
|
||||
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
|
||||
// CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
|
||||
// CHECK-NEXT: [[TMP2:%.*]] = call <8 x half> @llvm.arm.mve.vcvt.fix.predicated.v8f16.v8i16.v8i1(i32 0, <8 x half> [[INACTIVE:%.*]], <8 x i16> [[A:%.*]], i32 1, <8 x i1> [[TMP1]])
|
||||
// CHECK-NEXT: ret <8 x half> [[TMP2]]
|
||||
//
|
||||
float16x8_t test_vcvtq_m_n_f16_s16(float16x8_t inactive, int16x8_t a, mve_pred16_t p)
|
||||
{
|
||||
#ifdef POLYMORPHIC
|
||||
return vcvtq_m_n(inactive, a, 1, p);
|
||||
#else
|
||||
return vcvtq_m_n_f16_s16(inactive, a, 1, p);
|
||||
#endif
|
||||
}
|
||||
|
||||
// CHECK-LABEL: @test_vcvtq_m_n_f16_u16(
|
||||
// CHECK-NEXT: entry:
|
||||
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
|
||||
// CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
|
||||
// CHECK-NEXT: [[TMP2:%.*]] = call <8 x half> @llvm.arm.mve.vcvt.fix.predicated.v8f16.v8i16.v8i1(i32 1, <8 x half> [[INACTIVE:%.*]], <8 x i16> [[A:%.*]], i32 2, <8 x i1> [[TMP1]])
|
||||
// CHECK-NEXT: ret <8 x half> [[TMP2]]
|
||||
//
|
||||
float16x8_t test_vcvtq_m_n_f16_u16(float16x8_t inactive, uint16x8_t a, mve_pred16_t p)
|
||||
{
|
||||
#ifdef POLYMORPHIC
|
||||
return vcvtq_m_n(inactive, a, 2, p);
|
||||
#else
|
||||
return vcvtq_m_n_f16_u16(inactive, a, 2, p);
|
||||
#endif
|
||||
}
|
||||
|
||||
// CHECK-LABEL: @test_vcvtq_m_n_f32_s32(
|
||||
// CHECK-NEXT: entry:
|
||||
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
|
||||
// CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
|
||||
// CHECK-NEXT: [[TMP2:%.*]] = call <4 x float> @llvm.arm.mve.vcvt.fix.predicated.v4f32.v4i32.v4i1(i32 0, <4 x float> [[INACTIVE:%.*]], <4 x i32> [[A:%.*]], i32 3, <4 x i1> [[TMP1]])
|
||||
// CHECK-NEXT: ret <4 x float> [[TMP2]]
|
||||
//
|
||||
float32x4_t test_vcvtq_m_n_f32_s32(float32x4_t inactive, int32x4_t a, mve_pred16_t p)
|
||||
{
|
||||
#ifdef POLYMORPHIC
|
||||
return vcvtq_m_n(inactive, a, 3, p);
|
||||
#else
|
||||
return vcvtq_m_n_f32_s32(inactive, a, 3, p);
|
||||
#endif
|
||||
}
|
||||
|
||||
// CHECK-LABEL: @test_vcvtq_m_n_f32_u32(
|
||||
// CHECK-NEXT: entry:
|
||||
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
|
||||
// CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
|
||||
// CHECK-NEXT: [[TMP2:%.*]] = call <4 x float> @llvm.arm.mve.vcvt.fix.predicated.v4f32.v4i32.v4i1(i32 1, <4 x float> [[INACTIVE:%.*]], <4 x i32> [[A:%.*]], i32 32, <4 x i1> [[TMP1]])
|
||||
// CHECK-NEXT: ret <4 x float> [[TMP2]]
|
||||
//
|
||||
float32x4_t test_vcvtq_m_n_f32_u32(float32x4_t inactive, uint32x4_t a, mve_pred16_t p)
|
||||
{
|
||||
#ifdef POLYMORPHIC
|
||||
return vcvtq_m_n(inactive, a, 32, p);
|
||||
#else
|
||||
return vcvtq_m_n_f32_u32(inactive, a, 32, p);
|
||||
#endif
|
||||
}
|
||||
|
||||
// CHECK-LABEL: @test_vcvtq_m_n_s16_f16(
|
||||
// CHECK-NEXT: entry:
|
||||
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
|
||||
// CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
|
||||
// CHECK-NEXT: [[TMP2:%.*]] = call <8 x i16> @llvm.arm.mve.vcvt.fix.predicated.v8i16.v8f16.v8i1(i32 0, <8 x i16> [[INACTIVE:%.*]], <8 x half> [[A:%.*]], i32 1, <8 x i1> [[TMP1]])
|
||||
// CHECK-NEXT: ret <8 x i16> [[TMP2]]
|
||||
//
|
||||
int16x8_t test_vcvtq_m_n_s16_f16(int16x8_t inactive, float16x8_t a, mve_pred16_t p)
|
||||
{
|
||||
#ifdef POLYMORPHIC
|
||||
return vcvtq_m_n(inactive, a, 1, p);
|
||||
#else
|
||||
return vcvtq_m_n_s16_f16(inactive, a, 1, p);
|
||||
#endif
|
||||
}
|
||||
|
||||
// CHECK-LABEL: @test_vcvtq_m_n_u16_f16(
|
||||
// CHECK-NEXT: entry:
|
||||
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
|
||||
// CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
|
||||
// CHECK-NEXT: [[TMP2:%.*]] = call <8 x i16> @llvm.arm.mve.vcvt.fix.predicated.v8i16.v8f16.v8i1(i32 1, <8 x i16> [[INACTIVE:%.*]], <8 x half> [[A:%.*]], i32 2, <8 x i1> [[TMP1]])
|
||||
// CHECK-NEXT: ret <8 x i16> [[TMP2]]
|
||||
//
|
||||
uint16x8_t test_vcvtq_m_n_u16_f16(uint16x8_t inactive, float16x8_t a, mve_pred16_t p)
|
||||
{
|
||||
#ifdef POLYMORPHIC
|
||||
return vcvtq_m_n(inactive, a, 2, p);
|
||||
#else
|
||||
return vcvtq_m_n_u16_f16(inactive, a, 2, p);
|
||||
#endif
|
||||
}
|
||||
|
||||
// CHECK-LABEL: @test_vcvtq_m_n_s32_f32(
|
||||
// CHECK-NEXT: entry:
|
||||
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
|
||||
// CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
|
||||
// CHECK-NEXT: [[TMP2:%.*]] = call <4 x i32> @llvm.arm.mve.vcvt.fix.predicated.v4i32.v4f32.v4i1(i32 0, <4 x i32> [[INACTIVE:%.*]], <4 x float> [[A:%.*]], i32 3, <4 x i1> [[TMP1]])
|
||||
// CHECK-NEXT: ret <4 x i32> [[TMP2]]
|
||||
//
|
||||
int32x4_t test_vcvtq_m_n_s32_f32(int32x4_t inactive, float32x4_t a, mve_pred16_t p)
|
||||
{
|
||||
#ifdef POLYMORPHIC
|
||||
return vcvtq_m_n(inactive, a, 3, p);
|
||||
#else
|
||||
return vcvtq_m_n_s32_f32(inactive, a, 3, p);
|
||||
#endif
|
||||
}
|
||||
|
||||
// CHECK-LABEL: @test_vcvtq_m_n_u32_f32(
|
||||
// CHECK-NEXT: entry:
|
||||
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
|
||||
// CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
|
||||
// CHECK-NEXT: [[TMP2:%.*]] = call <4 x i32> @llvm.arm.mve.vcvt.fix.predicated.v4i32.v4f32.v4i1(i32 1, <4 x i32> [[INACTIVE:%.*]], <4 x float> [[A:%.*]], i32 32, <4 x i1> [[TMP1]])
|
||||
// CHECK-NEXT: ret <4 x i32> [[TMP2]]
|
||||
//
|
||||
uint32x4_t test_vcvtq_m_n_u32_f32(uint32x4_t inactive, float32x4_t a, mve_pred16_t p)
|
||||
{
|
||||
#ifdef POLYMORPHIC
|
||||
return vcvtq_m_n(inactive, a, 32, p);
|
||||
#else
|
||||
return vcvtq_m_n_u32_f32(inactive, a, 32, p);
|
||||
#endif
|
||||
}
|
||||
|
||||
// CHECK-LABEL: @test_vcvtq_x_n_f16_s16(
|
||||
// CHECK-NEXT: entry:
|
||||
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
|
||||
// CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
|
||||
// CHECK-NEXT: [[TMP2:%.*]] = call <8 x half> @llvm.arm.mve.vcvt.fix.predicated.v8f16.v8i16.v8i1(i32 0, <8 x half> undef, <8 x i16> [[A:%.*]], i32 1, <8 x i1> [[TMP1]])
|
||||
// CHECK-NEXT: ret <8 x half> [[TMP2]]
|
||||
//
|
||||
float16x8_t test_vcvtq_x_n_f16_s16(int16x8_t a, mve_pred16_t p)
|
||||
{
|
||||
#ifdef POLYMORPHIC
|
||||
return vcvtq_x_n(a, 1, p);
|
||||
#else
|
||||
return vcvtq_x_n_f16_s16(a, 1, p);
|
||||
#endif
|
||||
}
|
||||
|
||||
// CHECK-LABEL: @test_vcvtq_x_n_f16_u16(
|
||||
// CHECK-NEXT: entry:
|
||||
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
|
||||
// CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
|
||||
// CHECK-NEXT: [[TMP2:%.*]] = call <8 x half> @llvm.arm.mve.vcvt.fix.predicated.v8f16.v8i16.v8i1(i32 1, <8 x half> undef, <8 x i16> [[A:%.*]], i32 2, <8 x i1> [[TMP1]])
|
||||
// CHECK-NEXT: ret <8 x half> [[TMP2]]
|
||||
//
|
||||
float16x8_t test_vcvtq_x_n_f16_u16(uint16x8_t a, mve_pred16_t p)
|
||||
{
|
||||
#ifdef POLYMORPHIC
|
||||
return vcvtq_x_n(a, 2, p);
|
||||
#else
|
||||
return vcvtq_x_n_f16_u16(a, 2, p);
|
||||
#endif
|
||||
}
|
||||
|
||||
// CHECK-LABEL: @test_vcvtq_x_n_f32_s32(
|
||||
// CHECK-NEXT: entry:
|
||||
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
|
||||
// CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
|
||||
// CHECK-NEXT: [[TMP2:%.*]] = call <4 x float> @llvm.arm.mve.vcvt.fix.predicated.v4f32.v4i32.v4i1(i32 0, <4 x float> undef, <4 x i32> [[A:%.*]], i32 3, <4 x i1> [[TMP1]])
|
||||
// CHECK-NEXT: ret <4 x float> [[TMP2]]
|
||||
//
|
||||
float32x4_t test_vcvtq_x_n_f32_s32(int32x4_t a, mve_pred16_t p)
|
||||
{
|
||||
#ifdef POLYMORPHIC
|
||||
return vcvtq_x_n(a, 3, p);
|
||||
#else
|
||||
return vcvtq_x_n_f32_s32(a, 3, p);
|
||||
#endif
|
||||
}
|
||||
|
||||
// CHECK-LABEL: @test_vcvtq_x_n_f32_u32(
|
||||
// CHECK-NEXT: entry:
|
||||
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
|
||||
// CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
|
||||
// CHECK-NEXT: [[TMP2:%.*]] = call <4 x float> @llvm.arm.mve.vcvt.fix.predicated.v4f32.v4i32.v4i1(i32 1, <4 x float> undef, <4 x i32> [[A:%.*]], i32 32, <4 x i1> [[TMP1]])
|
||||
// CHECK-NEXT: ret <4 x float> [[TMP2]]
|
||||
//
|
||||
float32x4_t test_vcvtq_x_n_f32_u32(uint32x4_t a, mve_pred16_t p)
|
||||
{
|
||||
#ifdef POLYMORPHIC
|
||||
return vcvtq_x_n(a, 32, p);
|
||||
#else
|
||||
return vcvtq_x_n_f32_u32(a, 32, p);
|
||||
#endif
|
||||
}
|
||||
|
||||
// CHECK-LABEL: @test_vcvtq_x_n_s16_f16(
|
||||
// CHECK-NEXT: entry:
|
||||
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
|
||||
// CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
|
||||
// CHECK-NEXT: [[TMP2:%.*]] = call <8 x i16> @llvm.arm.mve.vcvt.fix.predicated.v8i16.v8f16.v8i1(i32 0, <8 x i16> undef, <8 x half> [[A:%.*]], i32 1, <8 x i1> [[TMP1]])
|
||||
// CHECK-NEXT: ret <8 x i16> [[TMP2]]
|
||||
//
|
||||
int16x8_t test_vcvtq_x_n_s16_f16(float16x8_t a, mve_pred16_t p)
|
||||
{
|
||||
return vcvtq_x_n_s16_f16(a, 1, p);
|
||||
}
|
||||
|
||||
// CHECK-LABEL: @test_vcvtq_x_n_u16_f16(
|
||||
// CHECK-NEXT: entry:
|
||||
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
|
||||
// CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
|
||||
// CHECK-NEXT: [[TMP2:%.*]] = call <8 x i16> @llvm.arm.mve.vcvt.fix.predicated.v8i16.v8f16.v8i1(i32 1, <8 x i16> undef, <8 x half> [[A:%.*]], i32 2, <8 x i1> [[TMP1]])
|
||||
// CHECK-NEXT: ret <8 x i16> [[TMP2]]
|
||||
//
|
||||
uint16x8_t test_vcvtq_x_n_u16_f16(float16x8_t a, mve_pred16_t p)
|
||||
{
|
||||
return vcvtq_x_n_u16_f16(a, 2, p);
|
||||
}
|
||||
|
||||
// CHECK-LABEL: @test_vcvtq_x_n_s32_f32(
|
||||
// CHECK-NEXT: entry:
|
||||
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
|
||||
// CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
|
||||
// CHECK-NEXT: [[TMP2:%.*]] = call <4 x i32> @llvm.arm.mve.vcvt.fix.predicated.v4i32.v4f32.v4i1(i32 0, <4 x i32> undef, <4 x float> [[A:%.*]], i32 3, <4 x i1> [[TMP1]])
|
||||
// CHECK-NEXT: ret <4 x i32> [[TMP2]]
|
||||
//
|
||||
int32x4_t test_vcvtq_x_n_s32_f32(float32x4_t a, mve_pred16_t p)
|
||||
{
|
||||
return vcvtq_x_n_s32_f32(a, 3, p);
|
||||
}
|
||||
|
||||
// CHECK-LABEL: @test_vcvtq_x_n_u32_f32(
|
||||
// CHECK-NEXT: entry:
|
||||
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
|
||||
// CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
|
||||
// CHECK-NEXT: [[TMP2:%.*]] = call <4 x i32> @llvm.arm.mve.vcvt.fix.predicated.v4i32.v4f32.v4i1(i32 1, <4 x i32> undef, <4 x float> [[A:%.*]], i32 32, <4 x i1> [[TMP1]])
|
||||
// CHECK-NEXT: ret <4 x i32> [[TMP2]]
|
||||
//
|
||||
uint32x4_t test_vcvtq_x_n_u32_f32(float32x4_t a, mve_pred16_t p)
|
||||
{
|
||||
return vcvtq_x_n_u32_f32(a, 32, p);
|
||||
}
|
||||
|
|
|
@ -290,3 +290,20 @@ void test_vidup(void)
|
|||
vidupq_n_u16(0x12345678, 3); // expected-error {{argument should be a power of 2}}
|
||||
vidupq_n_u16(0x12345678, 7); // expected-error {{argument should be a power of 2}}
|
||||
}
|
||||
|
||||
void test_vcvtq(void)
|
||||
{
|
||||
uint16x8_t vec_u16;
|
||||
float16x8_t vec_f16;
|
||||
vcvtq_n_f16_u16(vec_u16, 0); // expected-error {{argument value 0 is outside the valid range [1, 16]}}
|
||||
vcvtq_n_f16_u16(vec_u16, 1);
|
||||
vcvtq_n_f16_u16(vec_u16, 16);
|
||||
vcvtq_n_f16_u16(vec_u16, 17); // expected-error {{argument value 17 is outside the valid range [1, 16]}}
|
||||
|
||||
int32x4_t vec_s32;
|
||||
float32x4_t vec_f32;
|
||||
vcvtq_n_s32_f32(vec_s32, -1); // expected-error {{argument value -1 is outside the valid range [1, 32]}}
|
||||
vcvtq_n_s32_f32(vec_s32, 1);
|
||||
vcvtq_n_s32_f32(vec_s32, 32);
|
||||
vcvtq_n_s32_f32(vec_s32, 33); // expected-error {{argument value 33 is outside the valid range [1, 32]}}
|
||||
}
|
||||
|
|
|
@ -1152,4 +1152,10 @@ defm int_arm_mve_vdwdup: MVEMXPredicated<
|
|||
[llvm_i32_ty /* base */, llvm_i32_ty /* limit */, llvm_i32_ty /* step */],
|
||||
LLVMMatchType<0>, llvm_anyvector_ty>;
|
||||
|
||||
// Flags:
|
||||
// * unsigned
|
||||
defm int_arm_mve_vcvt_fix: MVEMXPredicated<
|
||||
[llvm_anyvector_ty /* output */], [llvm_i32_ty],
|
||||
[llvm_anyvector_ty /* input vector */, llvm_i32_ty /* scale */],
|
||||
LLVMMatchType<0>, llvm_anyvector_ty>;
|
||||
} // end TargetPrefix
|
||||
|
|
|
@ -3438,10 +3438,10 @@ defm MVE_VABDf32 : MVE_VABD_fp_m<MVE_v4f32>;
|
|||
defm MVE_VABDf16 : MVE_VABD_fp_m<MVE_v8f16>;
|
||||
|
||||
class MVE_VCVT_fix<string suffix, bit fsi, bit U, bit op,
|
||||
Operand imm_operand_type, list<dag> pattern=[]>
|
||||
Operand imm_operand_type>
|
||||
: MVE_float<"vcvt", suffix,
|
||||
(outs MQPR:$Qd), (ins MQPR:$Qm, imm_operand_type:$imm6),
|
||||
"$Qd, $Qm, $imm6", vpred_r, "", pattern> {
|
||||
"$Qd, $Qm, $imm6", vpred_r, "", []> {
|
||||
bits<4> Qd;
|
||||
bits<6> imm6;
|
||||
|
||||
|
@ -3483,14 +3483,43 @@ class MVE_VCVT_fix_f16<string suffix, bit U, bit op>
|
|||
let Inst{20} = 0b1;
|
||||
}
|
||||
|
||||
def MVE_VCVTf16s16_fix : MVE_VCVT_fix_f16<"f16.s16", 0b0, 0b0>;
|
||||
def MVE_VCVTs16f16_fix : MVE_VCVT_fix_f16<"s16.f16", 0b0, 0b1>;
|
||||
def MVE_VCVTf16u16_fix : MVE_VCVT_fix_f16<"f16.u16", 0b1, 0b0>;
|
||||
def MVE_VCVTu16f16_fix : MVE_VCVT_fix_f16<"u16.f16", 0b1, 0b1>;
|
||||
def MVE_VCVTf32s32_fix : MVE_VCVT_fix_f32<"f32.s32", 0b0, 0b0>;
|
||||
def MVE_VCVTs32f32_fix : MVE_VCVT_fix_f32<"s32.f32", 0b0, 0b1>;
|
||||
def MVE_VCVTf32u32_fix : MVE_VCVT_fix_f32<"f32.u32", 0b1, 0b0>;
|
||||
def MVE_VCVTu32f32_fix : MVE_VCVT_fix_f32<"u32.f32", 0b1, 0b1>;
|
||||
multiclass MVE_VCVT_fix_patterns<Instruction Inst, bit U, MVEVectorVTInfo DestVTI,
|
||||
MVEVectorVTInfo SrcVTI> {
|
||||
let Predicates = [HasMVEFloat] in {
|
||||
def : Pat<(DestVTI.Vec (int_arm_mve_vcvt_fix
|
||||
(i32 U), (SrcVTI.Vec MQPR:$Qm), imm:$scale)),
|
||||
(DestVTI.Vec (Inst (SrcVTI.Vec MQPR:$Qm), imm:$scale))>;
|
||||
def : Pat<(DestVTI.Vec (int_arm_mve_vcvt_fix_predicated (i32 U),
|
||||
(DestVTI.Vec MQPR:$inactive),
|
||||
(SrcVTI.Vec MQPR:$Qm),
|
||||
imm:$scale,
|
||||
(DestVTI.Pred VCCR:$mask))),
|
||||
(DestVTI.Vec (Inst (SrcVTI.Vec MQPR:$Qm), imm:$scale,
|
||||
ARMVCCThen, (DestVTI.Pred VCCR:$mask),
|
||||
(DestVTI.Vec MQPR:$inactive)))>;
|
||||
}
|
||||
}
|
||||
|
||||
multiclass MVE_VCVT_fix_f32_m<bit U, bit op,
|
||||
MVEVectorVTInfo DestVTI, MVEVectorVTInfo SrcVTI> {
|
||||
def "" : MVE_VCVT_fix_f32<DestVTI.Suffix#"."#SrcVTI.Suffix, U, op>;
|
||||
defm : MVE_VCVT_fix_patterns<!cast<Instruction>(NAME), U, DestVTI, SrcVTI>;
|
||||
}
|
||||
|
||||
multiclass MVE_VCVT_fix_f16_m<bit U, bit op,
|
||||
MVEVectorVTInfo DestVTI, MVEVectorVTInfo SrcVTI> {
|
||||
def "" : MVE_VCVT_fix_f16<DestVTI.Suffix#"."#SrcVTI.Suffix, U, op>;
|
||||
defm : MVE_VCVT_fix_patterns<!cast<Instruction>(NAME), U, DestVTI, SrcVTI>;
|
||||
}
|
||||
|
||||
defm MVE_VCVTf16s16_fix : MVE_VCVT_fix_f16_m<0b0, 0b0, MVE_v8f16, MVE_v8s16>;
|
||||
defm MVE_VCVTs16f16_fix : MVE_VCVT_fix_f16_m<0b0, 0b1, MVE_v8s16, MVE_v8f16>;
|
||||
defm MVE_VCVTf16u16_fix : MVE_VCVT_fix_f16_m<0b1, 0b0, MVE_v8f16, MVE_v8u16>;
|
||||
defm MVE_VCVTu16f16_fix : MVE_VCVT_fix_f16_m<0b1, 0b1, MVE_v8u16, MVE_v8f16>;
|
||||
defm MVE_VCVTf32s32_fix : MVE_VCVT_fix_f32_m<0b0, 0b0, MVE_v4f32, MVE_v4s32>;
|
||||
defm MVE_VCVTs32f32_fix : MVE_VCVT_fix_f32_m<0b0, 0b1, MVE_v4s32, MVE_v4f32>;
|
||||
defm MVE_VCVTf32u32_fix : MVE_VCVT_fix_f32_m<0b1, 0b0, MVE_v4f32, MVE_v4u32>;
|
||||
defm MVE_VCVTu32f32_fix : MVE_VCVT_fix_f32_m<0b1, 0b1, MVE_v4u32, MVE_v4f32>;
|
||||
|
||||
class MVE_VCVT_fp_int_anpm<string suffix, bits<2> size, bit op, string anpm,
|
||||
bits<2> rm, list<dag> pattern=[]>
|
||||
|
|
|
@ -1,6 +1,21 @@
|
|||
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
|
||||
; RUN: llc -mtriple=thumbv8.1m.main -mattr=+mve.fp -verify-machineinstrs -o - %s | FileCheck %s
|
||||
|
||||
declare <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32)
|
||||
declare <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32)
|
||||
|
||||
declare <8 x half> @llvm.arm.mve.vcvt.narrow(<8 x half>, <4 x float>, i32)
|
||||
declare <8 x half> @llvm.arm.mve.vcvt.narrow.predicated(<8 x half>, <4 x float>, i32, <4 x i1>)
|
||||
|
||||
declare <8 x half> @llvm.arm.mve.vcvt.fix.v8f16.v8i16(i32, <8 x i16>, i32)
|
||||
declare <4 x float> @llvm.arm.mve.vcvt.fix.v4f32.v4i32(i32, <4 x i32>, i32)
|
||||
declare <8 x i16> @llvm.arm.mve.vcvt.fix.v8i16.v8f16(i32, <8 x half>, i32)
|
||||
declare <4 x i32> @llvm.arm.mve.vcvt.fix.v4i32.v4f32(i32, <4 x float>, i32)
|
||||
declare <8 x half> @llvm.arm.mve.vcvt.fix.predicated.v8f16.v8i16.v8i1(i32, <8 x half>, <8 x i16>, i32, <8 x i1>)
|
||||
declare <4 x float> @llvm.arm.mve.vcvt.fix.predicated.v4f32.v4i32.v4i1(i32, <4 x float>, <4 x i32>, i32, <4 x i1>)
|
||||
declare <8 x i16> @llvm.arm.mve.vcvt.fix.predicated.v8i16.v8f16.v8i1(i32, <8 x i16>, <8 x half>, i32, <8 x i1>)
|
||||
declare <4 x i32> @llvm.arm.mve.vcvt.fix.predicated.v4i32.v4f32.v4i1(i32, <4 x i32>, <4 x float>, i32, <4 x i1>)
|
||||
|
||||
define arm_aapcs_vfpcc <8 x half> @test_vcvttq_f16_f32(<8 x half> %a, <4 x float> %b) {
|
||||
; CHECK-LABEL: test_vcvttq_f16_f32:
|
||||
; CHECK: @ %bb.0: @ %entry
|
||||
|
@ -21,8 +36,6 @@ entry:
|
|||
ret <8 x half> %0
|
||||
}
|
||||
|
||||
declare <8 x half> @llvm.arm.mve.vcvt.narrow(<8 x half>, <4 x float>, i32)
|
||||
|
||||
define arm_aapcs_vfpcc <8 x half> @test_vcvttq_m_f16_f32(<8 x half> %a, <4 x float> %b, i16 zeroext %p) {
|
||||
; CHECK-LABEL: test_vcvttq_m_f16_f32:
|
||||
; CHECK: @ %bb.0: @ %entry
|
||||
|
@ -51,6 +64,306 @@ entry:
|
|||
ret <8 x half> %2
|
||||
}
|
||||
|
||||
declare <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32)
|
||||
define arm_aapcs_vfpcc <8 x half> @test_vcvtq_n_f16_s16(<8 x i16> %a) {
|
||||
; CHECK-LABEL: test_vcvtq_n_f16_s16:
|
||||
; CHECK: @ %bb.0: @ %entry
|
||||
; CHECK-NEXT: vcvt.f16.s16 q0, q0, #1
|
||||
; CHECK-NEXT: bx lr
|
||||
entry:
|
||||
%0 = call <8 x half> @llvm.arm.mve.vcvt.fix.v8f16.v8i16(i32 0, <8 x i16> %a, i32 1)
|
||||
ret <8 x half> %0
|
||||
}
|
||||
|
||||
declare <8 x half> @llvm.arm.mve.vcvt.narrow.predicated(<8 x half>, <4 x float>, i32, <4 x i1>)
|
||||
define arm_aapcs_vfpcc <8 x half> @test_vcvtq_n_f16_u16(<8 x i16> %a) {
|
||||
; CHECK-LABEL: test_vcvtq_n_f16_u16:
|
||||
; CHECK: @ %bb.0: @ %entry
|
||||
; CHECK-NEXT: vcvt.f16.u16 q0, q0, #2
|
||||
; CHECK-NEXT: bx lr
|
||||
entry:
|
||||
%0 = call <8 x half> @llvm.arm.mve.vcvt.fix.v8f16.v8i16(i32 1, <8 x i16> %a, i32 2)
|
||||
ret <8 x half> %0
|
||||
}
|
||||
|
||||
define arm_aapcs_vfpcc <4 x float> @test_vcvtq_n_f32_s32(<4 x i32> %a) {
|
||||
; CHECK-LABEL: test_vcvtq_n_f32_s32:
|
||||
; CHECK: @ %bb.0: @ %entry
|
||||
; CHECK-NEXT: vcvt.f32.s32 q0, q0, #3
|
||||
; CHECK-NEXT: bx lr
|
||||
entry:
|
||||
%0 = call <4 x float> @llvm.arm.mve.vcvt.fix.v4f32.v4i32(i32 0, <4 x i32> %a, i32 3)
|
||||
ret <4 x float> %0
|
||||
}
|
||||
|
||||
define arm_aapcs_vfpcc <4 x float> @test_vcvtq_n_f32_u32(<4 x i32> %a) {
|
||||
; CHECK-LABEL: test_vcvtq_n_f32_u32:
|
||||
; CHECK: @ %bb.0: @ %entry
|
||||
; CHECK-NEXT: vcvt.f32.u32 q0, q0, #32
|
||||
; CHECK-NEXT: bx lr
|
||||
entry:
|
||||
%0 = call <4 x float> @llvm.arm.mve.vcvt.fix.v4f32.v4i32(i32 1, <4 x i32> %a, i32 32)
|
||||
ret <4 x float> %0
|
||||
}
|
||||
|
||||
define arm_aapcs_vfpcc <8 x i16> @test_vcvtq_n_s16_f16(<8 x half> %a) {
|
||||
; CHECK-LABEL: test_vcvtq_n_s16_f16:
|
||||
; CHECK: @ %bb.0: @ %entry
|
||||
; CHECK-NEXT: vcvt.s16.f16 q0, q0, #1
|
||||
; CHECK-NEXT: bx lr
|
||||
entry:
|
||||
%0 = call <8 x i16> @llvm.arm.mve.vcvt.fix.v8i16.v8f16(i32 0, <8 x half> %a, i32 1)
|
||||
ret <8 x i16> %0
|
||||
}
|
||||
|
||||
define arm_aapcs_vfpcc <8 x i16> @test_vcvtq_n_u16_f16(<8 x half> %a) {
|
||||
; CHECK-LABEL: test_vcvtq_n_u16_f16:
|
||||
; CHECK: @ %bb.0: @ %entry
|
||||
; CHECK-NEXT: vcvt.u16.f16 q0, q0, #2
|
||||
; CHECK-NEXT: bx lr
|
||||
entry:
|
||||
%0 = call <8 x i16> @llvm.arm.mve.vcvt.fix.v8i16.v8f16(i32 1, <8 x half> %a, i32 2)
|
||||
ret <8 x i16> %0
|
||||
}
|
||||
|
||||
define arm_aapcs_vfpcc <4 x i32> @test_vcvtq_n_s32_f32(<4 x float> %a) {
|
||||
; CHECK-LABEL: test_vcvtq_n_s32_f32:
|
||||
; CHECK: @ %bb.0: @ %entry
|
||||
; CHECK-NEXT: vcvt.s32.f32 q0, q0, #3
|
||||
; CHECK-NEXT: bx lr
|
||||
entry:
|
||||
%0 = call <4 x i32> @llvm.arm.mve.vcvt.fix.v4i32.v4f32(i32 0, <4 x float> %a, i32 3)
|
||||
ret <4 x i32> %0
|
||||
}
|
||||
|
||||
define arm_aapcs_vfpcc <4 x i32> @test_vcvtq_n_u32_f32(<4 x float> %a) {
|
||||
; CHECK-LABEL: test_vcvtq_n_u32_f32:
|
||||
; CHECK: @ %bb.0: @ %entry
|
||||
; CHECK-NEXT: vcvt.u32.f32 q0, q0, #32
|
||||
; CHECK-NEXT: bx lr
|
||||
entry:
|
||||
%0 = call <4 x i32> @llvm.arm.mve.vcvt.fix.v4i32.v4f32(i32 1, <4 x float> %a, i32 32)
|
||||
ret <4 x i32> %0
|
||||
}
|
||||
|
||||
define arm_aapcs_vfpcc <8 x half> @test_vcvtq_m_n_f16_s16(<8 x half> %inactive, <8 x i16> %a, i16 zeroext %p) {
|
||||
; CHECK-LABEL: test_vcvtq_m_n_f16_s16:
|
||||
; CHECK: @ %bb.0: @ %entry
|
||||
; CHECK-NEXT: vmsr p0, r0
|
||||
; CHECK-NEXT: vpst
|
||||
; CHECK-NEXT: vcvtt.f16.s16 q0, q1, #1
|
||||
; CHECK-NEXT: bx lr
|
||||
entry:
|
||||
%0 = zext i16 %p to i32
|
||||
%1 = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 %0)
|
||||
%2 = call <8 x half> @llvm.arm.mve.vcvt.fix.predicated.v8f16.v8i16.v8i1(i32 0, <8 x half> %inactive, <8 x i16> %a, i32 1, <8 x i1> %1)
|
||||
ret <8 x half> %2
|
||||
}
|
||||
|
||||
define arm_aapcs_vfpcc <8 x half> @test_vcvtq_m_n_f16_u16(<8 x half> %inactive, <8 x i16> %a, i16 zeroext %p) {
|
||||
; CHECK-LABEL: test_vcvtq_m_n_f16_u16:
|
||||
; CHECK: @ %bb.0: @ %entry
|
||||
; CHECK-NEXT: vmsr p0, r0
|
||||
; CHECK-NEXT: vpst
|
||||
; CHECK-NEXT: vcvtt.f16.u16 q0, q1, #2
|
||||
; CHECK-NEXT: bx lr
|
||||
entry:
|
||||
%0 = zext i16 %p to i32
|
||||
%1 = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 %0)
|
||||
%2 = call <8 x half> @llvm.arm.mve.vcvt.fix.predicated.v8f16.v8i16.v8i1(i32 1, <8 x half> %inactive, <8 x i16> %a, i32 2, <8 x i1> %1)
|
||||
ret <8 x half> %2
|
||||
}
|
||||
|
||||
define arm_aapcs_vfpcc <4 x float> @test_vcvtq_m_n_f32_s32(<4 x float> %inactive, <4 x i32> %a, i16 zeroext %p) {
|
||||
; CHECK-LABEL: test_vcvtq_m_n_f32_s32:
|
||||
; CHECK: @ %bb.0: @ %entry
|
||||
; CHECK-NEXT: vmsr p0, r0
|
||||
; CHECK-NEXT: vpst
|
||||
; CHECK-NEXT: vcvtt.f32.s32 q0, q1, #3
|
||||
; CHECK-NEXT: bx lr
|
||||
entry:
|
||||
%0 = zext i16 %p to i32
|
||||
%1 = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 %0)
|
||||
%2 = call <4 x float> @llvm.arm.mve.vcvt.fix.predicated.v4f32.v4i32.v4i1(i32 0, <4 x float> %inactive, <4 x i32> %a, i32 3, <4 x i1> %1)
|
||||
ret <4 x float> %2
|
||||
}
|
||||
|
||||
define arm_aapcs_vfpcc <4 x float> @test_vcvtq_m_n_f32_u32(<4 x float> %inactive, <4 x i32> %a, i16 zeroext %p) {
|
||||
; CHECK-LABEL: test_vcvtq_m_n_f32_u32:
|
||||
; CHECK: @ %bb.0: @ %entry
|
||||
; CHECK-NEXT: vmsr p0, r0
|
||||
; CHECK-NEXT: vpst
|
||||
; CHECK-NEXT: vcvtt.f32.u32 q0, q1, #32
|
||||
; CHECK-NEXT: bx lr
|
||||
entry:
|
||||
%0 = zext i16 %p to i32
|
||||
%1 = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 %0)
|
||||
%2 = call <4 x float> @llvm.arm.mve.vcvt.fix.predicated.v4f32.v4i32.v4i1(i32 1, <4 x float> %inactive, <4 x i32> %a, i32 32, <4 x i1> %1)
|
||||
ret <4 x float> %2
|
||||
}
|
||||
|
||||
define arm_aapcs_vfpcc <8 x i16> @test_vcvtq_m_n_s16_f16(<8 x i16> %inactive, <8 x half> %a, i16 zeroext %p) {
|
||||
; CHECK-LABEL: test_vcvtq_m_n_s16_f16:
|
||||
; CHECK: @ %bb.0: @ %entry
|
||||
; CHECK-NEXT: vmsr p0, r0
|
||||
; CHECK-NEXT: vpst
|
||||
; CHECK-NEXT: vcvtt.s16.f16 q0, q1, #1
|
||||
; CHECK-NEXT: bx lr
|
||||
entry:
|
||||
%0 = zext i16 %p to i32
|
||||
%1 = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 %0)
|
||||
%2 = call <8 x i16> @llvm.arm.mve.vcvt.fix.predicated.v8i16.v8f16.v8i1(i32 0, <8 x i16> %inactive, <8 x half> %a, i32 1, <8 x i1> %1)
|
||||
ret <8 x i16> %2
|
||||
}
|
||||
|
||||
define arm_aapcs_vfpcc <8 x i16> @test_vcvtq_m_n_u16_f16(<8 x i16> %inactive, <8 x half> %a, i16 zeroext %p) {
|
||||
; CHECK-LABEL: test_vcvtq_m_n_u16_f16:
|
||||
; CHECK: @ %bb.0: @ %entry
|
||||
; CHECK-NEXT: vmsr p0, r0
|
||||
; CHECK-NEXT: vpst
|
||||
; CHECK-NEXT: vcvtt.u16.f16 q0, q1, #2
|
||||
; CHECK-NEXT: bx lr
|
||||
entry:
|
||||
%0 = zext i16 %p to i32
|
||||
%1 = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 %0)
|
||||
%2 = call <8 x i16> @llvm.arm.mve.vcvt.fix.predicated.v8i16.v8f16.v8i1(i32 1, <8 x i16> %inactive, <8 x half> %a, i32 2, <8 x i1> %1)
|
||||
ret <8 x i16> %2
|
||||
}
|
||||
|
||||
define arm_aapcs_vfpcc <4 x i32> @test_vcvtq_m_n_s32_f32(<4 x i32> %inactive, <4 x float> %a, i16 zeroext %p) {
|
||||
; CHECK-LABEL: test_vcvtq_m_n_s32_f32:
|
||||
; CHECK: @ %bb.0: @ %entry
|
||||
; CHECK-NEXT: vmsr p0, r0
|
||||
; CHECK-NEXT: vpst
|
||||
; CHECK-NEXT: vcvtt.s32.f32 q0, q1, #3
|
||||
; CHECK-NEXT: bx lr
|
||||
entry:
|
||||
%0 = zext i16 %p to i32
|
||||
%1 = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 %0)
|
||||
%2 = call <4 x i32> @llvm.arm.mve.vcvt.fix.predicated.v4i32.v4f32.v4i1(i32 0, <4 x i32> %inactive, <4 x float> %a, i32 3, <4 x i1> %1)
|
||||
ret <4 x i32> %2
|
||||
}
|
||||
|
||||
; vcvtq_m_n_u32_f32: merging-predicated f32 -> u32 fixed-point conversion,
; scale #32 (maximum legal immediate for 32-bit lanes).
define arm_aapcs_vfpcc <4 x i32> @test_vcvtq_m_n_u32_f32(<4 x i32> %inactive, <4 x float> %a, i16 zeroext %p) {
; CHECK-LABEL: test_vcvtq_m_n_u32_f32:
; CHECK:       @ %bb.0: @ %entry
; CHECK-NEXT:    vmsr p0, r0
; CHECK-NEXT:    vpst
; CHECK-NEXT:    vcvtt.u32.f32 q0, q1, #32
; CHECK-NEXT:    bx lr
entry:
  %0 = zext i16 %p to i32
  %1 = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 %0)
  %2 = call <4 x i32> @llvm.arm.mve.vcvt.fix.predicated.v4i32.v4f32.v4i1(i32 1, <4 x i32> %inactive, <4 x float> %a, i32 32, <4 x i1> %1)
  ret <4 x i32> %2
}

; vcvtq_x_n_f16_s16: dont-care-predicated s16 -> f16 fixed-point conversion,
; scale #1. The _x_ form passes undef as the inactive operand.
define arm_aapcs_vfpcc <8 x half> @test_vcvtq_x_n_f16_s16(<8 x i16> %a, i16 zeroext %p) {
; CHECK-LABEL: test_vcvtq_x_n_f16_s16:
; CHECK:       @ %bb.0: @ %entry
; CHECK-NEXT:    vmsr p0, r0
; CHECK-NEXT:    vpst
; CHECK-NEXT:    vcvtt.f16.s16 q0, q0, #1
; CHECK-NEXT:    bx lr
entry:
  %0 = zext i16 %p to i32
  %1 = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 %0)
  %2 = call <8 x half> @llvm.arm.mve.vcvt.fix.predicated.v8f16.v8i16.v8i1(i32 0, <8 x half> undef, <8 x i16> %a, i32 1, <8 x i1> %1)
  ret <8 x half> %2
}

; vcvtq_x_n_f16_u16: dont-care-predicated u16 -> f16 fixed-point conversion,
; scale #2. Unsigned flag is 1 (unsigned source integer).
define arm_aapcs_vfpcc <8 x half> @test_vcvtq_x_n_f16_u16(<8 x i16> %a, i16 zeroext %p) {
; CHECK-LABEL: test_vcvtq_x_n_f16_u16:
; CHECK:       @ %bb.0: @ %entry
; CHECK-NEXT:    vmsr p0, r0
; CHECK-NEXT:    vpst
; CHECK-NEXT:    vcvtt.f16.u16 q0, q0, #2
; CHECK-NEXT:    bx lr
entry:
  %0 = zext i16 %p to i32
  %1 = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 %0)
  %2 = call <8 x half> @llvm.arm.mve.vcvt.fix.predicated.v8f16.v8i16.v8i1(i32 1, <8 x half> undef, <8 x i16> %a, i32 2, <8 x i1> %1)
  ret <8 x half> %2
}

; vcvtq_x_n_f32_s32: dont-care-predicated s32 -> f32 fixed-point conversion,
; scale #3.
define arm_aapcs_vfpcc <4 x float> @test_vcvtq_x_n_f32_s32(<4 x i32> %a, i16 zeroext %p) {
; CHECK-LABEL: test_vcvtq_x_n_f32_s32:
; CHECK:       @ %bb.0: @ %entry
; CHECK-NEXT:    vmsr p0, r0
; CHECK-NEXT:    vpst
; CHECK-NEXT:    vcvtt.f32.s32 q0, q0, #3
; CHECK-NEXT:    bx lr
entry:
  %0 = zext i16 %p to i32
  %1 = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 %0)
  %2 = call <4 x float> @llvm.arm.mve.vcvt.fix.predicated.v4f32.v4i32.v4i1(i32 0, <4 x float> undef, <4 x i32> %a, i32 3, <4 x i1> %1)
  ret <4 x float> %2
}

; vcvtq_x_n_f32_u32: dont-care-predicated u32 -> f32 fixed-point conversion,
; scale #32 (maximum legal immediate for 32-bit lanes).
define arm_aapcs_vfpcc <4 x float> @test_vcvtq_x_n_f32_u32(<4 x i32> %a, i16 zeroext %p) {
; CHECK-LABEL: test_vcvtq_x_n_f32_u32:
; CHECK:       @ %bb.0: @ %entry
; CHECK-NEXT:    vmsr p0, r0
; CHECK-NEXT:    vpst
; CHECK-NEXT:    vcvtt.f32.u32 q0, q0, #32
; CHECK-NEXT:    bx lr
entry:
  %0 = zext i16 %p to i32
  %1 = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 %0)
  %2 = call <4 x float> @llvm.arm.mve.vcvt.fix.predicated.v4f32.v4i32.v4i1(i32 1, <4 x float> undef, <4 x i32> %a, i32 32, <4 x i1> %1)
  ret <4 x float> %2
}

; vcvtq_x_n_s16_f16: dont-care-predicated f16 -> s16 fixed-point conversion,
; scale #1.
define arm_aapcs_vfpcc <8 x i16> @test_vcvtq_x_n_s16_f16(<8 x half> %a, i16 zeroext %p) {
; CHECK-LABEL: test_vcvtq_x_n_s16_f16:
; CHECK:       @ %bb.0: @ %entry
; CHECK-NEXT:    vmsr p0, r0
; CHECK-NEXT:    vpst
; CHECK-NEXT:    vcvtt.s16.f16 q0, q0, #1
; CHECK-NEXT:    bx lr
entry:
  %0 = zext i16 %p to i32
  %1 = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 %0)
  %2 = call <8 x i16> @llvm.arm.mve.vcvt.fix.predicated.v8i16.v8f16.v8i1(i32 0, <8 x i16> undef, <8 x half> %a, i32 1, <8 x i1> %1)
  ret <8 x i16> %2
}

; vcvtq_x_n_u16_f16: dont-care-predicated f16 -> u16 fixed-point conversion,
; scale #2. Unsigned flag is 1 (unsigned destination).
define arm_aapcs_vfpcc <8 x i16> @test_vcvtq_x_n_u16_f16(<8 x half> %a, i16 zeroext %p) {
; CHECK-LABEL: test_vcvtq_x_n_u16_f16:
; CHECK:       @ %bb.0: @ %entry
; CHECK-NEXT:    vmsr p0, r0
; CHECK-NEXT:    vpst
; CHECK-NEXT:    vcvtt.u16.f16 q0, q0, #2
; CHECK-NEXT:    bx lr
entry:
  %0 = zext i16 %p to i32
  %1 = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 %0)
  %2 = call <8 x i16> @llvm.arm.mve.vcvt.fix.predicated.v8i16.v8f16.v8i1(i32 1, <8 x i16> undef, <8 x half> %a, i32 2, <8 x i1> %1)
  ret <8 x i16> %2
}

; vcvtq_x_n_s32_f32: dont-care-predicated f32 -> s32 fixed-point conversion,
; scale #3.
define arm_aapcs_vfpcc <4 x i32> @test_vcvtq_x_n_s32_f32(<4 x float> %a, i16 zeroext %p) {
; CHECK-LABEL: test_vcvtq_x_n_s32_f32:
; CHECK:       @ %bb.0: @ %entry
; CHECK-NEXT:    vmsr p0, r0
; CHECK-NEXT:    vpst
; CHECK-NEXT:    vcvtt.s32.f32 q0, q0, #3
; CHECK-NEXT:    bx lr
entry:
  %0 = zext i16 %p to i32
  %1 = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 %0)
  %2 = call <4 x i32> @llvm.arm.mve.vcvt.fix.predicated.v4i32.v4f32.v4i1(i32 0, <4 x i32> undef, <4 x float> %a, i32 3, <4 x i1> %1)
  ret <4 x i32> %2
}

; vcvtq_x_n_u32_f32: dont-care-predicated f32 -> u32 fixed-point conversion,
; scale #32 (maximum legal immediate for 32-bit lanes).
define arm_aapcs_vfpcc <4 x i32> @test_vcvtq_x_n_u32_f32(<4 x float> %a, i16 zeroext %p) {
; CHECK-LABEL: test_vcvtq_x_n_u32_f32:
; CHECK:       @ %bb.0: @ %entry
; CHECK-NEXT:    vmsr p0, r0
; CHECK-NEXT:    vpst
; CHECK-NEXT:    vcvtt.u32.f32 q0, q0, #32
; CHECK-NEXT:    bx lr
entry:
  %0 = zext i16 %p to i32
  %1 = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 %0)
  %2 = call <4 x i32> @llvm.arm.mve.vcvt.fix.predicated.v4i32.v4f32.v4i1(i32 1, <4 x i32> undef, <4 x float> %a, i32 32, <4 x i1> %1)
  ret <4 x i32> %2
}