// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
// RUN: %clang_cc1 -triple thumbv8.1m.main-none-none-eabi -target-feature +mve.fp -mfloat-abi hard -fallow-half-arguments-and-returns -O0 -disable-O0-optnone -S -emit-llvm -o - %s | opt -S -mem2reg | FileCheck %s
// RUN: %clang_cc1 -DPOLYMORPHIC -triple thumbv8.1m.main-none-none-eabi -target-feature +mve.fp -mfloat-abi hard -fallow-half-arguments-and-returns -O0 -disable-O0-optnone -S -emit-llvm -o - %s | opt -S -mem2reg | FileCheck %s

// REQUIRES: aarch64-registered-target || arm-registered-target

#include <arm_mve.h>
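
// The intrinsics under test convert floating-point vectors to integer
// vectors with a directed rounding mode: vcvtaq rounds to nearest with
// ties away from zero, vcvtmq rounds toward minus infinity, vcvtnq
// rounds to nearest with ties to even, and vcvtpq rounds toward plus
// infinity. Each comes in signed and unsigned variants; in the
// generated IR the leading i32 immediate selects signedness
// (0 = signed, 1 = unsigned).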

// CHECK-LABEL: @test_vcvtaq_s16_f16(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[TMP0:%.*]] = call <8 x i16> @llvm.arm.mve.vcvta.v8i16.v8f16(i32 0, <8 x half> [[A:%.*]])
// CHECK-NEXT:    ret <8 x i16> [[TMP0]]
//
int16x8_t test_vcvtaq_s16_f16(float16x8_t a)
{
    return vcvtaq_s16_f16(a);
}

// CHECK-LABEL: @test_vcvtaq_s32_f32(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[TMP0:%.*]] = call <4 x i32> @llvm.arm.mve.vcvta.v4i32.v4f32(i32 0, <4 x float> [[A:%.*]])
// CHECK-NEXT:    ret <4 x i32> [[TMP0]]
//
int32x4_t test_vcvtaq_s32_f32(float32x4_t a)
{
    return vcvtaq_s32_f32(a);
}

// CHECK-LABEL: @test_vcvtaq_u16_f16(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[TMP0:%.*]] = call <8 x i16> @llvm.arm.mve.vcvta.v8i16.v8f16(i32 1, <8 x half> [[A:%.*]])
// CHECK-NEXT:    ret <8 x i16> [[TMP0]]
//
uint16x8_t test_vcvtaq_u16_f16(float16x8_t a)
{
    return vcvtaq_u16_f16(a);
}

// CHECK-LABEL: @test_vcvtaq_u32_f32(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[TMP0:%.*]] = call <4 x i32> @llvm.arm.mve.vcvta.v4i32.v4f32(i32 1, <4 x float> [[A:%.*]])
// CHECK-NEXT:    ret <4 x i32> [[TMP0]]
//
uint32x4_t test_vcvtaq_u32_f32(float32x4_t a)
{
    return vcvtaq_u32_f32(a);
}

// CHECK-LABEL: @test_vcvtmq_s16_f16(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[TMP0:%.*]] = call <8 x i16> @llvm.arm.mve.vcvtm.v8i16.v8f16(i32 0, <8 x half> [[A:%.*]])
// CHECK-NEXT:    ret <8 x i16> [[TMP0]]
//
int16x8_t test_vcvtmq_s16_f16(float16x8_t a)
{
    return vcvtmq_s16_f16(a);
}

// CHECK-LABEL: @test_vcvtmq_s32_f32(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[TMP0:%.*]] = call <4 x i32> @llvm.arm.mve.vcvtm.v4i32.v4f32(i32 0, <4 x float> [[A:%.*]])
// CHECK-NEXT:    ret <4 x i32> [[TMP0]]
//
int32x4_t test_vcvtmq_s32_f32(float32x4_t a)
{
    return vcvtmq_s32_f32(a);
}

// CHECK-LABEL: @test_vcvtmq_u16_f16(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[TMP0:%.*]] = call <8 x i16> @llvm.arm.mve.vcvtm.v8i16.v8f16(i32 1, <8 x half> [[A:%.*]])
// CHECK-NEXT:    ret <8 x i16> [[TMP0]]
//
uint16x8_t test_vcvtmq_u16_f16(float16x8_t a)
{
    return vcvtmq_u16_f16(a);
}

// CHECK-LABEL: @test_vcvtmq_u32_f32(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[TMP0:%.*]] = call <4 x i32> @llvm.arm.mve.vcvtm.v4i32.v4f32(i32 1, <4 x float> [[A:%.*]])
// CHECK-NEXT:    ret <4 x i32> [[TMP0]]
//
uint32x4_t test_vcvtmq_u32_f32(float32x4_t a)
{
    return vcvtmq_u32_f32(a);
}

// CHECK-LABEL: @test_vcvtnq_s16_f16(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[TMP0:%.*]] = call <8 x i16> @llvm.arm.mve.vcvtn.v8i16.v8f16(i32 0, <8 x half> [[A:%.*]])
// CHECK-NEXT:    ret <8 x i16> [[TMP0]]
//
int16x8_t test_vcvtnq_s16_f16(float16x8_t a)
{
    return vcvtnq_s16_f16(a);
}

// CHECK-LABEL: @test_vcvtnq_s32_f32(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[TMP0:%.*]] = call <4 x i32> @llvm.arm.mve.vcvtn.v4i32.v4f32(i32 0, <4 x float> [[A:%.*]])
// CHECK-NEXT:    ret <4 x i32> [[TMP0]]
//
int32x4_t test_vcvtnq_s32_f32(float32x4_t a)
{
    return vcvtnq_s32_f32(a);
}

// CHECK-LABEL: @test_vcvtnq_u16_f16(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[TMP0:%.*]] = call <8 x i16> @llvm.arm.mve.vcvtn.v8i16.v8f16(i32 1, <8 x half> [[A:%.*]])
// CHECK-NEXT:    ret <8 x i16> [[TMP0]]
//
uint16x8_t test_vcvtnq_u16_f16(float16x8_t a)
{
    return vcvtnq_u16_f16(a);
}

// CHECK-LABEL: @test_vcvtnq_u32_f32(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[TMP0:%.*]] = call <4 x i32> @llvm.arm.mve.vcvtn.v4i32.v4f32(i32 1, <4 x float> [[A:%.*]])
// CHECK-NEXT:    ret <4 x i32> [[TMP0]]
//
uint32x4_t test_vcvtnq_u32_f32(float32x4_t a)
{
    return vcvtnq_u32_f32(a);
}

// CHECK-LABEL: @test_vcvtpq_s16_f16(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[TMP0:%.*]] = call <8 x i16> @llvm.arm.mve.vcvtp.v8i16.v8f16(i32 0, <8 x half> [[A:%.*]])
// CHECK-NEXT:    ret <8 x i16> [[TMP0]]
//
int16x8_t test_vcvtpq_s16_f16(float16x8_t a)
{
    return vcvtpq_s16_f16(a);
}

// CHECK-LABEL: @test_vcvtpq_s32_f32(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[TMP0:%.*]] = call <4 x i32> @llvm.arm.mve.vcvtp.v4i32.v4f32(i32 0, <4 x float> [[A:%.*]])
// CHECK-NEXT:    ret <4 x i32> [[TMP0]]
//
int32x4_t test_vcvtpq_s32_f32(float32x4_t a)
{
    return vcvtpq_s32_f32(a);
}

// CHECK-LABEL: @test_vcvtpq_u16_f16(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[TMP0:%.*]] = call <8 x i16> @llvm.arm.mve.vcvtp.v8i16.v8f16(i32 1, <8 x half> [[A:%.*]])
// CHECK-NEXT:    ret <8 x i16> [[TMP0]]
//
uint16x8_t test_vcvtpq_u16_f16(float16x8_t a)
{
    return vcvtpq_u16_f16(a);
}

// CHECK-LABEL: @test_vcvtpq_u32_f32(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[TMP0:%.*]] = call <4 x i32> @llvm.arm.mve.vcvtp.v4i32.v4f32(i32 1, <4 x float> [[A:%.*]])
// CHECK-NEXT:    ret <4 x i32> [[TMP0]]
//
uint32x4_t test_vcvtpq_u32_f32(float32x4_t a)
{
    return vcvtpq_u32_f32(a);
}
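
// The _m ("merging") predicated forms below take an extra "inactive"
// vector and an mve_pred16_t mask: lanes whose predicate bit is clear
// keep the corresponding lane of "inactive" instead of the conversion
// result. In the IR the 16-bit mask is zero-extended and turned into a
// vector predicate via @llvm.arm.mve.pred.i2v. With -DPOLYMORPHIC the
// same tests exercise the overloaded names (e.g. vcvtaq_m) instead of
// the fully suffixed ones.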

// CHECK-LABEL: @test_vcvtaq_m_s16_f16(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT:    [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
// CHECK-NEXT:    [[TMP2:%.*]] = call <8 x i16> @llvm.arm.mve.vcvta.predicated.v8i16.v8f16.v8i1(i32 0, <8 x i16> [[INACTIVE:%.*]], <8 x half> [[A:%.*]], <8 x i1> [[TMP1]])
// CHECK-NEXT:    ret <8 x i16> [[TMP2]]
//
int16x8_t test_vcvtaq_m_s16_f16(int16x8_t inactive, float16x8_t a, mve_pred16_t p)
{
#ifdef POLYMORPHIC
    return vcvtaq_m(inactive, a, p);
#else /* POLYMORPHIC */
    return vcvtaq_m_s16_f16(inactive, a, p);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vcvtaq_m_s32_f32(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT:    [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
// CHECK-NEXT:    [[TMP2:%.*]] = call <4 x i32> @llvm.arm.mve.vcvta.predicated.v4i32.v4f32.v4i1(i32 0, <4 x i32> [[INACTIVE:%.*]], <4 x float> [[A:%.*]], <4 x i1> [[TMP1]])
// CHECK-NEXT:    ret <4 x i32> [[TMP2]]
//
int32x4_t test_vcvtaq_m_s32_f32(int32x4_t inactive, float32x4_t a, mve_pred16_t p)
{
#ifdef POLYMORPHIC
    return vcvtaq_m(inactive, a, p);
#else /* POLYMORPHIC */
    return vcvtaq_m_s32_f32(inactive, a, p);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vcvtaq_m_u16_f16(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT:    [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
// CHECK-NEXT:    [[TMP2:%.*]] = call <8 x i16> @llvm.arm.mve.vcvta.predicated.v8i16.v8f16.v8i1(i32 1, <8 x i16> [[INACTIVE:%.*]], <8 x half> [[A:%.*]], <8 x i1> [[TMP1]])
// CHECK-NEXT:    ret <8 x i16> [[TMP2]]
//
uint16x8_t test_vcvtaq_m_u16_f16(uint16x8_t inactive, float16x8_t a, mve_pred16_t p)
{
#ifdef POLYMORPHIC
    return vcvtaq_m(inactive, a, p);
#else /* POLYMORPHIC */
    return vcvtaq_m_u16_f16(inactive, a, p);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vcvtaq_m_u32_f32(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT:    [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
// CHECK-NEXT:    [[TMP2:%.*]] = call <4 x i32> @llvm.arm.mve.vcvta.predicated.v4i32.v4f32.v4i1(i32 1, <4 x i32> [[INACTIVE:%.*]], <4 x float> [[A:%.*]], <4 x i1> [[TMP1]])
// CHECK-NEXT:    ret <4 x i32> [[TMP2]]
//
uint32x4_t test_vcvtaq_m_u32_f32(uint32x4_t inactive, float32x4_t a, mve_pred16_t p)
{
#ifdef POLYMORPHIC
    return vcvtaq_m(inactive, a, p);
#else /* POLYMORPHIC */
    return vcvtaq_m_u32_f32(inactive, a, p);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vcvtmq_m_s16_f16(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT:    [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
// CHECK-NEXT:    [[TMP2:%.*]] = call <8 x i16> @llvm.arm.mve.vcvtm.predicated.v8i16.v8f16.v8i1(i32 0, <8 x i16> [[INACTIVE:%.*]], <8 x half> [[A:%.*]], <8 x i1> [[TMP1]])
// CHECK-NEXT:    ret <8 x i16> [[TMP2]]
//
int16x8_t test_vcvtmq_m_s16_f16(int16x8_t inactive, float16x8_t a, mve_pred16_t p)
{
#ifdef POLYMORPHIC
    return vcvtmq_m(inactive, a, p);
#else /* POLYMORPHIC */
    return vcvtmq_m_s16_f16(inactive, a, p);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vcvtmq_m_s32_f32(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT:    [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
// CHECK-NEXT:    [[TMP2:%.*]] = call <4 x i32> @llvm.arm.mve.vcvtm.predicated.v4i32.v4f32.v4i1(i32 0, <4 x i32> [[INACTIVE:%.*]], <4 x float> [[A:%.*]], <4 x i1> [[TMP1]])
// CHECK-NEXT:    ret <4 x i32> [[TMP2]]
//
int32x4_t test_vcvtmq_m_s32_f32(int32x4_t inactive, float32x4_t a, mve_pred16_t p)
{
#ifdef POLYMORPHIC
    return vcvtmq_m(inactive, a, p);
#else /* POLYMORPHIC */
    return vcvtmq_m_s32_f32(inactive, a, p);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vcvtmq_m_u16_f16(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT:    [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
// CHECK-NEXT:    [[TMP2:%.*]] = call <8 x i16> @llvm.arm.mve.vcvtm.predicated.v8i16.v8f16.v8i1(i32 1, <8 x i16> [[INACTIVE:%.*]], <8 x half> [[A:%.*]], <8 x i1> [[TMP1]])
// CHECK-NEXT:    ret <8 x i16> [[TMP2]]
//
uint16x8_t test_vcvtmq_m_u16_f16(uint16x8_t inactive, float16x8_t a, mve_pred16_t p)
{
#ifdef POLYMORPHIC
    return vcvtmq_m(inactive, a, p);
#else /* POLYMORPHIC */
    return vcvtmq_m_u16_f16(inactive, a, p);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vcvtmq_m_u32_f32(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT:    [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
// CHECK-NEXT:    [[TMP2:%.*]] = call <4 x i32> @llvm.arm.mve.vcvtm.predicated.v4i32.v4f32.v4i1(i32 1, <4 x i32> [[INACTIVE:%.*]], <4 x float> [[A:%.*]], <4 x i1> [[TMP1]])
// CHECK-NEXT:    ret <4 x i32> [[TMP2]]
//
uint32x4_t test_vcvtmq_m_u32_f32(uint32x4_t inactive, float32x4_t a, mve_pred16_t p)
{
#ifdef POLYMORPHIC
    return vcvtmq_m(inactive, a, p);
#else /* POLYMORPHIC */
    return vcvtmq_m_u32_f32(inactive, a, p);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vcvtnq_m_s16_f16(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT:    [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
// CHECK-NEXT:    [[TMP2:%.*]] = call <8 x i16> @llvm.arm.mve.vcvtn.predicated.v8i16.v8f16.v8i1(i32 0, <8 x i16> [[INACTIVE:%.*]], <8 x half> [[A:%.*]], <8 x i1> [[TMP1]])
// CHECK-NEXT:    ret <8 x i16> [[TMP2]]
//
int16x8_t test_vcvtnq_m_s16_f16(int16x8_t inactive, float16x8_t a, mve_pred16_t p)
{
#ifdef POLYMORPHIC
    return vcvtnq_m(inactive, a, p);
#else /* POLYMORPHIC */
    return vcvtnq_m_s16_f16(inactive, a, p);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vcvtnq_m_s32_f32(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT:    [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
// CHECK-NEXT:    [[TMP2:%.*]] = call <4 x i32> @llvm.arm.mve.vcvtn.predicated.v4i32.v4f32.v4i1(i32 0, <4 x i32> [[INACTIVE:%.*]], <4 x float> [[A:%.*]], <4 x i1> [[TMP1]])
// CHECK-NEXT:    ret <4 x i32> [[TMP2]]
//
int32x4_t test_vcvtnq_m_s32_f32(int32x4_t inactive, float32x4_t a, mve_pred16_t p)
{
#ifdef POLYMORPHIC
    return vcvtnq_m(inactive, a, p);
#else /* POLYMORPHIC */
    return vcvtnq_m_s32_f32(inactive, a, p);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vcvtnq_m_u16_f16(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT:    [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
// CHECK-NEXT:    [[TMP2:%.*]] = call <8 x i16> @llvm.arm.mve.vcvtn.predicated.v8i16.v8f16.v8i1(i32 1, <8 x i16> [[INACTIVE:%.*]], <8 x half> [[A:%.*]], <8 x i1> [[TMP1]])
// CHECK-NEXT:    ret <8 x i16> [[TMP2]]
//
uint16x8_t test_vcvtnq_m_u16_f16(uint16x8_t inactive, float16x8_t a, mve_pred16_t p)
{
#ifdef POLYMORPHIC
    return vcvtnq_m(inactive, a, p);
#else /* POLYMORPHIC */
    return vcvtnq_m_u16_f16(inactive, a, p);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vcvtnq_m_u32_f32(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT:    [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
// CHECK-NEXT:    [[TMP2:%.*]] = call <4 x i32> @llvm.arm.mve.vcvtn.predicated.v4i32.v4f32.v4i1(i32 1, <4 x i32> [[INACTIVE:%.*]], <4 x float> [[A:%.*]], <4 x i1> [[TMP1]])
// CHECK-NEXT:    ret <4 x i32> [[TMP2]]
//
uint32x4_t test_vcvtnq_m_u32_f32(uint32x4_t inactive, float32x4_t a, mve_pred16_t p)
{
#ifdef POLYMORPHIC
    return vcvtnq_m(inactive, a, p);
#else /* POLYMORPHIC */
    return vcvtnq_m_u32_f32(inactive, a, p);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vcvtpq_m_s16_f16(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT:    [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
// CHECK-NEXT:    [[TMP2:%.*]] = call <8 x i16> @llvm.arm.mve.vcvtp.predicated.v8i16.v8f16.v8i1(i32 0, <8 x i16> [[INACTIVE:%.*]], <8 x half> [[A:%.*]], <8 x i1> [[TMP1]])
// CHECK-NEXT:    ret <8 x i16> [[TMP2]]
//
int16x8_t test_vcvtpq_m_s16_f16(int16x8_t inactive, float16x8_t a, mve_pred16_t p)
{
#ifdef POLYMORPHIC
    return vcvtpq_m(inactive, a, p);
#else /* POLYMORPHIC */
    return vcvtpq_m_s16_f16(inactive, a, p);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vcvtpq_m_s32_f32(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT:    [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
// CHECK-NEXT:    [[TMP2:%.*]] = call <4 x i32> @llvm.arm.mve.vcvtp.predicated.v4i32.v4f32.v4i1(i32 0, <4 x i32> [[INACTIVE:%.*]], <4 x float> [[A:%.*]], <4 x i1> [[TMP1]])
// CHECK-NEXT:    ret <4 x i32> [[TMP2]]
//
int32x4_t test_vcvtpq_m_s32_f32(int32x4_t inactive, float32x4_t a, mve_pred16_t p)
{
#ifdef POLYMORPHIC
    return vcvtpq_m(inactive, a, p);
#else /* POLYMORPHIC */
    return vcvtpq_m_s32_f32(inactive, a, p);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vcvtpq_m_u16_f16(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT:    [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
// CHECK-NEXT:    [[TMP2:%.*]] = call <8 x i16> @llvm.arm.mve.vcvtp.predicated.v8i16.v8f16.v8i1(i32 1, <8 x i16> [[INACTIVE:%.*]], <8 x half> [[A:%.*]], <8 x i1> [[TMP1]])
// CHECK-NEXT:    ret <8 x i16> [[TMP2]]
//
uint16x8_t test_vcvtpq_m_u16_f16(uint16x8_t inactive, float16x8_t a, mve_pred16_t p)
{
#ifdef POLYMORPHIC
    return vcvtpq_m(inactive, a, p);
#else /* POLYMORPHIC */
    return vcvtpq_m_u16_f16(inactive, a, p);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vcvtpq_m_u32_f32(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT:    [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
// CHECK-NEXT:    [[TMP2:%.*]] = call <4 x i32> @llvm.arm.mve.vcvtp.predicated.v4i32.v4f32.v4i1(i32 1, <4 x i32> [[INACTIVE:%.*]], <4 x float> [[A:%.*]], <4 x i1> [[TMP1]])
// CHECK-NEXT:    ret <4 x i32> [[TMP2]]
//
uint32x4_t test_vcvtpq_m_u32_f32(uint32x4_t inactive, float32x4_t a, mve_pred16_t p)
{
#ifdef POLYMORPHIC
    return vcvtpq_m(inactive, a, p);
#else /* POLYMORPHIC */
    return vcvtpq_m_u32_f32(inactive, a, p);
#endif /* POLYMORPHIC */
}
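
// The _x ("don't care") predicated forms take no "inactive" operand;
// the IR passes undef instead, so lanes with a clear predicate bit have
// an undefined value.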

// CHECK-LABEL: @test_vcvtaq_x_s16_f16(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT:    [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
// CHECK-NEXT:    [[TMP2:%.*]] = call <8 x i16> @llvm.arm.mve.vcvta.predicated.v8i16.v8f16.v8i1(i32 0, <8 x i16> undef, <8 x half> [[A:%.*]], <8 x i1> [[TMP1]])
// CHECK-NEXT:    ret <8 x i16> [[TMP2]]
//
int16x8_t test_vcvtaq_x_s16_f16(float16x8_t a, mve_pred16_t p)
{
    return vcvtaq_x_s16_f16(a, p);
}

// CHECK-LABEL: @test_vcvtaq_x_s32_f32(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT:    [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
// CHECK-NEXT:    [[TMP2:%.*]] = call <4 x i32> @llvm.arm.mve.vcvta.predicated.v4i32.v4f32.v4i1(i32 0, <4 x i32> undef, <4 x float> [[A:%.*]], <4 x i1> [[TMP1]])
// CHECK-NEXT:    ret <4 x i32> [[TMP2]]
//
int32x4_t test_vcvtaq_x_s32_f32(float32x4_t a, mve_pred16_t p)
{
    return vcvtaq_x_s32_f32(a, p);
}

// CHECK-LABEL: @test_vcvtaq_x_u16_f16(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT:    [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
// CHECK-NEXT:    [[TMP2:%.*]] = call <8 x i16> @llvm.arm.mve.vcvta.predicated.v8i16.v8f16.v8i1(i32 1, <8 x i16> undef, <8 x half> [[A:%.*]], <8 x i1> [[TMP1]])
// CHECK-NEXT:    ret <8 x i16> [[TMP2]]
//
uint16x8_t test_vcvtaq_x_u16_f16(float16x8_t a, mve_pred16_t p)
{
    return vcvtaq_x_u16_f16(a, p);
}

// CHECK-LABEL: @test_vcvtaq_x_u32_f32(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT:    [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
// CHECK-NEXT:    [[TMP2:%.*]] = call <4 x i32> @llvm.arm.mve.vcvta.predicated.v4i32.v4f32.v4i1(i32 1, <4 x i32> undef, <4 x float> [[A:%.*]], <4 x i1> [[TMP1]])
// CHECK-NEXT:    ret <4 x i32> [[TMP2]]
//
uint32x4_t test_vcvtaq_x_u32_f32(float32x4_t a, mve_pred16_t p)
{
    return vcvtaq_x_u32_f32(a, p);
}

// CHECK-LABEL: @test_vcvtmq_x_s16_f16(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT:    [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
// CHECK-NEXT:    [[TMP2:%.*]] = call <8 x i16> @llvm.arm.mve.vcvtm.predicated.v8i16.v8f16.v8i1(i32 0, <8 x i16> undef, <8 x half> [[A:%.*]], <8 x i1> [[TMP1]])
// CHECK-NEXT:    ret <8 x i16> [[TMP2]]
//
int16x8_t test_vcvtmq_x_s16_f16(float16x8_t a, mve_pred16_t p)
{
    return vcvtmq_x_s16_f16(a, p);
}

// CHECK-LABEL: @test_vcvtmq_x_s32_f32(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT:    [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
// CHECK-NEXT:    [[TMP2:%.*]] = call <4 x i32> @llvm.arm.mve.vcvtm.predicated.v4i32.v4f32.v4i1(i32 0, <4 x i32> undef, <4 x float> [[A:%.*]], <4 x i1> [[TMP1]])
// CHECK-NEXT:    ret <4 x i32> [[TMP2]]
//
int32x4_t test_vcvtmq_x_s32_f32(float32x4_t a, mve_pred16_t p)
{
    return vcvtmq_x_s32_f32(a, p);
}

// CHECK-LABEL: @test_vcvtmq_x_u16_f16(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT:    [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
// CHECK-NEXT:    [[TMP2:%.*]] = call <8 x i16> @llvm.arm.mve.vcvtm.predicated.v8i16.v8f16.v8i1(i32 1, <8 x i16> undef, <8 x half> [[A:%.*]], <8 x i1> [[TMP1]])
// CHECK-NEXT:    ret <8 x i16> [[TMP2]]
//
uint16x8_t test_vcvtmq_x_u16_f16(float16x8_t a, mve_pred16_t p)
{
    return vcvtmq_x_u16_f16(a, p);
}

// CHECK-LABEL: @test_vcvtmq_x_u32_f32(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT:    [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
// CHECK-NEXT:    [[TMP2:%.*]] = call <4 x i32> @llvm.arm.mve.vcvtm.predicated.v4i32.v4f32.v4i1(i32 1, <4 x i32> undef, <4 x float> [[A:%.*]], <4 x i1> [[TMP1]])
// CHECK-NEXT:    ret <4 x i32> [[TMP2]]
//
uint32x4_t test_vcvtmq_x_u32_f32(float32x4_t a, mve_pred16_t p)
{
    return vcvtmq_x_u32_f32(a, p);
}

// CHECK-LABEL: @test_vcvtnq_x_s16_f16(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT:    [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
// CHECK-NEXT:    [[TMP2:%.*]] = call <8 x i16> @llvm.arm.mve.vcvtn.predicated.v8i16.v8f16.v8i1(i32 0, <8 x i16> undef, <8 x half> [[A:%.*]], <8 x i1> [[TMP1]])
// CHECK-NEXT:    ret <8 x i16> [[TMP2]]
//
int16x8_t test_vcvtnq_x_s16_f16(float16x8_t a, mve_pred16_t p)
{
    return vcvtnq_x_s16_f16(a, p);
}

// CHECK-LABEL: @test_vcvtnq_x_s32_f32(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT:    [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
// CHECK-NEXT:    [[TMP2:%.*]] = call <4 x i32> @llvm.arm.mve.vcvtn.predicated.v4i32.v4f32.v4i1(i32 0, <4 x i32> undef, <4 x float> [[A:%.*]], <4 x i1> [[TMP1]])
// CHECK-NEXT:    ret <4 x i32> [[TMP2]]
//
int32x4_t test_vcvtnq_x_s32_f32(float32x4_t a, mve_pred16_t p)
{
    return vcvtnq_x_s32_f32(a, p);
}

// CHECK-LABEL: @test_vcvtnq_x_u16_f16(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT:    [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
// CHECK-NEXT:    [[TMP2:%.*]] = call <8 x i16> @llvm.arm.mve.vcvtn.predicated.v8i16.v8f16.v8i1(i32 1, <8 x i16> undef, <8 x half> [[A:%.*]], <8 x i1> [[TMP1]])
// CHECK-NEXT:    ret <8 x i16> [[TMP2]]
//
uint16x8_t test_vcvtnq_x_u16_f16(float16x8_t a, mve_pred16_t p)
{
    return vcvtnq_x_u16_f16(a, p);
}

// CHECK-LABEL: @test_vcvtnq_x_u32_f32(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT:    [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
// CHECK-NEXT:    [[TMP2:%.*]] = call <4 x i32> @llvm.arm.mve.vcvtn.predicated.v4i32.v4f32.v4i1(i32 1, <4 x i32> undef, <4 x float> [[A:%.*]], <4 x i1> [[TMP1]])
// CHECK-NEXT:    ret <4 x i32> [[TMP2]]
//
uint32x4_t test_vcvtnq_x_u32_f32(float32x4_t a, mve_pred16_t p)
{
    return vcvtnq_x_u32_f32(a, p);
}

// CHECK-LABEL: @test_vcvtpq_x_s16_f16(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT:    [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
// CHECK-NEXT:    [[TMP2:%.*]] = call <8 x i16> @llvm.arm.mve.vcvtp.predicated.v8i16.v8f16.v8i1(i32 0, <8 x i16> undef, <8 x half> [[A:%.*]], <8 x i1> [[TMP1]])
// CHECK-NEXT:    ret <8 x i16> [[TMP2]]
//
int16x8_t test_vcvtpq_x_s16_f16(float16x8_t a, mve_pred16_t p)
{
    return vcvtpq_x_s16_f16(a, p);
}

// CHECK-LABEL: @test_vcvtpq_x_s32_f32(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT:    [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
// CHECK-NEXT:    [[TMP2:%.*]] = call <4 x i32> @llvm.arm.mve.vcvtp.predicated.v4i32.v4f32.v4i1(i32 0, <4 x i32> undef, <4 x float> [[A:%.*]], <4 x i1> [[TMP1]])
// CHECK-NEXT:    ret <4 x i32> [[TMP2]]
//
int32x4_t test_vcvtpq_x_s32_f32(float32x4_t a, mve_pred16_t p)
{
    return vcvtpq_x_s32_f32(a, p);
}

// CHECK-LABEL: @test_vcvtpq_x_u16_f16(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT:    [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
// CHECK-NEXT:    [[TMP2:%.*]] = call <8 x i16> @llvm.arm.mve.vcvtp.predicated.v8i16.v8f16.v8i1(i32 1, <8 x i16> undef, <8 x half> [[A:%.*]], <8 x i1> [[TMP1]])
// CHECK-NEXT:    ret <8 x i16> [[TMP2]]
//
uint16x8_t test_vcvtpq_x_u16_f16(float16x8_t a, mve_pred16_t p)
{
    return vcvtpq_x_u16_f16(a, p);
}

// CHECK-LABEL: @test_vcvtpq_x_u32_f32(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT:    [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
// CHECK-NEXT:    [[TMP2:%.*]] = call <4 x i32> @llvm.arm.mve.vcvtp.predicated.v4i32.v4f32.v4i1(i32 1, <4 x i32> undef, <4 x float> [[A:%.*]], <4 x i1> [[TMP1]])
// CHECK-NEXT:    ret <4 x i32> [[TMP2]]
//
uint32x4_t test_vcvtpq_x_u32_f32(float32x4_t a, mve_pred16_t p)
{
    return vcvtpq_x_u32_f32(a, p);
}