// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
// RUN: %clang_cc1 -triple thumbv8.1m.main-none-none-eabi -target-feature +mve.fp -mfloat-abi hard -fallow-half-arguments-and-returns -O0 -disable-O0-optnone -S -emit-llvm -o - %s | opt -S -sroa | FileCheck %s
// RUN: %clang_cc1 -triple thumbv8.1m.main-none-none-eabi -target-feature +mve.fp -mfloat-abi hard -fallow-half-arguments-and-returns -O0 -disable-O0-optnone -DPOLYMORPHIC -S -emit-llvm -o - %s | opt -S -sroa | FileCheck %s

#include <arm_mve.h>

// CHECK-LABEL: @test_vfmaq_f16(
|
|
// CHECK-NEXT: entry:
|
|
// CHECK-NEXT: [[TMP0:%.*]] = call <8 x half> @llvm.fma.v8f16(<8 x half> [[B:%.*]], <8 x half> [[C:%.*]], <8 x half> [[A:%.*]])
|
|
// CHECK-NEXT: ret <8 x half> [[TMP0]]
|
|
//
|
|
float16x8_t test_vfmaq_f16(float16x8_t a, float16x8_t b, float16x8_t c) {
|
|
#ifdef POLYMORPHIC
|
|
return vfmaq(a, b, c);
|
|
#else /* POLYMORPHIC */
|
|
return vfmaq_f16(a, b, c);
|
|
#endif /* POLYMORPHIC */
|
|
}
|
|
|
|
// CHECK-LABEL: @test_vfmaq_f32(
|
|
// CHECK-NEXT: entry:
|
|
// CHECK-NEXT: [[TMP0:%.*]] = call <4 x float> @llvm.fma.v4f32(<4 x float> [[B:%.*]], <4 x float> [[C:%.*]], <4 x float> [[A:%.*]])
|
|
// CHECK-NEXT: ret <4 x float> [[TMP0]]
|
|
//
|
|
float32x4_t test_vfmaq_f32(float32x4_t a, float32x4_t b, float32x4_t c) {
|
|
#ifdef POLYMORPHIC
|
|
return vfmaq(a, b, c);
|
|
#else /* POLYMORPHIC */
|
|
return vfmaq_f32(a, b, c);
|
|
#endif /* POLYMORPHIC */
|
|
}
|
|
|
|
// CHECK-LABEL: @test_vfmaq_n_f16(
|
|
// CHECK-NEXT: entry:
|
|
// CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <8 x half> poison, half [[C:%.*]], i32 0
|
|
// CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <8 x half> [[DOTSPLATINSERT]], <8 x half> poison, <8 x i32> zeroinitializer
|
|
// CHECK-NEXT: [[TMP0:%.*]] = call <8 x half> @llvm.fma.v8f16(<8 x half> [[B:%.*]], <8 x half> [[DOTSPLAT]], <8 x half> [[A:%.*]])
|
|
// CHECK-NEXT: ret <8 x half> [[TMP0]]
|
|
//
|
|
float16x8_t test_vfmaq_n_f16(float16x8_t a, float16x8_t b, float16_t c) {
|
|
#ifdef POLYMORPHIC
|
|
return vfmaq(a, b, c);
|
|
#else /* POLYMORPHIC */
|
|
return vfmaq_n_f16(a, b, c);
|
|
#endif /* POLYMORPHIC */
|
|
}
|
|
|
|
// CHECK-LABEL: @test_vfmaq_n_f32(
|
|
// CHECK-NEXT: entry:
|
|
// CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <4 x float> poison, float [[C:%.*]], i32 0
|
|
// CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <4 x float> [[DOTSPLATINSERT]], <4 x float> poison, <4 x i32> zeroinitializer
|
|
// CHECK-NEXT: [[TMP0:%.*]] = call <4 x float> @llvm.fma.v4f32(<4 x float> [[B:%.*]], <4 x float> [[DOTSPLAT]], <4 x float> [[A:%.*]])
|
|
// CHECK-NEXT: ret <4 x float> [[TMP0]]
|
|
//
|
|
float32x4_t test_vfmaq_n_f32(float32x4_t a, float32x4_t b, float32_t c) {
|
|
#ifdef POLYMORPHIC
|
|
return vfmaq(a, b, c);
|
|
#else /* POLYMORPHIC */
|
|
return vfmaq_n_f32(a, b, c);
|
|
#endif /* POLYMORPHIC */
|
|
}
|
|
|
|
// CHECK-LABEL: @test_vfmasq_n_f16(
|
|
// CHECK-NEXT: entry:
|
|
// CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <8 x half> poison, half [[C:%.*]], i32 0
|
|
// CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <8 x half> [[DOTSPLATINSERT]], <8 x half> poison, <8 x i32> zeroinitializer
|
|
// CHECK-NEXT: [[TMP0:%.*]] = call <8 x half> @llvm.fma.v8f16(<8 x half> [[A:%.*]], <8 x half> [[B:%.*]], <8 x half> [[DOTSPLAT]])
|
|
// CHECK-NEXT: ret <8 x half> [[TMP0]]
|
|
//
|
|
float16x8_t test_vfmasq_n_f16(float16x8_t a, float16x8_t b, float16_t c) {
|
|
#ifdef POLYMORPHIC
|
|
return vfmasq(a, b, c);
|
|
#else /* POLYMORPHIC */
|
|
return vfmasq_n_f16(a, b, c);
|
|
#endif /* POLYMORPHIC */
|
|
}
|
|
|
|
// CHECK-LABEL: @test_vfmasq_n_f32(
|
|
// CHECK-NEXT: entry:
|
|
// CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <4 x float> poison, float [[C:%.*]], i32 0
|
|
// CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <4 x float> [[DOTSPLATINSERT]], <4 x float> poison, <4 x i32> zeroinitializer
|
|
// CHECK-NEXT: [[TMP0:%.*]] = call <4 x float> @llvm.fma.v4f32(<4 x float> [[A:%.*]], <4 x float> [[B:%.*]], <4 x float> [[DOTSPLAT]])
|
|
// CHECK-NEXT: ret <4 x float> [[TMP0]]
|
|
//
|
|
float32x4_t test_vfmasq_n_f32(float32x4_t a, float32x4_t b, float32_t c) {
|
|
#ifdef POLYMORPHIC
|
|
return vfmasq(a, b, c);
|
|
#else /* POLYMORPHIC */
|
|
return vfmasq_n_f32(a, b, c);
|
|
#endif /* POLYMORPHIC */
|
|
}
|
|
|
|
// CHECK-LABEL: @test_vfmsq_f16(
|
|
// CHECK-NEXT: entry:
|
|
// CHECK-NEXT: [[TMP0:%.*]] = fneg <8 x half> [[C:%.*]]
|
|
// CHECK-NEXT: [[TMP1:%.*]] = call <8 x half> @llvm.fma.v8f16(<8 x half> [[B:%.*]], <8 x half> [[TMP0]], <8 x half> [[A:%.*]])
|
|
// CHECK-NEXT: ret <8 x half> [[TMP1]]
|
|
//
|
|
float16x8_t test_vfmsq_f16(float16x8_t a, float16x8_t b, float16x8_t c) {
|
|
#ifdef POLYMORPHIC
|
|
return vfmsq(a, b, c);
|
|
#else /* POLYMORPHIC */
|
|
return vfmsq_f16(a, b, c);
|
|
#endif /* POLYMORPHIC */
|
|
}
|
|
|
|
// CHECK-LABEL: @test_vfmsq_f32(
|
|
// CHECK-NEXT: entry:
|
|
// CHECK-NEXT: [[TMP0:%.*]] = fneg <4 x float> [[C:%.*]]
|
|
// CHECK-NEXT: [[TMP1:%.*]] = call <4 x float> @llvm.fma.v4f32(<4 x float> [[B:%.*]], <4 x float> [[TMP0]], <4 x float> [[A:%.*]])
|
|
// CHECK-NEXT: ret <4 x float> [[TMP1]]
|
|
//
|
|
float32x4_t test_vfmsq_f32(float32x4_t a, float32x4_t b, float32x4_t c) {
|
|
#ifdef POLYMORPHIC
|
|
return vfmsq(a, b, c);
|
|
#else /* POLYMORPHIC */
|
|
return vfmsq_f32(a, b, c);
|
|
#endif /* POLYMORPHIC */
|
|
}
|
|
|
|
// CHECK-LABEL: @test_vmlaq_n_s8(
|
|
// CHECK-NEXT: entry:
|
|
// CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <16 x i8> poison, i8 [[C:%.*]], i32 0
|
|
// CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <16 x i8> [[DOTSPLATINSERT]], <16 x i8> poison, <16 x i32> zeroinitializer
|
|
// CHECK-NEXT: [[TMP0:%.*]] = mul <16 x i8> [[B:%.*]], [[DOTSPLAT]]
|
|
// CHECK-NEXT: [[TMP1:%.*]] = add <16 x i8> [[TMP0]], [[A:%.*]]
|
|
// CHECK-NEXT: ret <16 x i8> [[TMP1]]
|
|
//
|
|
int8x16_t test_vmlaq_n_s8(int8x16_t a, int8x16_t b, int8_t c) {
|
|
#ifdef POLYMORPHIC
|
|
return vmlaq(a, b, c);
|
|
#else /* POLYMORPHIC */
|
|
return vmlaq_n_s8(a, b, c);
|
|
#endif /* POLYMORPHIC */
|
|
}
|
|
|
|
// CHECK-LABEL: @test_vmlaq_n_s16(
|
|
// CHECK-NEXT: entry:
|
|
// CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <8 x i16> poison, i16 [[C:%.*]], i32 0
|
|
// CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <8 x i16> [[DOTSPLATINSERT]], <8 x i16> poison, <8 x i32> zeroinitializer
|
|
// CHECK-NEXT: [[TMP0:%.*]] = mul <8 x i16> [[B:%.*]], [[DOTSPLAT]]
|
|
// CHECK-NEXT: [[TMP1:%.*]] = add <8 x i16> [[TMP0]], [[A:%.*]]
|
|
// CHECK-NEXT: ret <8 x i16> [[TMP1]]
|
|
//
|
|
int16x8_t test_vmlaq_n_s16(int16x8_t a, int16x8_t b, int16_t c) {
|
|
#ifdef POLYMORPHIC
|
|
return vmlaq(a, b, c);
|
|
#else /* POLYMORPHIC */
|
|
return vmlaq_n_s16(a, b, c);
|
|
#endif /* POLYMORPHIC */
|
|
}
|
|
|
|
// CHECK-LABEL: @test_vmlaq_n_s32(
|
|
// CHECK-NEXT: entry:
|
|
// CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <4 x i32> poison, i32 [[C:%.*]], i32 0
|
|
// CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <4 x i32> [[DOTSPLATINSERT]], <4 x i32> poison, <4 x i32> zeroinitializer
|
|
// CHECK-NEXT: [[TMP0:%.*]] = mul <4 x i32> [[B:%.*]], [[DOTSPLAT]]
|
|
// CHECK-NEXT: [[TMP1:%.*]] = add <4 x i32> [[TMP0]], [[A:%.*]]
|
|
// CHECK-NEXT: ret <4 x i32> [[TMP1]]
|
|
//
|
|
int32x4_t test_vmlaq_n_s32(int32x4_t a, int32x4_t b, int32_t c) {
|
|
#ifdef POLYMORPHIC
|
|
return vmlaq(a, b, c);
|
|
#else /* POLYMORPHIC */
|
|
return vmlaq_n_s32(a, b, c);
|
|
#endif /* POLYMORPHIC */
|
|
}
|
|
|
|
// CHECK-LABEL: @test_vmlaq_n_u8(
|
|
// CHECK-NEXT: entry:
|
|
// CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <16 x i8> poison, i8 [[C:%.*]], i32 0
|
|
// CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <16 x i8> [[DOTSPLATINSERT]], <16 x i8> poison, <16 x i32> zeroinitializer
|
|
// CHECK-NEXT: [[TMP0:%.*]] = mul <16 x i8> [[B:%.*]], [[DOTSPLAT]]
|
|
// CHECK-NEXT: [[TMP1:%.*]] = add <16 x i8> [[TMP0]], [[A:%.*]]
|
|
// CHECK-NEXT: ret <16 x i8> [[TMP1]]
|
|
//
|
|
uint8x16_t test_vmlaq_n_u8(uint8x16_t a, uint8x16_t b, uint8_t c) {
|
|
#ifdef POLYMORPHIC
|
|
return vmlaq(a, b, c);
|
|
#else /* POLYMORPHIC */
|
|
return vmlaq_n_u8(a, b, c);
|
|
#endif /* POLYMORPHIC */
|
|
}
|
|
|
|
// CHECK-LABEL: @test_vmlaq_n_u16(
|
|
// CHECK-NEXT: entry:
|
|
// CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <8 x i16> poison, i16 [[C:%.*]], i32 0
|
|
// CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <8 x i16> [[DOTSPLATINSERT]], <8 x i16> poison, <8 x i32> zeroinitializer
|
|
// CHECK-NEXT: [[TMP0:%.*]] = mul <8 x i16> [[B:%.*]], [[DOTSPLAT]]
|
|
// CHECK-NEXT: [[TMP1:%.*]] = add <8 x i16> [[TMP0]], [[A:%.*]]
|
|
// CHECK-NEXT: ret <8 x i16> [[TMP1]]
|
|
//
|
|
uint16x8_t test_vmlaq_n_u16(uint16x8_t a, uint16x8_t b, uint16_t c) {
|
|
#ifdef POLYMORPHIC
|
|
return vmlaq(a, b, c);
|
|
#else /* POLYMORPHIC */
|
|
return vmlaq_n_u16(a, b, c);
|
|
#endif /* POLYMORPHIC */
|
|
}
|
|
|
|
// CHECK-LABEL: @test_vmlaq_n_u32(
|
|
// CHECK-NEXT: entry:
|
|
// CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <4 x i32> poison, i32 [[C:%.*]], i32 0
|
|
// CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <4 x i32> [[DOTSPLATINSERT]], <4 x i32> poison, <4 x i32> zeroinitializer
|
|
// CHECK-NEXT: [[TMP0:%.*]] = mul <4 x i32> [[B:%.*]], [[DOTSPLAT]]
|
|
// CHECK-NEXT: [[TMP1:%.*]] = add <4 x i32> [[TMP0]], [[A:%.*]]
|
|
// CHECK-NEXT: ret <4 x i32> [[TMP1]]
|
|
//
|
|
uint32x4_t test_vmlaq_n_u32(uint32x4_t a, uint32x4_t b, uint32_t c) {
|
|
#ifdef POLYMORPHIC
|
|
return vmlaq(a, b, c);
|
|
#else /* POLYMORPHIC */
|
|
return vmlaq_n_u32(a, b, c);
|
|
#endif /* POLYMORPHIC */
|
|
}
|
|
|
|
// CHECK-LABEL: @test_vmlasq_n_s8(
|
|
// CHECK-NEXT: entry:
|
|
// CHECK-NEXT: [[TMP0:%.*]] = mul <16 x i8> [[A:%.*]], [[B:%.*]]
|
|
// CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <16 x i8> poison, i8 [[C:%.*]], i32 0
|
|
// CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <16 x i8> [[DOTSPLATINSERT]], <16 x i8> poison, <16 x i32> zeroinitializer
|
|
// CHECK-NEXT: [[TMP1:%.*]] = add <16 x i8> [[TMP0]], [[DOTSPLAT]]
|
|
// CHECK-NEXT: ret <16 x i8> [[TMP1]]
|
|
//
|
|
int8x16_t test_vmlasq_n_s8(int8x16_t a, int8x16_t b, int8_t c) {
|
|
#ifdef POLYMORPHIC
|
|
return vmlasq(a, b, c);
|
|
#else /* POLYMORPHIC */
|
|
return vmlasq_n_s8(a, b, c);
|
|
#endif /* POLYMORPHIC */
|
|
}
|
|
|
|
// CHECK-LABEL: @test_vmlasq_n_s16(
|
|
// CHECK-NEXT: entry:
|
|
// CHECK-NEXT: [[TMP0:%.*]] = mul <8 x i16> [[A:%.*]], [[B:%.*]]
|
|
// CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <8 x i16> poison, i16 [[C:%.*]], i32 0
|
|
// CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <8 x i16> [[DOTSPLATINSERT]], <8 x i16> poison, <8 x i32> zeroinitializer
|
|
// CHECK-NEXT: [[TMP1:%.*]] = add <8 x i16> [[TMP0]], [[DOTSPLAT]]
|
|
// CHECK-NEXT: ret <8 x i16> [[TMP1]]
|
|
//
|
|
int16x8_t test_vmlasq_n_s16(int16x8_t a, int16x8_t b, int16_t c) {
|
|
#ifdef POLYMORPHIC
|
|
return vmlasq(a, b, c);
|
|
#else /* POLYMORPHIC */
|
|
return vmlasq_n_s16(a, b, c);
|
|
#endif /* POLYMORPHIC */
|
|
}
|
|
|
|
// CHECK-LABEL: @test_vmlasq_n_s32(
|
|
// CHECK-NEXT: entry:
|
|
// CHECK-NEXT: [[TMP0:%.*]] = mul <4 x i32> [[A:%.*]], [[B:%.*]]
|
|
// CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <4 x i32> poison, i32 [[C:%.*]], i32 0
|
|
// CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <4 x i32> [[DOTSPLATINSERT]], <4 x i32> poison, <4 x i32> zeroinitializer
|
|
// CHECK-NEXT: [[TMP1:%.*]] = add <4 x i32> [[TMP0]], [[DOTSPLAT]]
|
|
// CHECK-NEXT: ret <4 x i32> [[TMP1]]
|
|
//
|
|
int32x4_t test_vmlasq_n_s32(int32x4_t a, int32x4_t b, int32_t c) {
|
|
#ifdef POLYMORPHIC
|
|
return vmlasq(a, b, c);
|
|
#else /* POLYMORPHIC */
|
|
return vmlasq_n_s32(a, b, c);
|
|
#endif /* POLYMORPHIC */
|
|
}
|
|
|
|
// CHECK-LABEL: @test_vmlasq_n_u8(
|
|
// CHECK-NEXT: entry:
|
|
// CHECK-NEXT: [[TMP0:%.*]] = mul <16 x i8> [[A:%.*]], [[B:%.*]]
|
|
// CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <16 x i8> poison, i8 [[C:%.*]], i32 0
|
|
// CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <16 x i8> [[DOTSPLATINSERT]], <16 x i8> poison, <16 x i32> zeroinitializer
|
|
// CHECK-NEXT: [[TMP1:%.*]] = add <16 x i8> [[TMP0]], [[DOTSPLAT]]
|
|
// CHECK-NEXT: ret <16 x i8> [[TMP1]]
|
|
//
|
|
uint8x16_t test_vmlasq_n_u8(uint8x16_t a, uint8x16_t b, uint8_t c) {
|
|
#ifdef POLYMORPHIC
|
|
return vmlasq(a, b, c);
|
|
#else /* POLYMORPHIC */
|
|
return vmlasq_n_u8(a, b, c);
|
|
#endif /* POLYMORPHIC */
|
|
}
|
|
|
|
// CHECK-LABEL: @test_vmlasq_n_u16(
|
|
// CHECK-NEXT: entry:
|
|
// CHECK-NEXT: [[TMP0:%.*]] = mul <8 x i16> [[A:%.*]], [[B:%.*]]
|
|
// CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <8 x i16> poison, i16 [[C:%.*]], i32 0
|
|
// CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <8 x i16> [[DOTSPLATINSERT]], <8 x i16> poison, <8 x i32> zeroinitializer
|
|
// CHECK-NEXT: [[TMP1:%.*]] = add <8 x i16> [[TMP0]], [[DOTSPLAT]]
|
|
// CHECK-NEXT: ret <8 x i16> [[TMP1]]
|
|
//
|
|
uint16x8_t test_vmlasq_n_u16(uint16x8_t a, uint16x8_t b, uint16_t c) {
|
|
#ifdef POLYMORPHIC
|
|
return vmlasq(a, b, c);
|
|
#else /* POLYMORPHIC */
|
|
return vmlasq_n_u16(a, b, c);
|
|
#endif /* POLYMORPHIC */
|
|
}
|
|
|
|
// CHECK-LABEL: @test_vmlasq_n_u32(
|
|
// CHECK-NEXT: entry:
|
|
// CHECK-NEXT: [[TMP0:%.*]] = mul <4 x i32> [[A:%.*]], [[B:%.*]]
|
|
// CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <4 x i32> poison, i32 [[C:%.*]], i32 0
|
|
// CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <4 x i32> [[DOTSPLATINSERT]], <4 x i32> poison, <4 x i32> zeroinitializer
|
|
// CHECK-NEXT: [[TMP1:%.*]] = add <4 x i32> [[TMP0]], [[DOTSPLAT]]
|
|
// CHECK-NEXT: ret <4 x i32> [[TMP1]]
|
|
//
|
|
uint32x4_t test_vmlasq_n_u32(uint32x4_t a, uint32x4_t b, uint32_t c) {
|
|
#ifdef POLYMORPHIC
|
|
return vmlasq(a, b, c);
|
|
#else /* POLYMORPHIC */
|
|
return vmlasq_n_u32(a, b, c);
|
|
#endif /* POLYMORPHIC */
|
|
}
|
|
|
|
// CHECK-LABEL: @test_vqdmlahq_n_s8(
|
|
// CHECK-NEXT: entry:
|
|
// CHECK-NEXT: [[TMP0:%.*]] = zext i8 [[C:%.*]] to i32
|
|
// CHECK-NEXT: [[TMP1:%.*]] = call <16 x i8> @llvm.arm.mve.vqdmlah.v16i8(<16 x i8> [[A:%.*]], <16 x i8> [[B:%.*]], i32 [[TMP0]])
|
|
// CHECK-NEXT: ret <16 x i8> [[TMP1]]
|
|
//
|
|
int8x16_t test_vqdmlahq_n_s8(int8x16_t a, int8x16_t b, int8_t c) {
|
|
#ifdef POLYMORPHIC
|
|
return vqdmlahq(a, b, c);
|
|
#else /* POLYMORPHIC */
|
|
return vqdmlahq_n_s8(a, b, c);
|
|
#endif /* POLYMORPHIC */
|
|
}
|
|
|
|
// CHECK-LABEL: @test_vqdmlahq_n_s16(
|
|
// CHECK-NEXT: entry:
|
|
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[C:%.*]] to i32
|
|
// CHECK-NEXT: [[TMP1:%.*]] = call <8 x i16> @llvm.arm.mve.vqdmlah.v8i16(<8 x i16> [[A:%.*]], <8 x i16> [[B:%.*]], i32 [[TMP0]])
|
|
// CHECK-NEXT: ret <8 x i16> [[TMP1]]
|
|
//
|
|
int16x8_t test_vqdmlahq_n_s16(int16x8_t a, int16x8_t b, int16_t c) {
|
|
#ifdef POLYMORPHIC
|
|
return vqdmlahq(a, b, c);
|
|
#else /* POLYMORPHIC */
|
|
return vqdmlahq_n_s16(a, b, c);
|
|
#endif /* POLYMORPHIC */
|
|
}
|
|
|
|
// CHECK-LABEL: @test_vqdmlahq_n_s32(
|
|
// CHECK-NEXT: entry:
|
|
// CHECK-NEXT: [[TMP0:%.*]] = call <4 x i32> @llvm.arm.mve.vqdmlah.v4i32(<4 x i32> [[A:%.*]], <4 x i32> [[B:%.*]], i32 [[C:%.*]])
|
|
// CHECK-NEXT: ret <4 x i32> [[TMP0]]
|
|
//
|
|
int32x4_t test_vqdmlahq_n_s32(int32x4_t a, int32x4_t b, int32_t c) {
|
|
#ifdef POLYMORPHIC
|
|
return vqdmlahq(a, b, c);
|
|
#else /* POLYMORPHIC */
|
|
return vqdmlahq_n_s32(a, b, c);
|
|
#endif /* POLYMORPHIC */
|
|
}
|
|
|
|
// CHECK-LABEL: @test_vqdmlashq_n_s8(
|
|
// CHECK-NEXT: entry:
|
|
// CHECK-NEXT: [[TMP0:%.*]] = zext i8 [[ADD:%.*]] to i32
|
|
// CHECK-NEXT: [[TMP1:%.*]] = call <16 x i8> @llvm.arm.mve.vqdmlash.v16i8(<16 x i8> [[M1:%.*]], <16 x i8> [[M2:%.*]], i32 [[TMP0]])
|
|
// CHECK-NEXT: ret <16 x i8> [[TMP1]]
|
|
//
|
|
int8x16_t test_vqdmlashq_n_s8(int8x16_t m1, int8x16_t m2, int8_t add) {
|
|
#ifdef POLYMORPHIC
|
|
return vqdmlashq(m1, m2, add);
|
|
#else /* POLYMORPHIC */
|
|
return vqdmlashq_n_s8(m1, m2, add);
|
|
#endif /* POLYMORPHIC */
|
|
}
|
|
|
|
// CHECK-LABEL: @test_vqdmlashq_n_s16(
|
|
// CHECK-NEXT: entry:
|
|
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[ADD:%.*]] to i32
|
|
// CHECK-NEXT: [[TMP1:%.*]] = call <8 x i16> @llvm.arm.mve.vqdmlash.v8i16(<8 x i16> [[M1:%.*]], <8 x i16> [[M2:%.*]], i32 [[TMP0]])
|
|
// CHECK-NEXT: ret <8 x i16> [[TMP1]]
|
|
//
|
|
int16x8_t test_vqdmlashq_n_s16(int16x8_t m1, int16x8_t m2, int16_t add) {
|
|
#ifdef POLYMORPHIC
|
|
return vqdmlashq(m1, m2, add);
|
|
#else /* POLYMORPHIC */
|
|
return vqdmlashq_n_s16(m1, m2, add);
|
|
#endif /* POLYMORPHIC */
|
|
}
|
|
|
|
// CHECK-LABEL: @test_vqdmlashq_n_s32(
|
|
// CHECK-NEXT: entry:
|
|
// CHECK-NEXT: [[TMP0:%.*]] = call <4 x i32> @llvm.arm.mve.vqdmlash.v4i32(<4 x i32> [[M1:%.*]], <4 x i32> [[M2:%.*]], i32 [[ADD:%.*]])
|
|
// CHECK-NEXT: ret <4 x i32> [[TMP0]]
|
|
//
|
|
int32x4_t test_vqdmlashq_n_s32(int32x4_t m1, int32x4_t m2, int32_t add) {
|
|
#ifdef POLYMORPHIC
|
|
return vqdmlashq(m1, m2, add);
|
|
#else /* POLYMORPHIC */
|
|
return vqdmlashq_n_s32(m1, m2, add);
|
|
#endif /* POLYMORPHIC */
|
|
}
|
|
|
|
// CHECK-LABEL: @test_vqrdmlahq_n_s8(
|
|
// CHECK-NEXT: entry:
|
|
// CHECK-NEXT: [[TMP0:%.*]] = zext i8 [[C:%.*]] to i32
|
|
// CHECK-NEXT: [[TMP1:%.*]] = call <16 x i8> @llvm.arm.mve.vqrdmlah.v16i8(<16 x i8> [[A:%.*]], <16 x i8> [[B:%.*]], i32 [[TMP0]])
|
|
// CHECK-NEXT: ret <16 x i8> [[TMP1]]
|
|
//
|
|
int8x16_t test_vqrdmlahq_n_s8(int8x16_t a, int8x16_t b, int8_t c) {
|
|
#ifdef POLYMORPHIC
|
|
return vqrdmlahq(a, b, c);
|
|
#else /* POLYMORPHIC */
|
|
return vqrdmlahq_n_s8(a, b, c);
|
|
#endif /* POLYMORPHIC */
|
|
}
|
|
|
|
// CHECK-LABEL: @test_vqrdmlahq_n_s16(
|
|
// CHECK-NEXT: entry:
|
|
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[C:%.*]] to i32
|
|
// CHECK-NEXT: [[TMP1:%.*]] = call <8 x i16> @llvm.arm.mve.vqrdmlah.v8i16(<8 x i16> [[A:%.*]], <8 x i16> [[B:%.*]], i32 [[TMP0]])
|
|
// CHECK-NEXT: ret <8 x i16> [[TMP1]]
|
|
//
|
|
int16x8_t test_vqrdmlahq_n_s16(int16x8_t a, int16x8_t b, int16_t c) {
|
|
#ifdef POLYMORPHIC
|
|
return vqrdmlahq(a, b, c);
|
|
#else /* POLYMORPHIC */
|
|
return vqrdmlahq_n_s16(a, b, c);
|
|
#endif /* POLYMORPHIC */
|
|
}
|
|
|
|
// CHECK-LABEL: @test_vqrdmlahq_n_s32(
|
|
// CHECK-NEXT: entry:
|
|
// CHECK-NEXT: [[TMP0:%.*]] = call <4 x i32> @llvm.arm.mve.vqrdmlah.v4i32(<4 x i32> [[A:%.*]], <4 x i32> [[B:%.*]], i32 [[C:%.*]])
|
|
// CHECK-NEXT: ret <4 x i32> [[TMP0]]
|
|
//
|
|
int32x4_t test_vqrdmlahq_n_s32(int32x4_t a, int32x4_t b, int32_t c) {
|
|
#ifdef POLYMORPHIC
|
|
return vqrdmlahq(a, b, c);
|
|
#else /* POLYMORPHIC */
|
|
return vqrdmlahq_n_s32(a, b, c);
|
|
#endif /* POLYMORPHIC */
|
|
}
|
|
|
|
// CHECK-LABEL: @test_vqrdmlashq_n_s8(
|
|
// CHECK-NEXT: entry:
|
|
// CHECK-NEXT: [[TMP0:%.*]] = zext i8 [[C:%.*]] to i32
|
|
// CHECK-NEXT: [[TMP1:%.*]] = call <16 x i8> @llvm.arm.mve.vqrdmlash.v16i8(<16 x i8> [[A:%.*]], <16 x i8> [[B:%.*]], i32 [[TMP0]])
|
|
// CHECK-NEXT: ret <16 x i8> [[TMP1]]
|
|
//
|
|
int8x16_t test_vqrdmlashq_n_s8(int8x16_t a, int8x16_t b, int8_t c) {
|
|
#ifdef POLYMORPHIC
|
|
return vqrdmlashq(a, b, c);
|
|
#else /* POLYMORPHIC */
|
|
return vqrdmlashq_n_s8(a, b, c);
|
|
#endif /* POLYMORPHIC */
|
|
}
|
|
|
|
// CHECK-LABEL: @test_vqrdmlashq_n_s16(
|
|
// CHECK-NEXT: entry:
|
|
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[C:%.*]] to i32
|
|
// CHECK-NEXT: [[TMP1:%.*]] = call <8 x i16> @llvm.arm.mve.vqrdmlash.v8i16(<8 x i16> [[A:%.*]], <8 x i16> [[B:%.*]], i32 [[TMP0]])
|
|
// CHECK-NEXT: ret <8 x i16> [[TMP1]]
|
|
//
|
|
int16x8_t test_vqrdmlashq_n_s16(int16x8_t a, int16x8_t b, int16_t c) {
|
|
#ifdef POLYMORPHIC
|
|
return vqrdmlashq(a, b, c);
|
|
#else /* POLYMORPHIC */
|
|
return vqrdmlashq_n_s16(a, b, c);
|
|
#endif /* POLYMORPHIC */
|
|
}
|
|
|
|
// CHECK-LABEL: @test_vqrdmlashq_n_s32(
|
|
// CHECK-NEXT: entry:
|
|
// CHECK-NEXT: [[TMP0:%.*]] = call <4 x i32> @llvm.arm.mve.vqrdmlash.v4i32(<4 x i32> [[A:%.*]], <4 x i32> [[B:%.*]], i32 [[C:%.*]])
|
|
// CHECK-NEXT: ret <4 x i32> [[TMP0]]
|
|
//
|
|
int32x4_t test_vqrdmlashq_n_s32(int32x4_t a, int32x4_t b, int32_t c) {
|
|
#ifdef POLYMORPHIC
|
|
return vqrdmlashq(a, b, c);
|
|
#else /* POLYMORPHIC */
|
|
return vqrdmlashq_n_s32(a, b, c);
|
|
#endif /* POLYMORPHIC */
|
|
}
|
|
|
|
// CHECK-LABEL: @test_vfmaq_m_f16(
|
|
// CHECK-NEXT: entry:
|
|
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
|
|
// CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
|
|
// CHECK-NEXT: [[TMP2:%.*]] = call <8 x half> @llvm.arm.mve.fma.predicated.v8f16.v8i1(<8 x half> [[B:%.*]], <8 x half> [[C:%.*]], <8 x half> [[A:%.*]], <8 x i1> [[TMP1]])
|
|
// CHECK-NEXT: ret <8 x half> [[TMP2]]
|
|
//
|
|
float16x8_t test_vfmaq_m_f16(float16x8_t a, float16x8_t b, float16x8_t c, mve_pred16_t p) {
|
|
#ifdef POLYMORPHIC
|
|
return vfmaq_m(a, b, c, p);
|
|
#else /* POLYMORPHIC */
|
|
return vfmaq_m_f16(a, b, c, p);
|
|
#endif /* POLYMORPHIC */
|
|
}
|
|
|
|
// CHECK-LABEL: @test_vfmaq_m_f32(
|
|
// CHECK-NEXT: entry:
|
|
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
|
|
// CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
|
|
// CHECK-NEXT: [[TMP2:%.*]] = call <4 x float> @llvm.arm.mve.fma.predicated.v4f32.v4i1(<4 x float> [[B:%.*]], <4 x float> [[C:%.*]], <4 x float> [[A:%.*]], <4 x i1> [[TMP1]])
|
|
// CHECK-NEXT: ret <4 x float> [[TMP2]]
|
|
//
|
|
float32x4_t test_vfmaq_m_f32(float32x4_t a, float32x4_t b, float32x4_t c, mve_pred16_t p) {
|
|
#ifdef POLYMORPHIC
|
|
return vfmaq_m(a, b, c, p);
|
|
#else /* POLYMORPHIC */
|
|
return vfmaq_m_f32(a, b, c, p);
|
|
#endif /* POLYMORPHIC */
|
|
}
|
|
|
|
// CHECK-LABEL: @test_vfmaq_m_n_f16(
|
|
// CHECK-NEXT: entry:
|
|
// CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <8 x half> poison, half [[C:%.*]], i32 0
|
|
// CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <8 x half> [[DOTSPLATINSERT]], <8 x half> poison, <8 x i32> zeroinitializer
|
|
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
|
|
// CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
|
|
// CHECK-NEXT: [[TMP2:%.*]] = call <8 x half> @llvm.arm.mve.fma.predicated.v8f16.v8i1(<8 x half> [[B:%.*]], <8 x half> [[DOTSPLAT]], <8 x half> [[A:%.*]], <8 x i1> [[TMP1]])
|
|
// CHECK-NEXT: ret <8 x half> [[TMP2]]
|
|
//
|
|
float16x8_t test_vfmaq_m_n_f16(float16x8_t a, float16x8_t b, float16_t c, mve_pred16_t p) {
|
|
#ifdef POLYMORPHIC
|
|
return vfmaq_m(a, b, c, p);
|
|
#else /* POLYMORPHIC */
|
|
return vfmaq_m_n_f16(a, b, c, p);
|
|
#endif /* POLYMORPHIC */
|
|
}
|
|
|
|
// CHECK-LABEL: @test_vfmaq_m_n_f32(
|
|
// CHECK-NEXT: entry:
|
|
// CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <4 x float> poison, float [[C:%.*]], i32 0
|
|
// CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <4 x float> [[DOTSPLATINSERT]], <4 x float> poison, <4 x i32> zeroinitializer
|
|
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
|
|
// CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
|
|
// CHECK-NEXT: [[TMP2:%.*]] = call <4 x float> @llvm.arm.mve.fma.predicated.v4f32.v4i1(<4 x float> [[B:%.*]], <4 x float> [[DOTSPLAT]], <4 x float> [[A:%.*]], <4 x i1> [[TMP1]])
|
|
// CHECK-NEXT: ret <4 x float> [[TMP2]]
|
|
//
|
|
float32x4_t test_vfmaq_m_n_f32(float32x4_t a, float32x4_t b, float32_t c, mve_pred16_t p) {
|
|
#ifdef POLYMORPHIC
|
|
return vfmaq_m(a, b, c, p);
|
|
#else /* POLYMORPHIC */
|
|
return vfmaq_m_n_f32(a, b, c, p);
|
|
#endif /* POLYMORPHIC */
|
|
}
|
|
|
|
// CHECK-LABEL: @test_vfmasq_m_n_f16(
|
|
// CHECK-NEXT: entry:
|
|
// CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <8 x half> poison, half [[C:%.*]], i32 0
|
|
// CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <8 x half> [[DOTSPLATINSERT]], <8 x half> poison, <8 x i32> zeroinitializer
|
|
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
|
|
// CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
|
|
// CHECK-NEXT: [[TMP2:%.*]] = call <8 x half> @llvm.arm.mve.fma.predicated.v8f16.v8i1(<8 x half> [[A:%.*]], <8 x half> [[B:%.*]], <8 x half> [[DOTSPLAT]], <8 x i1> [[TMP1]])
|
|
// CHECK-NEXT: ret <8 x half> [[TMP2]]
|
|
//
|
|
float16x8_t test_vfmasq_m_n_f16(float16x8_t a, float16x8_t b, float16_t c, mve_pred16_t p) {
|
|
#ifdef POLYMORPHIC
|
|
return vfmasq_m(a, b, c, p);
|
|
#else /* POLYMORPHIC */
|
|
return vfmasq_m_n_f16(a, b, c, p);
|
|
#endif /* POLYMORPHIC */
|
|
}
|
|
|
|
// CHECK-LABEL: @test_vfmasq_m_n_f32(
|
|
// CHECK-NEXT: entry:
|
|
// CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <4 x float> poison, float [[C:%.*]], i32 0
|
|
// CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <4 x float> [[DOTSPLATINSERT]], <4 x float> poison, <4 x i32> zeroinitializer
|
|
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
|
|
// CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
|
|
// CHECK-NEXT: [[TMP2:%.*]] = call <4 x float> @llvm.arm.mve.fma.predicated.v4f32.v4i1(<4 x float> [[A:%.*]], <4 x float> [[B:%.*]], <4 x float> [[DOTSPLAT]], <4 x i1> [[TMP1]])
|
|
// CHECK-NEXT: ret <4 x float> [[TMP2]]
|
|
//
|
|
float32x4_t test_vfmasq_m_n_f32(float32x4_t a, float32x4_t b, float32_t c, mve_pred16_t p) {
|
|
#ifdef POLYMORPHIC
|
|
return vfmasq_m(a, b, c, p);
|
|
#else /* POLYMORPHIC */
|
|
return vfmasq_m_n_f32(a, b, c, p);
|
|
#endif /* POLYMORPHIC */
|
|
}
|
|
|
|
// CHECK-LABEL: @test_vfmsq_m_f16(
|
|
// CHECK-NEXT: entry:
|
|
// CHECK-NEXT: [[TMP0:%.*]] = fneg <8 x half> [[C:%.*]]
|
|
// CHECK-NEXT: [[TMP1:%.*]] = zext i16 [[P:%.*]] to i32
|
|
// CHECK-NEXT: [[TMP2:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP1]])
|
|
// CHECK-NEXT: [[TMP3:%.*]] = call <8 x half> @llvm.arm.mve.fma.predicated.v8f16.v8i1(<8 x half> [[B:%.*]], <8 x half> [[TMP0]], <8 x half> [[A:%.*]], <8 x i1> [[TMP2]])
|
|
// CHECK-NEXT: ret <8 x half> [[TMP3]]
|
|
//
|
|
float16x8_t test_vfmsq_m_f16(float16x8_t a, float16x8_t b, float16x8_t c, mve_pred16_t p) {
|
|
#ifdef POLYMORPHIC
|
|
return vfmsq_m(a, b, c, p);
|
|
#else /* POLYMORPHIC */
|
|
return vfmsq_m_f16(a, b, c, p);
|
|
#endif /* POLYMORPHIC */
|
|
}
|
|
|
|
// CHECK-LABEL: @test_vfmsq_m_f32(
|
|
// CHECK-NEXT: entry:
|
|
// CHECK-NEXT: [[TMP0:%.*]] = fneg <4 x float> [[C:%.*]]
|
|
// CHECK-NEXT: [[TMP1:%.*]] = zext i16 [[P:%.*]] to i32
|
|
// CHECK-NEXT: [[TMP2:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP1]])
|
|
// CHECK-NEXT: [[TMP3:%.*]] = call <4 x float> @llvm.arm.mve.fma.predicated.v4f32.v4i1(<4 x float> [[B:%.*]], <4 x float> [[TMP0]], <4 x float> [[A:%.*]], <4 x i1> [[TMP2]])
|
|
// CHECK-NEXT: ret <4 x float> [[TMP3]]
|
|
//
|
|
float32x4_t test_vfmsq_m_f32(float32x4_t a, float32x4_t b, float32x4_t c, mve_pred16_t p) {
|
|
#ifdef POLYMORPHIC
|
|
return vfmsq_m(a, b, c, p);
|
|
#else /* POLYMORPHIC */
|
|
return vfmsq_m_f32(a, b, c, p);
|
|
#endif /* POLYMORPHIC */
|
|
}
|
|
|
|
// CHECK-LABEL: @test_vmlaq_m_n_s8(
|
|
// CHECK-NEXT: entry:
|
|
// CHECK-NEXT: [[TMP0:%.*]] = zext i8 [[C:%.*]] to i32
|
|
// CHECK-NEXT: [[TMP1:%.*]] = zext i16 [[P:%.*]] to i32
|
|
// CHECK-NEXT: [[TMP2:%.*]] = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 [[TMP1]])
|
|
// CHECK-NEXT: [[TMP3:%.*]] = call <16 x i8> @llvm.arm.mve.vmla.n.predicated.v16i8.v16i1(<16 x i8> [[A:%.*]], <16 x i8> [[B:%.*]], i32 [[TMP0]], <16 x i1> [[TMP2]])
|
|
// CHECK-NEXT: ret <16 x i8> [[TMP3]]
|
|
//
|
|
int8x16_t test_vmlaq_m_n_s8(int8x16_t a, int8x16_t b, int8_t c, mve_pred16_t p) {
|
|
#ifdef POLYMORPHIC
|
|
return vmlaq_m(a, b, c, p);
|
|
#else /* POLYMORPHIC */
|
|
return vmlaq_m_n_s8(a, b, c, p);
|
|
#endif /* POLYMORPHIC */
|
|
}
|
|
|
|
// CHECK-LABEL: @test_vmlaq_m_n_s16(
|
|
// CHECK-NEXT: entry:
|
|
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[C:%.*]] to i32
|
|
// CHECK-NEXT: [[TMP1:%.*]] = zext i16 [[P:%.*]] to i32
|
|
// CHECK-NEXT: [[TMP2:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP1]])
|
|
// CHECK-NEXT: [[TMP3:%.*]] = call <8 x i16> @llvm.arm.mve.vmla.n.predicated.v8i16.v8i1(<8 x i16> [[A:%.*]], <8 x i16> [[B:%.*]], i32 [[TMP0]], <8 x i1> [[TMP2]])
|
|
// CHECK-NEXT: ret <8 x i16> [[TMP3]]
|
|
//
|
|
int16x8_t test_vmlaq_m_n_s16(int16x8_t a, int16x8_t b, int16_t c, mve_pred16_t p) {
|
|
#ifdef POLYMORPHIC
|
|
return vmlaq_m(a, b, c, p);
|
|
#else /* POLYMORPHIC */
|
|
return vmlaq_m_n_s16(a, b, c, p);
|
|
#endif /* POLYMORPHIC */
|
|
}
|
|
|
|
// CHECK-LABEL: @test_vmlaq_m_n_s32(
|
|
// CHECK-NEXT: entry:
|
|
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
|
|
// CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
|
|
// CHECK-NEXT: [[TMP2:%.*]] = call <4 x i32> @llvm.arm.mve.vmla.n.predicated.v4i32.v4i1(<4 x i32> [[A:%.*]], <4 x i32> [[B:%.*]], i32 [[C:%.*]], <4 x i1> [[TMP1]])
|
|
// CHECK-NEXT: ret <4 x i32> [[TMP2]]
|
|
//
|
|
int32x4_t test_vmlaq_m_n_s32(int32x4_t a, int32x4_t b, int32_t c, mve_pred16_t p) {
|
|
#ifdef POLYMORPHIC
|
|
return vmlaq_m(a, b, c, p);
|
|
#else /* POLYMORPHIC */
|
|
return vmlaq_m_n_s32(a, b, c, p);
|
|
#endif /* POLYMORPHIC */
|
|
}
|
|
|
|
// CHECK-LABEL: @test_vmlaq_m_n_u8(
|
|
// CHECK-NEXT: entry:
|
|
// CHECK-NEXT: [[TMP0:%.*]] = zext i8 [[C:%.*]] to i32
|
|
// CHECK-NEXT: [[TMP1:%.*]] = zext i16 [[P:%.*]] to i32
|
|
// CHECK-NEXT: [[TMP2:%.*]] = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 [[TMP1]])
|
|
// CHECK-NEXT: [[TMP3:%.*]] = call <16 x i8> @llvm.arm.mve.vmla.n.predicated.v16i8.v16i1(<16 x i8> [[A:%.*]], <16 x i8> [[B:%.*]], i32 [[TMP0]], <16 x i1> [[TMP2]])
|
|
// CHECK-NEXT: ret <16 x i8> [[TMP3]]
|
|
//
|
|
uint8x16_t test_vmlaq_m_n_u8(uint8x16_t a, uint8x16_t b, uint8_t c, mve_pred16_t p) {
|
|
#ifdef POLYMORPHIC
|
|
return vmlaq_m(a, b, c, p);
|
|
#else /* POLYMORPHIC */
|
|
return vmlaq_m_n_u8(a, b, c, p);
|
|
#endif /* POLYMORPHIC */
|
|
}
|
|
|
|
// CHECK-LABEL: @test_vmlaq_m_n_u16(
|
|
// CHECK-NEXT: entry:
|
|
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[C:%.*]] to i32
|
|
// CHECK-NEXT: [[TMP1:%.*]] = zext i16 [[P:%.*]] to i32
|
|
// CHECK-NEXT: [[TMP2:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP1]])
|
|
// CHECK-NEXT: [[TMP3:%.*]] = call <8 x i16> @llvm.arm.mve.vmla.n.predicated.v8i16.v8i1(<8 x i16> [[A:%.*]], <8 x i16> [[B:%.*]], i32 [[TMP0]], <8 x i1> [[TMP2]])
|
|
// CHECK-NEXT: ret <8 x i16> [[TMP3]]
|
|
//
|
|
uint16x8_t test_vmlaq_m_n_u16(uint16x8_t a, uint16x8_t b, uint16_t c, mve_pred16_t p) {
|
|
#ifdef POLYMORPHIC
|
|
return vmlaq_m(a, b, c, p);
|
|
#else /* POLYMORPHIC */
|
|
return vmlaq_m_n_u16(a, b, c, p);
|
|
#endif /* POLYMORPHIC */
|
|
}
|
|
|
|
// CHECK-LABEL: @test_vmlaq_m_n_u32(
|
|
// CHECK-NEXT: entry:
|
|
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
|
|
// CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
|
|
// CHECK-NEXT: [[TMP2:%.*]] = call <4 x i32> @llvm.arm.mve.vmla.n.predicated.v4i32.v4i1(<4 x i32> [[A:%.*]], <4 x i32> [[B:%.*]], i32 [[C:%.*]], <4 x i1> [[TMP1]])
|
|
// CHECK-NEXT: ret <4 x i32> [[TMP2]]
|
|
//
|
|
uint32x4_t test_vmlaq_m_n_u32(uint32x4_t a, uint32x4_t b, uint32_t c, mve_pred16_t p) {
|
|
#ifdef POLYMORPHIC
|
|
return vmlaq_m(a, b, c, p);
|
|
#else /* POLYMORPHIC */
|
|
return vmlaq_m_n_u32(a, b, c, p);
|
|
#endif /* POLYMORPHIC */
|
|
}
|
|
|
|
// CHECK-LABEL: @test_vmlasq_m_n_s8(
|
|
// CHECK-NEXT: entry:
|
|
// CHECK-NEXT: [[TMP0:%.*]] = zext i8 [[C:%.*]] to i32
|
|
// CHECK-NEXT: [[TMP1:%.*]] = zext i16 [[P:%.*]] to i32
|
|
// CHECK-NEXT: [[TMP2:%.*]] = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 [[TMP1]])
|
|
// CHECK-NEXT: [[TMP3:%.*]] = call <16 x i8> @llvm.arm.mve.vmlas.n.predicated.v16i8.v16i1(<16 x i8> [[A:%.*]], <16 x i8> [[B:%.*]], i32 [[TMP0]], <16 x i1> [[TMP2]])
|
|
// CHECK-NEXT: ret <16 x i8> [[TMP3]]
|
|
//
|
|
int8x16_t test_vmlasq_m_n_s8(int8x16_t a, int8x16_t b, int8_t c, mve_pred16_t p) {
|
|
#ifdef POLYMORPHIC
|
|
return vmlasq_m(a, b, c, p);
|
|
#else /* POLYMORPHIC */
|
|
return vmlasq_m_n_s8(a, b, c, p);
|
|
#endif /* POLYMORPHIC */
|
|
}
|
|
|
|
// CHECK-LABEL: @test_vmlasq_m_n_s16(
|
|
// CHECK-NEXT: entry:
|
|
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[C:%.*]] to i32
|
|
// CHECK-NEXT: [[TMP1:%.*]] = zext i16 [[P:%.*]] to i32
|
|
// CHECK-NEXT: [[TMP2:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP1]])
|
|
// CHECK-NEXT: [[TMP3:%.*]] = call <8 x i16> @llvm.arm.mve.vmlas.n.predicated.v8i16.v8i1(<8 x i16> [[A:%.*]], <8 x i16> [[B:%.*]], i32 [[TMP0]], <8 x i1> [[TMP2]])
|
|
// CHECK-NEXT: ret <8 x i16> [[TMP3]]
|
|
//
|
|
int16x8_t test_vmlasq_m_n_s16(int16x8_t a, int16x8_t b, int16_t c, mve_pred16_t p) {
|
|
#ifdef POLYMORPHIC
|
|
return vmlasq_m(a, b, c, p);
|
|
#else /* POLYMORPHIC */
|
|
return vmlasq_m_n_s16(a, b, c, p);
|
|
#endif /* POLYMORPHIC */
|
|
}
|
|
|
|
// CHECK-LABEL: @test_vmlasq_m_n_s32(
|
|
// CHECK-NEXT: entry:
|
|
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
|
|
// CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
|
|
// CHECK-NEXT: [[TMP2:%.*]] = call <4 x i32> @llvm.arm.mve.vmlas.n.predicated.v4i32.v4i1(<4 x i32> [[A:%.*]], <4 x i32> [[B:%.*]], i32 [[C:%.*]], <4 x i1> [[TMP1]])
|
|
// CHECK-NEXT: ret <4 x i32> [[TMP2]]
|
|
//
|
|
int32x4_t test_vmlasq_m_n_s32(int32x4_t a, int32x4_t b, int32_t c, mve_pred16_t p) {
|
|
#ifdef POLYMORPHIC
|
|
return vmlasq_m(a, b, c, p);
|
|
#else /* POLYMORPHIC */
|
|
return vmlasq_m_n_s32(a, b, c, p);
|
|
#endif /* POLYMORPHIC */
|
|
}
|
|
|
|
// CHECK-LABEL: @test_vmlasq_m_n_u8(
|
|
// CHECK-NEXT: entry:
|
|
// CHECK-NEXT: [[TMP0:%.*]] = zext i8 [[C:%.*]] to i32
|
|
// CHECK-NEXT: [[TMP1:%.*]] = zext i16 [[P:%.*]] to i32
|
|
// CHECK-NEXT: [[TMP2:%.*]] = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 [[TMP1]])
|
|
// CHECK-NEXT: [[TMP3:%.*]] = call <16 x i8> @llvm.arm.mve.vmlas.n.predicated.v16i8.v16i1(<16 x i8> [[A:%.*]], <16 x i8> [[B:%.*]], i32 [[TMP0]], <16 x i1> [[TMP2]])
|
|
// CHECK-NEXT: ret <16 x i8> [[TMP3]]
|
|
//
|
|
uint8x16_t test_vmlasq_m_n_u8(uint8x16_t a, uint8x16_t b, uint8_t c, mve_pred16_t p) {
|
|
#ifdef POLYMORPHIC
|
|
return vmlasq_m(a, b, c, p);
|
|
#else /* POLYMORPHIC */
|
|
return vmlasq_m_n_u8(a, b, c, p);
|
|
#endif /* POLYMORPHIC */
|
|
}
|
|
|
|
// CHECK-LABEL: @test_vmlasq_m_n_u16(
|
|
// CHECK-NEXT: entry:
|
|
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[C:%.*]] to i32
|
|
// CHECK-NEXT: [[TMP1:%.*]] = zext i16 [[P:%.*]] to i32
|
|
// CHECK-NEXT: [[TMP2:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP1]])
|
|
// CHECK-NEXT: [[TMP3:%.*]] = call <8 x i16> @llvm.arm.mve.vmlas.n.predicated.v8i16.v8i1(<8 x i16> [[A:%.*]], <8 x i16> [[B:%.*]], i32 [[TMP0]], <8 x i1> [[TMP2]])
|
|
// CHECK-NEXT: ret <8 x i16> [[TMP3]]
|
|
//
|
|
uint16x8_t test_vmlasq_m_n_u16(uint16x8_t a, uint16x8_t b, uint16_t c, mve_pred16_t p) {
|
|
#ifdef POLYMORPHIC
|
|
return vmlasq_m(a, b, c, p);
|
|
#else /* POLYMORPHIC */
|
|
return vmlasq_m_n_u16(a, b, c, p);
|
|
#endif /* POLYMORPHIC */
|
|
}
|
|
|
|
// CHECK-LABEL: @test_vmlasq_m_n_u32(
|
|
// CHECK-NEXT: entry:
|
|
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
|
|
// CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
|
|
// CHECK-NEXT: [[TMP2:%.*]] = call <4 x i32> @llvm.arm.mve.vmlas.n.predicated.v4i32.v4i1(<4 x i32> [[A:%.*]], <4 x i32> [[B:%.*]], i32 [[C:%.*]], <4 x i1> [[TMP1]])
|
|
// CHECK-NEXT: ret <4 x i32> [[TMP2]]
|
|
//
|
|
uint32x4_t test_vmlasq_m_n_u32(uint32x4_t a, uint32x4_t b, uint32_t c, mve_pred16_t p) {
|
|
#ifdef POLYMORPHIC
|
|
return vmlasq_m(a, b, c, p);
|
|
#else /* POLYMORPHIC */
|
|
return vmlasq_m_n_u32(a, b, c, p);
|
|
#endif /* POLYMORPHIC */
|
|
}
|
|
|
|
// CHECK-LABEL: @test_vqdmlahq_m_n_s8(
|
|
// CHECK-NEXT: entry:
|
|
// CHECK-NEXT: [[TMP0:%.*]] = zext i8 [[C:%.*]] to i32
|
|
// CHECK-NEXT: [[TMP1:%.*]] = zext i16 [[P:%.*]] to i32
|
|
// CHECK-NEXT: [[TMP2:%.*]] = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 [[TMP1]])
|
|
// CHECK-NEXT: [[TMP3:%.*]] = call <16 x i8> @llvm.arm.mve.vqdmlah.predicated.v16i8.v16i1(<16 x i8> [[A:%.*]], <16 x i8> [[B:%.*]], i32 [[TMP0]], <16 x i1> [[TMP2]])
|
|
// CHECK-NEXT: ret <16 x i8> [[TMP3]]
|
|
//
|
|
int8x16_t test_vqdmlahq_m_n_s8(int8x16_t a, int8x16_t b, int8_t c, mve_pred16_t p) {
|
|
#ifdef POLYMORPHIC
|
|
return vqdmlahq_m(a, b, c, p);
|
|
#else /* POLYMORPHIC */
|
|
return vqdmlahq_m_n_s8(a, b, c, p);
|
|
#endif /* POLYMORPHIC */
|
|
}
|
|
|
|
// CHECK-LABEL: @test_vqdmlahq_m_n_s16(
|
|
// CHECK-NEXT: entry:
|
|
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[C:%.*]] to i32
|
|
// CHECK-NEXT: [[TMP1:%.*]] = zext i16 [[P:%.*]] to i32
|
|
// CHECK-NEXT: [[TMP2:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP1]])
|
|
// CHECK-NEXT: [[TMP3:%.*]] = call <8 x i16> @llvm.arm.mve.vqdmlah.predicated.v8i16.v8i1(<8 x i16> [[A:%.*]], <8 x i16> [[B:%.*]], i32 [[TMP0]], <8 x i1> [[TMP2]])
|
|
// CHECK-NEXT: ret <8 x i16> [[TMP3]]
|
|
//
|
|
int16x8_t test_vqdmlahq_m_n_s16(int16x8_t a, int16x8_t b, int16_t c, mve_pred16_t p) {
|
|
#ifdef POLYMORPHIC
|
|
return vqdmlahq_m(a, b, c, p);
|
|
#else /* POLYMORPHIC */
|
|
return vqdmlahq_m_n_s16(a, b, c, p);
|
|
#endif /* POLYMORPHIC */
|
|
}
|
|
|
|
// CHECK-LABEL: @test_vqdmlahq_m_n_s32(
|
|
// CHECK-NEXT: entry:
|
|
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
|
|
// CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
|
|
// CHECK-NEXT: [[TMP2:%.*]] = call <4 x i32> @llvm.arm.mve.vqdmlah.predicated.v4i32.v4i1(<4 x i32> [[A:%.*]], <4 x i32> [[B:%.*]], i32 [[C:%.*]], <4 x i1> [[TMP1]])
|
|
// CHECK-NEXT: ret <4 x i32> [[TMP2]]
|
|
//
|
|
int32x4_t test_vqdmlahq_m_n_s32(int32x4_t a, int32x4_t b, int32_t c, mve_pred16_t p) {
|
|
#ifdef POLYMORPHIC
|
|
return vqdmlahq_m(a, b, c, p);
|
|
#else /* POLYMORPHIC */
|
|
return vqdmlahq_m_n_s32(a, b, c, p);
|
|
#endif /* POLYMORPHIC */
|
|
}
|
|
|
|
// CHECK-LABEL: @test_vqdmlashq_m_n_s8(
|
|
// CHECK-NEXT: entry:
|
|
// CHECK-NEXT: [[TMP0:%.*]] = zext i8 [[ADD:%.*]] to i32
|
|
// CHECK-NEXT: [[TMP1:%.*]] = zext i16 [[P:%.*]] to i32
|
|
// CHECK-NEXT: [[TMP2:%.*]] = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 [[TMP1]])
|
|
// CHECK-NEXT: [[TMP3:%.*]] = call <16 x i8> @llvm.arm.mve.vqdmlash.predicated.v16i8.v16i1(<16 x i8> [[M1:%.*]], <16 x i8> [[M2:%.*]], i32 [[TMP0]], <16 x i1> [[TMP2]])
|
|
// CHECK-NEXT: ret <16 x i8> [[TMP3]]
|
|
//
|
|
int8x16_t test_vqdmlashq_m_n_s8(int8x16_t m1, int8x16_t m2, int8_t add, mve_pred16_t p) {
|
|
#ifdef POLYMORPHIC
|
|
return vqdmlashq_m(m1, m2, add, p);
|
|
#else /* POLYMORPHIC */
|
|
return vqdmlashq_m_n_s8(m1, m2, add, p);
|
|
#endif /* POLYMORPHIC */
|
|
}
|
|
|
|
// CHECK-LABEL: @test_vqdmlashq_m_n_s16(
|
|
// CHECK-NEXT: entry:
|
|
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[ADD:%.*]] to i32
|
|
// CHECK-NEXT: [[TMP1:%.*]] = zext i16 [[P:%.*]] to i32
|
|
// CHECK-NEXT: [[TMP2:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP1]])
|
|
// CHECK-NEXT: [[TMP3:%.*]] = call <8 x i16> @llvm.arm.mve.vqdmlash.predicated.v8i16.v8i1(<8 x i16> [[M1:%.*]], <8 x i16> [[M2:%.*]], i32 [[TMP0]], <8 x i1> [[TMP2]])
|
|
// CHECK-NEXT: ret <8 x i16> [[TMP3]]
|
|
//
|
|
int16x8_t test_vqdmlashq_m_n_s16(int16x8_t m1, int16x8_t m2, int16_t add, mve_pred16_t p) {
|
|
#ifdef POLYMORPHIC
|
|
return vqdmlashq_m(m1, m2, add, p);
|
|
#else /* POLYMORPHIC */
|
|
return vqdmlashq_m_n_s16(m1, m2, add, p);
|
|
#endif /* POLYMORPHIC */
|
|
}
|
|
|
|
// CHECK-LABEL: @test_vqdmlashq_m_n_s32(
|
|
// CHECK-NEXT: entry:
|
|
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
|
|
// CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
|
|
// CHECK-NEXT: [[TMP2:%.*]] = call <4 x i32> @llvm.arm.mve.vqdmlash.predicated.v4i32.v4i1(<4 x i32> [[M1:%.*]], <4 x i32> [[M2:%.*]], i32 [[ADD:%.*]], <4 x i1> [[TMP1]])
|
|
// CHECK-NEXT: ret <4 x i32> [[TMP2]]
|
|
//
|
|
int32x4_t test_vqdmlashq_m_n_s32(int32x4_t m1, int32x4_t m2, int32_t add, mve_pred16_t p) {
|
|
#ifdef POLYMORPHIC
|
|
return vqdmlashq_m(m1, m2, add, p);
|
|
#else /* POLYMORPHIC */
|
|
return vqdmlashq_m_n_s32(m1, m2, add, p);
|
|
#endif /* POLYMORPHIC */
|
|
}
|
|
|
|
// CHECK-LABEL: @test_vqrdmlahq_m_n_s8(
|
|
// CHECK-NEXT: entry:
|
|
// CHECK-NEXT: [[TMP0:%.*]] = zext i8 [[C:%.*]] to i32
|
|
// CHECK-NEXT: [[TMP1:%.*]] = zext i16 [[P:%.*]] to i32
|
|
// CHECK-NEXT: [[TMP2:%.*]] = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 [[TMP1]])
|
|
// CHECK-NEXT: [[TMP3:%.*]] = call <16 x i8> @llvm.arm.mve.vqrdmlah.predicated.v16i8.v16i1(<16 x i8> [[A:%.*]], <16 x i8> [[B:%.*]], i32 [[TMP0]], <16 x i1> [[TMP2]])
|
|
// CHECK-NEXT: ret <16 x i8> [[TMP3]]
|
|
//
|
|
int8x16_t test_vqrdmlahq_m_n_s8(int8x16_t a, int8x16_t b, int8_t c, mve_pred16_t p) {
|
|
#ifdef POLYMORPHIC
|
|
return vqrdmlahq_m(a, b, c, p);
|
|
#else /* POLYMORPHIC */
|
|
return vqrdmlahq_m_n_s8(a, b, c, p);
|
|
#endif /* POLYMORPHIC */
|
|
}
|
|
|
|
// CHECK-LABEL: @test_vqrdmlahq_m_n_s16(
|
|
// CHECK-NEXT: entry:
|
|
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[C:%.*]] to i32
|
|
// CHECK-NEXT: [[TMP1:%.*]] = zext i16 [[P:%.*]] to i32
|
|
// CHECK-NEXT: [[TMP2:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP1]])
|
|
// CHECK-NEXT: [[TMP3:%.*]] = call <8 x i16> @llvm.arm.mve.vqrdmlah.predicated.v8i16.v8i1(<8 x i16> [[A:%.*]], <8 x i16> [[B:%.*]], i32 [[TMP0]], <8 x i1> [[TMP2]])
|
|
// CHECK-NEXT: ret <8 x i16> [[TMP3]]
|
|
//
|
|
int16x8_t test_vqrdmlahq_m_n_s16(int16x8_t a, int16x8_t b, int16_t c, mve_pred16_t p) {
|
|
#ifdef POLYMORPHIC
|
|
return vqrdmlahq_m(a, b, c, p);
|
|
#else /* POLYMORPHIC */
|
|
return vqrdmlahq_m_n_s16(a, b, c, p);
|
|
#endif /* POLYMORPHIC */
|
|
}
|
|
|
|
// CHECK-LABEL: @test_vqrdmlahq_m_n_s32(
|
|
// CHECK-NEXT: entry:
|
|
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
|
|
// CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
|
|
// CHECK-NEXT: [[TMP2:%.*]] = call <4 x i32> @llvm.arm.mve.vqrdmlah.predicated.v4i32.v4i1(<4 x i32> [[A:%.*]], <4 x i32> [[B:%.*]], i32 [[C:%.*]], <4 x i1> [[TMP1]])
|
|
// CHECK-NEXT: ret <4 x i32> [[TMP2]]
|
|
//
|
|
int32x4_t test_vqrdmlahq_m_n_s32(int32x4_t a, int32x4_t b, int32_t c, mve_pred16_t p) {
|
|
#ifdef POLYMORPHIC
|
|
return vqrdmlahq_m(a, b, c, p);
|
|
#else /* POLYMORPHIC */
|
|
return vqrdmlahq_m_n_s32(a, b, c, p);
|
|
#endif /* POLYMORPHIC */
|
|
}
|
|
|
|
// CHECK-LABEL: @test_vqrdmlashq_m_n_s8(
|
|
// CHECK-NEXT: entry:
|
|
// CHECK-NEXT: [[TMP0:%.*]] = zext i8 [[C:%.*]] to i32
|
|
// CHECK-NEXT: [[TMP1:%.*]] = zext i16 [[P:%.*]] to i32
|
|
// CHECK-NEXT: [[TMP2:%.*]] = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 [[TMP1]])
|
|
// CHECK-NEXT: [[TMP3:%.*]] = call <16 x i8> @llvm.arm.mve.vqrdmlash.predicated.v16i8.v16i1(<16 x i8> [[A:%.*]], <16 x i8> [[B:%.*]], i32 [[TMP0]], <16 x i1> [[TMP2]])
|
|
// CHECK-NEXT: ret <16 x i8> [[TMP3]]
|
|
//
|
|
int8x16_t test_vqrdmlashq_m_n_s8(int8x16_t a, int8x16_t b, int8_t c, mve_pred16_t p) {
|
|
#ifdef POLYMORPHIC
|
|
return vqrdmlashq_m(a, b, c, p);
|
|
#else /* POLYMORPHIC */
|
|
return vqrdmlashq_m_n_s8(a, b, c, p);
|
|
#endif /* POLYMORPHIC */
|
|
}
|
|
|
|
// CHECK-LABEL: @test_vqrdmlashq_m_n_s16(
|
|
// CHECK-NEXT: entry:
|
|
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[C:%.*]] to i32
|
|
// CHECK-NEXT: [[TMP1:%.*]] = zext i16 [[P:%.*]] to i32
|
|
// CHECK-NEXT: [[TMP2:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP1]])
|
|
// CHECK-NEXT: [[TMP3:%.*]] = call <8 x i16> @llvm.arm.mve.vqrdmlash.predicated.v8i16.v8i1(<8 x i16> [[A:%.*]], <8 x i16> [[B:%.*]], i32 [[TMP0]], <8 x i1> [[TMP2]])
|
|
// CHECK-NEXT: ret <8 x i16> [[TMP3]]
|
|
//
|
|
int16x8_t test_vqrdmlashq_m_n_s16(int16x8_t a, int16x8_t b, int16_t c, mve_pred16_t p) {
|
|
#ifdef POLYMORPHIC
|
|
return vqrdmlashq_m(a, b, c, p);
|
|
#else /* POLYMORPHIC */
|
|
return vqrdmlashq_m_n_s16(a, b, c, p);
|
|
#endif /* POLYMORPHIC */
|
|
}
|
|
|
|
// CHECK-LABEL: @test_vqrdmlashq_m_n_s32(
|
|
// CHECK-NEXT: entry:
|
|
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
|
|
// CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
|
|
// CHECK-NEXT: [[TMP2:%.*]] = call <4 x i32> @llvm.arm.mve.vqrdmlash.predicated.v4i32.v4i1(<4 x i32> [[A:%.*]], <4 x i32> [[B:%.*]], i32 [[C:%.*]], <4 x i1> [[TMP1]])
|
|
// CHECK-NEXT: ret <4 x i32> [[TMP2]]
|
|
//
|
|
int32x4_t test_vqrdmlashq_m_n_s32(int32x4_t a, int32x4_t b, int32_t c, mve_pred16_t p) {
|
|
#ifdef POLYMORPHIC
|
|
return vqrdmlashq_m(a, b, c, p);
|
|
#else /* POLYMORPHIC */
|
|
return vqrdmlashq_m_n_s32(a, b, c, p);
|
|
#endif /* POLYMORPHIC */
|
|
}
|