[ARM][MVE] Add vector reduction intrinsics with two vector operands
Summary:
This patch adds intrinsics for the following MVE instructions:
* VABAV
* VMLADAV, VMLSDAV
* VMLALDAV, VMLSLDAV
* VRMLALDAVH, VRMLSLDAVH
Each of the above 4 groups has a corresponding new LLVM IR intrinsic,
since the instructions cannot be easily represented using
general-purpose IR operations.
Reviewers: simon_tatham, ostannard, dmgreen, MarkMurrayARM
Reviewed By: MarkMurrayARM
Subscribers: merge_guards_bot, kristof.beyls, hiraditya, cfe-commits, llvm-commits
Tags: #clang, #llvm
Differential Revision: https://reviews.llvm.org/D71062
// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
// RUN: %clang_cc1 -triple thumbv8.1m.main-none-none-eabi -target-feature +mve.fp -mfloat-abi hard -fallow-half-arguments-and-returns -O0 -disable-O0-optnone -S -emit-llvm -o - %s | opt -S -mem2reg -sroa | FileCheck %s
// RUN: %clang_cc1 -triple thumbv8.1m.main-none-none-eabi -target-feature +mve.fp -mfloat-abi hard -fallow-half-arguments-and-returns -O0 -disable-O0-optnone -DPOLYMORPHIC -S -emit-llvm -o - %s | opt -S -mem2reg -sroa | FileCheck %s
#include <arm_mve.h>
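
// The tests below exercise both the type-suffixed intrinsics and (under
// -DPOLYMORPHIC) the overloaded forms. Judging by the generated calls, the
// llvm.arm.mve.vmldava[.predicated] intrinsic folds the whole instruction
// family into a single operation via three leading i32 flag operands --
// apparently (unsigned, subtract, exchange) -- followed by the i32
// accumulator (constant 0 for the non-accumulating variants), the two
// vector operands and, for the predicated forms, the lane mask.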

// CHECK-LABEL: @test_vmladavaq_s8(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call i32 @llvm.arm.mve.vmldava.v16i8(i32 0, i32 0, i32 0, i32 [[A:%.*]], <16 x i8> [[B:%.*]], <16 x i8> [[C:%.*]])
// CHECK-NEXT: ret i32 [[TMP0]]
//
int32_t test_vmladavaq_s8(int32_t a, int8x16_t b, int8x16_t c) {
#ifdef POLYMORPHIC
  return vmladavaq(a, b, c);
#else
  return vmladavaq_s8(a, b, c);
#endif
}
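
// For reference, a minimal scalar model of what the s8 variant above is
// expected to compute -- an illustrative sketch based on the documented
// VMLADAV behaviour, not part of the generated checks; the _model helper
// name is hypothetical:
static int32_t vmladavaq_s8_model(int32_t a, const int8_t b[16],
                                  const int8_t c[16]) {
  for (int i = 0; i < 16; i++)
    a += (int32_t)b[i] * (int32_t)c[i]; // widening multiply-accumulate per lane
  return a;
}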

// CHECK-LABEL: @test_vmladavaq_s16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call i32 @llvm.arm.mve.vmldava.v8i16(i32 0, i32 0, i32 0, i32 [[A:%.*]], <8 x i16> [[B:%.*]], <8 x i16> [[C:%.*]])
// CHECK-NEXT: ret i32 [[TMP0]]
//
int32_t test_vmladavaq_s16(int32_t a, int16x8_t b, int16x8_t c) {
#ifdef POLYMORPHIC
  return vmladavaq(a, b, c);
#else
  return vmladavaq_s16(a, b, c);
#endif
}

// CHECK-LABEL: @test_vmladavaq_s32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call i32 @llvm.arm.mve.vmldava.v4i32(i32 0, i32 0, i32 0, i32 [[A:%.*]], <4 x i32> [[B:%.*]], <4 x i32> [[C:%.*]])
// CHECK-NEXT: ret i32 [[TMP0]]
//
int32_t test_vmladavaq_s32(int32_t a, int32x4_t b, int32x4_t c) {
#ifdef POLYMORPHIC
  return vmladavaq(a, b, c);
#else
  return vmladavaq_s32(a, b, c);
#endif
}

// CHECK-LABEL: @test_vmladavaq_u8(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call i32 @llvm.arm.mve.vmldava.v16i8(i32 1, i32 0, i32 0, i32 [[A:%.*]], <16 x i8> [[B:%.*]], <16 x i8> [[C:%.*]])
// CHECK-NEXT: ret i32 [[TMP0]]
//
uint32_t test_vmladavaq_u8(uint32_t a, uint8x16_t b, uint8x16_t c) {
#ifdef POLYMORPHIC
  return vmladavaq(a, b, c);
#else
  return vmladavaq_u8(a, b, c);
#endif
}

// CHECK-LABEL: @test_vmladavaq_u16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call i32 @llvm.arm.mve.vmldava.v8i16(i32 1, i32 0, i32 0, i32 [[A:%.*]], <8 x i16> [[B:%.*]], <8 x i16> [[C:%.*]])
// CHECK-NEXT: ret i32 [[TMP0]]
//
uint32_t test_vmladavaq_u16(uint32_t a, uint16x8_t b, uint16x8_t c) {
#ifdef POLYMORPHIC
  return vmladavaq(a, b, c);
#else
  return vmladavaq_u16(a, b, c);
#endif
}

// CHECK-LABEL: @test_vmladavaq_u32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call i32 @llvm.arm.mve.vmldava.v4i32(i32 1, i32 0, i32 0, i32 [[A:%.*]], <4 x i32> [[B:%.*]], <4 x i32> [[C:%.*]])
// CHECK-NEXT: ret i32 [[TMP0]]
//
uint32_t test_vmladavaq_u32(uint32_t a, uint32x4_t b, uint32x4_t c) {
#ifdef POLYMORPHIC
  return vmladavaq(a, b, c);
#else
  return vmladavaq_u32(a, b, c);
#endif
}

// CHECK-LABEL: @test_vmladavaxq_s8(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call i32 @llvm.arm.mve.vmldava.v16i8(i32 0, i32 0, i32 1, i32 [[A:%.*]], <16 x i8> [[B:%.*]], <16 x i8> [[C:%.*]])
// CHECK-NEXT: ret i32 [[TMP0]]
//
int32_t test_vmladavaxq_s8(int32_t a, int8x16_t b, int8x16_t c) {
#ifdef POLYMORPHIC
  return vmladavaxq(a, b, c);
#else
  return vmladavaxq_s8(a, b, c);
#endif
}

// CHECK-LABEL: @test_vmladavaxq_s16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call i32 @llvm.arm.mve.vmldava.v8i16(i32 0, i32 0, i32 1, i32 [[A:%.*]], <8 x i16> [[B:%.*]], <8 x i16> [[C:%.*]])
// CHECK-NEXT: ret i32 [[TMP0]]
//
int32_t test_vmladavaxq_s16(int32_t a, int16x8_t b, int16x8_t c) {
#ifdef POLYMORPHIC
  return vmladavaxq(a, b, c);
#else
  return vmladavaxq_s16(a, b, c);
#endif
}

// CHECK-LABEL: @test_vmladavaxq_s32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call i32 @llvm.arm.mve.vmldava.v4i32(i32 0, i32 0, i32 1, i32 [[A:%.*]], <4 x i32> [[B:%.*]], <4 x i32> [[C:%.*]])
// CHECK-NEXT: ret i32 [[TMP0]]
//
int32_t test_vmladavaxq_s32(int32_t a, int32x4_t b, int32x4_t c) {
#ifdef POLYMORPHIC
  return vmladavaxq(a, b, c);
#else
  return vmladavaxq_s32(a, b, c);
#endif
}
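
// The "x" (exchange) variants above set the third flag operand to 1. A
// scalar sketch of the assumed semantics (the documented pairwise-exchange
// behaviour: each even lane of one operand is multiplied with the adjacent
// odd lane of the other); the _model helper is illustrative only:
static int32_t vmladavaxq_s8_model(int32_t a, const int8_t b[16],
                                   const int8_t c[16]) {
  for (int i = 0; i < 16; i += 2) {
    a += (int32_t)b[i] * (int32_t)c[i + 1]; // even lane times odd lane
    a += (int32_t)b[i + 1] * (int32_t)c[i]; // odd lane times even lane
  }
  return a;
}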

// CHECK-LABEL: @test_vmlsdavaq_s8(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call i32 @llvm.arm.mve.vmldava.v16i8(i32 0, i32 1, i32 0, i32 [[A:%.*]], <16 x i8> [[B:%.*]], <16 x i8> [[C:%.*]])
// CHECK-NEXT: ret i32 [[TMP0]]
//
int32_t test_vmlsdavaq_s8(int32_t a, int8x16_t b, int8x16_t c) {
#ifdef POLYMORPHIC
  return vmlsdavaq(a, b, c);
#else
  return vmlsdavaq_s8(a, b, c);
#endif
}

// CHECK-LABEL: @test_vmlsdavaq_s16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call i32 @llvm.arm.mve.vmldava.v8i16(i32 0, i32 1, i32 0, i32 [[A:%.*]], <8 x i16> [[B:%.*]], <8 x i16> [[C:%.*]])
// CHECK-NEXT: ret i32 [[TMP0]]
//
int32_t test_vmlsdavaq_s16(int32_t a, int16x8_t b, int16x8_t c) {
#ifdef POLYMORPHIC
  return vmlsdavaq(a, b, c);
#else
  return vmlsdavaq_s16(a, b, c);
#endif
}

// CHECK-LABEL: @test_vmlsdavaq_s32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call i32 @llvm.arm.mve.vmldava.v4i32(i32 0, i32 1, i32 0, i32 [[A:%.*]], <4 x i32> [[B:%.*]], <4 x i32> [[C:%.*]])
// CHECK-NEXT: ret i32 [[TMP0]]
//
int32_t test_vmlsdavaq_s32(int32_t a, int32x4_t b, int32x4_t c) {
#ifdef POLYMORPHIC
  return vmlsdavaq(a, b, c);
#else
  return vmlsdavaq_s32(a, b, c);
#endif
}
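
// The vmlsdav forms set the second flag operand to 1. A sketch of the
// assumed subtract semantics (VMLSDAV is documented as subtracting the
// product of each odd lane pair from the product of the preceding even
// lane pair before accumulating); the _model helper is illustrative only:
static int32_t vmlsdavaq_s8_model(int32_t a, const int8_t b[16],
                                  const int8_t c[16]) {
  for (int i = 0; i < 16; i += 2)
    a += (int32_t)b[i] * (int32_t)c[i] -
         (int32_t)b[i + 1] * (int32_t)c[i + 1]; // add even pair, subtract odd
  return a;
}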

// CHECK-LABEL: @test_vmlsdavaxq_s8(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call i32 @llvm.arm.mve.vmldava.v16i8(i32 0, i32 1, i32 1, i32 [[A:%.*]], <16 x i8> [[B:%.*]], <16 x i8> [[C:%.*]])
// CHECK-NEXT: ret i32 [[TMP0]]
//
int32_t test_vmlsdavaxq_s8(int32_t a, int8x16_t b, int8x16_t c) {
#ifdef POLYMORPHIC
  return vmlsdavaxq(a, b, c);
#else
  return vmlsdavaxq_s8(a, b, c);
#endif
}

// CHECK-LABEL: @test_vmlsdavaxq_s16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call i32 @llvm.arm.mve.vmldava.v8i16(i32 0, i32 1, i32 1, i32 [[A:%.*]], <8 x i16> [[B:%.*]], <8 x i16> [[C:%.*]])
// CHECK-NEXT: ret i32 [[TMP0]]
//
int32_t test_vmlsdavaxq_s16(int32_t a, int16x8_t b, int16x8_t c) {
#ifdef POLYMORPHIC
  return vmlsdavaxq(a, b, c);
#else
  return vmlsdavaxq_s16(a, b, c);
#endif
}

// CHECK-LABEL: @test_vmlsdavaxq_s32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call i32 @llvm.arm.mve.vmldava.v4i32(i32 0, i32 1, i32 1, i32 [[A:%.*]], <4 x i32> [[B:%.*]], <4 x i32> [[C:%.*]])
// CHECK-NEXT: ret i32 [[TMP0]]
//
int32_t test_vmlsdavaxq_s32(int32_t a, int32x4_t b, int32x4_t c) {
#ifdef POLYMORPHIC
  return vmlsdavaxq(a, b, c);
#else
  return vmlsdavaxq_s32(a, b, c);
#endif
}
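
// The _p variants below thread the mve_pred16_t argument through
// llvm.arm.mve.pred.i2v to obtain a lane mask, as the checks show. A scalar
// sketch of the assumed per-lane behaviour for 8-bit lanes (one predicate
// bit per byte, so one bit per lane here; inactive lanes simply do not
// contribute); the _model helper is illustrative only:
static int32_t vmladavaq_p_s8_model(int32_t a, const int8_t b[16],
                                    const int8_t c[16], uint16_t p) {
  for (int i = 0; i < 16; i++)
    if (p & (1u << i)) // lane enabled by the predicate
      a += (int32_t)b[i] * (int32_t)c[i];
  return a;
}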

// CHECK-LABEL: @test_vmladavaq_p_s8(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT: [[TMP1:%.*]] = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 [[TMP0]])
// CHECK-NEXT: [[TMP2:%.*]] = call i32 @llvm.arm.mve.vmldava.predicated.v16i8.v16i1(i32 0, i32 0, i32 0, i32 [[A:%.*]], <16 x i8> [[B:%.*]], <16 x i8> [[C:%.*]], <16 x i1> [[TMP1]])
// CHECK-NEXT: ret i32 [[TMP2]]
//
int32_t test_vmladavaq_p_s8(int32_t a, int8x16_t b, int8x16_t c, mve_pred16_t p) {
#ifdef POLYMORPHIC
  return vmladavaq_p(a, b, c, p);
#else
  return vmladavaq_p_s8(a, b, c, p);
#endif
}

// CHECK-LABEL: @test_vmladavaq_p_s16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
// CHECK-NEXT: [[TMP2:%.*]] = call i32 @llvm.arm.mve.vmldava.predicated.v8i16.v8i1(i32 0, i32 0, i32 0, i32 [[A:%.*]], <8 x i16> [[B:%.*]], <8 x i16> [[C:%.*]], <8 x i1> [[TMP1]])
// CHECK-NEXT: ret i32 [[TMP2]]
//
int32_t test_vmladavaq_p_s16(int32_t a, int16x8_t b, int16x8_t c, mve_pred16_t p) {
#ifdef POLYMORPHIC
  return vmladavaq_p(a, b, c, p);
#else
  return vmladavaq_p_s16(a, b, c, p);
#endif
}

// CHECK-LABEL: @test_vmladavaq_p_s32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
// CHECK-NEXT: [[TMP2:%.*]] = call i32 @llvm.arm.mve.vmldava.predicated.v4i32.v4i1(i32 0, i32 0, i32 0, i32 [[A:%.*]], <4 x i32> [[B:%.*]], <4 x i32> [[C:%.*]], <4 x i1> [[TMP1]])
// CHECK-NEXT: ret i32 [[TMP2]]
//
int32_t test_vmladavaq_p_s32(int32_t a, int32x4_t b, int32x4_t c, mve_pred16_t p) {
#ifdef POLYMORPHIC
  return vmladavaq_p(a, b, c, p);
#else
  return vmladavaq_p_s32(a, b, c, p);
#endif
}

// CHECK-LABEL: @test_vmladavaq_p_u8(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT: [[TMP1:%.*]] = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 [[TMP0]])
// CHECK-NEXT: [[TMP2:%.*]] = call i32 @llvm.arm.mve.vmldava.predicated.v16i8.v16i1(i32 1, i32 0, i32 0, i32 [[A:%.*]], <16 x i8> [[B:%.*]], <16 x i8> [[C:%.*]], <16 x i1> [[TMP1]])
// CHECK-NEXT: ret i32 [[TMP2]]
//
uint32_t test_vmladavaq_p_u8(uint32_t a, uint8x16_t b, uint8x16_t c, mve_pred16_t p) {
#ifdef POLYMORPHIC
  return vmladavaq_p(a, b, c, p);
#else
  return vmladavaq_p_u8(a, b, c, p);
#endif
}

// CHECK-LABEL: @test_vmladavaq_p_u16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
// CHECK-NEXT: [[TMP2:%.*]] = call i32 @llvm.arm.mve.vmldava.predicated.v8i16.v8i1(i32 1, i32 0, i32 0, i32 [[A:%.*]], <8 x i16> [[B:%.*]], <8 x i16> [[C:%.*]], <8 x i1> [[TMP1]])
// CHECK-NEXT: ret i32 [[TMP2]]
//
uint32_t test_vmladavaq_p_u16(uint32_t a, uint16x8_t b, uint16x8_t c, mve_pred16_t p) {
#ifdef POLYMORPHIC
  return vmladavaq_p(a, b, c, p);
#else
  return vmladavaq_p_u16(a, b, c, p);
#endif
}

// CHECK-LABEL: @test_vmladavaq_p_u32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
// CHECK-NEXT: [[TMP2:%.*]] = call i32 @llvm.arm.mve.vmldava.predicated.v4i32.v4i1(i32 1, i32 0, i32 0, i32 [[A:%.*]], <4 x i32> [[B:%.*]], <4 x i32> [[C:%.*]], <4 x i1> [[TMP1]])
// CHECK-NEXT: ret i32 [[TMP2]]
//
uint32_t test_vmladavaq_p_u32(uint32_t a, uint32x4_t b, uint32x4_t c, mve_pred16_t p) {
#ifdef POLYMORPHIC
  return vmladavaq_p(a, b, c, p);
#else
  return vmladavaq_p_u32(a, b, c, p);
#endif
}

// CHECK-LABEL: @test_vmladavaxq_p_s8(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT: [[TMP1:%.*]] = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 [[TMP0]])
// CHECK-NEXT: [[TMP2:%.*]] = call i32 @llvm.arm.mve.vmldava.predicated.v16i8.v16i1(i32 0, i32 0, i32 1, i32 [[A:%.*]], <16 x i8> [[B:%.*]], <16 x i8> [[C:%.*]], <16 x i1> [[TMP1]])
// CHECK-NEXT: ret i32 [[TMP2]]
//
int32_t test_vmladavaxq_p_s8(int32_t a, int8x16_t b, int8x16_t c, mve_pred16_t p) {
#ifdef POLYMORPHIC
  return vmladavaxq_p(a, b, c, p);
#else
  return vmladavaxq_p_s8(a, b, c, p);
#endif
}

// CHECK-LABEL: @test_vmladavaxq_p_s16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
// CHECK-NEXT: [[TMP2:%.*]] = call i32 @llvm.arm.mve.vmldava.predicated.v8i16.v8i1(i32 0, i32 0, i32 1, i32 [[A:%.*]], <8 x i16> [[B:%.*]], <8 x i16> [[C:%.*]], <8 x i1> [[TMP1]])
// CHECK-NEXT: ret i32 [[TMP2]]
//
int32_t test_vmladavaxq_p_s16(int32_t a, int16x8_t b, int16x8_t c, mve_pred16_t p) {
#ifdef POLYMORPHIC
  return vmladavaxq_p(a, b, c, p);
#else
  return vmladavaxq_p_s16(a, b, c, p);
#endif
}

// CHECK-LABEL: @test_vmladavaxq_p_s32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
// CHECK-NEXT: [[TMP2:%.*]] = call i32 @llvm.arm.mve.vmldava.predicated.v4i32.v4i1(i32 0, i32 0, i32 1, i32 [[A:%.*]], <4 x i32> [[B:%.*]], <4 x i32> [[C:%.*]], <4 x i1> [[TMP1]])
// CHECK-NEXT: ret i32 [[TMP2]]
//
int32_t test_vmladavaxq_p_s32(int32_t a, int32x4_t b, int32x4_t c, mve_pred16_t p) {
#ifdef POLYMORPHIC
  return vmladavaxq_p(a, b, c, p);
#else
  return vmladavaxq_p_s32(a, b, c, p);
#endif
}

// CHECK-LABEL: @test_vmlsdavaq_p_s8(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT: [[TMP1:%.*]] = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 [[TMP0]])
// CHECK-NEXT: [[TMP2:%.*]] = call i32 @llvm.arm.mve.vmldava.predicated.v16i8.v16i1(i32 0, i32 1, i32 0, i32 [[A:%.*]], <16 x i8> [[B:%.*]], <16 x i8> [[C:%.*]], <16 x i1> [[TMP1]])
// CHECK-NEXT: ret i32 [[TMP2]]
//
int32_t test_vmlsdavaq_p_s8(int32_t a, int8x16_t b, int8x16_t c, mve_pred16_t p) {
#ifdef POLYMORPHIC
  return vmlsdavaq_p(a, b, c, p);
#else
  return vmlsdavaq_p_s8(a, b, c, p);
#endif
}

// CHECK-LABEL: @test_vmlsdavaq_p_s16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
// CHECK-NEXT: [[TMP2:%.*]] = call i32 @llvm.arm.mve.vmldava.predicated.v8i16.v8i1(i32 0, i32 1, i32 0, i32 [[A:%.*]], <8 x i16> [[B:%.*]], <8 x i16> [[C:%.*]], <8 x i1> [[TMP1]])
// CHECK-NEXT: ret i32 [[TMP2]]
//
int32_t test_vmlsdavaq_p_s16(int32_t a, int16x8_t b, int16x8_t c, mve_pred16_t p) {
#ifdef POLYMORPHIC
  return vmlsdavaq_p(a, b, c, p);
#else
  return vmlsdavaq_p_s16(a, b, c, p);
#endif
}

// CHECK-LABEL: @test_vmlsdavaq_p_s32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
// CHECK-NEXT: [[TMP2:%.*]] = call i32 @llvm.arm.mve.vmldava.predicated.v4i32.v4i1(i32 0, i32 1, i32 0, i32 [[A:%.*]], <4 x i32> [[B:%.*]], <4 x i32> [[C:%.*]], <4 x i1> [[TMP1]])
// CHECK-NEXT: ret i32 [[TMP2]]
//
int32_t test_vmlsdavaq_p_s32(int32_t a, int32x4_t b, int32x4_t c, mve_pred16_t p) {
#ifdef POLYMORPHIC
  return vmlsdavaq_p(a, b, c, p);
#else
  return vmlsdavaq_p_s32(a, b, c, p);
#endif
}

// CHECK-LABEL: @test_vmlsdavaxq_p_s8(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT: [[TMP1:%.*]] = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 [[TMP0]])
// CHECK-NEXT: [[TMP2:%.*]] = call i32 @llvm.arm.mve.vmldava.predicated.v16i8.v16i1(i32 0, i32 1, i32 1, i32 [[A:%.*]], <16 x i8> [[B:%.*]], <16 x i8> [[C:%.*]], <16 x i1> [[TMP1]])
// CHECK-NEXT: ret i32 [[TMP2]]
//
int32_t test_vmlsdavaxq_p_s8(int32_t a, int8x16_t b, int8x16_t c, mve_pred16_t p) {
#ifdef POLYMORPHIC
  return vmlsdavaxq_p(a, b, c, p);
#else
  return vmlsdavaxq_p_s8(a, b, c, p);
#endif
}

// CHECK-LABEL: @test_vmlsdavaxq_p_s16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
// CHECK-NEXT: [[TMP2:%.*]] = call i32 @llvm.arm.mve.vmldava.predicated.v8i16.v8i1(i32 0, i32 1, i32 1, i32 [[A:%.*]], <8 x i16> [[B:%.*]], <8 x i16> [[C:%.*]], <8 x i1> [[TMP1]])
// CHECK-NEXT: ret i32 [[TMP2]]
//
int32_t test_vmlsdavaxq_p_s16(int32_t a, int16x8_t b, int16x8_t c, mve_pred16_t p) {
#ifdef POLYMORPHIC
  return vmlsdavaxq_p(a, b, c, p);
#else
  return vmlsdavaxq_p_s16(a, b, c, p);
#endif
}

// CHECK-LABEL: @test_vmlsdavaxq_p_s32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
// CHECK-NEXT: [[TMP2:%.*]] = call i32 @llvm.arm.mve.vmldava.predicated.v4i32.v4i1(i32 0, i32 1, i32 1, i32 [[A:%.*]], <4 x i32> [[B:%.*]], <4 x i32> [[C:%.*]], <4 x i1> [[TMP1]])
// CHECK-NEXT: ret i32 [[TMP2]]
//
int32_t test_vmlsdavaxq_p_s32(int32_t a, int32x4_t b, int32x4_t c, mve_pred16_t p) {
#ifdef POLYMORPHIC
  return vmlsdavaxq_p(a, b, c, p);
#else
  return vmlsdavaxq_p_s32(a, b, c, p);
#endif
}
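
// From here on, the non-accumulating forms are tested. As the checks show,
// they reuse the same intrinsics with a constant-zero accumulator operand,
// so e.g. vmladavq(a, b) behaves like vmladavaq(0, a, b).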

// CHECK-LABEL: @test_vmladavq_s8(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call i32 @llvm.arm.mve.vmldava.v16i8(i32 0, i32 0, i32 0, i32 0, <16 x i8> [[A:%.*]], <16 x i8> [[B:%.*]])
// CHECK-NEXT: ret i32 [[TMP0]]
//
int32_t test_vmladavq_s8(int8x16_t a, int8x16_t b) {
#ifdef POLYMORPHIC
  return vmladavq(a, b);
#else
  return vmladavq_s8(a, b);
#endif
}

// CHECK-LABEL: @test_vmladavq_s16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call i32 @llvm.arm.mve.vmldava.v8i16(i32 0, i32 0, i32 0, i32 0, <8 x i16> [[A:%.*]], <8 x i16> [[B:%.*]])
// CHECK-NEXT: ret i32 [[TMP0]]
//
int32_t test_vmladavq_s16(int16x8_t a, int16x8_t b) {
#ifdef POLYMORPHIC
  return vmladavq(a, b);
#else
  return vmladavq_s16(a, b);
#endif
}

// CHECK-LABEL: @test_vmladavq_s32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call i32 @llvm.arm.mve.vmldava.v4i32(i32 0, i32 0, i32 0, i32 0, <4 x i32> [[A:%.*]], <4 x i32> [[B:%.*]])
// CHECK-NEXT: ret i32 [[TMP0]]
//
int32_t test_vmladavq_s32(int32x4_t a, int32x4_t b) {
#ifdef POLYMORPHIC
  return vmladavq(a, b);
#else
  return vmladavq_s32(a, b);
#endif
}

// CHECK-LABEL: @test_vmladavq_u8(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call i32 @llvm.arm.mve.vmldava.v16i8(i32 1, i32 0, i32 0, i32 0, <16 x i8> [[A:%.*]], <16 x i8> [[B:%.*]])
// CHECK-NEXT: ret i32 [[TMP0]]
//
uint32_t test_vmladavq_u8(uint8x16_t a, uint8x16_t b) {
#ifdef POLYMORPHIC
  return vmladavq(a, b);
#else
  return vmladavq_u8(a, b);
#endif
}

// CHECK-LABEL: @test_vmladavq_u16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call i32 @llvm.arm.mve.vmldava.v8i16(i32 1, i32 0, i32 0, i32 0, <8 x i16> [[A:%.*]], <8 x i16> [[B:%.*]])
// CHECK-NEXT: ret i32 [[TMP0]]
//
uint32_t test_vmladavq_u16(uint16x8_t a, uint16x8_t b) {
#ifdef POLYMORPHIC
  return vmladavq(a, b);
#else
  return vmladavq_u16(a, b);
#endif
}

// CHECK-LABEL: @test_vmladavq_u32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call i32 @llvm.arm.mve.vmldava.v4i32(i32 1, i32 0, i32 0, i32 0, <4 x i32> [[A:%.*]], <4 x i32> [[B:%.*]])
// CHECK-NEXT: ret i32 [[TMP0]]
//
uint32_t test_vmladavq_u32(uint32x4_t a, uint32x4_t b) {
#ifdef POLYMORPHIC
  return vmladavq(a, b);
#else
  return vmladavq_u32(a, b);
#endif
}

// CHECK-LABEL: @test_vmladavxq_s8(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call i32 @llvm.arm.mve.vmldava.v16i8(i32 0, i32 0, i32 1, i32 0, <16 x i8> [[A:%.*]], <16 x i8> [[B:%.*]])
// CHECK-NEXT: ret i32 [[TMP0]]
//
int32_t test_vmladavxq_s8(int8x16_t a, int8x16_t b) {
#ifdef POLYMORPHIC
  return vmladavxq(a, b);
#else
  return vmladavxq_s8(a, b);
#endif
}

// CHECK-LABEL: @test_vmladavxq_s16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call i32 @llvm.arm.mve.vmldava.v8i16(i32 0, i32 0, i32 1, i32 0, <8 x i16> [[A:%.*]], <8 x i16> [[B:%.*]])
// CHECK-NEXT: ret i32 [[TMP0]]
//
int32_t test_vmladavxq_s16(int16x8_t a, int16x8_t b) {
#ifdef POLYMORPHIC
  return vmladavxq(a, b);
#else
  return vmladavxq_s16(a, b);
#endif
}

// CHECK-LABEL: @test_vmladavxq_s32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call i32 @llvm.arm.mve.vmldava.v4i32(i32 0, i32 0, i32 1, i32 0, <4 x i32> [[A:%.*]], <4 x i32> [[B:%.*]])
// CHECK-NEXT: ret i32 [[TMP0]]
//
int32_t test_vmladavxq_s32(int32x4_t a, int32x4_t b) {
#ifdef POLYMORPHIC
  return vmladavxq(a, b);
#else
  return vmladavxq_s32(a, b);
#endif
}

// CHECK-LABEL: @test_vmlsdavq_s8(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call i32 @llvm.arm.mve.vmldava.v16i8(i32 0, i32 1, i32 0, i32 0, <16 x i8> [[A:%.*]], <16 x i8> [[B:%.*]])
// CHECK-NEXT: ret i32 [[TMP0]]
//
int32_t test_vmlsdavq_s8(int8x16_t a, int8x16_t b) {
#ifdef POLYMORPHIC
  return vmlsdavq(a, b);
#else
  return vmlsdavq_s8(a, b);
#endif
}

// CHECK-LABEL: @test_vmlsdavq_s16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call i32 @llvm.arm.mve.vmldava.v8i16(i32 0, i32 1, i32 0, i32 0, <8 x i16> [[A:%.*]], <8 x i16> [[B:%.*]])
// CHECK-NEXT: ret i32 [[TMP0]]
//
int32_t test_vmlsdavq_s16(int16x8_t a, int16x8_t b) {
#ifdef POLYMORPHIC
  return vmlsdavq(a, b);
#else
  return vmlsdavq_s16(a, b);
#endif
}

// CHECK-LABEL: @test_vmlsdavq_s32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call i32 @llvm.arm.mve.vmldava.v4i32(i32 0, i32 1, i32 0, i32 0, <4 x i32> [[A:%.*]], <4 x i32> [[B:%.*]])
// CHECK-NEXT: ret i32 [[TMP0]]
//
int32_t test_vmlsdavq_s32(int32x4_t a, int32x4_t b) {
#ifdef POLYMORPHIC
  return vmlsdavq(a, b);
#else
  return vmlsdavq_s32(a, b);
#endif
}

// CHECK-LABEL: @test_vmlsdavxq_s8(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call i32 @llvm.arm.mve.vmldava.v16i8(i32 0, i32 1, i32 1, i32 0, <16 x i8> [[A:%.*]], <16 x i8> [[B:%.*]])
// CHECK-NEXT: ret i32 [[TMP0]]
//
int32_t test_vmlsdavxq_s8(int8x16_t a, int8x16_t b) {
#ifdef POLYMORPHIC
  return vmlsdavxq(a, b);
#else
  return vmlsdavxq_s8(a, b);
#endif
}

// CHECK-LABEL: @test_vmlsdavxq_s16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call i32 @llvm.arm.mve.vmldava.v8i16(i32 0, i32 1, i32 1, i32 0, <8 x i16> [[A:%.*]], <8 x i16> [[B:%.*]])
// CHECK-NEXT: ret i32 [[TMP0]]
//
int32_t test_vmlsdavxq_s16(int16x8_t a, int16x8_t b) {
#ifdef POLYMORPHIC
  return vmlsdavxq(a, b);
#else
  return vmlsdavxq_s16(a, b);
#endif
}

// CHECK-LABEL: @test_vmlsdavxq_s32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call i32 @llvm.arm.mve.vmldava.v4i32(i32 0, i32 1, i32 1, i32 0, <4 x i32> [[A:%.*]], <4 x i32> [[B:%.*]])
// CHECK-NEXT: ret i32 [[TMP0]]
//
int32_t test_vmlsdavxq_s32(int32x4_t a, int32x4_t b) {
#ifdef POLYMORPHIC
  return vmlsdavxq(a, b);
#else
  return vmlsdavxq_s32(a, b);
#endif
}

// CHECK-LABEL: @test_vmladavq_p_s8(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT: [[TMP1:%.*]] = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 [[TMP0]])
// CHECK-NEXT: [[TMP2:%.*]] = call i32 @llvm.arm.mve.vmldava.predicated.v16i8.v16i1(i32 0, i32 0, i32 0, i32 0, <16 x i8> [[A:%.*]], <16 x i8> [[B:%.*]], <16 x i1> [[TMP1]])
// CHECK-NEXT: ret i32 [[TMP2]]
//
int32_t test_vmladavq_p_s8(int8x16_t a, int8x16_t b, mve_pred16_t p) {
#ifdef POLYMORPHIC
  return vmladavq_p(a, b, p);
#else
  return vmladavq_p_s8(a, b, p);
#endif
}

// CHECK-LABEL: @test_vmladavq_p_s16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
// CHECK-NEXT: [[TMP2:%.*]] = call i32 @llvm.arm.mve.vmldava.predicated.v8i16.v8i1(i32 0, i32 0, i32 0, i32 0, <8 x i16> [[A:%.*]], <8 x i16> [[B:%.*]], <8 x i1> [[TMP1]])
// CHECK-NEXT: ret i32 [[TMP2]]
//
int32_t test_vmladavq_p_s16(int16x8_t a, int16x8_t b, mve_pred16_t p) {
#ifdef POLYMORPHIC
  return vmladavq_p(a, b, p);
#else
  return vmladavq_p_s16(a, b, p);
#endif
}

// CHECK-LABEL: @test_vmladavq_p_s32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
// CHECK-NEXT: [[TMP2:%.*]] = call i32 @llvm.arm.mve.vmldava.predicated.v4i32.v4i1(i32 0, i32 0, i32 0, i32 0, <4 x i32> [[A:%.*]], <4 x i32> [[B:%.*]], <4 x i1> [[TMP1]])
// CHECK-NEXT: ret i32 [[TMP2]]
//
int32_t test_vmladavq_p_s32(int32x4_t a, int32x4_t b, mve_pred16_t p) {
#ifdef POLYMORPHIC
  return vmladavq_p(a, b, p);
#else
  return vmladavq_p_s32(a, b, p);
#endif
}

// CHECK-LABEL: @test_vmladavq_p_u8(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT: [[TMP1:%.*]] = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 [[TMP0]])
// CHECK-NEXT: [[TMP2:%.*]] = call i32 @llvm.arm.mve.vmldava.predicated.v16i8.v16i1(i32 1, i32 0, i32 0, i32 0, <16 x i8> [[A:%.*]], <16 x i8> [[B:%.*]], <16 x i1> [[TMP1]])
// CHECK-NEXT: ret i32 [[TMP2]]
//
uint32_t test_vmladavq_p_u8(uint8x16_t a, uint8x16_t b, mve_pred16_t p) {
#ifdef POLYMORPHIC
  return vmladavq_p(a, b, p);
#else
  return vmladavq_p_u8(a, b, p);
#endif
}

// CHECK-LABEL: @test_vmladavq_p_u16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
// CHECK-NEXT: [[TMP2:%.*]] = call i32 @llvm.arm.mve.vmldava.predicated.v8i16.v8i1(i32 1, i32 0, i32 0, i32 0, <8 x i16> [[A:%.*]], <8 x i16> [[B:%.*]], <8 x i1> [[TMP1]])
// CHECK-NEXT: ret i32 [[TMP2]]
//
uint32_t test_vmladavq_p_u16(uint16x8_t a, uint16x8_t b, mve_pred16_t p) {
#ifdef POLYMORPHIC
  return vmladavq_p(a, b, p);
#else
  return vmladavq_p_u16(a, b, p);
#endif
}

// CHECK-LABEL: @test_vmladavq_p_u32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
// CHECK-NEXT: [[TMP2:%.*]] = call i32 @llvm.arm.mve.vmldava.predicated.v4i32.v4i1(i32 1, i32 0, i32 0, i32 0, <4 x i32> [[A:%.*]], <4 x i32> [[B:%.*]], <4 x i1> [[TMP1]])
// CHECK-NEXT: ret i32 [[TMP2]]
//
uint32_t test_vmladavq_p_u32(uint32x4_t a, uint32x4_t b, mve_pred16_t p) {
#ifdef POLYMORPHIC
  return vmladavq_p(a, b, p);
#else
  return vmladavq_p_u32(a, b, p);
#endif
}

// CHECK-LABEL: @test_vmladavxq_p_s8(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT: [[TMP1:%.*]] = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 [[TMP0]])
// CHECK-NEXT: [[TMP2:%.*]] = call i32 @llvm.arm.mve.vmldava.predicated.v16i8.v16i1(i32 0, i32 0, i32 1, i32 0, <16 x i8> [[A:%.*]], <16 x i8> [[B:%.*]], <16 x i1> [[TMP1]])
// CHECK-NEXT: ret i32 [[TMP2]]
//
int32_t test_vmladavxq_p_s8(int8x16_t a, int8x16_t b, mve_pred16_t p) {
#ifdef POLYMORPHIC
  return vmladavxq_p(a, b, p);
#else
  return vmladavxq_p_s8(a, b, p);
#endif
}

// CHECK-LABEL: @test_vmladavxq_p_s16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
// CHECK-NEXT: [[TMP2:%.*]] = call i32 @llvm.arm.mve.vmldava.predicated.v8i16.v8i1(i32 0, i32 0, i32 1, i32 0, <8 x i16> [[A:%.*]], <8 x i16> [[B:%.*]], <8 x i1> [[TMP1]])
// CHECK-NEXT: ret i32 [[TMP2]]
//
int32_t test_vmladavxq_p_s16(int16x8_t a, int16x8_t b, mve_pred16_t p) {
#ifdef POLYMORPHIC
  return vmladavxq_p(a, b, p);
#else
  return vmladavxq_p_s16(a, b, p);
#endif
}

// CHECK-LABEL: @test_vmladavxq_p_s32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
// CHECK-NEXT: [[TMP2:%.*]] = call i32 @llvm.arm.mve.vmldava.predicated.v4i32.v4i1(i32 0, i32 0, i32 1, i32 0, <4 x i32> [[A:%.*]], <4 x i32> [[B:%.*]], <4 x i1> [[TMP1]])
// CHECK-NEXT: ret i32 [[TMP2]]
//
int32_t test_vmladavxq_p_s32(int32x4_t a, int32x4_t b, mve_pred16_t p) {
#ifdef POLYMORPHIC
  return vmladavxq_p(a, b, p);
#else
  return vmladavxq_p_s32(a, b, p);
#endif
}

// CHECK-LABEL: @test_vmlsdavq_p_s8(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT: [[TMP1:%.*]] = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 [[TMP0]])
// CHECK-NEXT: [[TMP2:%.*]] = call i32 @llvm.arm.mve.vmldava.predicated.v16i8.v16i1(i32 0, i32 1, i32 0, i32 0, <16 x i8> [[A:%.*]], <16 x i8> [[B:%.*]], <16 x i1> [[TMP1]])
// CHECK-NEXT: ret i32 [[TMP2]]
//
int32_t test_vmlsdavq_p_s8(int8x16_t a, int8x16_t b, mve_pred16_t p) {
#ifdef POLYMORPHIC
  return vmlsdavq_p(a, b, p);
#else
  return vmlsdavq_p_s8(a, b, p);
#endif
}

// CHECK-LABEL: @test_vmlsdavq_p_s16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
// CHECK-NEXT: [[TMP2:%.*]] = call i32 @llvm.arm.mve.vmldava.predicated.v8i16.v8i1(i32 0, i32 1, i32 0, i32 0, <8 x i16> [[A:%.*]], <8 x i16> [[B:%.*]], <8 x i1> [[TMP1]])
// CHECK-NEXT: ret i32 [[TMP2]]
//
int32_t test_vmlsdavq_p_s16(int16x8_t a, int16x8_t b, mve_pred16_t p) {
#ifdef POLYMORPHIC
  return vmlsdavq_p(a, b, p);
#else
  return vmlsdavq_p_s16(a, b, p);
#endif
}

// CHECK-LABEL: @test_vmlsdavq_p_s32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
// CHECK-NEXT: [[TMP2:%.*]] = call i32 @llvm.arm.mve.vmldava.predicated.v4i32.v4i1(i32 0, i32 1, i32 0, i32 0, <4 x i32> [[A:%.*]], <4 x i32> [[B:%.*]], <4 x i1> [[TMP1]])
// CHECK-NEXT: ret i32 [[TMP2]]
//
int32_t test_vmlsdavq_p_s32(int32x4_t a, int32x4_t b, mve_pred16_t p) {
#ifdef POLYMORPHIC
  return vmlsdavq_p(a, b, p);
#else
  return vmlsdavq_p_s32(a, b, p);
#endif
}

// CHECK-LABEL: @test_vmlsdavxq_p_s8(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT: [[TMP1:%.*]] = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 [[TMP0]])
// CHECK-NEXT: [[TMP2:%.*]] = call i32 @llvm.arm.mve.vmldava.predicated.v16i8.v16i1(i32 0, i32 1, i32 1, i32 0, <16 x i8> [[A:%.*]], <16 x i8> [[B:%.*]], <16 x i1> [[TMP1]])
// CHECK-NEXT: ret i32 [[TMP2]]
//
int32_t test_vmlsdavxq_p_s8(int8x16_t a, int8x16_t b, mve_pred16_t p) {
#ifdef POLYMORPHIC
  return vmlsdavxq_p(a, b, p);
#else
  return vmlsdavxq_p_s8(a, b, p);
#endif
}

// CHECK-LABEL: @test_vmlsdavxq_p_s16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
// CHECK-NEXT: [[TMP2:%.*]] = call i32 @llvm.arm.mve.vmldava.predicated.v8i16.v8i1(i32 0, i32 1, i32 1, i32 0, <8 x i16> [[A:%.*]], <8 x i16> [[B:%.*]], <8 x i1> [[TMP1]])
// CHECK-NEXT: ret i32 [[TMP2]]
//
int32_t test_vmlsdavxq_p_s16(int16x8_t a, int16x8_t b, mve_pred16_t p) {
#ifdef POLYMORPHIC
  return vmlsdavxq_p(a, b, p);
#else
  return vmlsdavxq_p_s16(a, b, p);
#endif
}

// CHECK-LABEL: @test_vmlsdavxq_p_s32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
// CHECK-NEXT: [[TMP2:%.*]] = call i32 @llvm.arm.mve.vmldava.predicated.v4i32.v4i1(i32 0, i32 1, i32 1, i32 0, <4 x i32> [[A:%.*]], <4 x i32> [[B:%.*]], <4 x i1> [[TMP1]])
// CHECK-NEXT: ret i32 [[TMP2]]
//
int32_t test_vmlsdavxq_p_s32(int32x4_t a, int32x4_t b, mve_pred16_t p) {
#ifdef POLYMORPHIC
  return vmlsdavxq_p(a, b, p);
#else
  return vmlsdavxq_p_s32(a, b, p);
#endif
}