[SveEmitter] Add builtins for svdupq and svdupq_lane

* svdupq builtins that duplicate scalars to every quadword of a vector
  are defined using builtins for svld1rq (load and replicate quadword).
* svdupq builtins that duplicate boolean values to fill a predicate vector
  are defined using `svcmpne`.
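
As an illustration of the first bullet, a non-boolean svdupq behaves roughly like the following hand-written ACLE C (a sketch only; the helper name is ours, not part of the patch):

#include <arm_sve.h>

// svdupq_n_s32(x0, x1, x2, x3): spill the four scalars to a 16-byte-aligned
// slot, then let svld1rq (LD1RQW) replicate that 128-bit block into every
// quadword of the result vector.
static svint32_t dupq_s32_sketch(int32_t x0, int32_t x1, int32_t x2, int32_t x3) {
  int32_t buf[4] __attribute__((aligned(16))) = {x0, x1, x2, x3};
  return svld1rq_s32(svptrue_b32(), buf);
}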

Reviewers: SjoerdMeijer, efriedma, ctetreau

Reviewed By: efriedma

Tags: #clang

Differential Revision: https://reviews.llvm.org/D78750
Author: Sander de Smalen
Date:   2020-05-04 20:17:57 +01:00
Parent: 1eb160fe8d
Commit: 6b90a6887d

5 changed files with 482 additions and 5 deletions

clang/include/clang/Basic/arm_sve.td

@@ -610,6 +610,13 @@ def SVPRFW_GATHER_BASES_OFFSET : MInst<"svprfw_gather[_{2}base]_index", "vPdlJ"
def SVPRFD_GATHER_BASES_OFFSET : MInst<"svprfd_gather[_{2}base]_index", "vPdlJ", "UiUl", [IsGatherPrefetch], MemEltTyInt64, "aarch64_sve_prfd_gather_scalar_offset">;
////////////////////////////////////////////////////////////////////////////////
// Scalar to vector
def SVDUPQ_8 : SInst<"svdupq[_n]_{d}", "dssssssssssssssss", "cUc", MergeNone>;
def SVDUPQ_16 : SInst<"svdupq[_n]_{d}", "dssssssss", "sUsh", MergeNone>;
def SVDUPQ_32 : SInst<"svdupq[_n]_{d}", "dssss", "iUif", MergeNone>;
def SVDUPQ_64 : SInst<"svdupq[_n]_{d}", "dss", "lUld", MergeNone>;
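// In the prototype strings above, 'd' denotes the default vector type and 's'
// a scalar of its element type, so "dssss" reads as "vector built from four
// scalars"; the SVDUPQ_B* forms further down use 'P' for the predicate result.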
// Integer arithmetic
multiclass SInstZPZ<string name, string types, string intrinsic, list<FlagType> flags=[]> {
@@ -1034,7 +1041,7 @@ def SVCLASTB : SInst<"svclastb[_{d}]", "dPdd", "csilUcUsUiUlhfd", MergeNo
def SVCLASTB_N : SInst<"svclastb[_n_{d}]", "sPsd", "csilUcUsUiUlhfd", MergeNone, "aarch64_sve_clastb_n">;
def SVCOMPACT : SInst<"svcompact[_{d}]", "dPd", "ilUiUlfd", MergeNone, "aarch64_sve_compact">;
// SVDUP_LANE (to land in D78750)
// SVDUPQ_LANE (to land in D78750)
def SVDUPQ_LANE : SInst<"svdupq_lane[_{d}]", "ddn", "csilUcUsUiUlhfd", MergeNone, "aarch64_sve_dupq_lane">;
def SVEXT : SInst<"svext[_{d}]", "dddi", "csilUcUsUiUlhfd", MergeNone, "aarch64_sve_ext", [], [ImmCheck<2, ImmCheckExtract, 1>]>;
def SVLASTA : SInst<"svlasta[_{d}]", "sPd", "csilUcUsUiUlhfd", MergeNone, "aarch64_sve_lasta">;
def SVLASTB : SInst<"svlastb[_{d}]", "sPd", "csilUcUsUiUlhfd", MergeNone, "aarch64_sve_lastb">;
@@ -1072,6 +1079,12 @@ def SVPFALSE : SInst<"svpfalse[_b]", "P", "", MergeNone, "", [IsOverloadNone]>;
def SVPTRUE_PAT : SInst<"svptrue_pat_{d}", "PI", "PcPsPiPl", MergeNone, "aarch64_sve_ptrue">;
def SVPTRUE : SInst<"svptrue_{d}", "P", "PcPsPiPl", MergeNone, "aarch64_sve_ptrue", [IsAppendSVALL]>;
def SVDUPQ_B8 : SInst<"svdupq[_n]_{d}", "Pssssssssssssssss", "Pc", MergeNone>;
def SVDUPQ_B16 : SInst<"svdupq[_n]_{d}", "Pssssssss", "Ps", MergeNone>;
def SVDUPQ_B32 : SInst<"svdupq[_n]_{d}", "Pssss", "Pi", MergeNone>;
def SVDUPQ_B64 : SInst<"svdupq[_n]_{d}", "Pss", "Pl", MergeNone>;
////////////////////////////////////////////////////////////////////////////////
// Predicate operations

clang/lib/CodeGen/CGBuiltin.cpp

@@ -7562,6 +7562,15 @@ CodeGenFunction::getSVEPredType(SVETypeFlags TypeFlags) {
    return llvm::ScalableVectorType::get(Builder.getInt1Ty(), 4);
  case SVETypeFlags::EltTyFloat64:
    return llvm::ScalableVectorType::get(Builder.getInt1Ty(), 2);
  case SVETypeFlags::EltTyBool8:
    return llvm::ScalableVectorType::get(Builder.getInt1Ty(), 16);
  case SVETypeFlags::EltTyBool16:
    return llvm::ScalableVectorType::get(Builder.getInt1Ty(), 8);
  case SVETypeFlags::EltTyBool32:
    return llvm::ScalableVectorType::get(Builder.getInt1Ty(), 4);
  case SVETypeFlags::EltTyBool64:
    return llvm::ScalableVectorType::get(Builder.getInt1Ty(), 2);
  }
}
@@ -7599,6 +7608,12 @@ CodeGenFunction::getSVEType(const SVETypeFlags &TypeFlags) {
  }
}

llvm::Value *CodeGenFunction::EmitSVEAllTruePred(SVETypeFlags TypeFlags) {
  Function *Ptrue =
      CGM.getIntrinsic(Intrinsic::aarch64_sve_ptrue, getSVEPredType(TypeFlags));
  return Builder.CreateCall(Ptrue, {Builder.getInt32(/*SV_ALL*/ 31)});
}
constexpr unsigned SVEBitsPerBlock = 128;
static llvm::ScalableVectorType *getSVEVectorForElementType(llvm::Type *EltTy) {
@@ -8007,6 +8022,64 @@ Value *CodeGenFunction::EmitAArch64SVEBuiltinExpr(unsigned BuiltinID,
    return Builder.CreateCall(F, {Ops[0], Ops[1], Ops[0]});
  }

  case SVE::BI__builtin_sve_svdupq_n_b8:
  case SVE::BI__builtin_sve_svdupq_n_b16:
  case SVE::BI__builtin_sve_svdupq_n_b32:
  case SVE::BI__builtin_sve_svdupq_n_b64:
  case SVE::BI__builtin_sve_svdupq_n_u8:
  case SVE::BI__builtin_sve_svdupq_n_s8:
  case SVE::BI__builtin_sve_svdupq_n_u64:
  case SVE::BI__builtin_sve_svdupq_n_f64:
  case SVE::BI__builtin_sve_svdupq_n_s64:
  case SVE::BI__builtin_sve_svdupq_n_u16:
  case SVE::BI__builtin_sve_svdupq_n_f16:
  case SVE::BI__builtin_sve_svdupq_n_s16:
  case SVE::BI__builtin_sve_svdupq_n_u32:
  case SVE::BI__builtin_sve_svdupq_n_f32:
  case SVE::BI__builtin_sve_svdupq_n_s32: {
    // These builtins are implemented by storing each element to an array and
    // using ld1rq to materialize a vector.
    unsigned NumOpnds = Ops.size();

    bool IsBoolTy =
        cast<llvm::VectorType>(Ty)->getElementType()->isIntegerTy(1);

    // For svdupq_n_b* the element type is an integer of width 128/NumOpnds,
    // so that the compare can use the width that is natural for the expected
    // number of predicate lanes.
    llvm::Type *EltTy = Ops[0]->getType();
    if (IsBoolTy)
      EltTy = IntegerType::get(getLLVMContext(), SVEBitsPerBlock / NumOpnds);

    Address Alloca = CreateTempAlloca(llvm::ArrayType::get(EltTy, NumOpnds),
                                      CharUnits::fromQuantity(16));
    for (unsigned I = 0; I < NumOpnds; ++I)
      Builder.CreateDefaultAlignedStore(
          IsBoolTy ? Builder.CreateZExt(Ops[I], EltTy) : Ops[I],
          Builder.CreateGEP(Alloca.getPointer(),
                            {Builder.getInt64(0), Builder.getInt64(I)}));

    SVETypeFlags TypeFlags(Builtin->TypeModifier);
    Value *Pred = EmitSVEAllTruePred(TypeFlags);

    llvm::Type *OverloadedTy = getSVEVectorForElementType(EltTy);
    Function *F = CGM.getIntrinsic(Intrinsic::aarch64_sve_ld1rq, OverloadedTy);
    Value *Alloca0 = Builder.CreateGEP(
        Alloca.getPointer(), {Builder.getInt64(0), Builder.getInt64(0)});
    Value *LD1RQ = Builder.CreateCall(F, {Pred, Alloca0});
    if (!IsBoolTy)
      return LD1RQ;

    // For svdupq_n_b* we need an additional 'cmpne' with '0' to turn the
    // loaded integer lanes back into a predicate.
    F = CGM.getIntrinsic(NumOpnds == 2 ? Intrinsic::aarch64_sve_cmpne
                                       : Intrinsic::aarch64_sve_cmpne_wide,
                         OverloadedTy);
    Value *Call =
        Builder.CreateCall(F, {Pred, LD1RQ, EmitSVEDupX(Builder.getInt64(0))});
    return EmitSVEPredicateCast(Call, cast<llvm::ScalableVectorType>(Ty));
  }

  case SVE::BI__builtin_sve_svpfalse_b:
    return ConstantInt::getFalse(Ty);
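
The predicate forms above follow the same store-and-replicate scheme and then recover a predicate with the compare against zero. Roughly, in ACLE C (a sketch; the helper name is ours, and b64 would use plain svcmpne since its lanes are already 64-bit):

#include <arm_sve.h>
#include <stdbool.h>

// Sketch of the svdupq_n_b16 lowering: each bool is zero-extended to a
// 16-bit lane (128 bits / 8 operands), replicated with svld1rq, then turned
// back into a predicate by comparing the lanes against zero.
static svbool_t dupq_b16_sketch(bool b0, bool b1, bool b2, bool b3,
                                bool b4, bool b5, bool b6, bool b7) {
  int16_t buf[8] __attribute__((aligned(16))) = {b0, b1, b2, b3,
                                                 b4, b5, b6, b7};
  svbool_t pg = svptrue_b16();
  svint16_t vec = svld1rq_s16(pg, buf);
  return svcmpne_wide_n_s16(pg, vec, 0); // lane active <=> stored value != 0
}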

clang/lib/CodeGen/CodeGenFunction.h

@@ -3918,6 +3918,7 @@ public:
  llvm::Type *getEltType(SVETypeFlags TypeFlags);
  llvm::ScalableVectorType *getSVEType(const SVETypeFlags &TypeFlags);
  llvm::ScalableVectorType *getSVEPredType(SVETypeFlags TypeFlags);
  llvm::Value *EmitSVEAllTruePred(SVETypeFlags TypeFlags);
  llvm::Value *EmitSVEDupX(llvm::Value *Scalar);
  llvm::Value *EmitSVEPredicateCast(llvm::Value *Pred,
                                    llvm::ScalableVectorType *VTy);

clang/test/CodeGen/aarch64-sve-intrinsics/acle_sve_dupq.c

@@ -0,0 +1,389 @@
// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s
// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s
#include <arm_sve.h>
#ifdef SVE_OVERLOADED_FORMS
// A simple used,unused... macro, long enough to represent any SVE builtin.
#define SVE_ACLE_FUNC(A1,A2_UNUSED,A3,A4_UNUSED) A1##A3
#else
#define SVE_ACLE_FUNC(A1,A2,A3,A4) A1##A2##A3##A4
#endif
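// For example, SVE_ACLE_FUNC(svdupq,_n,_s8,) expands to the overloaded name
// svdupq_s8, and to the full name svdupq_n_s8 otherwise; likewise
// SVE_ACLE_FUNC(svdupq_lane,_s8,,) yields svdupq_lane or svdupq_lane_s8.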
svint8_t test_svdupq_lane_s8(svint8_t data, uint64_t index)
{
// CHECK-LABEL: test_svdupq_lane_s8
// CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.dupq.lane.nxv16i8(<vscale x 16 x i8> %data, i64 %index)
// CHECK: ret <vscale x 16 x i8> %[[INTRINSIC]]
return SVE_ACLE_FUNC(svdupq_lane,_s8,,)(data, index);
}
svint16_t test_svdupq_lane_s16(svint16_t data, uint64_t index)
{
// CHECK-LABEL: test_svdupq_lane_s16
// CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.dupq.lane.nxv8i16(<vscale x 8 x i16> %data, i64 %index)
// CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
return SVE_ACLE_FUNC(svdupq_lane,_s16,,)(data, index);
}
svint32_t test_svdupq_lane_s32(svint32_t data, uint64_t index)
{
// CHECK-LABEL: test_svdupq_lane_s32
// CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.dupq.lane.nxv4i32(<vscale x 4 x i32> %data, i64 %index)
// CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
return SVE_ACLE_FUNC(svdupq_lane,_s32,,)(data, index);
}
svint64_t test_svdupq_lane_s64(svint64_t data, uint64_t index)
{
// CHECK-LABEL: test_svdupq_lane_s64
// CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.dupq.lane.nxv2i64(<vscale x 2 x i64> %data, i64 %index)
// CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
return SVE_ACLE_FUNC(svdupq_lane,_s64,,)(data, index);
}
svuint8_t test_svdupq_lane_u8(svuint8_t data, uint64_t index)
{
// CHECK-LABEL: test_svdupq_lane_u8
// CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.dupq.lane.nxv16i8(<vscale x 16 x i8> %data, i64 %index)
// CHECK: ret <vscale x 16 x i8> %[[INTRINSIC]]
return SVE_ACLE_FUNC(svdupq_lane,_u8,,)(data, index);
}
svuint16_t test_svdupq_lane_u16(svuint16_t data, uint64_t index)
{
// CHECK-LABEL: test_svdupq_lane_u16
// CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.dupq.lane.nxv8i16(<vscale x 8 x i16> %data, i64 %index)
// CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
return SVE_ACLE_FUNC(svdupq_lane,_u16,,)(data, index);
}
svuint32_t test_svdupq_lane_u32(svuint32_t data, uint64_t index)
{
// CHECK-LABEL: test_svdupq_lane_u32
// CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.dupq.lane.nxv4i32(<vscale x 4 x i32> %data, i64 %index)
// CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
return SVE_ACLE_FUNC(svdupq_lane,_u32,,)(data, index);
}
svuint64_t test_svdupq_lane_u64(svuint64_t data, uint64_t index)
{
// CHECK-LABEL: test_svdupq_lane_u64
// CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.dupq.lane.nxv2i64(<vscale x 2 x i64> %data, i64 %index)
// CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
return SVE_ACLE_FUNC(svdupq_lane,_u64,,)(data, index);
}
svfloat16_t test_svdupq_lane_f16(svfloat16_t data, uint64_t index)
{
// CHECK-LABEL: test_svdupq_lane_f16
// CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x half> @llvm.aarch64.sve.dupq.lane.nxv8f16(<vscale x 8 x half> %data, i64 %index)
// CHECK: ret <vscale x 8 x half> %[[INTRINSIC]]
return SVE_ACLE_FUNC(svdupq_lane,_f16,,)(data, index);
}
svfloat32_t test_svdupq_lane_f32(svfloat32_t data, uint64_t index)
{
// CHECK-LABEL: test_svdupq_lane_f32
// CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x float> @llvm.aarch64.sve.dupq.lane.nxv4f32(<vscale x 4 x float> %data, i64 %index)
// CHECK: ret <vscale x 4 x float> %[[INTRINSIC]]
return SVE_ACLE_FUNC(svdupq_lane,_f32,,)(data, index);
}
svfloat64_t test_svdupq_lane_f64(svfloat64_t data, uint64_t index)
{
// CHECK-LABEL: test_svdupq_lane_f64
// CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x double> @llvm.aarch64.sve.dupq.lane.nxv2f64(<vscale x 2 x double> %data, i64 %index)
// CHECK: ret <vscale x 2 x double> %[[INTRINSIC]]
return SVE_ACLE_FUNC(svdupq_lane,_f64,,)(data, index);
}
svint8_t test_svdupq_n_s8(int8_t x0, int8_t x1, int8_t x2, int8_t x3,
int8_t x4, int8_t x5, int8_t x6, int8_t x7,
int8_t x8, int8_t x9, int8_t x10, int8_t x11,
int8_t x12, int8_t x13, int8_t x14, int8_t x15)
{
// CHECK-LABEL: test_svdupq_n_s8
// CHECK: %[[ALLOCA:.*]] = alloca [16 x i8], align 16
// CHECK: %[[BASE:.*]] = getelementptr inbounds [16 x i8], [16 x i8]* %[[ALLOCA]], i64 0, i64 0
// CHECK: store i8 %x0, i8* %[[BASE]], align 16
// <assume other stores>
// CHECK: %[[GEP:.*]] = getelementptr inbounds [16 x i8], [16 x i8]* %[[ALLOCA]], i64 0, i64 15
// CHECK: store i8 %x15, i8* %[[GEP]], align 1
// CHECK-NOT: store
// CHECK: %[[PTRUE:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.ptrue.nxv16i1(i32 31)
// CHECK: %[[LOAD:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.ld1rq.nxv16i8(<vscale x 16 x i1> %[[PTRUE]], i8* nonnull %[[BASE]])
// CHECK: ret <vscale x 16 x i8> %[[LOAD]]
return SVE_ACLE_FUNC(svdupq,_n,_s8,)(x0, x1, x2, x3, x4, x5, x6, x7, x8, x9, x10, x11, x12, x13, x14, x15);
}
svint16_t test_svdupq_n_s16(int16_t x0, int16_t x1, int16_t x2, int16_t x3,
int16_t x4, int16_t x5, int16_t x6, int16_t x7)
{
// CHECK-LABEL: test_svdupq_n_s16
// CHECK: %[[ALLOCA:.*]] = alloca [8 x i16], align 16
// CHECK: %[[BASE:.*]] = getelementptr inbounds [8 x i16], [8 x i16]* %[[ALLOCA]], i64 0, i64 0
// CHECK: store i16 %x0, i16* %[[BASE]], align 16
// <assume other stores>
// CHECK: %[[GEP:.*]] = getelementptr inbounds [8 x i16], [8 x i16]* %[[ALLOCA]], i64 0, i64 7
// CHECK: store i16 %x7, i16* %[[GEP]], align 2
// CHECK-NOT: store
// CHECK: call <vscale x 8 x i1> @llvm.aarch64.sve.ptrue.nxv8i1(i32 31)
// CHECK: %[[LOAD:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.ld1rq.nxv8i16(<vscale x 8 x i1> %{{.*}}, i16* nonnull %[[BASE]])
// CHECK: ret <vscale x 8 x i16> %[[LOAD]]
return SVE_ACLE_FUNC(svdupq,_n,_s16,)(x0, x1, x2, x3, x4, x5, x6, x7);
}
svint32_t test_svdupq_n_s32(int32_t x0, int32_t x1, int32_t x2, int32_t x3)
{
// CHECK-LABEL: test_svdupq_n_s32
// CHECK: %[[ALLOCA:.*]] = alloca [4 x i32], align 16
// CHECK: %[[BASE:.*]] = getelementptr inbounds [4 x i32], [4 x i32]* %[[ALLOCA]], i64 0, i64 0
// CHECK: store i32 %x0, i32* %[[BASE]], align 16
// <assume other stores>
// CHECK: %[[GEP:.*]] = getelementptr inbounds [4 x i32], [4 x i32]* %[[ALLOCA]], i64 0, i64 3
// CHECK: store i32 %x3, i32* %[[GEP]], align 4
// CHECK-NOT: store
// CHECK: call <vscale x 4 x i1> @llvm.aarch64.sve.ptrue.nxv4i1(i32 31)
// CHECK: %[[LOAD:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.ld1rq.nxv4i32(<vscale x 4 x i1> %{{.*}}, i32* nonnull %[[BASE]])
// CHECK: ret <vscale x 4 x i32> %[[LOAD]]
return SVE_ACLE_FUNC(svdupq,_n,_s32,)(x0, x1, x2, x3);
}
svint64_t test_svdupq_n_s64(int64_t x0, int64_t x1)
{
// CHECK-LABEL: test_svdupq_n_s64
// CHECK: %[[ALLOCA:.*]] = alloca [2 x i64], align 16
// CHECK: %[[BASE:.*]] = getelementptr inbounds [2 x i64], [2 x i64]* %[[ALLOCA]], i64 0, i64 0
// CHECK: store i64 %x0, i64* %[[BASE]], align 16
// CHECK: %[[GEP:.*]] = getelementptr inbounds [2 x i64], [2 x i64]* %[[ALLOCA]], i64 0, i64 1
// CHECK: store i64 %x1, i64* %[[GEP]], align 8
// CHECK-NOT: store
// CHECK: call <vscale x 2 x i1> @llvm.aarch64.sve.ptrue.nxv2i1(i32 31)
// CHECK: %[[LOAD:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.ld1rq.nxv2i64(<vscale x 2 x i1> %{{.*}}, i64* nonnull %[[BASE]])
// CHECK: ret <vscale x 2 x i64> %[[LOAD]]
return SVE_ACLE_FUNC(svdupq,_n,_s64,)(x0, x1);
}
svuint8_t test_svdupq_n_u8(uint8_t x0, uint8_t x1, uint8_t x2, uint8_t x3,
uint8_t x4, uint8_t x5, uint8_t x6, uint8_t x7,
uint8_t x8, uint8_t x9, uint8_t x10, uint8_t x11,
uint8_t x12, uint8_t x13, uint8_t x14, uint8_t x15)
{
// CHECK-LABEL: test_svdupq_n_u8
// CHECK: %[[ALLOCA:.*]] = alloca [16 x i8], align 16
// CHECK: %[[BASE:.*]] = getelementptr inbounds [16 x i8], [16 x i8]* %[[ALLOCA]], i64 0, i64 0
// CHECK: store i8 %x0, i8* %[[BASE]], align 16
// <assume other stores>
// CHECK: %[[GEP:.*]] = getelementptr inbounds [16 x i8], [16 x i8]* %[[ALLOCA]], i64 0, i64 15
// CHECK: store i8 %x15, i8* %[[GEP]], align 1
// CHECK-NOT: store
// CHECK: %[[PTRUE:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.ptrue.nxv16i1(i32 31)
// CHECK: %[[LOAD:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.ld1rq.nxv16i8(<vscale x 16 x i1> %[[PTRUE]], i8* nonnull %[[BASE]])
// CHECK: ret <vscale x 16 x i8> %[[LOAD]]
return SVE_ACLE_FUNC(svdupq,_n,_u8,)(x0, x1, x2, x3, x4, x5, x6, x7, x8, x9, x10, x11, x12, x13, x14, x15);
}
svuint16_t test_svdupq_n_u16(uint16_t x0, uint16_t x1, uint16_t x2, uint16_t x3,
uint16_t x4, uint16_t x5, uint16_t x6, uint16_t x7)
{
// CHECK-LABEL: test_svdupq_n_u16
// CHECK: %[[ALLOCA:.*]] = alloca [8 x i16], align 16
// CHECK: %[[BASE:.*]] = getelementptr inbounds [8 x i16], [8 x i16]* %[[ALLOCA]], i64 0, i64 0
// CHECK: store i16 %x0, i16* %[[BASE]], align 16
// <assume other stores>
// CHECK: %[[GEP:.*]] = getelementptr inbounds [8 x i16], [8 x i16]* %[[ALLOCA]], i64 0, i64 7
// CHECK: store i16 %x7, i16* %[[GEP]], align 2
// CHECK-NOT: store
// CHECK: call <vscale x 8 x i1> @llvm.aarch64.sve.ptrue.nxv8i1(i32 31)
// CHECK: %[[LOAD:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.ld1rq.nxv8i16(<vscale x 8 x i1> %{{.*}}, i16* nonnull %[[BASE]])
// CHECK: ret <vscale x 8 x i16> %[[LOAD]]
return SVE_ACLE_FUNC(svdupq,_n,_u16,)(x0, x1, x2, x3, x4, x5, x6, x7);
}
svuint32_t test_svdupq_n_u32(uint32_t x0, uint32_t x1, uint32_t x2, uint32_t x3)
{
// CHECK-LABEL: test_svdupq_n_u32
// CHECK: %[[ALLOCA:.*]] = alloca [4 x i32], align 16
// CHECK: %[[BASE:.*]] = getelementptr inbounds [4 x i32], [4 x i32]* %[[ALLOCA]], i64 0, i64 0
// CHECK: store i32 %x0, i32* %[[BASE]], align 16
// <assume other stores>
// CHECK: %[[GEP:.*]] = getelementptr inbounds [4 x i32], [4 x i32]* %[[ALLOCA]], i64 0, i64 3
// CHECK: store i32 %x3, i32* %[[GEP]], align 4
// CHECK-NOT: store
// CHECK: call <vscale x 4 x i1> @llvm.aarch64.sve.ptrue.nxv4i1(i32 31)
// CHECK: %[[LOAD:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.ld1rq.nxv4i32(<vscale x 4 x i1> %{{.*}}, i32* nonnull %[[BASE]])
// CHECK: ret <vscale x 4 x i32> %[[LOAD]]
return SVE_ACLE_FUNC(svdupq,_n,_u32,)(x0, x1, x2, x3);
}
svuint64_t test_svdupq_n_u64(uint64_t x0, uint64_t x1)
{
// CHECK-LABEL: test_svdupq_n_u64
// CHECK: %[[ALLOCA:.*]] = alloca [2 x i64], align 16
// CHECK: %[[BASE:.*]] = getelementptr inbounds [2 x i64], [2 x i64]* %[[ALLOCA]], i64 0, i64 0
// CHECK: store i64 %x0, i64* %[[BASE]], align 16
// CHECK: %[[GEP:.*]] = getelementptr inbounds [2 x i64], [2 x i64]* %[[ALLOCA]], i64 0, i64 1
// CHECK: store i64 %x1, i64* %[[GEP]], align 8
// CHECK-NOT: store
// CHECK: call <vscale x 2 x i1> @llvm.aarch64.sve.ptrue.nxv2i1(i32 31)
// CHECK: %[[LOAD:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.ld1rq.nxv2i64(<vscale x 2 x i1> %{{.*}}, i64* nonnull %[[BASE]])
// CHECK: ret <vscale x 2 x i64> %[[LOAD]]
return SVE_ACLE_FUNC(svdupq,_n,_u64,)(x0, x1);
}
svfloat16_t test_svdupq_n_f16(float16_t x0, float16_t x1, float16_t x2, float16_t x3,
float16_t x4, float16_t x5, float16_t x6, float16_t x7)
{
// CHECK-LABEL: test_svdupq_n_f16
// CHECK: %[[ALLOCA:.*]] = alloca [8 x half], align 16
// CHECK: %[[BASE:.*]] = getelementptr inbounds [8 x half], [8 x half]* %[[ALLOCA]], i64 0, i64 0
// CHECK: store half %x0, half* %[[BASE]], align 16
// <assume other stores>
// CHECK: %[[GEP:.*]] = getelementptr inbounds [8 x half], [8 x half]* %[[ALLOCA]], i64 0, i64 7
// CHECK: store half %x7, half* %[[GEP]], align 2
// CHECK-NOT: store
// CHECK: call <vscale x 8 x i1> @llvm.aarch64.sve.ptrue.nxv8i1(i32 31)
// CHECK: %[[LOAD:.*]] = call <vscale x 8 x half> @llvm.aarch64.sve.ld1rq.nxv8f16(<vscale x 8 x i1> %{{.*}}, half* nonnull %[[BASE]])
// CHECK: ret <vscale x 8 x half> %[[LOAD]]
return SVE_ACLE_FUNC(svdupq,_n,_f16,)(x0, x1, x2, x3, x4, x5, x6, x7);
}
svfloat32_t test_svdupq_n_f32(float32_t x0, float32_t x1, float32_t x2, float32_t x3)
{
// CHECK-LABEL: test_svdupq_n_f32
// CHECK: %[[ALLOCA:.*]] = alloca [4 x float], align 16
// CHECK: %[[BASE:.*]] = getelementptr inbounds [4 x float], [4 x float]* %[[ALLOCA]], i64 0, i64 0
// CHECK: store float %x0, float* %[[BASE]], align 16
// <assume other stores>
// CHECK: %[[GEP:.*]] = getelementptr inbounds [4 x float], [4 x float]* %[[ALLOCA]], i64 0, i64 3
// CHECK: store float %x3, float* %[[GEP]], align 4
// CHECK-NOT: store
// CHECK: call <vscale x 4 x i1> @llvm.aarch64.sve.ptrue.nxv4i1(i32 31)
// CHECK: %[[LOAD:.*]] = call <vscale x 4 x float> @llvm.aarch64.sve.ld1rq.nxv4f32(<vscale x 4 x i1> %{{.*}}, float* nonnull %[[BASE]])
// CHECK: ret <vscale x 4 x float> %[[LOAD]]
return SVE_ACLE_FUNC(svdupq,_n,_f32,)(x0, x1, x2, x3);
}
svfloat64_t test_svdupq_n_f64(float64_t x0, float64_t x1)
{
// CHECK-LABEL: test_svdupq_n_f64
// CHECK: %[[ALLOCA:.*]] = alloca [2 x double], align 16
// CHECK: %[[BASE:.*]] = getelementptr inbounds [2 x double], [2 x double]* %[[ALLOCA]], i64 0, i64 0
// CHECK: store double %x0, double* %[[BASE]], align 16
// CHECK: %[[GEP:.*]] = getelementptr inbounds [2 x double], [2 x double]* %[[ALLOCA]], i64 0, i64 1
// CHECK: store double %x1, double* %[[GEP]], align 8
// CHECK-NOT: store
// CHECK: call <vscale x 2 x i1> @llvm.aarch64.sve.ptrue.nxv2i1(i32 31)
// CHECK: %[[LOAD:.*]] = call <vscale x 2 x double> @llvm.aarch64.sve.ld1rq.nxv2f64(<vscale x 2 x i1> %{{.*}}, double* nonnull %[[BASE]])
// CHECK: ret <vscale x 2 x double> %[[LOAD]]
return SVE_ACLE_FUNC(svdupq,_n,_f64,)(x0, x1);
}
svbool_t test_svdupq_n_b8(bool x0, bool x1, bool x2, bool x3,
bool x4, bool x5, bool x6, bool x7,
bool x8, bool x9, bool x10, bool x11,
bool x12, bool x13, bool x14, bool x15)
{
// CHECK-LABEL: test_svdupq_n_b8
// CHECK-DAG: %[[ALLOCA:.*]] = alloca [16 x i8], align 16
// CHECK-DAG: %[[X0:.*]] = zext i1 %x0 to i8
// CHECK-DAG: %[[X15:.*]] = zext i1 %x15 to i8
// CHECK-DAG: %[[BASE:.*]] = getelementptr inbounds [16 x i8], [16 x i8]* %[[ALLOCA]], i64 0, i64 0
// CHECK-DAG: store i8 %[[X0]], i8* %[[BASE]], align 16
// <assume other stores>
// CHECK: %[[GEP:.*]] = getelementptr inbounds [16 x i8], [16 x i8]* %[[ALLOCA]], i64 0, i64 15
// CHECK: store i8 %[[X15]], i8* %[[GEP]], align 1
// CHECK-NOT: store
// CHECK: %[[PTRUE:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.ptrue.nxv16i1(i32 31)
// CHECK: %[[LOAD:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.ld1rq.nxv16i8(<vscale x 16 x i1> %[[PTRUE]], i8* nonnull %[[BASE]])
// CHECK: %[[ZERO:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.dup.x.nxv2i64(i64 0)
// CHECK: %[[CMP:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.cmpne.wide.nxv16i8(<vscale x 16 x i1> %[[PTRUE]], <vscale x 16 x i8> %[[LOAD]], <vscale x 2 x i64> %[[ZERO]])
// CHECK: ret <vscale x 16 x i1> %[[CMP]]
return SVE_ACLE_FUNC(svdupq,_n,_b8,)(x0, x1, x2, x3, x4, x5, x6, x7, x8, x9, x10, x11, x12, x13, x14, x15);
}
svbool_t test_svdupq_n_b16(bool x0, bool x1, bool x2, bool x3,
bool x4, bool x5, bool x6, bool x7)
{
// CHECK-LABEL: test_svdupq_n_b16
// CHECK-DAG: %[[ALLOCA:.*]] = alloca [8 x i16], align 16
// CHECK-DAG: %[[X0:.*]] = zext i1 %x0 to i16
// CHECK-DAG: %[[X7:.*]] = zext i1 %x7 to i16
// CHECK-DAG: %[[BASE:.*]] = getelementptr inbounds [8 x i16], [8 x i16]* %[[ALLOCA]], i64 0, i64 0
// CHECK-DAG: store i16 %[[X0]], i16* %[[BASE]], align 16
// <assume other stores>
// CHECK: %[[GEP:.*]] = getelementptr inbounds [8 x i16], [8 x i16]* %[[ALLOCA]], i64 0, i64 7
// CHECK: store i16 %[[X7]], i16* %[[GEP]], align 2
// CHECK-NOT: store
// CHECK: %[[PTRUE:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.ptrue.nxv8i1(i32 31)
// CHECK: %[[LOAD:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.ld1rq.nxv8i16(<vscale x 8 x i1> %{{.*}}, i16* nonnull %[[BASE]])
// CHECK: %[[ZERO:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.dup.x.nxv2i64(i64 0)
// CHECK: %[[CMP:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.cmpne.wide.nxv8i16(<vscale x 8 x i1> %{{.*}}, <vscale x 8 x i16> %[[LOAD]], <vscale x 2 x i64> %[[ZERO]])
// CHECK: %[[CAST:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv8i1(<vscale x 8 x i1> %[[CMP]])
// CHECK: ret <vscale x 16 x i1> %[[CAST]]
return SVE_ACLE_FUNC(svdupq,_n,_b16,)(x0, x1, x2, x3, x4, x5, x6, x7);
}
svbool_t test_svdupq_n_b32(bool x0, bool x1, bool x2, bool x3)
{
// CHECK-LABEL: test_svdupq_n_b32
// CHECK-DAG: %[[ALLOCA:.*]] = alloca [4 x i32], align 16
// CHECK-DAG: %[[X0:.*]] = zext i1 %x0 to i32
// CHECK-DAG: %[[X3:.*]] = zext i1 %x3 to i32
// CHECK-DAG: %[[BASE:.*]] = getelementptr inbounds [4 x i32], [4 x i32]* %[[ALLOCA]], i64 0, i64 0
// CHECK-DAG: store i32 %[[X0]], i32* %[[BASE]], align 16
// <assume other stores>
// CHECK: %[[GEP:.*]] = getelementptr inbounds [4 x i32], [4 x i32]* %[[ALLOCA]], i64 0, i64 3
// CHECK: store i32 %[[X3]], i32* %[[GEP]], align 4
// CHECK-NOT: store
// CHECK: %[[PTRUE:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.ptrue.nxv4i1(i32 31)
// CHECK: %[[LOAD:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.ld1rq.nxv4i32(<vscale x 4 x i1> %{{.*}}, i32* nonnull %[[BASE]])
// CHECK: %[[ZERO:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.dup.x.nxv2i64(i64 0)
// CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.cmpne.wide.nxv4i32(<vscale x 4 x i1> %{{.*}}, <vscale x 4 x i32> %[[LOAD]], <vscale x 2 x i64> %[[ZERO]])
// CHECK: %[[CAST:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv4i1(<vscale x 4 x i1> %[[INTRINSIC]])
// CHECK: ret <vscale x 16 x i1> %[[CAST]]
return SVE_ACLE_FUNC(svdupq,_n,_b32,)(x0, x1, x2, x3);
}
svbool_t test_svdupq_n_b64(bool x0, bool x1)
{
// CHECK-LABEL: test_svdupq_n_b64
// CHECK-DAG: %[[ALLOCA:.*]] = alloca [2 x i64], align 16
// CHECK-DAG: %[[X0:.*]] = zext i1 %x0 to i64
// CHECK-DAG: %[[X1:.*]] = zext i1 %x1 to i64
// CHECK-DAG: %[[BASE:.*]] = getelementptr inbounds [2 x i64], [2 x i64]* %[[ALLOCA]], i64 0, i64 0
// CHECK-DAG: store i64 %[[X0]], i64* %[[BASE]], align 16
// CHECK: %[[GEP:.*]] = getelementptr inbounds [2 x i64], [2 x i64]* %[[ALLOCA]], i64 0, i64 1
// CHECK: store i64 %[[X1]], i64* %[[GEP]], align 8
// CHECK-NOT: store
// CHECK: %[[PTRUE:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.ptrue.nxv2i1(i32 31)
// CHECK: %[[LOAD:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.ld1rq.nxv2i64(<vscale x 2 x i1> %{{.*}}, i64* nonnull %[[BASE]])
// CHECK: %[[ZERO:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.dup.x.nxv2i64(i64 0)
// CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.cmpne.nxv2i64(<vscale x 2 x i1> %{{.*}}, <vscale x 2 x i64> %[[LOAD]], <vscale x 2 x i64> %[[ZERO]])
// CHECK: %[[CAST:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv2i1(<vscale x 2 x i1> %[[INTRINSIC]])
// CHECK: ret <vscale x 16 x i1> %[[CAST]]
return SVE_ACLE_FUNC(svdupq,_n,_b64,)(x0, x1);
}
// This test checks that the `alloca` is added to the entry-block.
svint64_t test_svdupq_control_flow(int64_t x0, int64_t x1, svint64_t Default, bool P)
{
// CHECK-LABEL: test_svdupq_control_flow
// CHECK: entry:
// CHECK: %[[ALLOCA:.*]] = alloca [2 x i64], align 16
// CHECK: [[BR:.*]]:
// CHECK: %[[BASE:.*]] = getelementptr inbounds [2 x i64], [2 x i64]* %[[ALLOCA]], i64 0, i64 0
// CHECK: store i64 %x0, i64* %[[BASE]], align 16
// CHECK: %[[GEP:.*]] = getelementptr inbounds [2 x i64], [2 x i64]* %[[ALLOCA]], i64 0, i64 1
// CHECK: store i64 %x1, i64* %[[GEP]], align 8
// CHECK-NOT: store
// CHECK: call <vscale x 2 x i1> @llvm.aarch64.sve.ptrue.nxv2i1(i32 31)
// CHECK: %[[LOAD:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.ld1rq.nxv2i64(<vscale x 2 x i1> %{{.*}}, i64* nonnull %[[BASE]])
// CHECK: [[END:.*]]:
// CHECK: %[[RETVAL:.*]] = phi <vscale x 2 x i64> [ %[[LOAD]], %if.end ], [ %Default, %entry ]
// CHECK: ret <vscale x 2 x i64> %[[RETVAL]]
if (P)
return Default;
return SVE_ACLE_FUNC(svdupq,_n,_s64,)(x0, x1);
}

clang/utils/TableGen/SveEmitter.cpp

@@ -94,7 +94,9 @@ public:
   bool isDefault() const { return DefaultType; }
   bool isFloat() const { return Float; }
   bool isInteger() const { return !Float && !Predicate; }
-  bool isScalarPredicate() const { return !Float && ElementBitwidth == 1; }
+  bool isScalarPredicate() const {
+    return !Float && Predicate && NumVectors == 0;
+  }
   bool isPredicateVector() const { return Predicate; }
   bool isPredicatePattern() const { return PredicatePattern; }
   bool isPrefetchOp() const { return PrefetchOp; }
@@ -407,12 +409,12 @@ std::string SVEType::str() const {
     if (Float)
       S += "float";
-    else if (isScalarPredicate())
+    else if (isScalarPredicate() || isPredicateVector())
       S += "bool";
     else
       S += "int";

-    if (!isScalarPredicate())
+    if (!isScalarPredicate() && !isPredicateVector())
       S += utostr(ElementBitwidth);
     if (!isScalableVector() && isVector())
       S += "x" + utostr(getNumElements());
@@ -433,7 +435,6 @@ void SVEType::applyTypespec() {
     switch (I) {
     case 'P':
       Predicate = true;
-      ElementBitwidth = 1;
       break;
     case 'U':
       Signed = false;