forked from OSchip/llvm-project
[SveEmitter] Implement zeroing of false lanes
This implements zeroing of false lanes for binary operations: instead of merging into the first operand vector (_m), a `select` is placed on the first input vector. This approach translates easily to the use of the zeroing `movprfx` instruction. This patch also adds builtins for svabd, svadd, svdiv, svdivr, svmax, svmin, svmul, svmulh, svsub and svsubr.

Reviewers: SjoerdMeijer, efriedma, rovka

Reviewed By: efriedma

Tags: #clang

Differential Revision: https://reviews.llvm.org/D77593
parent 92bf405ea6
commit 06c980df46
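As a sketch of the new lowering (mirroring the tests added below; the function name is hypothetical and the IR in the comments is illustrative, not part of the patch):

#include <arm_sve.h>

svint32_t zeroing_add(svbool_t pg, svint32_t op1, svint32_t op2)
{
  // The _z form now lowers to a sel that zeroes the false lanes of op1,
  // followed by the ordinary predicated intrinsic (see acle_sve_add.c below):
  //   %pg32 = call <vscale x 4 x i1>  @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
  //   %sel  = call <vscale x 4 x i32> @llvm.aarch64.sve.sel.nxv4i32(%pg32, %op1, zeroinitializer)
  //   %res  = call <vscale x 4 x i32> @llvm.aarch64.sve.add.nxv4i32(%pg32, %sel, %op2)
  return svadd_s32_z(pg, op1, op2);
}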
@@ -308,6 +308,30 @@ multiclass SInstZPZ<string name, string types, string intrinsic, list<FlagType>
defm SVABS : SInstZPZ<"svabs", "csil", "aarch64_sve_abs">;
defm SVNEG : SInstZPZ<"svneg", "csil", "aarch64_sve_neg">;

//------------------------------------------------------------------------------

multiclass SInstZPZZ<string name, string types, string intrinsic, list<FlagType> flags=[]> {
  def _M : SInst<name # "[_{d}]", "dPdd", types, MergeOp1, intrinsic, flags>;
  def _X : SInst<name # "[_{d}]", "dPdd", types, MergeAny, intrinsic, flags>;
  def _Z : SInst<name # "[_{d}]", "dPdd", types, MergeZero, intrinsic, flags>;
}

defm SVABD_S : SInstZPZZ<"svabd", "csil", "aarch64_sve_sabd">;
defm SVABD_U : SInstZPZZ<"svabd", "UcUsUiUl", "aarch64_sve_uabd">;
defm SVADD : SInstZPZZ<"svadd", "csilUcUsUiUl", "aarch64_sve_add">;
defm SVDIV_S : SInstZPZZ<"svdiv", "il", "aarch64_sve_sdiv">;
defm SVDIV_U : SInstZPZZ<"svdiv", "UiUl", "aarch64_sve_udiv">;
defm SVDIVR_S : SInstZPZZ<"svdivr", "il", "aarch64_sve_sdivr">;
defm SVDIVR_U : SInstZPZZ<"svdivr", "UiUl", "aarch64_sve_udivr">;
defm SVMAX_S : SInstZPZZ<"svmax", "csil", "aarch64_sve_smax">;
defm SVMAX_U : SInstZPZZ<"svmax", "UcUsUiUl", "aarch64_sve_umax">;
defm SVMIN_S : SInstZPZZ<"svmin", "csil", "aarch64_sve_smin">;
defm SVMIN_U : SInstZPZZ<"svmin", "UcUsUiUl", "aarch64_sve_umin">;
defm SVMUL : SInstZPZZ<"svmul", "csilUcUsUiUl", "aarch64_sve_mul">;
defm SVMULH_S : SInstZPZZ<"svmulh", "csil", "aarch64_sve_smulh">;
defm SVMULH_U : SInstZPZZ<"svmulh", "UcUsUiUl", "aarch64_sve_umulh">;
defm SVSUB : SInstZPZZ<"svsub", "csilUcUsUiUl", "aarch64_sve_sub">;
defm SVSUBR : SInstZPZZ<"svsubr", "csilUcUsUiUl", "aarch64_sve_subr">;
////////////////////////////////////////////////////////////////////////////////
// Permutations and selection
def SVEXT : SInst<"svext[_{d}]", "dddi", "csilUcUsUiUlhfd", MergeNone, "aarch64_sve_ext", [], [ImmCheck<2, ImmCheckExtract, 1>]>;
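For reference, each SInstZPZZ instantiation above expands to three builtin flavours per element type; the "dPdd" prototype string denotes a data-vector result, a predicate, and two data-vector operands. A sketch of what `defm SVADD` yields for 32-bit signed elements (prototypes inferred from the multiclass; the names follow the ACLE):

svint32_t svadd_s32_m(svbool_t pg, svint32_t op1, svint32_t op2); // MergeOp1: inactive lanes take op1
svint32_t svadd_s32_x(svbool_t pg, svint32_t op1, svint32_t op2); // MergeAny: inactive lanes undefined
svint32_t svadd_s32_z(svbool_t pg, svint32_t op1, svint32_t op2); // MergeZero: inactive lanes zeroed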
@@ -7657,6 +7657,14 @@ Value *CodeGenFunction::EmitAArch64SVEBuiltinExpr(unsigned BuiltinID,
    }
  }

  // Predicated intrinsics with _z suffix need a select w/ zeroinitializer.
  if (TypeFlags.getMergeType() == SVETypeFlags::MergeZero) {
    llvm::Type *OpndTy = Ops[1]->getType();
    auto *SplatZero = Constant::getNullValue(OpndTy);
    Function *Sel = CGM.getIntrinsic(Intrinsic::aarch64_sve_sel, OpndTy);
    Ops[1] = Builder.CreateCall(Sel, {Ops[0], Ops[1], SplatZero});
  }

  Function *F = CGM.getIntrinsic(Builtin->LLVMIntrinsic, OverloadedTy);
  Value *Call = Builder.CreateCall(F, Ops);
  return Call;
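The effect of the MergeZero case above can also be written directly in ACLE terms; a minimal sketch (the function name is hypothetical, and svsel_s32/svdup_n_s32 are assumed to be available ACLE intrinsics):

#include <arm_sve.h>

// Semantically, svadd_s32_z(pg, op1, op2) is equivalent to zeroing the
// false lanes of op1 and then applying the merging form:
svint32_t svadd_s32_z_equiv(svbool_t pg, svint32_t op1, svint32_t op2)
{
  svint32_t zeroed = svsel_s32(pg, op1, svdup_n_s32(0)); // false lanes -> 0
  return svadd_s32_m(pg, zeroed, op2);
}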
@@ -0,0 +1,229 @@
// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s
// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s

#include <arm_sve.h>

#ifdef SVE_OVERLOADED_FORMS
// A simple used,unused... macro, long enough to represent any SVE builtin.
#define SVE_ACLE_FUNC(A1,A2_UNUSED,A3,A4_UNUSED) A1##A3
#else
#define SVE_ACLE_FUNC(A1,A2,A3,A4) A1##A2##A3##A4
#endif

svint8_t test_svabd_s8_z(svbool_t pg, svint8_t op1, svint8_t op2)
{
  // CHECK-LABEL: test_svabd_s8_z
  // CHECK: %[[SEL:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.sel.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %op1, <vscale x 16 x i8> zeroinitializer)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.sabd.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %[[SEL]], <vscale x 16 x i8> %op2)
  // CHECK: ret <vscale x 16 x i8> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svabd,_s8,_z,)(pg, op1, op2);
}

svint16_t test_svabd_s16_z(svbool_t pg, svint16_t op1, svint16_t op2)
{
  // CHECK-LABEL: test_svabd_s16_z
  // CHECK-DAG: %[[PG:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg)
  // CHECK-DAG: %[[SEL:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.sel.nxv8i16(<vscale x 8 x i1> %[[PG]], <vscale x 8 x i16> %op1, <vscale x 8 x i16> zeroinitializer)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.sabd.nxv8i16(<vscale x 8 x i1> %[[PG]], <vscale x 8 x i16> %[[SEL]], <vscale x 8 x i16> %op2)
  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svabd,_s16,_z,)(pg, op1, op2);
}

svint32_t test_svabd_s32_z(svbool_t pg, svint32_t op1, svint32_t op2)
{
  // CHECK-LABEL: test_svabd_s32_z
  // CHECK-DAG: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
  // CHECK-DAG: %[[SEL:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.sel.nxv4i32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x i32> %op1, <vscale x 4 x i32> zeroinitializer)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.sabd.nxv4i32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x i32> %[[SEL]], <vscale x 4 x i32> %op2)
  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svabd,_s32,_z,)(pg, op1, op2);
}

svint64_t test_svabd_s64_z(svbool_t pg, svint64_t op1, svint64_t op2)
{
  // CHECK-LABEL: test_svabd_s64_z
  // CHECK-DAG: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
  // CHECK-DAG: %[[SEL:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.sel.nxv2i64(<vscale x 2 x i1> %[[PG]], <vscale x 2 x i64> %op1, <vscale x 2 x i64> zeroinitializer)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.sabd.nxv2i64(<vscale x 2 x i1> %[[PG]], <vscale x 2 x i64> %[[SEL]], <vscale x 2 x i64> %op2)
  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svabd,_s64,_z,)(pg, op1, op2);
}

svuint8_t test_svabd_u8_z(svbool_t pg, svuint8_t op1, svuint8_t op2)
{
  // CHECK-LABEL: test_svabd_u8_z
  // CHECK: %[[SEL:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.sel.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %op1, <vscale x 16 x i8> zeroinitializer)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.uabd.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %[[SEL]], <vscale x 16 x i8> %op2)
  // CHECK: ret <vscale x 16 x i8> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svabd,_u8,_z,)(pg, op1, op2);
}

svuint16_t test_svabd_u16_z(svbool_t pg, svuint16_t op1, svuint16_t op2)
{
  // CHECK-LABEL: test_svabd_u16_z
  // CHECK-DAG: %[[PG:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg)
  // CHECK-DAG: %[[SEL:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.sel.nxv8i16(<vscale x 8 x i1> %[[PG]], <vscale x 8 x i16> %op1, <vscale x 8 x i16> zeroinitializer)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.uabd.nxv8i16(<vscale x 8 x i1> %[[PG]], <vscale x 8 x i16> %[[SEL]], <vscale x 8 x i16> %op2)
  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svabd,_u16,_z,)(pg, op1, op2);
}

svuint32_t test_svabd_u32_z(svbool_t pg, svuint32_t op1, svuint32_t op2)
{
  // CHECK-LABEL: test_svabd_u32_z
  // CHECK-DAG: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
  // CHECK-DAG: %[[SEL:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.sel.nxv4i32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x i32> %op1, <vscale x 4 x i32> zeroinitializer)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.uabd.nxv4i32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x i32> %[[SEL]], <vscale x 4 x i32> %op2)
  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svabd,_u32,_z,)(pg, op1, op2);
}

svuint64_t test_svabd_u64_z(svbool_t pg, svuint64_t op1, svuint64_t op2)
{
  // CHECK-LABEL: test_svabd_u64_z
  // CHECK-DAG: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
  // CHECK-DAG: %[[SEL:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.sel.nxv2i64(<vscale x 2 x i1> %[[PG]], <vscale x 2 x i64> %op1, <vscale x 2 x i64> zeroinitializer)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.uabd.nxv2i64(<vscale x 2 x i1> %[[PG]], <vscale x 2 x i64> %[[SEL]], <vscale x 2 x i64> %op2)
  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svabd,_u64,_z,)(pg, op1, op2);
}

svint8_t test_svabd_s8_m(svbool_t pg, svint8_t op1, svint8_t op2)
{
  // CHECK-LABEL: test_svabd_s8_m
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.sabd.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %op1, <vscale x 16 x i8> %op2)
  // CHECK: ret <vscale x 16 x i8> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svabd,_s8,_m,)(pg, op1, op2);
}

svint16_t test_svabd_s16_m(svbool_t pg, svint16_t op1, svint16_t op2)
{
  // CHECK-LABEL: test_svabd_s16_m
  // CHECK: %[[PG:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.sabd.nxv8i16(<vscale x 8 x i1> %[[PG]], <vscale x 8 x i16> %op1, <vscale x 8 x i16> %op2)
  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svabd,_s16,_m,)(pg, op1, op2);
}

svint32_t test_svabd_s32_m(svbool_t pg, svint32_t op1, svint32_t op2)
{
  // CHECK-LABEL: test_svabd_s32_m
  // CHECK: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.sabd.nxv4i32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x i32> %op1, <vscale x 4 x i32> %op2)
  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svabd,_s32,_m,)(pg, op1, op2);
}

svint64_t test_svabd_s64_m(svbool_t pg, svint64_t op1, svint64_t op2)
{
  // CHECK-LABEL: test_svabd_s64_m
  // CHECK: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.sabd.nxv2i64(<vscale x 2 x i1> %[[PG]], <vscale x 2 x i64> %op1, <vscale x 2 x i64> %op2)
  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svabd,_s64,_m,)(pg, op1, op2);
}

svuint8_t test_svabd_u8_m(svbool_t pg, svuint8_t op1, svuint8_t op2)
{
  // CHECK-LABEL: test_svabd_u8_m
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.uabd.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %op1, <vscale x 16 x i8> %op2)
  // CHECK: ret <vscale x 16 x i8> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svabd,_u8,_m,)(pg, op1, op2);
}

svuint16_t test_svabd_u16_m(svbool_t pg, svuint16_t op1, svuint16_t op2)
{
  // CHECK-LABEL: test_svabd_u16_m
  // CHECK: %[[PG:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.uabd.nxv8i16(<vscale x 8 x i1> %[[PG]], <vscale x 8 x i16> %op1, <vscale x 8 x i16> %op2)
  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svabd,_u16,_m,)(pg, op1, op2);
}

svuint32_t test_svabd_u32_m(svbool_t pg, svuint32_t op1, svuint32_t op2)
{
  // CHECK-LABEL: test_svabd_u32_m
  // CHECK: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.uabd.nxv4i32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x i32> %op1, <vscale x 4 x i32> %op2)
  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svabd,_u32,_m,)(pg, op1, op2);
}

svuint64_t test_svabd_u64_m(svbool_t pg, svuint64_t op1, svuint64_t op2)
{
  // CHECK-LABEL: test_svabd_u64_m
  // CHECK: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.uabd.nxv2i64(<vscale x 2 x i1> %[[PG]], <vscale x 2 x i64> %op1, <vscale x 2 x i64> %op2)
  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svabd,_u64,_m,)(pg, op1, op2);
}

svint8_t test_svabd_s8_x(svbool_t pg, svint8_t op1, svint8_t op2)
{
  // CHECK-LABEL: test_svabd_s8_x
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.sabd.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %op1, <vscale x 16 x i8> %op2)
  // CHECK: ret <vscale x 16 x i8> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svabd,_s8,_x,)(pg, op1, op2);
}

svint16_t test_svabd_s16_x(svbool_t pg, svint16_t op1, svint16_t op2)
{
  // CHECK-LABEL: test_svabd_s16_x
  // CHECK: %[[PG:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.sabd.nxv8i16(<vscale x 8 x i1> %[[PG]], <vscale x 8 x i16> %op1, <vscale x 8 x i16> %op2)
  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svabd,_s16,_x,)(pg, op1, op2);
}

svint32_t test_svabd_s32_x(svbool_t pg, svint32_t op1, svint32_t op2)
{
  // CHECK-LABEL: test_svabd_s32_x
  // CHECK: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.sabd.nxv4i32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x i32> %op1, <vscale x 4 x i32> %op2)
  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svabd,_s32,_x,)(pg, op1, op2);
}

svint64_t test_svabd_s64_x(svbool_t pg, svint64_t op1, svint64_t op2)
{
  // CHECK-LABEL: test_svabd_s64_x
  // CHECK: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.sabd.nxv2i64(<vscale x 2 x i1> %[[PG]], <vscale x 2 x i64> %op1, <vscale x 2 x i64> %op2)
  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svabd,_s64,_x,)(pg, op1, op2);
}

svuint8_t test_svabd_u8_x(svbool_t pg, svuint8_t op1, svuint8_t op2)
{
  // CHECK-LABEL: test_svabd_u8_x
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.uabd.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %op1, <vscale x 16 x i8> %op2)
  // CHECK: ret <vscale x 16 x i8> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svabd,_u8,_x,)(pg, op1, op2);
}

svuint16_t test_svabd_u16_x(svbool_t pg, svuint16_t op1, svuint16_t op2)
{
  // CHECK-LABEL: test_svabd_u16_x
  // CHECK: %[[PG:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.uabd.nxv8i16(<vscale x 8 x i1> %[[PG]], <vscale x 8 x i16> %op1, <vscale x 8 x i16> %op2)
  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svabd,_u16,_x,)(pg, op1, op2);
}

svuint32_t test_svabd_u32_x(svbool_t pg, svuint32_t op1, svuint32_t op2)
{
  // CHECK-LABEL: test_svabd_u32_x
  // CHECK: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.uabd.nxv4i32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x i32> %op1, <vscale x 4 x i32> %op2)
  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svabd,_u32,_x,)(pg, op1, op2);
}

svuint64_t test_svabd_u64_x(svbool_t pg, svuint64_t op1, svuint64_t op2)
{
  // CHECK-LABEL: test_svabd_u64_x
  // CHECK: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.uabd.nxv2i64(<vscale x 2 x i1> %[[PG]], <vscale x 2 x i64> %op1, <vscale x 2 x i64> %op2)
  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svabd,_u64,_x,)(pg, op1, op2);
}
@@ -0,0 +1,229 @@
// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s
// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s

#include <arm_sve.h>

#ifdef SVE_OVERLOADED_FORMS
// A simple used,unused... macro, long enough to represent any SVE builtin.
#define SVE_ACLE_FUNC(A1,A2_UNUSED,A3,A4_UNUSED) A1##A3
#else
#define SVE_ACLE_FUNC(A1,A2,A3,A4) A1##A2##A3##A4
#endif

svint8_t test_svadd_s8_z(svbool_t pg, svint8_t op1, svint8_t op2)
{
  // CHECK-LABEL: test_svadd_s8_z
  // CHECK: %[[SEL:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.sel.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %op1, <vscale x 16 x i8> zeroinitializer)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.add.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %[[SEL]], <vscale x 16 x i8> %op2)
  // CHECK: ret <vscale x 16 x i8> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svadd,_s8,_z,)(pg, op1, op2);
}

svint16_t test_svadd_s16_z(svbool_t pg, svint16_t op1, svint16_t op2)
{
  // CHECK-LABEL: test_svadd_s16_z
  // CHECK-DAG: %[[PG:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg)
  // CHECK-DAG: %[[SEL:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.sel.nxv8i16(<vscale x 8 x i1> %[[PG]], <vscale x 8 x i16> %op1, <vscale x 8 x i16> zeroinitializer)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.add.nxv8i16(<vscale x 8 x i1> %[[PG]], <vscale x 8 x i16> %[[SEL]], <vscale x 8 x i16> %op2)
  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svadd,_s16,_z,)(pg, op1, op2);
}

svint32_t test_svadd_s32_z(svbool_t pg, svint32_t op1, svint32_t op2)
{
  // CHECK-LABEL: test_svadd_s32_z
  // CHECK-DAG: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
  // CHECK-DAG: %[[SEL:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.sel.nxv4i32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x i32> %op1, <vscale x 4 x i32> zeroinitializer)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.add.nxv4i32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x i32> %[[SEL]], <vscale x 4 x i32> %op2)
  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svadd,_s32,_z,)(pg, op1, op2);
}

svint64_t test_svadd_s64_z(svbool_t pg, svint64_t op1, svint64_t op2)
{
  // CHECK-LABEL: test_svadd_s64_z
  // CHECK-DAG: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
  // CHECK-DAG: %[[SEL:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.sel.nxv2i64(<vscale x 2 x i1> %[[PG]], <vscale x 2 x i64> %op1, <vscale x 2 x i64> zeroinitializer)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.add.nxv2i64(<vscale x 2 x i1> %[[PG]], <vscale x 2 x i64> %[[SEL]], <vscale x 2 x i64> %op2)
  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svadd,_s64,_z,)(pg, op1, op2);
}

svuint8_t test_svadd_u8_z(svbool_t pg, svuint8_t op1, svuint8_t op2)
{
  // CHECK-LABEL: test_svadd_u8_z
  // CHECK: %[[SEL:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.sel.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %op1, <vscale x 16 x i8> zeroinitializer)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.add.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %[[SEL]], <vscale x 16 x i8> %op2)
  // CHECK: ret <vscale x 16 x i8> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svadd,_u8,_z,)(pg, op1, op2);
}

svuint16_t test_svadd_u16_z(svbool_t pg, svuint16_t op1, svuint16_t op2)
{
  // CHECK-LABEL: test_svadd_u16_z
  // CHECK-DAG: %[[PG:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg)
  // CHECK-DAG: %[[SEL:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.sel.nxv8i16(<vscale x 8 x i1> %[[PG]], <vscale x 8 x i16> %op1, <vscale x 8 x i16> zeroinitializer)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.add.nxv8i16(<vscale x 8 x i1> %[[PG]], <vscale x 8 x i16> %[[SEL]], <vscale x 8 x i16> %op2)
  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svadd,_u16,_z,)(pg, op1, op2);
}

svuint32_t test_svadd_u32_z(svbool_t pg, svuint32_t op1, svuint32_t op2)
{
  // CHECK-LABEL: test_svadd_u32_z
  // CHECK-DAG: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
  // CHECK-DAG: %[[SEL:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.sel.nxv4i32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x i32> %op1, <vscale x 4 x i32> zeroinitializer)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.add.nxv4i32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x i32> %[[SEL]], <vscale x 4 x i32> %op2)
  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svadd,_u32,_z,)(pg, op1, op2);
}

svuint64_t test_svadd_u64_z(svbool_t pg, svuint64_t op1, svuint64_t op2)
{
  // CHECK-LABEL: test_svadd_u64_z
  // CHECK-DAG: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
  // CHECK-DAG: %[[SEL:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.sel.nxv2i64(<vscale x 2 x i1> %[[PG]], <vscale x 2 x i64> %op1, <vscale x 2 x i64> zeroinitializer)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.add.nxv2i64(<vscale x 2 x i1> %[[PG]], <vscale x 2 x i64> %[[SEL]], <vscale x 2 x i64> %op2)
  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svadd,_u64,_z,)(pg, op1, op2);
}

svint8_t test_svadd_s8_m(svbool_t pg, svint8_t op1, svint8_t op2)
{
  // CHECK-LABEL: test_svadd_s8_m
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.add.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %op1, <vscale x 16 x i8> %op2)
  // CHECK: ret <vscale x 16 x i8> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svadd,_s8,_m,)(pg, op1, op2);
}

svint16_t test_svadd_s16_m(svbool_t pg, svint16_t op1, svint16_t op2)
{
  // CHECK-LABEL: test_svadd_s16_m
  // CHECK: %[[PG:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.add.nxv8i16(<vscale x 8 x i1> %[[PG]], <vscale x 8 x i16> %op1, <vscale x 8 x i16> %op2)
  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svadd,_s16,_m,)(pg, op1, op2);
}

svint32_t test_svadd_s32_m(svbool_t pg, svint32_t op1, svint32_t op2)
{
  // CHECK-LABEL: test_svadd_s32_m
  // CHECK: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.add.nxv4i32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x i32> %op1, <vscale x 4 x i32> %op2)
  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svadd,_s32,_m,)(pg, op1, op2);
}

svint64_t test_svadd_s64_m(svbool_t pg, svint64_t op1, svint64_t op2)
{
  // CHECK-LABEL: test_svadd_s64_m
  // CHECK: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.add.nxv2i64(<vscale x 2 x i1> %[[PG]], <vscale x 2 x i64> %op1, <vscale x 2 x i64> %op2)
  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svadd,_s64,_m,)(pg, op1, op2);
}

svuint8_t test_svadd_u8_m(svbool_t pg, svuint8_t op1, svuint8_t op2)
{
  // CHECK-LABEL: test_svadd_u8_m
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.add.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %op1, <vscale x 16 x i8> %op2)
  // CHECK: ret <vscale x 16 x i8> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svadd,_u8,_m,)(pg, op1, op2);
}

svuint16_t test_svadd_u16_m(svbool_t pg, svuint16_t op1, svuint16_t op2)
{
  // CHECK-LABEL: test_svadd_u16_m
  // CHECK: %[[PG:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.add.nxv8i16(<vscale x 8 x i1> %[[PG]], <vscale x 8 x i16> %op1, <vscale x 8 x i16> %op2)
  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svadd,_u16,_m,)(pg, op1, op2);
}

svuint32_t test_svadd_u32_m(svbool_t pg, svuint32_t op1, svuint32_t op2)
{
  // CHECK-LABEL: test_svadd_u32_m
  // CHECK: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.add.nxv4i32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x i32> %op1, <vscale x 4 x i32> %op2)
  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svadd,_u32,_m,)(pg, op1, op2);
}

svuint64_t test_svadd_u64_m(svbool_t pg, svuint64_t op1, svuint64_t op2)
{
  // CHECK-LABEL: test_svadd_u64_m
  // CHECK: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.add.nxv2i64(<vscale x 2 x i1> %[[PG]], <vscale x 2 x i64> %op1, <vscale x 2 x i64> %op2)
  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svadd,_u64,_m,)(pg, op1, op2);
}

svint8_t test_svadd_s8_x(svbool_t pg, svint8_t op1, svint8_t op2)
{
  // CHECK-LABEL: test_svadd_s8_x
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.add.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %op1, <vscale x 16 x i8> %op2)
  // CHECK: ret <vscale x 16 x i8> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svadd,_s8,_x,)(pg, op1, op2);
}

svint16_t test_svadd_s16_x(svbool_t pg, svint16_t op1, svint16_t op2)
{
  // CHECK-LABEL: test_svadd_s16_x
  // CHECK: %[[PG:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.add.nxv8i16(<vscale x 8 x i1> %[[PG]], <vscale x 8 x i16> %op1, <vscale x 8 x i16> %op2)
  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svadd,_s16,_x,)(pg, op1, op2);
}

svint32_t test_svadd_s32_x(svbool_t pg, svint32_t op1, svint32_t op2)
{
  // CHECK-LABEL: test_svadd_s32_x
  // CHECK: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.add.nxv4i32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x i32> %op1, <vscale x 4 x i32> %op2)
  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svadd,_s32,_x,)(pg, op1, op2);
}

svint64_t test_svadd_s64_x(svbool_t pg, svint64_t op1, svint64_t op2)
{
  // CHECK-LABEL: test_svadd_s64_x
  // CHECK: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.add.nxv2i64(<vscale x 2 x i1> %[[PG]], <vscale x 2 x i64> %op1, <vscale x 2 x i64> %op2)
  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svadd,_s64,_x,)(pg, op1, op2);
}

svuint8_t test_svadd_u8_x(svbool_t pg, svuint8_t op1, svuint8_t op2)
{
  // CHECK-LABEL: test_svadd_u8_x
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.add.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %op1, <vscale x 16 x i8> %op2)
  // CHECK: ret <vscale x 16 x i8> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svadd,_u8,_x,)(pg, op1, op2);
}

svuint16_t test_svadd_u16_x(svbool_t pg, svuint16_t op1, svuint16_t op2)
{
  // CHECK-LABEL: test_svadd_u16_x
  // CHECK: %[[PG:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.add.nxv8i16(<vscale x 8 x i1> %[[PG]], <vscale x 8 x i16> %op1, <vscale x 8 x i16> %op2)
  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svadd,_u16,_x,)(pg, op1, op2);
}

svuint32_t test_svadd_u32_x(svbool_t pg, svuint32_t op1, svuint32_t op2)
{
  // CHECK-LABEL: test_svadd_u32_x
  // CHECK: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.add.nxv4i32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x i32> %op1, <vscale x 4 x i32> %op2)
  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svadd,_u32,_x,)(pg, op1, op2);
}

svuint64_t test_svadd_u64_x(svbool_t pg, svuint64_t op1, svuint64_t op2)
{
  // CHECK-LABEL: test_svadd_u64_x
  // CHECK: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.add.nxv2i64(<vscale x 2 x i1> %[[PG]], <vscale x 2 x i64> %op1, <vscale x 2 x i64> %op2)
  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svadd,_u64,_x,)(pg, op1, op2);
}
@@ -0,0 +1,123 @@
// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s
// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s

#include <arm_sve.h>

#ifdef SVE_OVERLOADED_FORMS
// A simple used,unused... macro, long enough to represent any SVE builtin.
#define SVE_ACLE_FUNC(A1,A2_UNUSED,A3,A4_UNUSED) A1##A3
#else
#define SVE_ACLE_FUNC(A1,A2,A3,A4) A1##A2##A3##A4
#endif

svint32_t test_svdiv_s32_z(svbool_t pg, svint32_t op1, svint32_t op2)
{
  // CHECK-LABEL: test_svdiv_s32_z
  // CHECK-DAG: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
  // CHECK-DAG: %[[SEL:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.sel.nxv4i32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x i32> %op1, <vscale x 4 x i32> zeroinitializer)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.sdiv.nxv4i32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x i32> %[[SEL]], <vscale x 4 x i32> %op2)
  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svdiv,_s32,_z,)(pg, op1, op2);
}

svint64_t test_svdiv_s64_z(svbool_t pg, svint64_t op1, svint64_t op2)
{
  // CHECK-LABEL: test_svdiv_s64_z
  // CHECK-DAG: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
  // CHECK-DAG: %[[SEL:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.sel.nxv2i64(<vscale x 2 x i1> %[[PG]], <vscale x 2 x i64> %op1, <vscale x 2 x i64> zeroinitializer)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.sdiv.nxv2i64(<vscale x 2 x i1> %[[PG]], <vscale x 2 x i64> %[[SEL]], <vscale x 2 x i64> %op2)
  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svdiv,_s64,_z,)(pg, op1, op2);
}

svuint32_t test_svdiv_u32_z(svbool_t pg, svuint32_t op1, svuint32_t op2)
{
  // CHECK-LABEL: test_svdiv_u32_z
  // CHECK-DAG: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
  // CHECK-DAG: %[[SEL:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.sel.nxv4i32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x i32> %op1, <vscale x 4 x i32> zeroinitializer)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.udiv.nxv4i32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x i32> %[[SEL]], <vscale x 4 x i32> %op2)
  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svdiv,_u32,_z,)(pg, op1, op2);
}

svuint64_t test_svdiv_u64_z(svbool_t pg, svuint64_t op1, svuint64_t op2)
{
  // CHECK-LABEL: test_svdiv_u64_z
  // CHECK-DAG: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
  // CHECK-DAG: %[[SEL:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.sel.nxv2i64(<vscale x 2 x i1> %[[PG]], <vscale x 2 x i64> %op1, <vscale x 2 x i64> zeroinitializer)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.udiv.nxv2i64(<vscale x 2 x i1> %[[PG]], <vscale x 2 x i64> %[[SEL]], <vscale x 2 x i64> %op2)
  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svdiv,_u64,_z,)(pg, op1, op2);
}

svint32_t test_svdiv_s32_m(svbool_t pg, svint32_t op1, svint32_t op2)
{
  // CHECK-LABEL: test_svdiv_s32_m
  // CHECK: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.sdiv.nxv4i32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x i32> %op1, <vscale x 4 x i32> %op2)
  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svdiv,_s32,_m,)(pg, op1, op2);
}

svint64_t test_svdiv_s64_m(svbool_t pg, svint64_t op1, svint64_t op2)
{
  // CHECK-LABEL: test_svdiv_s64_m
  // CHECK: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.sdiv.nxv2i64(<vscale x 2 x i1> %[[PG]], <vscale x 2 x i64> %op1, <vscale x 2 x i64> %op2)
  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svdiv,_s64,_m,)(pg, op1, op2);
}

svuint32_t test_svdiv_u32_m(svbool_t pg, svuint32_t op1, svuint32_t op2)
{
  // CHECK-LABEL: test_svdiv_u32_m
  // CHECK: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.udiv.nxv4i32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x i32> %op1, <vscale x 4 x i32> %op2)
  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svdiv,_u32,_m,)(pg, op1, op2);
}

svuint64_t test_svdiv_u64_m(svbool_t pg, svuint64_t op1, svuint64_t op2)
{
  // CHECK-LABEL: test_svdiv_u64_m
  // CHECK: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.udiv.nxv2i64(<vscale x 2 x i1> %[[PG]], <vscale x 2 x i64> %op1, <vscale x 2 x i64> %op2)
  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svdiv,_u64,_m,)(pg, op1, op2);
}

svint32_t test_svdiv_s32_x(svbool_t pg, svint32_t op1, svint32_t op2)
{
  // CHECK-LABEL: test_svdiv_s32_x
  // CHECK: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.sdiv.nxv4i32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x i32> %op1, <vscale x 4 x i32> %op2)
  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svdiv,_s32,_x,)(pg, op1, op2);
}

svint64_t test_svdiv_s64_x(svbool_t pg, svint64_t op1, svint64_t op2)
{
  // CHECK-LABEL: test_svdiv_s64_x
  // CHECK: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.sdiv.nxv2i64(<vscale x 2 x i1> %[[PG]], <vscale x 2 x i64> %op1, <vscale x 2 x i64> %op2)
  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svdiv,_s64,_x,)(pg, op1, op2);
}

svuint32_t test_svdiv_u32_x(svbool_t pg, svuint32_t op1, svuint32_t op2)
{
  // CHECK-LABEL: test_svdiv_u32_x
  // CHECK: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.udiv.nxv4i32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x i32> %op1, <vscale x 4 x i32> %op2)
  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svdiv,_u32,_x,)(pg, op1, op2);
}

svuint64_t test_svdiv_u64_x(svbool_t pg, svuint64_t op1, svuint64_t op2)
{
  // CHECK-LABEL: test_svdiv_u64_x
  // CHECK: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.udiv.nxv2i64(<vscale x 2 x i1> %[[PG]], <vscale x 2 x i64> %op1, <vscale x 2 x i64> %op2)
  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svdiv,_u64,_x,)(pg, op1, op2);
}
@@ -0,0 +1,123 @@
// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s
// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s

#include <arm_sve.h>

#ifdef SVE_OVERLOADED_FORMS
// A simple used,unused... macro, long enough to represent any SVE builtin.
#define SVE_ACLE_FUNC(A1,A2_UNUSED,A3,A4_UNUSED) A1##A3
#else
#define SVE_ACLE_FUNC(A1,A2,A3,A4) A1##A2##A3##A4
#endif

svint32_t test_svdivr_s32_z(svbool_t pg, svint32_t op1, svint32_t op2)
{
  // CHECK-LABEL: test_svdivr_s32_z
  // CHECK-DAG: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
  // CHECK-DAG: %[[SEL:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.sel.nxv4i32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x i32> %op1, <vscale x 4 x i32> zeroinitializer)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.sdivr.nxv4i32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x i32> %[[SEL]], <vscale x 4 x i32> %op2)
  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svdivr,_s32,_z,)(pg, op1, op2);
}

svint64_t test_svdivr_s64_z(svbool_t pg, svint64_t op1, svint64_t op2)
{
  // CHECK-LABEL: test_svdivr_s64_z
  // CHECK-DAG: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
  // CHECK-DAG: %[[SEL:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.sel.nxv2i64(<vscale x 2 x i1> %[[PG]], <vscale x 2 x i64> %op1, <vscale x 2 x i64> zeroinitializer)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.sdivr.nxv2i64(<vscale x 2 x i1> %[[PG]], <vscale x 2 x i64> %[[SEL]], <vscale x 2 x i64> %op2)
  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svdivr,_s64,_z,)(pg, op1, op2);
}

svuint32_t test_svdivr_u32_z(svbool_t pg, svuint32_t op1, svuint32_t op2)
{
  // CHECK-LABEL: test_svdivr_u32_z
  // CHECK-DAG: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
  // CHECK-DAG: %[[SEL:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.sel.nxv4i32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x i32> %op1, <vscale x 4 x i32> zeroinitializer)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.udivr.nxv4i32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x i32> %[[SEL]], <vscale x 4 x i32> %op2)
  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svdivr,_u32,_z,)(pg, op1, op2);
}

svuint64_t test_svdivr_u64_z(svbool_t pg, svuint64_t op1, svuint64_t op2)
{
  // CHECK-LABEL: test_svdivr_u64_z
  // CHECK-DAG: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
  // CHECK-DAG: %[[SEL:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.sel.nxv2i64(<vscale x 2 x i1> %[[PG]], <vscale x 2 x i64> %op1, <vscale x 2 x i64> zeroinitializer)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.udivr.nxv2i64(<vscale x 2 x i1> %[[PG]], <vscale x 2 x i64> %[[SEL]], <vscale x 2 x i64> %op2)
  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svdivr,_u64,_z,)(pg, op1, op2);
}

svint32_t test_svdivr_s32_m(svbool_t pg, svint32_t op1, svint32_t op2)
{
  // CHECK-LABEL: test_svdivr_s32_m
  // CHECK: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.sdivr.nxv4i32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x i32> %op1, <vscale x 4 x i32> %op2)
  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svdivr,_s32,_m,)(pg, op1, op2);
}

svint64_t test_svdivr_s64_m(svbool_t pg, svint64_t op1, svint64_t op2)
{
  // CHECK-LABEL: test_svdivr_s64_m
  // CHECK: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.sdivr.nxv2i64(<vscale x 2 x i1> %[[PG]], <vscale x 2 x i64> %op1, <vscale x 2 x i64> %op2)
  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svdivr,_s64,_m,)(pg, op1, op2);
}

svuint32_t test_svdivr_u32_m(svbool_t pg, svuint32_t op1, svuint32_t op2)
{
  // CHECK-LABEL: test_svdivr_u32_m
  // CHECK: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.udivr.nxv4i32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x i32> %op1, <vscale x 4 x i32> %op2)
  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svdivr,_u32,_m,)(pg, op1, op2);
}

svuint64_t test_svdivr_u64_m(svbool_t pg, svuint64_t op1, svuint64_t op2)
{
  // CHECK-LABEL: test_svdivr_u64_m
  // CHECK: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.udivr.nxv2i64(<vscale x 2 x i1> %[[PG]], <vscale x 2 x i64> %op1, <vscale x 2 x i64> %op2)
  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svdivr,_u64,_m,)(pg, op1, op2);
}

svint32_t test_svdivr_s32_x(svbool_t pg, svint32_t op1, svint32_t op2)
{
  // CHECK-LABEL: test_svdivr_s32_x
  // CHECK: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.sdivr.nxv4i32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x i32> %op1, <vscale x 4 x i32> %op2)
  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svdivr,_s32,_x,)(pg, op1, op2);
}

svint64_t test_svdivr_s64_x(svbool_t pg, svint64_t op1, svint64_t op2)
{
  // CHECK-LABEL: test_svdivr_s64_x
  // CHECK: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.sdivr.nxv2i64(<vscale x 2 x i1> %[[PG]], <vscale x 2 x i64> %op1, <vscale x 2 x i64> %op2)
  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svdivr,_s64,_x,)(pg, op1, op2);
}

svuint32_t test_svdivr_u32_x(svbool_t pg, svuint32_t op1, svuint32_t op2)
{
  // CHECK-LABEL: test_svdivr_u32_x
  // CHECK: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.udivr.nxv4i32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x i32> %op1, <vscale x 4 x i32> %op2)
  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svdivr,_u32,_x,)(pg, op1, op2);
}

svuint64_t test_svdivr_u64_x(svbool_t pg, svuint64_t op1, svuint64_t op2)
{
  // CHECK-LABEL: test_svdivr_u64_x
  // CHECK: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.udivr.nxv2i64(<vscale x 2 x i1> %[[PG]], <vscale x 2 x i64> %op1, <vscale x 2 x i64> %op2)
  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svdivr,_u64,_x,)(pg, op1, op2);
}
@@ -0,0 +1,229 @@
// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s
// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s

#include <arm_sve.h>

#ifdef SVE_OVERLOADED_FORMS
// A simple used,unused... macro, long enough to represent any SVE builtin.
#define SVE_ACLE_FUNC(A1,A2_UNUSED,A3,A4_UNUSED) A1##A3
#else
#define SVE_ACLE_FUNC(A1,A2,A3,A4) A1##A2##A3##A4
#endif

svint8_t test_svmax_s8_z(svbool_t pg, svint8_t op1, svint8_t op2)
{
  // CHECK-LABEL: test_svmax_s8_z
  // CHECK: %[[SEL:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.sel.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %op1, <vscale x 16 x i8> zeroinitializer)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.smax.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %[[SEL]], <vscale x 16 x i8> %op2)
  // CHECK: ret <vscale x 16 x i8> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svmax,_s8,_z,)(pg, op1, op2);
}

svint16_t test_svmax_s16_z(svbool_t pg, svint16_t op1, svint16_t op2)
{
  // CHECK-LABEL: test_svmax_s16_z
  // CHECK-DAG: %[[PG:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg)
  // CHECK-DAG: %[[SEL:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.sel.nxv8i16(<vscale x 8 x i1> %[[PG]], <vscale x 8 x i16> %op1, <vscale x 8 x i16> zeroinitializer)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.smax.nxv8i16(<vscale x 8 x i1> %[[PG]], <vscale x 8 x i16> %[[SEL]], <vscale x 8 x i16> %op2)
  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svmax,_s16,_z,)(pg, op1, op2);
}

svint32_t test_svmax_s32_z(svbool_t pg, svint32_t op1, svint32_t op2)
{
  // CHECK-LABEL: test_svmax_s32_z
  // CHECK-DAG: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
  // CHECK-DAG: %[[SEL:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.sel.nxv4i32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x i32> %op1, <vscale x 4 x i32> zeroinitializer)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.smax.nxv4i32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x i32> %[[SEL]], <vscale x 4 x i32> %op2)
  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svmax,_s32,_z,)(pg, op1, op2);
}

svint64_t test_svmax_s64_z(svbool_t pg, svint64_t op1, svint64_t op2)
{
  // CHECK-LABEL: test_svmax_s64_z
  // CHECK-DAG: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
  // CHECK-DAG: %[[SEL:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.sel.nxv2i64(<vscale x 2 x i1> %[[PG]], <vscale x 2 x i64> %op1, <vscale x 2 x i64> zeroinitializer)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.smax.nxv2i64(<vscale x 2 x i1> %[[PG]], <vscale x 2 x i64> %[[SEL]], <vscale x 2 x i64> %op2)
  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svmax,_s64,_z,)(pg, op1, op2);
}

svuint8_t test_svmax_u8_z(svbool_t pg, svuint8_t op1, svuint8_t op2)
{
  // CHECK-LABEL: test_svmax_u8_z
  // CHECK: %[[SEL:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.sel.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %op1, <vscale x 16 x i8> zeroinitializer)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.umax.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %[[SEL]], <vscale x 16 x i8> %op2)
  // CHECK: ret <vscale x 16 x i8> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svmax,_u8,_z,)(pg, op1, op2);
}

svuint16_t test_svmax_u16_z(svbool_t pg, svuint16_t op1, svuint16_t op2)
{
  // CHECK-LABEL: test_svmax_u16_z
  // CHECK-DAG: %[[PG:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg)
  // CHECK-DAG: %[[SEL:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.sel.nxv8i16(<vscale x 8 x i1> %[[PG]], <vscale x 8 x i16> %op1, <vscale x 8 x i16> zeroinitializer)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.umax.nxv8i16(<vscale x 8 x i1> %[[PG]], <vscale x 8 x i16> %[[SEL]], <vscale x 8 x i16> %op2)
  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svmax,_u16,_z,)(pg, op1, op2);
}

svuint32_t test_svmax_u32_z(svbool_t pg, svuint32_t op1, svuint32_t op2)
{
  // CHECK-LABEL: test_svmax_u32_z
  // CHECK-DAG: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
  // CHECK-DAG: %[[SEL:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.sel.nxv4i32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x i32> %op1, <vscale x 4 x i32> zeroinitializer)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.umax.nxv4i32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x i32> %[[SEL]], <vscale x 4 x i32> %op2)
  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svmax,_u32,_z,)(pg, op1, op2);
}

svuint64_t test_svmax_u64_z(svbool_t pg, svuint64_t op1, svuint64_t op2)
{
  // CHECK-LABEL: test_svmax_u64_z
  // CHECK-DAG: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
  // CHECK-DAG: %[[SEL:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.sel.nxv2i64(<vscale x 2 x i1> %[[PG]], <vscale x 2 x i64> %op1, <vscale x 2 x i64> zeroinitializer)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.umax.nxv2i64(<vscale x 2 x i1> %[[PG]], <vscale x 2 x i64> %[[SEL]], <vscale x 2 x i64> %op2)
  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svmax,_u64,_z,)(pg, op1, op2);
}

svint8_t test_svmax_s8_m(svbool_t pg, svint8_t op1, svint8_t op2)
{
  // CHECK-LABEL: test_svmax_s8_m
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.smax.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %op1, <vscale x 16 x i8> %op2)
  // CHECK: ret <vscale x 16 x i8> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svmax,_s8,_m,)(pg, op1, op2);
}

svint16_t test_svmax_s16_m(svbool_t pg, svint16_t op1, svint16_t op2)
{
  // CHECK-LABEL: test_svmax_s16_m
  // CHECK: %[[PG:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.smax.nxv8i16(<vscale x 8 x i1> %[[PG]], <vscale x 8 x i16> %op1, <vscale x 8 x i16> %op2)
  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svmax,_s16,_m,)(pg, op1, op2);
}

svint32_t test_svmax_s32_m(svbool_t pg, svint32_t op1, svint32_t op2)
{
  // CHECK-LABEL: test_svmax_s32_m
  // CHECK: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.smax.nxv4i32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x i32> %op1, <vscale x 4 x i32> %op2)
  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svmax,_s32,_m,)(pg, op1, op2);
}

svint64_t test_svmax_s64_m(svbool_t pg, svint64_t op1, svint64_t op2)
{
  // CHECK-LABEL: test_svmax_s64_m
  // CHECK: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.smax.nxv2i64(<vscale x 2 x i1> %[[PG]], <vscale x 2 x i64> %op1, <vscale x 2 x i64> %op2)
  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svmax,_s64,_m,)(pg, op1, op2);
}

svuint8_t test_svmax_u8_m(svbool_t pg, svuint8_t op1, svuint8_t op2)
{
  // CHECK-LABEL: test_svmax_u8_m
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.umax.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %op1, <vscale x 16 x i8> %op2)
  // CHECK: ret <vscale x 16 x i8> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svmax,_u8,_m,)(pg, op1, op2);
}

svuint16_t test_svmax_u16_m(svbool_t pg, svuint16_t op1, svuint16_t op2)
{
  // CHECK-LABEL: test_svmax_u16_m
  // CHECK: %[[PG:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.umax.nxv8i16(<vscale x 8 x i1> %[[PG]], <vscale x 8 x i16> %op1, <vscale x 8 x i16> %op2)
  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svmax,_u16,_m,)(pg, op1, op2);
}

svuint32_t test_svmax_u32_m(svbool_t pg, svuint32_t op1, svuint32_t op2)
{
  // CHECK-LABEL: test_svmax_u32_m
  // CHECK: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.umax.nxv4i32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x i32> %op1, <vscale x 4 x i32> %op2)
  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svmax,_u32,_m,)(pg, op1, op2);
}

svuint64_t test_svmax_u64_m(svbool_t pg, svuint64_t op1, svuint64_t op2)
{
  // CHECK-LABEL: test_svmax_u64_m
  // CHECK: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.umax.nxv2i64(<vscale x 2 x i1> %[[PG]], <vscale x 2 x i64> %op1, <vscale x 2 x i64> %op2)
  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svmax,_u64,_m,)(pg, op1, op2);
}

svint8_t test_svmax_s8_x(svbool_t pg, svint8_t op1, svint8_t op2)
{
  // CHECK-LABEL: test_svmax_s8_x
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.smax.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %op1, <vscale x 16 x i8> %op2)
  // CHECK: ret <vscale x 16 x i8> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svmax,_s8,_x,)(pg, op1, op2);
}

svint16_t test_svmax_s16_x(svbool_t pg, svint16_t op1, svint16_t op2)
{
  // CHECK-LABEL: test_svmax_s16_x
  // CHECK: %[[PG:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.smax.nxv8i16(<vscale x 8 x i1> %[[PG]], <vscale x 8 x i16> %op1, <vscale x 8 x i16> %op2)
  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svmax,_s16,_x,)(pg, op1, op2);
}

svint32_t test_svmax_s32_x(svbool_t pg, svint32_t op1, svint32_t op2)
{
  // CHECK-LABEL: test_svmax_s32_x
  // CHECK: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.smax.nxv4i32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x i32> %op1, <vscale x 4 x i32> %op2)
|
||||
// CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
|
||||
return SVE_ACLE_FUNC(svmax,_s32,_x,)(pg, op1, op2);
|
||||
}
|
||||
|
||||
svint64_t test_svmax_s64_x(svbool_t pg, svint64_t op1, svint64_t op2)
|
||||
{
|
||||
// CHECK-LABEL: test_svmax_s64_x
|
||||
// CHECK: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
|
||||
// CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.smax.nxv2i64(<vscale x 2 x i1> %[[PG]], <vscale x 2 x i64> %op1, <vscale x 2 x i64> %op2)
|
||||
// CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
|
||||
return SVE_ACLE_FUNC(svmax,_s64,_x,)(pg, op1, op2);
|
||||
}
|
||||
|
||||
svuint8_t test_svmax_u8_x(svbool_t pg, svuint8_t op1, svuint8_t op2)
|
||||
{
|
||||
// CHECK-LABEL: test_svmax_u8_x
|
||||
// CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.umax.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %op1, <vscale x 16 x i8> %op2)
|
||||
// CHECK: ret <vscale x 16 x i8> %[[INTRINSIC]]
|
||||
return SVE_ACLE_FUNC(svmax,_u8,_x,)(pg, op1, op2);
|
||||
}
|
||||
|
||||
svuint16_t test_svmax_u16_x(svbool_t pg, svuint16_t op1, svuint16_t op2)
|
||||
{
|
||||
// CHECK-LABEL: test_svmax_u16_x
|
||||
// CHECK: %[[PG:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg)
|
||||
// CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.umax.nxv8i16(<vscale x 8 x i1> %[[PG]], <vscale x 8 x i16> %op1, <vscale x 8 x i16> %op2)
|
||||
// CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
|
||||
return SVE_ACLE_FUNC(svmax,_u16,_x,)(pg, op1, op2);
|
||||
}
|
||||
|
||||
svuint32_t test_svmax_u32_x(svbool_t pg, svuint32_t op1, svuint32_t op2)
|
||||
{
|
||||
// CHECK-LABEL: test_svmax_u32_x
|
||||
// CHECK: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
|
||||
// CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.umax.nxv4i32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x i32> %op1, <vscale x 4 x i32> %op2)
|
||||
// CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
|
||||
return SVE_ACLE_FUNC(svmax,_u32,_x,)(pg, op1, op2);
|
||||
}
|
||||
|
||||
svuint64_t test_svmax_u64_x(svbool_t pg, svuint64_t op1, svuint64_t op2)
|
||||
{
|
||||
// CHECK-LABEL: test_svmax_u64_x
|
||||
// CHECK: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
|
||||
// CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.umax.nxv2i64(<vscale x 2 x i1> %[[PG]], <vscale x 2 x i64> %op1, <vscale x 2 x i64> %op2)
|
||||
// CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
|
||||
return SVE_ACLE_FUNC(svmax,_u64,_x,)(pg, op1, op2);
|
||||
}
|
|
@@ -0,0 +1,229 @@
// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s
// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s

#include <arm_sve.h>

#ifdef SVE_OVERLOADED_FORMS
// A simple used,unused... macro, long enough to represent any SVE builtin.
#define SVE_ACLE_FUNC(A1,A2_UNUSED,A3,A4_UNUSED) A1##A3
#else
#define SVE_ACLE_FUNC(A1,A2,A3,A4) A1##A2##A3##A4
#endif
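// For example, SVE_ACLE_FUNC(svmin,_s8,_z,) pastes A1##A3 into the overloaded
// call svmin_z under SVE_OVERLOADED_FORMS, and A1##A2##A3##A4 into the
// type-suffixed call svmin_s8_z otherwise, so one test source exercises both
// naming schemes.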

svint8_t test_svmin_s8_z(svbool_t pg, svint8_t op1, svint8_t op2)
{
  // CHECK-LABEL: test_svmin_s8_z
  // CHECK: %[[SEL:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.sel.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %op1, <vscale x 16 x i8> zeroinitializer)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.smin.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %[[SEL]], <vscale x 16 x i8> %op2)
  // CHECK: ret <vscale x 16 x i8> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svmin,_s8,_z,)(pg, op1, op2);
}
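// Note for the _z tests: the false lanes of op1 are cleared by an
// aarch64.sve.sel against zeroinitializer before the predicated intrinsic
// runs, which is what the CHECK lines above verify for every element width.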

svint16_t test_svmin_s16_z(svbool_t pg, svint16_t op1, svint16_t op2)
{
  // CHECK-LABEL: test_svmin_s16_z
  // CHECK-DAG: %[[PG:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg)
  // CHECK-DAG: %[[SEL:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.sel.nxv8i16(<vscale x 8 x i1> %[[PG]], <vscale x 8 x i16> %op1, <vscale x 8 x i16> zeroinitializer)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.smin.nxv8i16(<vscale x 8 x i1> %[[PG]], <vscale x 8 x i16> %[[SEL]], <vscale x 8 x i16> %op2)
  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svmin,_s16,_z,)(pg, op1, op2);
}

svint32_t test_svmin_s32_z(svbool_t pg, svint32_t op1, svint32_t op2)
{
  // CHECK-LABEL: test_svmin_s32_z
  // CHECK-DAG: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
  // CHECK-DAG: %[[SEL:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.sel.nxv4i32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x i32> %op1, <vscale x 4 x i32> zeroinitializer)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.smin.nxv4i32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x i32> %[[SEL]], <vscale x 4 x i32> %op2)
  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svmin,_s32,_z,)(pg, op1, op2);
}

svint64_t test_svmin_s64_z(svbool_t pg, svint64_t op1, svint64_t op2)
{
  // CHECK-LABEL: test_svmin_s64_z
  // CHECK-DAG: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
  // CHECK-DAG: %[[SEL:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.sel.nxv2i64(<vscale x 2 x i1> %[[PG]], <vscale x 2 x i64> %op1, <vscale x 2 x i64> zeroinitializer)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.smin.nxv2i64(<vscale x 2 x i1> %[[PG]], <vscale x 2 x i64> %[[SEL]], <vscale x 2 x i64> %op2)
  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svmin,_s64,_z,)(pg, op1, op2);
}

svuint8_t test_svmin_u8_z(svbool_t pg, svuint8_t op1, svuint8_t op2)
{
  // CHECK-LABEL: test_svmin_u8_z
  // CHECK: %[[SEL:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.sel.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %op1, <vscale x 16 x i8> zeroinitializer)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.umin.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %[[SEL]], <vscale x 16 x i8> %op2)
  // CHECK: ret <vscale x 16 x i8> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svmin,_u8,_z,)(pg, op1, op2);
}

svuint16_t test_svmin_u16_z(svbool_t pg, svuint16_t op1, svuint16_t op2)
{
  // CHECK-LABEL: test_svmin_u16_z
  // CHECK-DAG: %[[PG:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg)
  // CHECK-DAG: %[[SEL:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.sel.nxv8i16(<vscale x 8 x i1> %[[PG]], <vscale x 8 x i16> %op1, <vscale x 8 x i16> zeroinitializer)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.umin.nxv8i16(<vscale x 8 x i1> %[[PG]], <vscale x 8 x i16> %[[SEL]], <vscale x 8 x i16> %op2)
  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svmin,_u16,_z,)(pg, op1, op2);
}

svuint32_t test_svmin_u32_z(svbool_t pg, svuint32_t op1, svuint32_t op2)
{
  // CHECK-LABEL: test_svmin_u32_z
  // CHECK-DAG: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
  // CHECK-DAG: %[[SEL:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.sel.nxv4i32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x i32> %op1, <vscale x 4 x i32> zeroinitializer)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.umin.nxv4i32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x i32> %[[SEL]], <vscale x 4 x i32> %op2)
  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svmin,_u32,_z,)(pg, op1, op2);
}

svuint64_t test_svmin_u64_z(svbool_t pg, svuint64_t op1, svuint64_t op2)
{
  // CHECK-LABEL: test_svmin_u64_z
  // CHECK-DAG: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
  // CHECK-DAG: %[[SEL:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.sel.nxv2i64(<vscale x 2 x i1> %[[PG]], <vscale x 2 x i64> %op1, <vscale x 2 x i64> zeroinitializer)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.umin.nxv2i64(<vscale x 2 x i1> %[[PG]], <vscale x 2 x i64> %[[SEL]], <vscale x 2 x i64> %op2)
  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svmin,_u64,_z,)(pg, op1, op2);
}

svint8_t test_svmin_s8_m(svbool_t pg, svint8_t op1, svint8_t op2)
{
  // CHECK-LABEL: test_svmin_s8_m
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.smin.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %op1, <vscale x 16 x i8> %op2)
  // CHECK: ret <vscale x 16 x i8> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svmin,_s8,_m,)(pg, op1, op2);
}

svint16_t test_svmin_s16_m(svbool_t pg, svint16_t op1, svint16_t op2)
{
  // CHECK-LABEL: test_svmin_s16_m
  // CHECK: %[[PG:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.smin.nxv8i16(<vscale x 8 x i1> %[[PG]], <vscale x 8 x i16> %op1, <vscale x 8 x i16> %op2)
  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svmin,_s16,_m,)(pg, op1, op2);
}

svint32_t test_svmin_s32_m(svbool_t pg, svint32_t op1, svint32_t op2)
{
  // CHECK-LABEL: test_svmin_s32_m
  // CHECK: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.smin.nxv4i32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x i32> %op1, <vscale x 4 x i32> %op2)
  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svmin,_s32,_m,)(pg, op1, op2);
}

svint64_t test_svmin_s64_m(svbool_t pg, svint64_t op1, svint64_t op2)
{
  // CHECK-LABEL: test_svmin_s64_m
  // CHECK: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.smin.nxv2i64(<vscale x 2 x i1> %[[PG]], <vscale x 2 x i64> %op1, <vscale x 2 x i64> %op2)
  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svmin,_s64,_m,)(pg, op1, op2);
}

svuint8_t test_svmin_u8_m(svbool_t pg, svuint8_t op1, svuint8_t op2)
{
  // CHECK-LABEL: test_svmin_u8_m
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.umin.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %op1, <vscale x 16 x i8> %op2)
  // CHECK: ret <vscale x 16 x i8> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svmin,_u8,_m,)(pg, op1, op2);
}

svuint16_t test_svmin_u16_m(svbool_t pg, svuint16_t op1, svuint16_t op2)
{
  // CHECK-LABEL: test_svmin_u16_m
  // CHECK: %[[PG:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.umin.nxv8i16(<vscale x 8 x i1> %[[PG]], <vscale x 8 x i16> %op1, <vscale x 8 x i16> %op2)
  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svmin,_u16,_m,)(pg, op1, op2);
}

svuint32_t test_svmin_u32_m(svbool_t pg, svuint32_t op1, svuint32_t op2)
{
  // CHECK-LABEL: test_svmin_u32_m
  // CHECK: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.umin.nxv4i32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x i32> %op1, <vscale x 4 x i32> %op2)
  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svmin,_u32,_m,)(pg, op1, op2);
}

svuint64_t test_svmin_u64_m(svbool_t pg, svuint64_t op1, svuint64_t op2)
{
  // CHECK-LABEL: test_svmin_u64_m
  // CHECK: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.umin.nxv2i64(<vscale x 2 x i1> %[[PG]], <vscale x 2 x i64> %op1, <vscale x 2 x i64> %op2)
  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svmin,_u64,_m,)(pg, op1, op2);
}

svint8_t test_svmin_s8_x(svbool_t pg, svint8_t op1, svint8_t op2)
{
  // CHECK-LABEL: test_svmin_s8_x
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.smin.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %op1, <vscale x 16 x i8> %op2)
  // CHECK: ret <vscale x 16 x i8> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svmin,_s8,_x,)(pg, op1, op2);
}

svint16_t test_svmin_s16_x(svbool_t pg, svint16_t op1, svint16_t op2)
{
  // CHECK-LABEL: test_svmin_s16_x
  // CHECK: %[[PG:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.smin.nxv8i16(<vscale x 8 x i1> %[[PG]], <vscale x 8 x i16> %op1, <vscale x 8 x i16> %op2)
  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svmin,_s16,_x,)(pg, op1, op2);
}

svint32_t test_svmin_s32_x(svbool_t pg, svint32_t op1, svint32_t op2)
{
  // CHECK-LABEL: test_svmin_s32_x
  // CHECK: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.smin.nxv4i32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x i32> %op1, <vscale x 4 x i32> %op2)
  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svmin,_s32,_x,)(pg, op1, op2);
}

svint64_t test_svmin_s64_x(svbool_t pg, svint64_t op1, svint64_t op2)
{
  // CHECK-LABEL: test_svmin_s64_x
  // CHECK: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.smin.nxv2i64(<vscale x 2 x i1> %[[PG]], <vscale x 2 x i64> %op1, <vscale x 2 x i64> %op2)
  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svmin,_s64,_x,)(pg, op1, op2);
}

svuint8_t test_svmin_u8_x(svbool_t pg, svuint8_t op1, svuint8_t op2)
{
  // CHECK-LABEL: test_svmin_u8_x
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.umin.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %op1, <vscale x 16 x i8> %op2)
  // CHECK: ret <vscale x 16 x i8> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svmin,_u8,_x,)(pg, op1, op2);
}

svuint16_t test_svmin_u16_x(svbool_t pg, svuint16_t op1, svuint16_t op2)
{
  // CHECK-LABEL: test_svmin_u16_x
  // CHECK: %[[PG:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.umin.nxv8i16(<vscale x 8 x i1> %[[PG]], <vscale x 8 x i16> %op1, <vscale x 8 x i16> %op2)
  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svmin,_u16,_x,)(pg, op1, op2);
}

svuint32_t test_svmin_u32_x(svbool_t pg, svuint32_t op1, svuint32_t op2)
{
  // CHECK-LABEL: test_svmin_u32_x
  // CHECK: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.umin.nxv4i32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x i32> %op1, <vscale x 4 x i32> %op2)
  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svmin,_u32,_x,)(pg, op1, op2);
}

svuint64_t test_svmin_u64_x(svbool_t pg, svuint64_t op1, svuint64_t op2)
{
  // CHECK-LABEL: test_svmin_u64_x
  // CHECK: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.umin.nxv2i64(<vscale x 2 x i1> %[[PG]], <vscale x 2 x i64> %op1, <vscale x 2 x i64> %op2)
  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svmin,_u64,_x,)(pg, op1, op2);
}
@@ -0,0 +1,229 @@
// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s
// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s

#include <arm_sve.h>

#ifdef SVE_OVERLOADED_FORMS
// A simple used,unused... macro, long enough to represent any SVE builtin.
#define SVE_ACLE_FUNC(A1,A2_UNUSED,A3,A4_UNUSED) A1##A3
#else
#define SVE_ACLE_FUNC(A1,A2,A3,A4) A1##A2##A3##A4
#endif

svint8_t test_svmul_s8_z(svbool_t pg, svint8_t op1, svint8_t op2)
{
  // CHECK-LABEL: test_svmul_s8_z
  // CHECK: %[[SEL:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.sel.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %op1, <vscale x 16 x i8> zeroinitializer)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.mul.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %[[SEL]], <vscale x 16 x i8> %op2)
  // CHECK: ret <vscale x 16 x i8> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svmul,_s8,_z,)(pg, op1, op2);
}

svint16_t test_svmul_s16_z(svbool_t pg, svint16_t op1, svint16_t op2)
{
  // CHECK-LABEL: test_svmul_s16_z
  // CHECK-DAG: %[[PG:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg)
  // CHECK-DAG: %[[SEL:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.sel.nxv8i16(<vscale x 8 x i1> %[[PG]], <vscale x 8 x i16> %op1, <vscale x 8 x i16> zeroinitializer)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.mul.nxv8i16(<vscale x 8 x i1> %[[PG]], <vscale x 8 x i16> %[[SEL]], <vscale x 8 x i16> %op2)
  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svmul,_s16,_z,)(pg, op1, op2);
}

svint32_t test_svmul_s32_z(svbool_t pg, svint32_t op1, svint32_t op2)
{
  // CHECK-LABEL: test_svmul_s32_z
  // CHECK-DAG: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
  // CHECK-DAG: %[[SEL:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.sel.nxv4i32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x i32> %op1, <vscale x 4 x i32> zeroinitializer)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.mul.nxv4i32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x i32> %[[SEL]], <vscale x 4 x i32> %op2)
  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svmul,_s32,_z,)(pg, op1, op2);
}

svint64_t test_svmul_s64_z(svbool_t pg, svint64_t op1, svint64_t op2)
{
  // CHECK-LABEL: test_svmul_s64_z
  // CHECK-DAG: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
  // CHECK-DAG: %[[SEL:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.sel.nxv2i64(<vscale x 2 x i1> %[[PG]], <vscale x 2 x i64> %op1, <vscale x 2 x i64> zeroinitializer)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.mul.nxv2i64(<vscale x 2 x i1> %[[PG]], <vscale x 2 x i64> %[[SEL]], <vscale x 2 x i64> %op2)
  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svmul,_s64,_z,)(pg, op1, op2);
}

svuint8_t test_svmul_u8_z(svbool_t pg, svuint8_t op1, svuint8_t op2)
{
  // CHECK-LABEL: test_svmul_u8_z
  // CHECK: %[[SEL:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.sel.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %op1, <vscale x 16 x i8> zeroinitializer)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.mul.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %[[SEL]], <vscale x 16 x i8> %op2)
  // CHECK: ret <vscale x 16 x i8> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svmul,_u8,_z,)(pg, op1, op2);
}

svuint16_t test_svmul_u16_z(svbool_t pg, svuint16_t op1, svuint16_t op2)
{
  // CHECK-LABEL: test_svmul_u16_z
  // CHECK-DAG: %[[PG:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg)
  // CHECK-DAG: %[[SEL:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.sel.nxv8i16(<vscale x 8 x i1> %[[PG]], <vscale x 8 x i16> %op1, <vscale x 8 x i16> zeroinitializer)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.mul.nxv8i16(<vscale x 8 x i1> %[[PG]], <vscale x 8 x i16> %[[SEL]], <vscale x 8 x i16> %op2)
  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svmul,_u16,_z,)(pg, op1, op2);
}

svuint32_t test_svmul_u32_z(svbool_t pg, svuint32_t op1, svuint32_t op2)
{
  // CHECK-LABEL: test_svmul_u32_z
  // CHECK-DAG: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
  // CHECK-DAG: %[[SEL:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.sel.nxv4i32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x i32> %op1, <vscale x 4 x i32> zeroinitializer)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.mul.nxv4i32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x i32> %[[SEL]], <vscale x 4 x i32> %op2)
  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svmul,_u32,_z,)(pg, op1, op2);
}

svuint64_t test_svmul_u64_z(svbool_t pg, svuint64_t op1, svuint64_t op2)
{
  // CHECK-LABEL: test_svmul_u64_z
  // CHECK-DAG: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
  // CHECK-DAG: %[[SEL:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.sel.nxv2i64(<vscale x 2 x i1> %[[PG]], <vscale x 2 x i64> %op1, <vscale x 2 x i64> zeroinitializer)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.mul.nxv2i64(<vscale x 2 x i1> %[[PG]], <vscale x 2 x i64> %[[SEL]], <vscale x 2 x i64> %op2)
  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svmul,_u64,_z,)(pg, op1, op2);
}

svint8_t test_svmul_s8_m(svbool_t pg, svint8_t op1, svint8_t op2)
{
  // CHECK-LABEL: test_svmul_s8_m
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.mul.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %op1, <vscale x 16 x i8> %op2)
  // CHECK: ret <vscale x 16 x i8> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svmul,_s8,_m,)(pg, op1, op2);
}

svint16_t test_svmul_s16_m(svbool_t pg, svint16_t op1, svint16_t op2)
{
  // CHECK-LABEL: test_svmul_s16_m
  // CHECK: %[[PG:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.mul.nxv8i16(<vscale x 8 x i1> %[[PG]], <vscale x 8 x i16> %op1, <vscale x 8 x i16> %op2)
  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svmul,_s16,_m,)(pg, op1, op2);
}

svint32_t test_svmul_s32_m(svbool_t pg, svint32_t op1, svint32_t op2)
{
  // CHECK-LABEL: test_svmul_s32_m
  // CHECK: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.mul.nxv4i32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x i32> %op1, <vscale x 4 x i32> %op2)
  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svmul,_s32,_m,)(pg, op1, op2);
}

svint64_t test_svmul_s64_m(svbool_t pg, svint64_t op1, svint64_t op2)
{
  // CHECK-LABEL: test_svmul_s64_m
  // CHECK: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.mul.nxv2i64(<vscale x 2 x i1> %[[PG]], <vscale x 2 x i64> %op1, <vscale x 2 x i64> %op2)
  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svmul,_s64,_m,)(pg, op1, op2);
}

svuint8_t test_svmul_u8_m(svbool_t pg, svuint8_t op1, svuint8_t op2)
{
  // CHECK-LABEL: test_svmul_u8_m
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.mul.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %op1, <vscale x 16 x i8> %op2)
  // CHECK: ret <vscale x 16 x i8> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svmul,_u8,_m,)(pg, op1, op2);
}

svuint16_t test_svmul_u16_m(svbool_t pg, svuint16_t op1, svuint16_t op2)
{
  // CHECK-LABEL: test_svmul_u16_m
  // CHECK: %[[PG:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.mul.nxv8i16(<vscale x 8 x i1> %[[PG]], <vscale x 8 x i16> %op1, <vscale x 8 x i16> %op2)
  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svmul,_u16,_m,)(pg, op1, op2);
}

svuint32_t test_svmul_u32_m(svbool_t pg, svuint32_t op1, svuint32_t op2)
{
  // CHECK-LABEL: test_svmul_u32_m
  // CHECK: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.mul.nxv4i32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x i32> %op1, <vscale x 4 x i32> %op2)
  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svmul,_u32,_m,)(pg, op1, op2);
}

svuint64_t test_svmul_u64_m(svbool_t pg, svuint64_t op1, svuint64_t op2)
{
  // CHECK-LABEL: test_svmul_u64_m
  // CHECK: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.mul.nxv2i64(<vscale x 2 x i1> %[[PG]], <vscale x 2 x i64> %op1, <vscale x 2 x i64> %op2)
  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svmul,_u64,_m,)(pg, op1, op2);
}

svint8_t test_svmul_s8_x(svbool_t pg, svint8_t op1, svint8_t op2)
{
  // CHECK-LABEL: test_svmul_s8_x
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.mul.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %op1, <vscale x 16 x i8> %op2)
  // CHECK: ret <vscale x 16 x i8> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svmul,_s8,_x,)(pg, op1, op2);
}

svint16_t test_svmul_s16_x(svbool_t pg, svint16_t op1, svint16_t op2)
{
  // CHECK-LABEL: test_svmul_s16_x
  // CHECK: %[[PG:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.mul.nxv8i16(<vscale x 8 x i1> %[[PG]], <vscale x 8 x i16> %op1, <vscale x 8 x i16> %op2)
  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svmul,_s16,_x,)(pg, op1, op2);
}

svint32_t test_svmul_s32_x(svbool_t pg, svint32_t op1, svint32_t op2)
{
  // CHECK-LABEL: test_svmul_s32_x
  // CHECK: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.mul.nxv4i32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x i32> %op1, <vscale x 4 x i32> %op2)
  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svmul,_s32,_x,)(pg, op1, op2);
}

svint64_t test_svmul_s64_x(svbool_t pg, svint64_t op1, svint64_t op2)
{
  // CHECK-LABEL: test_svmul_s64_x
  // CHECK: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.mul.nxv2i64(<vscale x 2 x i1> %[[PG]], <vscale x 2 x i64> %op1, <vscale x 2 x i64> %op2)
  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svmul,_s64,_x,)(pg, op1, op2);
}

svuint8_t test_svmul_u8_x(svbool_t pg, svuint8_t op1, svuint8_t op2)
{
  // CHECK-LABEL: test_svmul_u8_x
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.mul.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %op1, <vscale x 16 x i8> %op2)
  // CHECK: ret <vscale x 16 x i8> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svmul,_u8,_x,)(pg, op1, op2);
}

svuint16_t test_svmul_u16_x(svbool_t pg, svuint16_t op1, svuint16_t op2)
{
  // CHECK-LABEL: test_svmul_u16_x
  // CHECK: %[[PG:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.mul.nxv8i16(<vscale x 8 x i1> %[[PG]], <vscale x 8 x i16> %op1, <vscale x 8 x i16> %op2)
  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svmul,_u16,_x,)(pg, op1, op2);
}

svuint32_t test_svmul_u32_x(svbool_t pg, svuint32_t op1, svuint32_t op2)
{
  // CHECK-LABEL: test_svmul_u32_x
  // CHECK: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.mul.nxv4i32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x i32> %op1, <vscale x 4 x i32> %op2)
  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svmul,_u32,_x,)(pg, op1, op2);
}

svuint64_t test_svmul_u64_x(svbool_t pg, svuint64_t op1, svuint64_t op2)
{
  // CHECK-LABEL: test_svmul_u64_x
  // CHECK: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.mul.nxv2i64(<vscale x 2 x i1> %[[PG]], <vscale x 2 x i64> %op1, <vscale x 2 x i64> %op2)
  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svmul,_u64,_x,)(pg, op1, op2);
}
@@ -0,0 +1,229 @@
// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s
// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s

#include <arm_sve.h>

#ifdef SVE_OVERLOADED_FORMS
// A simple used,unused... macro, long enough to represent any SVE builtin.
#define SVE_ACLE_FUNC(A1,A2_UNUSED,A3,A4_UNUSED) A1##A3
#else
#define SVE_ACLE_FUNC(A1,A2,A3,A4) A1##A2##A3##A4
#endif

svint8_t test_svmulh_s8_z(svbool_t pg, svint8_t op1, svint8_t op2)
{
  // CHECK-LABEL: test_svmulh_s8_z
  // CHECK: %[[SEL:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.sel.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %op1, <vscale x 16 x i8> zeroinitializer)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.smulh.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %[[SEL]], <vscale x 16 x i8> %op2)
  // CHECK: ret <vscale x 16 x i8> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svmulh,_s8,_z,)(pg, op1, op2);
}

svint16_t test_svmulh_s16_z(svbool_t pg, svint16_t op1, svint16_t op2)
{
  // CHECK-LABEL: test_svmulh_s16_z
  // CHECK-DAG: %[[PG:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg)
  // CHECK-DAG: %[[SEL:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.sel.nxv8i16(<vscale x 8 x i1> %[[PG]], <vscale x 8 x i16> %op1, <vscale x 8 x i16> zeroinitializer)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.smulh.nxv8i16(<vscale x 8 x i1> %[[PG]], <vscale x 8 x i16> %[[SEL]], <vscale x 8 x i16> %op2)
  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svmulh,_s16,_z,)(pg, op1, op2);
}

svint32_t test_svmulh_s32_z(svbool_t pg, svint32_t op1, svint32_t op2)
{
  // CHECK-LABEL: test_svmulh_s32_z
  // CHECK-DAG: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
  // CHECK-DAG: %[[SEL:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.sel.nxv4i32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x i32> %op1, <vscale x 4 x i32> zeroinitializer)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.smulh.nxv4i32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x i32> %[[SEL]], <vscale x 4 x i32> %op2)
  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svmulh,_s32,_z,)(pg, op1, op2);
}

svint64_t test_svmulh_s64_z(svbool_t pg, svint64_t op1, svint64_t op2)
{
  // CHECK-LABEL: test_svmulh_s64_z
  // CHECK-DAG: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
  // CHECK-DAG: %[[SEL:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.sel.nxv2i64(<vscale x 2 x i1> %[[PG]], <vscale x 2 x i64> %op1, <vscale x 2 x i64> zeroinitializer)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.smulh.nxv2i64(<vscale x 2 x i1> %[[PG]], <vscale x 2 x i64> %[[SEL]], <vscale x 2 x i64> %op2)
  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svmulh,_s64,_z,)(pg, op1, op2);
}

svuint8_t test_svmulh_u8_z(svbool_t pg, svuint8_t op1, svuint8_t op2)
{
  // CHECK-LABEL: test_svmulh_u8_z
  // CHECK: %[[SEL:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.sel.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %op1, <vscale x 16 x i8> zeroinitializer)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.umulh.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %[[SEL]], <vscale x 16 x i8> %op2)
  // CHECK: ret <vscale x 16 x i8> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svmulh,_u8,_z,)(pg, op1, op2);
}

svuint16_t test_svmulh_u16_z(svbool_t pg, svuint16_t op1, svuint16_t op2)
{
  // CHECK-LABEL: test_svmulh_u16_z
  // CHECK-DAG: %[[PG:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg)
  // CHECK-DAG: %[[SEL:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.sel.nxv8i16(<vscale x 8 x i1> %[[PG]], <vscale x 8 x i16> %op1, <vscale x 8 x i16> zeroinitializer)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.umulh.nxv8i16(<vscale x 8 x i1> %[[PG]], <vscale x 8 x i16> %[[SEL]], <vscale x 8 x i16> %op2)
  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svmulh,_u16,_z,)(pg, op1, op2);
}

svuint32_t test_svmulh_u32_z(svbool_t pg, svuint32_t op1, svuint32_t op2)
{
  // CHECK-LABEL: test_svmulh_u32_z
  // CHECK-DAG: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
  // CHECK-DAG: %[[SEL:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.sel.nxv4i32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x i32> %op1, <vscale x 4 x i32> zeroinitializer)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.umulh.nxv4i32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x i32> %[[SEL]], <vscale x 4 x i32> %op2)
  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svmulh,_u32,_z,)(pg, op1, op2);
}

svuint64_t test_svmulh_u64_z(svbool_t pg, svuint64_t op1, svuint64_t op2)
{
  // CHECK-LABEL: test_svmulh_u64_z
  // CHECK-DAG: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
  // CHECK-DAG: %[[SEL:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.sel.nxv2i64(<vscale x 2 x i1> %[[PG]], <vscale x 2 x i64> %op1, <vscale x 2 x i64> zeroinitializer)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.umulh.nxv2i64(<vscale x 2 x i1> %[[PG]], <vscale x 2 x i64> %[[SEL]], <vscale x 2 x i64> %op2)
  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svmulh,_u64,_z,)(pg, op1, op2);
}

svint8_t test_svmulh_s8_m(svbool_t pg, svint8_t op1, svint8_t op2)
{
  // CHECK-LABEL: test_svmulh_s8_m
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.smulh.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %op1, <vscale x 16 x i8> %op2)
  // CHECK: ret <vscale x 16 x i8> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svmulh,_s8,_m,)(pg, op1, op2);
}

svint16_t test_svmulh_s16_m(svbool_t pg, svint16_t op1, svint16_t op2)
{
  // CHECK-LABEL: test_svmulh_s16_m
  // CHECK: %[[PG:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.smulh.nxv8i16(<vscale x 8 x i1> %[[PG]], <vscale x 8 x i16> %op1, <vscale x 8 x i16> %op2)
  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svmulh,_s16,_m,)(pg, op1, op2);
}

svint32_t test_svmulh_s32_m(svbool_t pg, svint32_t op1, svint32_t op2)
{
  // CHECK-LABEL: test_svmulh_s32_m
  // CHECK: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.smulh.nxv4i32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x i32> %op1, <vscale x 4 x i32> %op2)
  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svmulh,_s32,_m,)(pg, op1, op2);
}

svint64_t test_svmulh_s64_m(svbool_t pg, svint64_t op1, svint64_t op2)
{
  // CHECK-LABEL: test_svmulh_s64_m
  // CHECK: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.smulh.nxv2i64(<vscale x 2 x i1> %[[PG]], <vscale x 2 x i64> %op1, <vscale x 2 x i64> %op2)
  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svmulh,_s64,_m,)(pg, op1, op2);
}

svuint8_t test_svmulh_u8_m(svbool_t pg, svuint8_t op1, svuint8_t op2)
{
  // CHECK-LABEL: test_svmulh_u8_m
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.umulh.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %op1, <vscale x 16 x i8> %op2)
  // CHECK: ret <vscale x 16 x i8> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svmulh,_u8,_m,)(pg, op1, op2);
}

svuint16_t test_svmulh_u16_m(svbool_t pg, svuint16_t op1, svuint16_t op2)
{
  // CHECK-LABEL: test_svmulh_u16_m
  // CHECK: %[[PG:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.umulh.nxv8i16(<vscale x 8 x i1> %[[PG]], <vscale x 8 x i16> %op1, <vscale x 8 x i16> %op2)
  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svmulh,_u16,_m,)(pg, op1, op2);
}

svuint32_t test_svmulh_u32_m(svbool_t pg, svuint32_t op1, svuint32_t op2)
{
  // CHECK-LABEL: test_svmulh_u32_m
  // CHECK: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.umulh.nxv4i32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x i32> %op1, <vscale x 4 x i32> %op2)
  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svmulh,_u32,_m,)(pg, op1, op2);
}

svuint64_t test_svmulh_u64_m(svbool_t pg, svuint64_t op1, svuint64_t op2)
{
  // CHECK-LABEL: test_svmulh_u64_m
  // CHECK: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.umulh.nxv2i64(<vscale x 2 x i1> %[[PG]], <vscale x 2 x i64> %op1, <vscale x 2 x i64> %op2)
  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svmulh,_u64,_m,)(pg, op1, op2);
}

svint8_t test_svmulh_s8_x(svbool_t pg, svint8_t op1, svint8_t op2)
{
  // CHECK-LABEL: test_svmulh_s8_x
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.smulh.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %op1, <vscale x 16 x i8> %op2)
  // CHECK: ret <vscale x 16 x i8> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svmulh,_s8,_x,)(pg, op1, op2);
}

svint16_t test_svmulh_s16_x(svbool_t pg, svint16_t op1, svint16_t op2)
{
  // CHECK-LABEL: test_svmulh_s16_x
  // CHECK: %[[PG:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.smulh.nxv8i16(<vscale x 8 x i1> %[[PG]], <vscale x 8 x i16> %op1, <vscale x 8 x i16> %op2)
  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svmulh,_s16,_x,)(pg, op1, op2);
}

svint32_t test_svmulh_s32_x(svbool_t pg, svint32_t op1, svint32_t op2)
{
  // CHECK-LABEL: test_svmulh_s32_x
  // CHECK: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.smulh.nxv4i32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x i32> %op1, <vscale x 4 x i32> %op2)
  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svmulh,_s32,_x,)(pg, op1, op2);
}

svint64_t test_svmulh_s64_x(svbool_t pg, svint64_t op1, svint64_t op2)
{
  // CHECK-LABEL: test_svmulh_s64_x
  // CHECK: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.smulh.nxv2i64(<vscale x 2 x i1> %[[PG]], <vscale x 2 x i64> %op1, <vscale x 2 x i64> %op2)
  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svmulh,_s64,_x,)(pg, op1, op2);
}

svuint8_t test_svmulh_u8_x(svbool_t pg, svuint8_t op1, svuint8_t op2)
{
  // CHECK-LABEL: test_svmulh_u8_x
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.umulh.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %op1, <vscale x 16 x i8> %op2)
  // CHECK: ret <vscale x 16 x i8> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svmulh,_u8,_x,)(pg, op1, op2);
}

svuint16_t test_svmulh_u16_x(svbool_t pg, svuint16_t op1, svuint16_t op2)
{
  // CHECK-LABEL: test_svmulh_u16_x
  // CHECK: %[[PG:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.umulh.nxv8i16(<vscale x 8 x i1> %[[PG]], <vscale x 8 x i16> %op1, <vscale x 8 x i16> %op2)
  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svmulh,_u16,_x,)(pg, op1, op2);
}

svuint32_t test_svmulh_u32_x(svbool_t pg, svuint32_t op1, svuint32_t op2)
{
  // CHECK-LABEL: test_svmulh_u32_x
  // CHECK: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.umulh.nxv4i32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x i32> %op1, <vscale x 4 x i32> %op2)
  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svmulh,_u32,_x,)(pg, op1, op2);
}

svuint64_t test_svmulh_u64_x(svbool_t pg, svuint64_t op1, svuint64_t op2)
{
  // CHECK-LABEL: test_svmulh_u64_x
  // CHECK: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.umulh.nxv2i64(<vscale x 2 x i1> %[[PG]], <vscale x 2 x i64> %op1, <vscale x 2 x i64> %op2)
  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svmulh,_u64,_x,)(pg, op1, op2);
}
@@ -0,0 +1,229 @@
// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s
// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s

#include <arm_sve.h>

#ifdef SVE_OVERLOADED_FORMS
// A simple used,unused... macro, long enough to represent any SVE builtin.
#define SVE_ACLE_FUNC(A1,A2_UNUSED,A3,A4_UNUSED) A1##A3
#else
#define SVE_ACLE_FUNC(A1,A2,A3,A4) A1##A2##A3##A4
#endif

svint8_t test_svsub_s8_z(svbool_t pg, svint8_t op1, svint8_t op2)
{
  // CHECK-LABEL: test_svsub_s8_z
  // CHECK: %[[SEL:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.sel.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %op1, <vscale x 16 x i8> zeroinitializer)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.sub.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %[[SEL]], <vscale x 16 x i8> %op2)
  // CHECK: ret <vscale x 16 x i8> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svsub,_s8,_z,)(pg, op1, op2);
}

svint16_t test_svsub_s16_z(svbool_t pg, svint16_t op1, svint16_t op2)
{
  // CHECK-LABEL: test_svsub_s16_z
  // CHECK-DAG: %[[PG:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg)
  // CHECK-DAG: %[[SEL:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.sel.nxv8i16(<vscale x 8 x i1> %[[PG]], <vscale x 8 x i16> %op1, <vscale x 8 x i16> zeroinitializer)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.sub.nxv8i16(<vscale x 8 x i1> %[[PG]], <vscale x 8 x i16> %[[SEL]], <vscale x 8 x i16> %op2)
  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svsub,_s16,_z,)(pg, op1, op2);
}

svint32_t test_svsub_s32_z(svbool_t pg, svint32_t op1, svint32_t op2)
{
  // CHECK-LABEL: test_svsub_s32_z
  // CHECK-DAG: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
  // CHECK-DAG: %[[SEL:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.sel.nxv4i32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x i32> %op1, <vscale x 4 x i32> zeroinitializer)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.sub.nxv4i32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x i32> %[[SEL]], <vscale x 4 x i32> %op2)
  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svsub,_s32,_z,)(pg, op1, op2);
}

svint64_t test_svsub_s64_z(svbool_t pg, svint64_t op1, svint64_t op2)
{
  // CHECK-LABEL: test_svsub_s64_z
  // CHECK-DAG: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
  // CHECK-DAG: %[[SEL:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.sel.nxv2i64(<vscale x 2 x i1> %[[PG]], <vscale x 2 x i64> %op1, <vscale x 2 x i64> zeroinitializer)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.sub.nxv2i64(<vscale x 2 x i1> %[[PG]], <vscale x 2 x i64> %[[SEL]], <vscale x 2 x i64> %op2)
  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svsub,_s64,_z,)(pg, op1, op2);
}

svuint8_t test_svsub_u8_z(svbool_t pg, svuint8_t op1, svuint8_t op2)
{
  // CHECK-LABEL: test_svsub_u8_z
  // CHECK: %[[SEL:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.sel.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %op1, <vscale x 16 x i8> zeroinitializer)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.sub.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %[[SEL]], <vscale x 16 x i8> %op2)
  // CHECK: ret <vscale x 16 x i8> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svsub,_u8,_z,)(pg, op1, op2);
}

svuint16_t test_svsub_u16_z(svbool_t pg, svuint16_t op1, svuint16_t op2)
{
  // CHECK-LABEL: test_svsub_u16_z
  // CHECK-DAG: %[[PG:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg)
  // CHECK-DAG: %[[SEL:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.sel.nxv8i16(<vscale x 8 x i1> %[[PG]], <vscale x 8 x i16> %op1, <vscale x 8 x i16> zeroinitializer)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.sub.nxv8i16(<vscale x 8 x i1> %[[PG]], <vscale x 8 x i16> %[[SEL]], <vscale x 8 x i16> %op2)
  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svsub,_u16,_z,)(pg, op1, op2);
}

svuint32_t test_svsub_u32_z(svbool_t pg, svuint32_t op1, svuint32_t op2)
{
  // CHECK-LABEL: test_svsub_u32_z
  // CHECK-DAG: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
  // CHECK-DAG: %[[SEL:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.sel.nxv4i32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x i32> %op1, <vscale x 4 x i32> zeroinitializer)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.sub.nxv4i32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x i32> %[[SEL]], <vscale x 4 x i32> %op2)
  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svsub,_u32,_z,)(pg, op1, op2);
}

svuint64_t test_svsub_u64_z(svbool_t pg, svuint64_t op1, svuint64_t op2)
{
  // CHECK-LABEL: test_svsub_u64_z
  // CHECK-DAG: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
  // CHECK-DAG: %[[SEL:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.sel.nxv2i64(<vscale x 2 x i1> %[[PG]], <vscale x 2 x i64> %op1, <vscale x 2 x i64> zeroinitializer)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.sub.nxv2i64(<vscale x 2 x i1> %[[PG]], <vscale x 2 x i64> %[[SEL]], <vscale x 2 x i64> %op2)
  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svsub,_u64,_z,)(pg, op1, op2);
}

svint8_t test_svsub_s8_m(svbool_t pg, svint8_t op1, svint8_t op2)
{
  // CHECK-LABEL: test_svsub_s8_m
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.sub.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %op1, <vscale x 16 x i8> %op2)
  // CHECK: ret <vscale x 16 x i8> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svsub,_s8,_m,)(pg, op1, op2);
}

svint16_t test_svsub_s16_m(svbool_t pg, svint16_t op1, svint16_t op2)
{
  // CHECK-LABEL: test_svsub_s16_m
  // CHECK: %[[PG:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.sub.nxv8i16(<vscale x 8 x i1> %[[PG]], <vscale x 8 x i16> %op1, <vscale x 8 x i16> %op2)
  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svsub,_s16,_m,)(pg, op1, op2);
}

svint32_t test_svsub_s32_m(svbool_t pg, svint32_t op1, svint32_t op2)
{
  // CHECK-LABEL: test_svsub_s32_m
  // CHECK: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.sub.nxv4i32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x i32> %op1, <vscale x 4 x i32> %op2)
  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svsub,_s32,_m,)(pg, op1, op2);
}

svint64_t test_svsub_s64_m(svbool_t pg, svint64_t op1, svint64_t op2)
{
  // CHECK-LABEL: test_svsub_s64_m
  // CHECK: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.sub.nxv2i64(<vscale x 2 x i1> %[[PG]], <vscale x 2 x i64> %op1, <vscale x 2 x i64> %op2)
  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svsub,_s64,_m,)(pg, op1, op2);
}

svuint8_t test_svsub_u8_m(svbool_t pg, svuint8_t op1, svuint8_t op2)
{
  // CHECK-LABEL: test_svsub_u8_m
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.sub.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %op1, <vscale x 16 x i8> %op2)
  // CHECK: ret <vscale x 16 x i8> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svsub,_u8,_m,)(pg, op1, op2);
}

svuint16_t test_svsub_u16_m(svbool_t pg, svuint16_t op1, svuint16_t op2)
{
  // CHECK-LABEL: test_svsub_u16_m
  // CHECK: %[[PG:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.sub.nxv8i16(<vscale x 8 x i1> %[[PG]], <vscale x 8 x i16> %op1, <vscale x 8 x i16> %op2)
  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svsub,_u16,_m,)(pg, op1, op2);
}

svuint32_t test_svsub_u32_m(svbool_t pg, svuint32_t op1, svuint32_t op2)
{
  // CHECK-LABEL: test_svsub_u32_m
  // CHECK: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.sub.nxv4i32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x i32> %op1, <vscale x 4 x i32> %op2)
  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svsub,_u32,_m,)(pg, op1, op2);
}

svuint64_t test_svsub_u64_m(svbool_t pg, svuint64_t op1, svuint64_t op2)
{
  // CHECK-LABEL: test_svsub_u64_m
  // CHECK: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.sub.nxv2i64(<vscale x 2 x i1> %[[PG]], <vscale x 2 x i64> %op1, <vscale x 2 x i64> %op2)
  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svsub,_u64,_m,)(pg, op1, op2);
|
||||
}
|
||||
|
||||
svint8_t test_svsub_s8_x(svbool_t pg, svint8_t op1, svint8_t op2)
|
||||
{
|
||||
// CHECK-LABEL: test_svsub_s8_x
|
||||
// CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.sub.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %op1, <vscale x 16 x i8> %op2)
|
||||
// CHECK: ret <vscale x 16 x i8> %[[INTRINSIC]]
|
||||
return SVE_ACLE_FUNC(svsub,_s8,_x,)(pg, op1, op2);
|
||||
}
|
||||
|
||||
svint16_t test_svsub_s16_x(svbool_t pg, svint16_t op1, svint16_t op2)
|
||||
{
|
||||
// CHECK-LABEL: test_svsub_s16_x
|
||||
// CHECK: %[[PG:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg)
|
||||
// CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.sub.nxv8i16(<vscale x 8 x i1> %[[PG]], <vscale x 8 x i16> %op1, <vscale x 8 x i16> %op2)
|
||||
// CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
|
||||
return SVE_ACLE_FUNC(svsub,_s16,_x,)(pg, op1, op2);
|
||||
}
|
||||
|
||||
svint32_t test_svsub_s32_x(svbool_t pg, svint32_t op1, svint32_t op2)
|
||||
{
|
||||
// CHECK-LABEL: test_svsub_s32_x
|
||||
// CHECK: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
|
||||
// CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.sub.nxv4i32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x i32> %op1, <vscale x 4 x i32> %op2)
|
||||
// CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
|
||||
return SVE_ACLE_FUNC(svsub,_s32,_x,)(pg, op1, op2);
|
||||
}
|
||||
|
||||
svint64_t test_svsub_s64_x(svbool_t pg, svint64_t op1, svint64_t op2)
|
||||
{
|
||||
// CHECK-LABEL: test_svsub_s64_x
|
||||
// CHECK: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
|
||||
// CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.sub.nxv2i64(<vscale x 2 x i1> %[[PG]], <vscale x 2 x i64> %op1, <vscale x 2 x i64> %op2)
|
||||
// CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
|
||||
return SVE_ACLE_FUNC(svsub,_s64,_x,)(pg, op1, op2);
|
||||
}
|
||||
|
||||
svuint8_t test_svsub_u8_x(svbool_t pg, svuint8_t op1, svuint8_t op2)
|
||||
{
|
||||
// CHECK-LABEL: test_svsub_u8_x
|
||||
// CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.sub.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %op1, <vscale x 16 x i8> %op2)
|
||||
// CHECK: ret <vscale x 16 x i8> %[[INTRINSIC]]
|
||||
return SVE_ACLE_FUNC(svsub,_u8,_x,)(pg, op1, op2);
|
||||
}
|
||||
|
||||
svuint16_t test_svsub_u16_x(svbool_t pg, svuint16_t op1, svuint16_t op2)
|
||||
{
|
||||
// CHECK-LABEL: test_svsub_u16_x
|
||||
// CHECK: %[[PG:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg)
|
||||
// CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.sub.nxv8i16(<vscale x 8 x i1> %[[PG]], <vscale x 8 x i16> %op1, <vscale x 8 x i16> %op2)
|
||||
// CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
|
||||
return SVE_ACLE_FUNC(svsub,_u16,_x,)(pg, op1, op2);
|
||||
}
|
||||
|
||||
svuint32_t test_svsub_u32_x(svbool_t pg, svuint32_t op1, svuint32_t op2)
|
||||
{
|
||||
// CHECK-LABEL: test_svsub_u32_x
|
||||
// CHECK: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
|
||||
// CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.sub.nxv4i32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x i32> %op1, <vscale x 4 x i32> %op2)
|
||||
// CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
|
||||
return SVE_ACLE_FUNC(svsub,_u32,_x,)(pg, op1, op2);
|
||||
}
|
||||
|
||||
svuint64_t test_svsub_u64_x(svbool_t pg, svuint64_t op1, svuint64_t op2)
|
||||
{
|
||||
// CHECK-LABEL: test_svsub_u64_x
|
||||
// CHECK: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
|
||||
// CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.sub.nxv2i64(<vscale x 2 x i1> %[[PG]], <vscale x 2 x i64> %op1, <vscale x 2 x i64> %op2)
|
||||
// CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
|
||||
return SVE_ACLE_FUNC(svsub,_u64,_x,)(pg, op1, op2);
|
||||
}
|
|
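The _z tests above pin down the new lowering: the first operand is routed through @llvm.aarch64.sve.sel against zeroinitializer before the binary intrinsic, which is what lets the backend use a zeroing movprfx. As a minimal usage sketch (not part of the patch; plain ACLE code assuming only <arm_sve.h> and the intrinsic names exercised by these tests), the three merge forms differ only in what the false lanes hold:

#include <arm_sve.h>

// Hedged sketch, not from the patch: how the three svsub merge forms
// treat lanes where pg is false.
svint32_t demo_m(svbool_t pg, svint32_t a, svint32_t b) {
  return svsub_s32_m(pg, a, b); // false lanes keep the value of a
}
svint32_t demo_z(svbool_t pg, svint32_t a, svint32_t b) {
  return svsub_s32_z(pg, a, b); // false lanes zeroed (the sel + sub above)
}
svint32_t demo_x(svbool_t pg, svint32_t a, svint32_t b) {
  return svsub_s32_x(pg, a, b); // false lanes hold an unspecified value
}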
@@ -0,0 +1,229 @@
// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s
// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s

#include <arm_sve.h>

#ifdef SVE_OVERLOADED_FORMS
// A simple used,unused... macro, long enough to represent any SVE builtin.
#define SVE_ACLE_FUNC(A1,A2_UNUSED,A3,A4_UNUSED) A1##A3
#else
#define SVE_ACLE_FUNC(A1,A2,A3,A4) A1##A2##A3##A4
#endif
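The SVE_ACLE_FUNC macro lets one call site drive both RUN lines: with SVE_OVERLOADED_FORMS it token-pastes only A1 and A3, otherwise all four arguments. For example, for the first test below:

#ifdef SVE_OVERLOADED_FORMS
// SVE_ACLE_FUNC(svsubr,_s8,_z,) expands to svsubr_z     (overloaded form)
#else
// SVE_ACLE_FUNC(svsubr,_s8,_z,) expands to svsubr_s8_z  (fully-suffixed form)
#endif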
svint8_t test_svsubr_s8_z(svbool_t pg, svint8_t op1, svint8_t op2)
{
  // CHECK-LABEL: test_svsubr_s8_z
  // CHECK: %[[SEL:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.sel.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %op1, <vscale x 16 x i8> zeroinitializer)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.subr.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %[[SEL]], <vscale x 16 x i8> %op2)
  // CHECK: ret <vscale x 16 x i8> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svsubr,_s8,_z,)(pg, op1, op2);
}

svint16_t test_svsubr_s16_z(svbool_t pg, svint16_t op1, svint16_t op2)
{
  // CHECK-LABEL: test_svsubr_s16_z
  // CHECK-DAG: %[[PG:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg)
  // CHECK-DAG: %[[SEL:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.sel.nxv8i16(<vscale x 8 x i1> %[[PG]], <vscale x 8 x i16> %op1, <vscale x 8 x i16> zeroinitializer)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.subr.nxv8i16(<vscale x 8 x i1> %[[PG]], <vscale x 8 x i16> %[[SEL]], <vscale x 8 x i16> %op2)
  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svsubr,_s16,_z,)(pg, op1, op2);
}

svint32_t test_svsubr_s32_z(svbool_t pg, svint32_t op1, svint32_t op2)
{
  // CHECK-LABEL: test_svsubr_s32_z
  // CHECK-DAG: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
  // CHECK-DAG: %[[SEL:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.sel.nxv4i32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x i32> %op1, <vscale x 4 x i32> zeroinitializer)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.subr.nxv4i32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x i32> %[[SEL]], <vscale x 4 x i32> %op2)
  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svsubr,_s32,_z,)(pg, op1, op2);
}

svint64_t test_svsubr_s64_z(svbool_t pg, svint64_t op1, svint64_t op2)
{
  // CHECK-LABEL: test_svsubr_s64_z
  // CHECK-DAG: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
  // CHECK-DAG: %[[SEL:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.sel.nxv2i64(<vscale x 2 x i1> %[[PG]], <vscale x 2 x i64> %op1, <vscale x 2 x i64> zeroinitializer)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.subr.nxv2i64(<vscale x 2 x i1> %[[PG]], <vscale x 2 x i64> %[[SEL]], <vscale x 2 x i64> %op2)
  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svsubr,_s64,_z,)(pg, op1, op2);
}

svuint8_t test_svsubr_u8_z(svbool_t pg, svuint8_t op1, svuint8_t op2)
{
  // CHECK-LABEL: test_svsubr_u8_z
  // CHECK: %[[SEL:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.sel.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %op1, <vscale x 16 x i8> zeroinitializer)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.subr.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %[[SEL]], <vscale x 16 x i8> %op2)
  // CHECK: ret <vscale x 16 x i8> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svsubr,_u8,_z,)(pg, op1, op2);
}

svuint16_t test_svsubr_u16_z(svbool_t pg, svuint16_t op1, svuint16_t op2)
{
  // CHECK-LABEL: test_svsubr_u16_z
  // CHECK-DAG: %[[PG:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg)
  // CHECK-DAG: %[[SEL:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.sel.nxv8i16(<vscale x 8 x i1> %[[PG]], <vscale x 8 x i16> %op1, <vscale x 8 x i16> zeroinitializer)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.subr.nxv8i16(<vscale x 8 x i1> %[[PG]], <vscale x 8 x i16> %[[SEL]], <vscale x 8 x i16> %op2)
  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svsubr,_u16,_z,)(pg, op1, op2);
}

svuint32_t test_svsubr_u32_z(svbool_t pg, svuint32_t op1, svuint32_t op2)
{
  // CHECK-LABEL: test_svsubr_u32_z
  // CHECK-DAG: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
  // CHECK-DAG: %[[SEL:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.sel.nxv4i32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x i32> %op1, <vscale x 4 x i32> zeroinitializer)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.subr.nxv4i32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x i32> %[[SEL]], <vscale x 4 x i32> %op2)
  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svsubr,_u32,_z,)(pg, op1, op2);
}

svuint64_t test_svsubr_u64_z(svbool_t pg, svuint64_t op1, svuint64_t op2)
{
  // CHECK-LABEL: test_svsubr_u64_z
  // CHECK-DAG: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
  // CHECK-DAG: %[[SEL:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.sel.nxv2i64(<vscale x 2 x i1> %[[PG]], <vscale x 2 x i64> %op1, <vscale x 2 x i64> zeroinitializer)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.subr.nxv2i64(<vscale x 2 x i1> %[[PG]], <vscale x 2 x i64> %[[SEL]], <vscale x 2 x i64> %op2)
  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svsubr,_u64,_z,)(pg, op1, op2);
}

svint8_t test_svsubr_s8_m(svbool_t pg, svint8_t op1, svint8_t op2)
{
  // CHECK-LABEL: test_svsubr_s8_m
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.subr.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %op1, <vscale x 16 x i8> %op2)
  // CHECK: ret <vscale x 16 x i8> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svsubr,_s8,_m,)(pg, op1, op2);
}

svint16_t test_svsubr_s16_m(svbool_t pg, svint16_t op1, svint16_t op2)
{
  // CHECK-LABEL: test_svsubr_s16_m
  // CHECK: %[[PG:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.subr.nxv8i16(<vscale x 8 x i1> %[[PG]], <vscale x 8 x i16> %op1, <vscale x 8 x i16> %op2)
  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svsubr,_s16,_m,)(pg, op1, op2);
}

svint32_t test_svsubr_s32_m(svbool_t pg, svint32_t op1, svint32_t op2)
{
  // CHECK-LABEL: test_svsubr_s32_m
  // CHECK: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.subr.nxv4i32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x i32> %op1, <vscale x 4 x i32> %op2)
  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svsubr,_s32,_m,)(pg, op1, op2);
}

svint64_t test_svsubr_s64_m(svbool_t pg, svint64_t op1, svint64_t op2)
{
  // CHECK-LABEL: test_svsubr_s64_m
  // CHECK: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.subr.nxv2i64(<vscale x 2 x i1> %[[PG]], <vscale x 2 x i64> %op1, <vscale x 2 x i64> %op2)
  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svsubr,_s64,_m,)(pg, op1, op2);
}

svuint8_t test_svsubr_u8_m(svbool_t pg, svuint8_t op1, svuint8_t op2)
{
  // CHECK-LABEL: test_svsubr_u8_m
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.subr.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %op1, <vscale x 16 x i8> %op2)
  // CHECK: ret <vscale x 16 x i8> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svsubr,_u8,_m,)(pg, op1, op2);
}

svuint16_t test_svsubr_u16_m(svbool_t pg, svuint16_t op1, svuint16_t op2)
{
  // CHECK-LABEL: test_svsubr_u16_m
  // CHECK: %[[PG:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.subr.nxv8i16(<vscale x 8 x i1> %[[PG]], <vscale x 8 x i16> %op1, <vscale x 8 x i16> %op2)
  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svsubr,_u16,_m,)(pg, op1, op2);
}

svuint32_t test_svsubr_u32_m(svbool_t pg, svuint32_t op1, svuint32_t op2)
{
  // CHECK-LABEL: test_svsubr_u32_m
  // CHECK: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.subr.nxv4i32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x i32> %op1, <vscale x 4 x i32> %op2)
  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svsubr,_u32,_m,)(pg, op1, op2);
}

svuint64_t test_svsubr_u64_m(svbool_t pg, svuint64_t op1, svuint64_t op2)
{
  // CHECK-LABEL: test_svsubr_u64_m
  // CHECK: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.subr.nxv2i64(<vscale x 2 x i1> %[[PG]], <vscale x 2 x i64> %op1, <vscale x 2 x i64> %op2)
  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svsubr,_u64,_m,)(pg, op1, op2);
}

svint8_t test_svsubr_s8_x(svbool_t pg, svint8_t op1, svint8_t op2)
{
  // CHECK-LABEL: test_svsubr_s8_x
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.subr.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %op1, <vscale x 16 x i8> %op2)
  // CHECK: ret <vscale x 16 x i8> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svsubr,_s8,_x,)(pg, op1, op2);
}

svint16_t test_svsubr_s16_x(svbool_t pg, svint16_t op1, svint16_t op2)
{
  // CHECK-LABEL: test_svsubr_s16_x
  // CHECK: %[[PG:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.subr.nxv8i16(<vscale x 8 x i1> %[[PG]], <vscale x 8 x i16> %op1, <vscale x 8 x i16> %op2)
  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svsubr,_s16,_x,)(pg, op1, op2);
}

svint32_t test_svsubr_s32_x(svbool_t pg, svint32_t op1, svint32_t op2)
{
  // CHECK-LABEL: test_svsubr_s32_x
  // CHECK: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.subr.nxv4i32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x i32> %op1, <vscale x 4 x i32> %op2)
  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svsubr,_s32,_x,)(pg, op1, op2);
}

svint64_t test_svsubr_s64_x(svbool_t pg, svint64_t op1, svint64_t op2)
{
  // CHECK-LABEL: test_svsubr_s64_x
  // CHECK: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.subr.nxv2i64(<vscale x 2 x i1> %[[PG]], <vscale x 2 x i64> %op1, <vscale x 2 x i64> %op2)
  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svsubr,_s64,_x,)(pg, op1, op2);
}

svuint8_t test_svsubr_u8_x(svbool_t pg, svuint8_t op1, svuint8_t op2)
{
  // CHECK-LABEL: test_svsubr_u8_x
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.subr.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %op1, <vscale x 16 x i8> %op2)
  // CHECK: ret <vscale x 16 x i8> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svsubr,_u8,_x,)(pg, op1, op2);
}

svuint16_t test_svsubr_u16_x(svbool_t pg, svuint16_t op1, svuint16_t op2)
{
  // CHECK-LABEL: test_svsubr_u16_x
  // CHECK: %[[PG:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.subr.nxv8i16(<vscale x 8 x i1> %[[PG]], <vscale x 8 x i16> %op1, <vscale x 8 x i16> %op2)
  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svsubr,_u16,_x,)(pg, op1, op2);
}

svuint32_t test_svsubr_u32_x(svbool_t pg, svuint32_t op1, svuint32_t op2)
{
  // CHECK-LABEL: test_svsubr_u32_x
  // CHECK: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.subr.nxv4i32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x i32> %op1, <vscale x 4 x i32> %op2)
  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svsubr,_u32,_x,)(pg, op1, op2);
}

svuint64_t test_svsubr_u64_x(svbool_t pg, svuint64_t op1, svuint64_t op2)
{
  // CHECK-LABEL: test_svsubr_u64_x
  // CHECK: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.subr.nxv2i64(<vscale x 2 x i1> %[[PG]], <vscale x 2 x i64> %op1, <vscale x 2 x i64> %op2)
  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svsubr,_u64,_x,)(pg, op1, op2);
}
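For completeness, a hedged note on semantics: svsubr is reversed subtraction, so active lanes compute op2 - op1, while the merge suffix behaves exactly as in the svsub tests above. A minimal sketch (not part of the patch, assuming <arm_sve.h>):

#include <arm_sve.h>

// Sketch only: on active lanes svsubr_u32_z(pg, a, b) yields b - a,
// matching svsub_u32_z(pg, b, a); false lanes are zeroed in both.
svuint32_t subr_demo(svbool_t pg, svuint32_t a, svuint32_t b) {
  return svsubr_u32_z(pg, a, b);
}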