forked from OSchip/llvm-project
[SveEmitter] Add builtins for permutations and selection
This patch adds builtins for:
- svlasta and svlastb
- svclasta and svclastb
- svunpkhi and svunpklo
- svuzp1 and svuzp2
- svzip1 and svzip2
- svrev
- svsel
- svcompact
- svsplice
- svtbl
parent 8f24c4b72f
commit 6f588c6ef3
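
For illustration only (not part of this commit), a minimal C sketch of how two of the new builtins are called, assuming an SVE-enabled toolchain and the svptrue_b32 predicate-creation builtin:

#include <arm_sve.h>

// svlastb extracts the last active element of a vector; with an
// all-true predicate that is simply the final lane.
int32_t last_lane(svint32_t acc) {
  return svlastb_s32(svptrue_b32(), acc);
}

// svclasta selects the element after the last active lane of pg,
// falling back to the scalar when no lane is active.
int32_t after_last_active(svbool_t pg, int32_t fallback, svint32_t data) {
  return svclasta_n_s32(pg, fallback, data);
}
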
@@ -626,9 +626,6 @@ defm SVMAD : SInstZPZZZ<"svmad", "csilUcUsUiUl", "aarch64_sve_mad">;
defm SVMLA : SInstZPZZZ<"svmla", "csilUcUsUiUl", "aarch64_sve_mla">;
defm SVMLS : SInstZPZZZ<"svmls", "csilUcUsUiUl", "aarch64_sve_mls">;
defm SVMSB : SInstZPZZZ<"svmsb", "csilUcUsUiUl", "aarch64_sve_msb">;
////////////////////////////////////////////////////////////////////////////////
// Permutations and selection
def SVEXT : SInst<"svext[_{d}]", "dddi", "csilUcUsUiUlhfd", MergeNone, "aarch64_sve_ext", [], [ImmCheck<2, ImmCheckExtract, 1>]>;

////////////////////////////////////////////////////////////////////////////////
// Shifts

@@ -856,6 +853,44 @@ def SVCVTXNT_F32 : SInst<"svcvtxnt_f32[_f64]", "MMPd", "d", MergeOp1, "aarch6
def SVCADD_M : SInst<"svcadd[_{d}]", "dPddi", "hfd", MergeOp1, "aarch64_sve_fcadd", [], [ImmCheck<3, ImmCheckComplexRot90_270>]>;
def SVCMLA_M : SInst<"svcmla[_{d}]", "dPdddi", "hfd", MergeOp1, "aarch64_sve_fcmla", [], [ImmCheck<4, ImmCheckComplexRotAll90>]>;

////////////////////////////////////////////////////////////////////////////////
// Permutations and selection

def SVCLASTA : SInst<"svclasta[_{d}]", "dPdd", "csilUcUsUiUlhfd", MergeNone, "aarch64_sve_clasta">;
def SVCLASTA_N : SInst<"svclasta[_n_{d}]", "sPsd", "csilUcUsUiUlhfd", MergeNone, "aarch64_sve_clasta_n">;
def SVCLASTB : SInst<"svclastb[_{d}]", "dPdd", "csilUcUsUiUlhfd", MergeNone, "aarch64_sve_clastb">;
def SVCLASTB_N : SInst<"svclastb[_n_{d}]", "sPsd", "csilUcUsUiUlhfd", MergeNone, "aarch64_sve_clastb_n">;
def SVCOMPACT : SInst<"svcompact[_{d}]", "dPd", "ilUiUlfd", MergeNone, "aarch64_sve_compact">;
// SVDUP_LANE (to land in D78750)
// SVDUPQ_LANE (to land in D78750)
def SVEXT : SInst<"svext[_{d}]", "dddi", "csilUcUsUiUlhfd", MergeNone, "aarch64_sve_ext", [], [ImmCheck<2, ImmCheckExtract, 1>]>;
def SVLASTA : SInst<"svlasta[_{d}]", "sPd", "csilUcUsUiUlhfd", MergeNone, "aarch64_sve_lasta">;
def SVLASTB : SInst<"svlastb[_{d}]", "sPd", "csilUcUsUiUlhfd", MergeNone, "aarch64_sve_lastb">;
def SVREV : SInst<"svrev[_{d}]", "dd", "csilUcUsUiUlhfd", MergeNone, "aarch64_sve_rev">;
def SVSEL : SInst<"svsel[_{d}]", "dPdd", "csilUcUsUiUlhfd", MergeNone, "aarch64_sve_sel">;
def SVSPLICE : SInst<"svsplice[_{d}]", "dPdd", "csilUcUsUiUlhfd", MergeNone, "aarch64_sve_splice">;
def SVTBL : SInst<"svtbl[_{d}]", "ddu", "csilUcUsUiUlhfd", MergeNone, "aarch64_sve_tbl">;
def SVTRN1 : SInst<"svtrn1[_{d}]", "ddd", "csilUcUsUiUlhfd", MergeNone, "aarch64_sve_trn1">;
def SVTRN2 : SInst<"svtrn2[_{d}]", "ddd", "csilUcUsUiUlhfd", MergeNone, "aarch64_sve_trn2">;
def SVUNPKHI_S : SInst<"svunpkhi[_{d}]", "dh", "sil", MergeNone, "aarch64_sve_sunpkhi">;
def SVUNPKHI_U : SInst<"svunpkhi[_{d}]", "dh", "UsUiUl", MergeNone, "aarch64_sve_uunpkhi">;
def SVUNPKLO_S : SInst<"svunpklo[_{d}]", "dh", "sil", MergeNone, "aarch64_sve_sunpklo">;
def SVUNPKLO_U : SInst<"svunpklo[_{d}]", "dh", "UsUiUl", MergeNone, "aarch64_sve_uunpklo">;
def SVUZP1 : SInst<"svuzp1[_{d}]", "ddd", "csilUcUsUiUlhfd", MergeNone, "aarch64_sve_uzp1">;
def SVUZP2 : SInst<"svuzp2[_{d}]", "ddd", "csilUcUsUiUlhfd", MergeNone, "aarch64_sve_uzp2">;
def SVZIP1 : SInst<"svzip1[_{d}]", "ddd", "csilUcUsUiUlhfd", MergeNone, "aarch64_sve_zip1">;
def SVZIP2 : SInst<"svzip2[_{d}]", "ddd", "csilUcUsUiUlhfd", MergeNone, "aarch64_sve_zip2">;

def SVREV_B : SInst<"svrev_{d}", "PP", "PcPsPiPl", MergeNone, "aarch64_sve_rev">;
def SVSEL_B : SInst<"svsel[_b]", "PPPP", "Pc", MergeNone, "aarch64_sve_sel">;
def SVTRN1_B : SInst<"svtrn1_{d}", "PPP", "PcPsPiPl", MergeNone, "aarch64_sve_trn1">;
def SVTRN2_B : SInst<"svtrn2_{d}", "PPP", "PcPsPiPl", MergeNone, "aarch64_sve_trn2">;
def SVPUNPKHI : SInst<"svunpkhi[_b]", "PP", "Pc", MergeNone, "aarch64_sve_punpkhi">;
def SVPUNPKLO : SInst<"svunpklo[_b]", "PP", "Pc", MergeNone, "aarch64_sve_punpklo">;
def SVUZP1_B : SInst<"svuzp1_{d}", "PPP", "PcPsPiPl", MergeNone, "aarch64_sve_uzp1">;
def SVUZP2_B : SInst<"svuzp2_{d}", "PPP", "PcPsPiPl", MergeNone, "aarch64_sve_uzp2">;
def SVZIP1_B : SInst<"svzip1_{d}", "PPP", "PcPsPiPl", MergeNone, "aarch64_sve_zip1">;
def SVZIP2_B : SInst<"svzip2_{d}", "PPP", "PcPsPiPl", MergeNone, "aarch64_sve_zip2">;

////////////////////////////////////////////////////////////////////////////////
// Predicate creation

@@ -0,0 +1,205 @@
// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s
// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s

#include <arm_sve.h>

#ifdef SVE_OVERLOADED_FORMS
// A simple used,unused... macro, long enough to represent any SVE builtin.
#define SVE_ACLE_FUNC(A1,A2_UNUSED,A3,A4_UNUSED) A1##A3
#else
#define SVE_ACLE_FUNC(A1,A2,A3,A4) A1##A2##A3##A4
#endif
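// For example, SVE_ACLE_FUNC(svclasta,_s8,,) expands to the overloaded name
// `svclasta` when SVE_OVERLOADED_FORMS is defined, and to the type-suffixed
// name `svclasta_s8` otherwise, so each test body exercises both spellings.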

svint8_t test_svclasta_s8(svbool_t pg, svint8_t fallback, svint8_t data)
{
  // CHECK-LABEL: test_svclasta_s8
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.clasta.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %fallback, <vscale x 16 x i8> %data)
  // CHECK: ret <vscale x 16 x i8> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svclasta,_s8,,)(pg, fallback, data);
}

svint16_t test_svclasta_s16(svbool_t pg, svint16_t fallback, svint16_t data)
{
  // CHECK-LABEL: test_svclasta_s16
  // CHECK: %[[PG:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.clasta.nxv8i16(<vscale x 8 x i1> %[[PG]], <vscale x 8 x i16> %fallback, <vscale x 8 x i16> %data)
  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svclasta,_s16,,)(pg, fallback, data);
}

svint32_t test_svclasta_s32(svbool_t pg, svint32_t fallback, svint32_t data)
{
  // CHECK-LABEL: test_svclasta_s32
  // CHECK: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.clasta.nxv4i32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x i32> %fallback, <vscale x 4 x i32> %data)
  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svclasta,_s32,,)(pg, fallback, data);
}

svint64_t test_svclasta_s64(svbool_t pg, svint64_t fallback, svint64_t data)
{
  // CHECK-LABEL: test_svclasta_s64
  // CHECK: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.clasta.nxv2i64(<vscale x 2 x i1> %[[PG]], <vscale x 2 x i64> %fallback, <vscale x 2 x i64> %data)
  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svclasta,_s64,,)(pg, fallback, data);
}

svuint8_t test_svclasta_u8(svbool_t pg, svuint8_t fallback, svuint8_t data)
{
  // CHECK-LABEL: test_svclasta_u8
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.clasta.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %fallback, <vscale x 16 x i8> %data)
  // CHECK: ret <vscale x 16 x i8> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svclasta,_u8,,)(pg, fallback, data);
}

svuint16_t test_svclasta_u16(svbool_t pg, svuint16_t fallback, svuint16_t data)
{
  // CHECK-LABEL: test_svclasta_u16
  // CHECK: %[[PG:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.clasta.nxv8i16(<vscale x 8 x i1> %[[PG]], <vscale x 8 x i16> %fallback, <vscale x 8 x i16> %data)
  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svclasta,_u16,,)(pg, fallback, data);
}

svuint32_t test_svclasta_u32(svbool_t pg, svuint32_t fallback, svuint32_t data)
{
  // CHECK-LABEL: test_svclasta_u32
  // CHECK: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.clasta.nxv4i32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x i32> %fallback, <vscale x 4 x i32> %data)
  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svclasta,_u32,,)(pg, fallback, data);
}

svuint64_t test_svclasta_u64(svbool_t pg, svuint64_t fallback, svuint64_t data)
{
  // CHECK-LABEL: test_svclasta_u64
  // CHECK: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.clasta.nxv2i64(<vscale x 2 x i1> %[[PG]], <vscale x 2 x i64> %fallback, <vscale x 2 x i64> %data)
  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svclasta,_u64,,)(pg, fallback, data);
}

svfloat16_t test_svclasta_f16(svbool_t pg, svfloat16_t fallback, svfloat16_t data)
{
  // CHECK-LABEL: test_svclasta_f16
  // CHECK: %[[PG:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x half> @llvm.aarch64.sve.clasta.nxv8f16(<vscale x 8 x i1> %[[PG]], <vscale x 8 x half> %fallback, <vscale x 8 x half> %data)
  // CHECK: ret <vscale x 8 x half> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svclasta,_f16,,)(pg, fallback, data);
}

svfloat32_t test_svclasta_f32(svbool_t pg, svfloat32_t fallback, svfloat32_t data)
{
  // CHECK-LABEL: test_svclasta_f32
  // CHECK: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x float> @llvm.aarch64.sve.clasta.nxv4f32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x float> %fallback, <vscale x 4 x float> %data)
  // CHECK: ret <vscale x 4 x float> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svclasta,_f32,,)(pg, fallback, data);
}

svfloat64_t test_svclasta_f64(svbool_t pg, svfloat64_t fallback, svfloat64_t data)
{
  // CHECK-LABEL: test_svclasta_f64
  // CHECK: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x double> @llvm.aarch64.sve.clasta.nxv2f64(<vscale x 2 x i1> %[[PG]], <vscale x 2 x double> %fallback, <vscale x 2 x double> %data)
  // CHECK: ret <vscale x 2 x double> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svclasta,_f64,,)(pg, fallback, data);
}

int8_t test_svclasta_n_s8(svbool_t pg, int8_t fallback, svint8_t data)
{
  // CHECK-LABEL: test_svclasta_n_s8
  // CHECK: %[[INTRINSIC:.*]] = call i8 @llvm.aarch64.sve.clasta.n.nxv16i8(<vscale x 16 x i1> %pg, i8 %fallback, <vscale x 16 x i8> %data)
  // CHECK: ret i8 %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svclasta,_n_s8,,)(pg, fallback, data);
}

int16_t test_svclasta_n_s16(svbool_t pg, int16_t fallback, svint16_t data)
{
  // CHECK-LABEL: test_svclasta_n_s16
  // CHECK: %[[PG:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[INTRINSIC:.*]] = call i16 @llvm.aarch64.sve.clasta.n.nxv8i16(<vscale x 8 x i1> %[[PG]], i16 %fallback, <vscale x 8 x i16> %data)
  // CHECK: ret i16 %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svclasta,_n_s16,,)(pg, fallback, data);
}

int32_t test_svclasta_n_s32(svbool_t pg, int32_t fallback, svint32_t data)
{
  // CHECK-LABEL: test_svclasta_n_s32
  // CHECK: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[INTRINSIC:.*]] = call i32 @llvm.aarch64.sve.clasta.n.nxv4i32(<vscale x 4 x i1> %[[PG]], i32 %fallback, <vscale x 4 x i32> %data)
  // CHECK: ret i32 %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svclasta,_n_s32,,)(pg, fallback, data);
}

int64_t test_svclasta_n_s64(svbool_t pg, int64_t fallback, svint64_t data)
{
  // CHECK-LABEL: test_svclasta_n_s64
  // CHECK: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[INTRINSIC:.*]] = call i64 @llvm.aarch64.sve.clasta.n.nxv2i64(<vscale x 2 x i1> %[[PG]], i64 %fallback, <vscale x 2 x i64> %data)
  // CHECK: ret i64 %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svclasta,_n_s64,,)(pg, fallback, data);
}

uint8_t test_svclasta_n_u8(svbool_t pg, uint8_t fallback, svuint8_t data)
{
  // CHECK-LABEL: test_svclasta_n_u8
  // CHECK: %[[INTRINSIC:.*]] = call i8 @llvm.aarch64.sve.clasta.n.nxv16i8(<vscale x 16 x i1> %pg, i8 %fallback, <vscale x 16 x i8> %data)
  // CHECK: ret i8 %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svclasta,_n_u8,,)(pg, fallback, data);
}

uint16_t test_svclasta_n_u16(svbool_t pg, uint16_t fallback, svuint16_t data)
{
  // CHECK-LABEL: test_svclasta_n_u16
  // CHECK: %[[PG:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[INTRINSIC:.*]] = call i16 @llvm.aarch64.sve.clasta.n.nxv8i16(<vscale x 8 x i1> %[[PG]], i16 %fallback, <vscale x 8 x i16> %data)
  // CHECK: ret i16 %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svclasta,_n_u16,,)(pg, fallback, data);
}

uint32_t test_svclasta_n_u32(svbool_t pg, uint32_t fallback, svuint32_t data)
{
  // CHECK-LABEL: test_svclasta_n_u32
  // CHECK: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[INTRINSIC:.*]] = call i32 @llvm.aarch64.sve.clasta.n.nxv4i32(<vscale x 4 x i1> %[[PG]], i32 %fallback, <vscale x 4 x i32> %data)
  // CHECK: ret i32 %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svclasta,_n_u32,,)(pg, fallback, data);
}

uint64_t test_svclasta_n_u64(svbool_t pg, uint64_t fallback, svuint64_t data)
{
  // CHECK-LABEL: test_svclasta_n_u64
  // CHECK: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[INTRINSIC:.*]] = call i64 @llvm.aarch64.sve.clasta.n.nxv2i64(<vscale x 2 x i1> %[[PG]], i64 %fallback, <vscale x 2 x i64> %data)
  // CHECK: ret i64 %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svclasta,_n_u64,,)(pg, fallback, data);
}

float16_t test_svclasta_n_f16(svbool_t pg, float16_t fallback, svfloat16_t data)
{
  // CHECK-LABEL: test_svclasta_n_f16
  // CHECK: %[[PG:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[INTRINSIC:.*]] = call half @llvm.aarch64.sve.clasta.n.nxv8f16(<vscale x 8 x i1> %[[PG]], half %fallback, <vscale x 8 x half> %data)
  // CHECK: ret half %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svclasta,_n_f16,,)(pg, fallback, data);
}

float32_t test_svclasta_n_f32(svbool_t pg, float32_t fallback, svfloat32_t data)
{
  // CHECK-LABEL: test_svclasta_n_f32
  // CHECK: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[INTRINSIC:.*]] = call float @llvm.aarch64.sve.clasta.n.nxv4f32(<vscale x 4 x i1> %[[PG]], float %fallback, <vscale x 4 x float> %data)
  // CHECK: ret float %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svclasta,_n_f32,,)(pg, fallback, data);
}

float64_t test_svclasta_n_f64(svbool_t pg, float64_t fallback, svfloat64_t data)
{
  // CHECK-LABEL: test_svclasta_n_f64
  // CHECK: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[INTRINSIC:.*]] = call double @llvm.aarch64.sve.clasta.n.nxv2f64(<vscale x 2 x i1> %[[PG]], double %fallback, <vscale x 2 x double> %data)
  // CHECK: ret double %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svclasta,_n_f64,,)(pg, fallback, data);
}

@@ -0,0 +1,205 @@
// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s
// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s

#include <arm_sve.h>

#ifdef SVE_OVERLOADED_FORMS
// A simple used,unused... macro, long enough to represent any SVE builtin.
#define SVE_ACLE_FUNC(A1,A2_UNUSED,A3,A4_UNUSED) A1##A3
#else
#define SVE_ACLE_FUNC(A1,A2,A3,A4) A1##A2##A3##A4
#endif

svint8_t test_svclastb_s8(svbool_t pg, svint8_t fallback, svint8_t data)
{
  // CHECK-LABEL: test_svclastb_s8
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.clastb.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %fallback, <vscale x 16 x i8> %data)
  // CHECK: ret <vscale x 16 x i8> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svclastb,_s8,,)(pg, fallback, data);
}

svint16_t test_svclastb_s16(svbool_t pg, svint16_t fallback, svint16_t data)
{
  // CHECK-LABEL: test_svclastb_s16
  // CHECK: %[[PG:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.clastb.nxv8i16(<vscale x 8 x i1> %[[PG]], <vscale x 8 x i16> %fallback, <vscale x 8 x i16> %data)
  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svclastb,_s16,,)(pg, fallback, data);
}

svint32_t test_svclastb_s32(svbool_t pg, svint32_t fallback, svint32_t data)
{
  // CHECK-LABEL: test_svclastb_s32
  // CHECK: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.clastb.nxv4i32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x i32> %fallback, <vscale x 4 x i32> %data)
  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svclastb,_s32,,)(pg, fallback, data);
}

svint64_t test_svclastb_s64(svbool_t pg, svint64_t fallback, svint64_t data)
{
  // CHECK-LABEL: test_svclastb_s64
  // CHECK: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.clastb.nxv2i64(<vscale x 2 x i1> %[[PG]], <vscale x 2 x i64> %fallback, <vscale x 2 x i64> %data)
  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svclastb,_s64,,)(pg, fallback, data);
}

svuint8_t test_svclastb_u8(svbool_t pg, svuint8_t fallback, svuint8_t data)
{
  // CHECK-LABEL: test_svclastb_u8
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.clastb.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %fallback, <vscale x 16 x i8> %data)
  // CHECK: ret <vscale x 16 x i8> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svclastb,_u8,,)(pg, fallback, data);
}

svuint16_t test_svclastb_u16(svbool_t pg, svuint16_t fallback, svuint16_t data)
{
  // CHECK-LABEL: test_svclastb_u16
  // CHECK: %[[PG:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.clastb.nxv8i16(<vscale x 8 x i1> %[[PG]], <vscale x 8 x i16> %fallback, <vscale x 8 x i16> %data)
  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svclastb,_u16,,)(pg, fallback, data);
}

svuint32_t test_svclastb_u32(svbool_t pg, svuint32_t fallback, svuint32_t data)
{
  // CHECK-LABEL: test_svclastb_u32
  // CHECK: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.clastb.nxv4i32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x i32> %fallback, <vscale x 4 x i32> %data)
  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svclastb,_u32,,)(pg, fallback, data);
}

svuint64_t test_svclastb_u64(svbool_t pg, svuint64_t fallback, svuint64_t data)
{
  // CHECK-LABEL: test_svclastb_u64
  // CHECK: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.clastb.nxv2i64(<vscale x 2 x i1> %[[PG]], <vscale x 2 x i64> %fallback, <vscale x 2 x i64> %data)
  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svclastb,_u64,,)(pg, fallback, data);
}

svfloat16_t test_svclastb_f16(svbool_t pg, svfloat16_t fallback, svfloat16_t data)
{
  // CHECK-LABEL: test_svclastb_f16
  // CHECK: %[[PG:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x half> @llvm.aarch64.sve.clastb.nxv8f16(<vscale x 8 x i1> %[[PG]], <vscale x 8 x half> %fallback, <vscale x 8 x half> %data)
  // CHECK: ret <vscale x 8 x half> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svclastb,_f16,,)(pg, fallback, data);
}

svfloat32_t test_svclastb_f32(svbool_t pg, svfloat32_t fallback, svfloat32_t data)
{
  // CHECK-LABEL: test_svclastb_f32
  // CHECK: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x float> @llvm.aarch64.sve.clastb.nxv4f32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x float> %fallback, <vscale x 4 x float> %data)
  // CHECK: ret <vscale x 4 x float> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svclastb,_f32,,)(pg, fallback, data);
}

svfloat64_t test_svclastb_f64(svbool_t pg, svfloat64_t fallback, svfloat64_t data)
{
  // CHECK-LABEL: test_svclastb_f64
  // CHECK: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x double> @llvm.aarch64.sve.clastb.nxv2f64(<vscale x 2 x i1> %[[PG]], <vscale x 2 x double> %fallback, <vscale x 2 x double> %data)
  // CHECK: ret <vscale x 2 x double> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svclastb,_f64,,)(pg, fallback, data);
}

int8_t test_svclastb_n_s8(svbool_t pg, int8_t fallback, svint8_t data)
{
  // CHECK-LABEL: test_svclastb_n_s8
  // CHECK: %[[INTRINSIC:.*]] = call i8 @llvm.aarch64.sve.clastb.n.nxv16i8(<vscale x 16 x i1> %pg, i8 %fallback, <vscale x 16 x i8> %data)
  // CHECK: ret i8 %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svclastb,_n_s8,,)(pg, fallback, data);
}

int16_t test_svclastb_n_s16(svbool_t pg, int16_t fallback, svint16_t data)
{
  // CHECK-LABEL: test_svclastb_n_s16
  // CHECK: %[[PG:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[INTRINSIC:.*]] = call i16 @llvm.aarch64.sve.clastb.n.nxv8i16(<vscale x 8 x i1> %[[PG]], i16 %fallback, <vscale x 8 x i16> %data)
  // CHECK: ret i16 %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svclastb,_n_s16,,)(pg, fallback, data);
}

int32_t test_svclastb_n_s32(svbool_t pg, int32_t fallback, svint32_t data)
{
  // CHECK-LABEL: test_svclastb_n_s32
  // CHECK: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[INTRINSIC:.*]] = call i32 @llvm.aarch64.sve.clastb.n.nxv4i32(<vscale x 4 x i1> %[[PG]], i32 %fallback, <vscale x 4 x i32> %data)
  // CHECK: ret i32 %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svclastb,_n_s32,,)(pg, fallback, data);
}

int64_t test_svclastb_n_s64(svbool_t pg, int64_t fallback, svint64_t data)
{
  // CHECK-LABEL: test_svclastb_n_s64
  // CHECK: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[INTRINSIC:.*]] = call i64 @llvm.aarch64.sve.clastb.n.nxv2i64(<vscale x 2 x i1> %[[PG]], i64 %fallback, <vscale x 2 x i64> %data)
  // CHECK: ret i64 %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svclastb,_n_s64,,)(pg, fallback, data);
}

uint8_t test_svclastb_n_u8(svbool_t pg, uint8_t fallback, svuint8_t data)
{
  // CHECK-LABEL: test_svclastb_n_u8
  // CHECK: %[[INTRINSIC:.*]] = call i8 @llvm.aarch64.sve.clastb.n.nxv16i8(<vscale x 16 x i1> %pg, i8 %fallback, <vscale x 16 x i8> %data)
  // CHECK: ret i8 %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svclastb,_n_u8,,)(pg, fallback, data);
}

uint16_t test_svclastb_n_u16(svbool_t pg, uint16_t fallback, svuint16_t data)
{
  // CHECK-LABEL: test_svclastb_n_u16
  // CHECK: %[[PG:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[INTRINSIC:.*]] = call i16 @llvm.aarch64.sve.clastb.n.nxv8i16(<vscale x 8 x i1> %[[PG]], i16 %fallback, <vscale x 8 x i16> %data)
  // CHECK: ret i16 %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svclastb,_n_u16,,)(pg, fallback, data);
}

uint32_t test_svclastb_n_u32(svbool_t pg, uint32_t fallback, svuint32_t data)
{
  // CHECK-LABEL: test_svclastb_n_u32
  // CHECK: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[INTRINSIC:.*]] = call i32 @llvm.aarch64.sve.clastb.n.nxv4i32(<vscale x 4 x i1> %[[PG]], i32 %fallback, <vscale x 4 x i32> %data)
  // CHECK: ret i32 %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svclastb,_n_u32,,)(pg, fallback, data);
}

uint64_t test_svclastb_n_u64(svbool_t pg, uint64_t fallback, svuint64_t data)
{
  // CHECK-LABEL: test_svclastb_n_u64
  // CHECK: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[INTRINSIC:.*]] = call i64 @llvm.aarch64.sve.clastb.n.nxv2i64(<vscale x 2 x i1> %[[PG]], i64 %fallback, <vscale x 2 x i64> %data)
  // CHECK: ret i64 %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svclastb,_n_u64,,)(pg, fallback, data);
}

float16_t test_svclastb_n_f16(svbool_t pg, float16_t fallback, svfloat16_t data)
{
  // CHECK-LABEL: test_svclastb_n_f16
  // CHECK: %[[PG:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[INTRINSIC:.*]] = call half @llvm.aarch64.sve.clastb.n.nxv8f16(<vscale x 8 x i1> %[[PG]], half %fallback, <vscale x 8 x half> %data)
  // CHECK: ret half %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svclastb,_n_f16,,)(pg, fallback, data);
}

float32_t test_svclastb_n_f32(svbool_t pg, float32_t fallback, svfloat32_t data)
{
  // CHECK-LABEL: test_svclastb_n_f32
  // CHECK: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[INTRINSIC:.*]] = call float @llvm.aarch64.sve.clastb.n.nxv4f32(<vscale x 4 x i1> %[[PG]], float %fallback, <vscale x 4 x float> %data)
  // CHECK: ret float %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svclastb,_n_f32,,)(pg, fallback, data);
}

float64_t test_svclastb_n_f64(svbool_t pg, float64_t fallback, svfloat64_t data)
{
  // CHECK-LABEL: test_svclastb_n_f64
  // CHECK: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[INTRINSIC:.*]] = call double @llvm.aarch64.sve.clastb.n.nxv2f64(<vscale x 2 x i1> %[[PG]], double %fallback, <vscale x 2 x double> %data)
  // CHECK: ret double %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svclastb,_n_f64,,)(pg, fallback, data);
}

@@ -0,0 +1,65 @@
// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s
// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s

#include <arm_sve.h>

#ifdef SVE_OVERLOADED_FORMS
// A simple used,unused... macro, long enough to represent any SVE builtin.
#define SVE_ACLE_FUNC(A1,A2_UNUSED,A3,A4_UNUSED) A1##A3
#else
#define SVE_ACLE_FUNC(A1,A2,A3,A4) A1##A2##A3##A4
#endif

svint32_t test_svcompact_s32(svbool_t pg, svint32_t op)
{
  // CHECK-LABEL: test_svcompact_s32
  // CHECK: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.compact.nxv4i32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x i32> %op)
  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svcompact,_s32,,)(pg, op);
}

svint64_t test_svcompact_s64(svbool_t pg, svint64_t op)
{
  // CHECK-LABEL: test_svcompact_s64
  // CHECK: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.compact.nxv2i64(<vscale x 2 x i1> %[[PG]], <vscale x 2 x i64> %op)
  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svcompact,_s64,,)(pg, op);
}

svuint32_t test_svcompact_u32(svbool_t pg, svuint32_t op)
{
  // CHECK-LABEL: test_svcompact_u32
  // CHECK: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.compact.nxv4i32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x i32> %op)
  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svcompact,_u32,,)(pg, op);
}

svuint64_t test_svcompact_u64(svbool_t pg, svuint64_t op)
{
  // CHECK-LABEL: test_svcompact_u64
  // CHECK: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.compact.nxv2i64(<vscale x 2 x i1> %[[PG]], <vscale x 2 x i64> %op)
  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svcompact,_u64,,)(pg, op);
}

svfloat32_t test_svcompact_f32(svbool_t pg, svfloat32_t op)
{
  // CHECK-LABEL: test_svcompact_f32
  // CHECK: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x float> @llvm.aarch64.sve.compact.nxv4f32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x float> %op)
  // CHECK: ret <vscale x 4 x float> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svcompact,_f32,,)(pg, op);
}

svfloat64_t test_svcompact_f64(svbool_t pg, svfloat64_t op)
{
  // CHECK-LABEL: test_svcompact_f64
  // CHECK: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x double> @llvm.aarch64.sve.compact.nxv2f64(<vscale x 2 x i1> %[[PG]], <vscale x 2 x double> %op)
  // CHECK: ret <vscale x 2 x double> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svcompact,_f64,,)(pg, op);
}

@@ -0,0 +1,108 @@
// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s
// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s

#include <arm_sve.h>

#ifdef SVE_OVERLOADED_FORMS
// A simple used,unused... macro, long enough to represent any SVE builtin.
#define SVE_ACLE_FUNC(A1,A2_UNUSED,A3,A4_UNUSED) A1##A3
#else
#define SVE_ACLE_FUNC(A1,A2,A3,A4) A1##A2##A3##A4
#endif

int8_t test_svlasta_s8(svbool_t pg, svint8_t op)
{
  // CHECK-LABEL: test_svlasta_s8
  // CHECK: %[[INTRINSIC:.*]] = call i8 @llvm.aarch64.sve.lasta.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %op)
  // CHECK: ret i8 %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svlasta,_s8,,)(pg, op);
}

int16_t test_svlasta_s16(svbool_t pg, svint16_t op)
{
  // CHECK-LABEL: test_svlasta_s16
  // CHECK: %[[PG:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[INTRINSIC:.*]] = call i16 @llvm.aarch64.sve.lasta.nxv8i16(<vscale x 8 x i1> %[[PG]], <vscale x 8 x i16> %op)
  // CHECK: ret i16 %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svlasta,_s16,,)(pg, op);
}

int32_t test_svlasta_s32(svbool_t pg, svint32_t op)
{
  // CHECK-LABEL: test_svlasta_s32
  // CHECK: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[INTRINSIC:.*]] = call i32 @llvm.aarch64.sve.lasta.nxv4i32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x i32> %op)
  // CHECK: ret i32 %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svlasta,_s32,,)(pg, op);
}

int64_t test_svlasta_s64(svbool_t pg, svint64_t op)
{
  // CHECK-LABEL: test_svlasta_s64
  // CHECK: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[INTRINSIC:.*]] = call i64 @llvm.aarch64.sve.lasta.nxv2i64(<vscale x 2 x i1> %[[PG]], <vscale x 2 x i64> %op)
  // CHECK: ret i64 %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svlasta,_s64,,)(pg, op);
}

uint8_t test_svlasta_u8(svbool_t pg, svuint8_t op)
{
  // CHECK-LABEL: test_svlasta_u8
  // CHECK: %[[INTRINSIC:.*]] = call i8 @llvm.aarch64.sve.lasta.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %op)
  // CHECK: ret i8 %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svlasta,_u8,,)(pg, op);
}

uint16_t test_svlasta_u16(svbool_t pg, svuint16_t op)
{
  // CHECK-LABEL: test_svlasta_u16
  // CHECK: %[[PG:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[INTRINSIC:.*]] = call i16 @llvm.aarch64.sve.lasta.nxv8i16(<vscale x 8 x i1> %[[PG]], <vscale x 8 x i16> %op)
  // CHECK: ret i16 %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svlasta,_u16,,)(pg, op);
}

uint32_t test_svlasta_u32(svbool_t pg, svuint32_t op)
{
  // CHECK-LABEL: test_svlasta_u32
  // CHECK: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[INTRINSIC:.*]] = call i32 @llvm.aarch64.sve.lasta.nxv4i32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x i32> %op)
  // CHECK: ret i32 %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svlasta,_u32,,)(pg, op);
}

uint64_t test_svlasta_u64(svbool_t pg, svuint64_t op)
{
  // CHECK-LABEL: test_svlasta_u64
  // CHECK: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[INTRINSIC:.*]] = call i64 @llvm.aarch64.sve.lasta.nxv2i64(<vscale x 2 x i1> %[[PG]], <vscale x 2 x i64> %op)
  // CHECK: ret i64 %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svlasta,_u64,,)(pg, op);
}

float16_t test_svlasta_f16(svbool_t pg, svfloat16_t op)
{
  // CHECK-LABEL: test_svlasta_f16
  // CHECK: %[[PG:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[INTRINSIC:.*]] = call half @llvm.aarch64.sve.lasta.nxv8f16(<vscale x 8 x i1> %[[PG]], <vscale x 8 x half> %op)
  // CHECK: ret half %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svlasta,_f16,,)(pg, op);
}

float32_t test_svlasta_f32(svbool_t pg, svfloat32_t op)
{
  // CHECK-LABEL: test_svlasta_f32
  // CHECK: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[INTRINSIC:.*]] = call float @llvm.aarch64.sve.lasta.nxv4f32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x float> %op)
  // CHECK: ret float %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svlasta,_f32,,)(pg, op);
}

float64_t test_svlasta_f64(svbool_t pg, svfloat64_t op)
{
  // CHECK-LABEL: test_svlasta_f64
  // CHECK: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[INTRINSIC:.*]] = call double @llvm.aarch64.sve.lasta.nxv2f64(<vscale x 2 x i1> %[[PG]], <vscale x 2 x double> %op)
  // CHECK: ret double %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svlasta,_f64,,)(pg, op);
}

@@ -0,0 +1,108 @@
// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s
// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s

#include <arm_sve.h>

#ifdef SVE_OVERLOADED_FORMS
// A simple used,unused... macro, long enough to represent any SVE builtin.
#define SVE_ACLE_FUNC(A1,A2_UNUSED,A3,A4_UNUSED) A1##A3
#else
#define SVE_ACLE_FUNC(A1,A2,A3,A4) A1##A2##A3##A4
#endif

int8_t test_svlastb_s8(svbool_t pg, svint8_t op)
{
  // CHECK-LABEL: test_svlastb_s8
  // CHECK: %[[INTRINSIC:.*]] = call i8 @llvm.aarch64.sve.lastb.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %op)
  // CHECK: ret i8 %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svlastb,_s8,,)(pg, op);
}

int16_t test_svlastb_s16(svbool_t pg, svint16_t op)
{
  // CHECK-LABEL: test_svlastb_s16
  // CHECK: %[[PG:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[INTRINSIC:.*]] = call i16 @llvm.aarch64.sve.lastb.nxv8i16(<vscale x 8 x i1> %[[PG]], <vscale x 8 x i16> %op)
  // CHECK: ret i16 %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svlastb,_s16,,)(pg, op);
}

int32_t test_svlastb_s32(svbool_t pg, svint32_t op)
{
  // CHECK-LABEL: test_svlastb_s32
  // CHECK: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[INTRINSIC:.*]] = call i32 @llvm.aarch64.sve.lastb.nxv4i32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x i32> %op)
  // CHECK: ret i32 %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svlastb,_s32,,)(pg, op);
}

int64_t test_svlastb_s64(svbool_t pg, svint64_t op)
{
  // CHECK-LABEL: test_svlastb_s64
  // CHECK: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[INTRINSIC:.*]] = call i64 @llvm.aarch64.sve.lastb.nxv2i64(<vscale x 2 x i1> %[[PG]], <vscale x 2 x i64> %op)
  // CHECK: ret i64 %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svlastb,_s64,,)(pg, op);
}

uint8_t test_svlastb_u8(svbool_t pg, svuint8_t op)
{
  // CHECK-LABEL: test_svlastb_u8
  // CHECK: %[[INTRINSIC:.*]] = call i8 @llvm.aarch64.sve.lastb.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %op)
  // CHECK: ret i8 %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svlastb,_u8,,)(pg, op);
}

uint16_t test_svlastb_u16(svbool_t pg, svuint16_t op)
{
  // CHECK-LABEL: test_svlastb_u16
  // CHECK: %[[PG:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[INTRINSIC:.*]] = call i16 @llvm.aarch64.sve.lastb.nxv8i16(<vscale x 8 x i1> %[[PG]], <vscale x 8 x i16> %op)
  // CHECK: ret i16 %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svlastb,_u16,,)(pg, op);
}

uint32_t test_svlastb_u32(svbool_t pg, svuint32_t op)
{
  // CHECK-LABEL: test_svlastb_u32
  // CHECK: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[INTRINSIC:.*]] = call i32 @llvm.aarch64.sve.lastb.nxv4i32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x i32> %op)
  // CHECK: ret i32 %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svlastb,_u32,,)(pg, op);
}

uint64_t test_svlastb_u64(svbool_t pg, svuint64_t op)
{
  // CHECK-LABEL: test_svlastb_u64
  // CHECK: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[INTRINSIC:.*]] = call i64 @llvm.aarch64.sve.lastb.nxv2i64(<vscale x 2 x i1> %[[PG]], <vscale x 2 x i64> %op)
  // CHECK: ret i64 %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svlastb,_u64,,)(pg, op);
}

float16_t test_svlastb_f16(svbool_t pg, svfloat16_t op)
{
  // CHECK-LABEL: test_svlastb_f16
  // CHECK: %[[PG:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[INTRINSIC:.*]] = call half @llvm.aarch64.sve.lastb.nxv8f16(<vscale x 8 x i1> %[[PG]], <vscale x 8 x half> %op)
  // CHECK: ret half %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svlastb,_f16,,)(pg, op);
}

float32_t test_svlastb_f32(svbool_t pg, svfloat32_t op)
{
  // CHECK-LABEL: test_svlastb_f32
  // CHECK: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[INTRINSIC:.*]] = call float @llvm.aarch64.sve.lastb.nxv4f32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x float> %op)
  // CHECK: ret float %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svlastb,_f32,,)(pg, op);
}

float64_t test_svlastb_f64(svbool_t pg, svfloat64_t op)
{
  // CHECK-LABEL: test_svlastb_f64
  // CHECK: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[INTRINSIC:.*]] = call double @llvm.aarch64.sve.lastb.nxv2f64(<vscale x 2 x i1> %[[PG]], <vscale x 2 x double> %op)
  // CHECK: ret double %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svlastb,_f64,,)(pg, op);
}

@@ -0,0 +1,137 @@
// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s
// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s

#include <arm_sve.h>

#ifdef SVE_OVERLOADED_FORMS
// A simple used,unused... macro, long enough to represent any SVE builtin.
#define SVE_ACLE_FUNC(A1,A2_UNUSED,A3,A4_UNUSED) A1##A3
#else
#define SVE_ACLE_FUNC(A1,A2,A3,A4) A1##A2##A3##A4
#endif

svint8_t test_svrev_s8(svint8_t op)
{
  // CHECK-LABEL: test_svrev_s8
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.rev.nxv16i8(<vscale x 16 x i8> %op)
  // CHECK: ret <vscale x 16 x i8> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svrev,_s8,,)(op);
}

svint16_t test_svrev_s16(svint16_t op)
{
  // CHECK-LABEL: test_svrev_s16
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.rev.nxv8i16(<vscale x 8 x i16> %op)
  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svrev,_s16,,)(op);
}

svint32_t test_svrev_s32(svint32_t op)
{
  // CHECK-LABEL: test_svrev_s32
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.rev.nxv4i32(<vscale x 4 x i32> %op)
  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svrev,_s32,,)(op);
}

svint64_t test_svrev_s64(svint64_t op)
{
  // CHECK-LABEL: test_svrev_s64
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.rev.nxv2i64(<vscale x 2 x i64> %op)
  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svrev,_s64,,)(op);
}

svuint8_t test_svrev_u8(svuint8_t op)
{
  // CHECK-LABEL: test_svrev_u8
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.rev.nxv16i8(<vscale x 16 x i8> %op)
  // CHECK: ret <vscale x 16 x i8> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svrev,_u8,,)(op);
}

svuint16_t test_svrev_u16(svuint16_t op)
{
  // CHECK-LABEL: test_svrev_u16
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.rev.nxv8i16(<vscale x 8 x i16> %op)
  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svrev,_u16,,)(op);
}

svuint32_t test_svrev_u32(svuint32_t op)
{
  // CHECK-LABEL: test_svrev_u32
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.rev.nxv4i32(<vscale x 4 x i32> %op)
  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svrev,_u32,,)(op);
}

svuint64_t test_svrev_u64(svuint64_t op)
{
  // CHECK-LABEL: test_svrev_u64
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.rev.nxv2i64(<vscale x 2 x i64> %op)
  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svrev,_u64,,)(op);
}

svfloat16_t test_svrev_f16(svfloat16_t op)
{
  // CHECK-LABEL: test_svrev_f16
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x half> @llvm.aarch64.sve.rev.nxv8f16(<vscale x 8 x half> %op)
  // CHECK: ret <vscale x 8 x half> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svrev,_f16,,)(op);
}

svfloat32_t test_svrev_f32(svfloat32_t op)
{
  // CHECK-LABEL: test_svrev_f32
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x float> @llvm.aarch64.sve.rev.nxv4f32(<vscale x 4 x float> %op)
  // CHECK: ret <vscale x 4 x float> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svrev,_f32,,)(op);
}

svfloat64_t test_svrev_f64(svfloat64_t op)
{
  // CHECK-LABEL: test_svrev_f64
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x double> @llvm.aarch64.sve.rev.nxv2f64(<vscale x 2 x double> %op)
  // CHECK: ret <vscale x 2 x double> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svrev,_f64,,)(op);
}

svbool_t test_svrev_b8(svbool_t op)
{
  // CHECK-LABEL: test_svrev_b8
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.rev.nxv16i1(<vscale x 16 x i1> %op)
  // CHECK: ret <vscale x 16 x i1> %[[INTRINSIC]]
  return svrev_b8(op);
}

svbool_t test_svrev_b16(svbool_t op)
{
  // CHECK-LABEL: test_svrev_b16
  // CHECK: %[[OP:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %op)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.rev.nxv8i1(<vscale x 8 x i1> %[[OP]])
  // CHECK: %[[CAST:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv8i1(<vscale x 8 x i1> %[[INTRINSIC]])
  // CHECK: ret <vscale x 16 x i1> %[[CAST]]
  return svrev_b16(op);
}

svbool_t test_svrev_b32(svbool_t op)
{
  // CHECK-LABEL: test_svrev_b32
  // CHECK: %[[OP:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %op)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.rev.nxv4i1(<vscale x 4 x i1> %[[OP]])
  // CHECK: %[[CAST:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv4i1(<vscale x 4 x i1> %[[INTRINSIC]])
  // CHECK: ret <vscale x 16 x i1> %[[CAST]]
  return svrev_b32(op);
}

svbool_t test_svrev_b64(svbool_t op)
{
  // CHECK-LABEL: test_svrev_b64
  // CHECK: %[[OP:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %op)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.rev.nxv2i1(<vscale x 2 x i1> %[[OP]])
  // CHECK: %[[CAST:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv2i1(<vscale x 2 x i1> %[[INTRINSIC]])
  // CHECK: ret <vscale x 16 x i1> %[[CAST]]
  return svrev_b64(op);
}

@@ -0,0 +1,116 @@
|
|||
// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s
|
||||
// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s
|
||||
|
||||
#include <arm_sve.h>
|
||||
|
||||
#ifdef SVE_OVERLOADED_FORMS
|
||||
// A simple used,unused... macro, long enough to represent any SVE builtin.
|
||||
#define SVE_ACLE_FUNC(A1,A2_UNUSED,A3,A4_UNUSED) A1##A3
|
||||
#else
|
||||
#define SVE_ACLE_FUNC(A1,A2,A3,A4) A1##A2##A3##A4
|
||||
#endif
|
||||
|
||||
svint8_t test_svsel_s8(svbool_t pg, svint8_t op1, svint8_t op2)
|
||||
{
|
||||
// CHECK-LABEL: test_svsel_s8
|
||||
// CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.sel.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %op1, <vscale x 16 x i8> %op2)
|
||||
// CHECK: ret <vscale x 16 x i8> %[[INTRINSIC]]
|
||||
return SVE_ACLE_FUNC(svsel,_s8,,)(pg, op1, op2);
|
||||
}
|
||||
|
||||
svint16_t test_svsel_s16(svbool_t pg, svint16_t op1, svint16_t op2)
|
||||
{
|
||||
// CHECK-LABEL: test_svsel_s16
|
||||
// CHECK: %[[PG:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg)
|
||||
// CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.sel.nxv8i16(<vscale x 8 x i1> %[[PG]], <vscale x 8 x i16> %op1, <vscale x 8 x i16> %op2)
|
||||
// CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
|
||||
return SVE_ACLE_FUNC(svsel,_s16,,)(pg, op1, op2);
|
||||
}
|
||||
|
||||
svint32_t test_svsel_s32(svbool_t pg, svint32_t op1, svint32_t op2)
|
||||
{
|
||||
// CHECK-LABEL: test_svsel_s32
|
||||
// CHECK: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
|
||||
// CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.sel.nxv4i32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x i32> %op1, <vscale x 4 x i32> %op2)
|
||||
// CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
|
||||
return SVE_ACLE_FUNC(svsel,_s32,,)(pg, op1, op2);
|
||||
}
|
||||
svint64_t test_svsel_s64(svbool_t pg, svint64_t op1, svint64_t op2)
{
  // CHECK-LABEL: test_svsel_s64
  // CHECK: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.sel.nxv2i64(<vscale x 2 x i1> %[[PG]], <vscale x 2 x i64> %op1, <vscale x 2 x i64> %op2)
  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svsel,_s64,,)(pg, op1, op2);
}

svuint8_t test_svsel_u8(svbool_t pg, svuint8_t op1, svuint8_t op2)
{
  // CHECK-LABEL: test_svsel_u8
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.sel.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %op1, <vscale x 16 x i8> %op2)
  // CHECK: ret <vscale x 16 x i8> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svsel,_u8,,)(pg, op1, op2);
}

svuint16_t test_svsel_u16(svbool_t pg, svuint16_t op1, svuint16_t op2)
{
  // CHECK-LABEL: test_svsel_u16
  // CHECK: %[[PG:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.sel.nxv8i16(<vscale x 8 x i1> %[[PG]], <vscale x 8 x i16> %op1, <vscale x 8 x i16> %op2)
  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svsel,_u16,,)(pg, op1, op2);
}

svuint32_t test_svsel_u32(svbool_t pg, svuint32_t op1, svuint32_t op2)
{
  // CHECK-LABEL: test_svsel_u32
  // CHECK: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.sel.nxv4i32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x i32> %op1, <vscale x 4 x i32> %op2)
  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svsel,_u32,,)(pg, op1, op2);
}

svuint64_t test_svsel_u64(svbool_t pg, svuint64_t op1, svuint64_t op2)
{
  // CHECK-LABEL: test_svsel_u64
  // CHECK: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.sel.nxv2i64(<vscale x 2 x i1> %[[PG]], <vscale x 2 x i64> %op1, <vscale x 2 x i64> %op2)
  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svsel,_u64,,)(pg, op1, op2);
}

svfloat16_t test_svsel_f16(svbool_t pg, svfloat16_t op1, svfloat16_t op2)
{
  // CHECK-LABEL: test_svsel_f16
  // CHECK: %[[PG:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x half> @llvm.aarch64.sve.sel.nxv8f16(<vscale x 8 x i1> %[[PG]], <vscale x 8 x half> %op1, <vscale x 8 x half> %op2)
  // CHECK: ret <vscale x 8 x half> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svsel,_f16,,)(pg, op1, op2);
}

svfloat32_t test_svsel_f32(svbool_t pg, svfloat32_t op1, svfloat32_t op2)
{
  // CHECK-LABEL: test_svsel_f32
  // CHECK: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x float> @llvm.aarch64.sve.sel.nxv4f32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x float> %op1, <vscale x 4 x float> %op2)
  // CHECK: ret <vscale x 4 x float> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svsel,_f32,,)(pg, op1, op2);
}

svfloat64_t test_svsel_f64(svbool_t pg, svfloat64_t op1, svfloat64_t op2)
{
  // CHECK-LABEL: test_svsel_f64
  // CHECK: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x double> @llvm.aarch64.sve.sel.nxv2f64(<vscale x 2 x i1> %[[PG]], <vscale x 2 x double> %op1, <vscale x 2 x double> %op2)
  // CHECK: ret <vscale x 2 x double> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svsel,_f64,,)(pg, op1, op2);
}

svbool_t test_svsel_b(svbool_t pg, svbool_t op1, svbool_t op2)
{
  // CHECK-LABEL: test_svsel_b
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.sel.nxv16i1(<vscale x 16 x i1> %pg, <vscale x 16 x i1> %op1, <vscale x 16 x i1> %op2)
  // CHECK: ret <vscale x 16 x i1> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svsel,_b,,)(pg, op1, op2);
}
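
As a usage illustration beyond the tests above, a minimal sketch of how svsel pairs with a predicate-producing comparison; the helper name select_nonnegative is hypothetical, not part of this patch:

#include <arm_sve.h>

// Per-element select: result[i] = a[i] >= 0 ? a[i] : b[i].
svint32_t select_nonnegative(svint32_t a, svint32_t b)
{
  svbool_t pg = svcmpge_n_s32(svptrue_b32(), a, 0); // lanes where a >= 0
  return svsel_s32(pg, a, b);                       // pg ? a : b
}
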
@ -0,0 +1,108 @@
// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s
// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s

#include <arm_sve.h>

#ifdef SVE_OVERLOADED_FORMS
// A simple used,unused... macro, long enough to represent any SVE builtin.
#define SVE_ACLE_FUNC(A1,A2_UNUSED,A3,A4_UNUSED) A1##A3
#else
#define SVE_ACLE_FUNC(A1,A2,A3,A4) A1##A2##A3##A4
#endif

svint8_t test_svsplice_s8(svbool_t pg, svint8_t op1, svint8_t op2)
{
  // CHECK-LABEL: test_svsplice_s8
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.splice.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %op1, <vscale x 16 x i8> %op2)
  // CHECK: ret <vscale x 16 x i8> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svsplice,_s8,,)(pg, op1, op2);
}

svint16_t test_svsplice_s16(svbool_t pg, svint16_t op1, svint16_t op2)
{
  // CHECK-LABEL: test_svsplice_s16
  // CHECK: %[[PG:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.splice.nxv8i16(<vscale x 8 x i1> %[[PG]], <vscale x 8 x i16> %op1, <vscale x 8 x i16> %op2)
  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svsplice,_s16,,)(pg, op1, op2);
}

svint32_t test_svsplice_s32(svbool_t pg, svint32_t op1, svint32_t op2)
{
  // CHECK-LABEL: test_svsplice_s32
  // CHECK: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.splice.nxv4i32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x i32> %op1, <vscale x 4 x i32> %op2)
  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svsplice,_s32,,)(pg, op1, op2);
}

svint64_t test_svsplice_s64(svbool_t pg, svint64_t op1, svint64_t op2)
{
  // CHECK-LABEL: test_svsplice_s64
  // CHECK: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.splice.nxv2i64(<vscale x 2 x i1> %[[PG]], <vscale x 2 x i64> %op1, <vscale x 2 x i64> %op2)
  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svsplice,_s64,,)(pg, op1, op2);
}

svuint8_t test_svsplice_u8(svbool_t pg, svuint8_t op1, svuint8_t op2)
{
  // CHECK-LABEL: test_svsplice_u8
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.splice.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %op1, <vscale x 16 x i8> %op2)
  // CHECK: ret <vscale x 16 x i8> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svsplice,_u8,,)(pg, op1, op2);
}

svuint16_t test_svsplice_u16(svbool_t pg, svuint16_t op1, svuint16_t op2)
{
  // CHECK-LABEL: test_svsplice_u16
  // CHECK: %[[PG:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.splice.nxv8i16(<vscale x 8 x i1> %[[PG]], <vscale x 8 x i16> %op1, <vscale x 8 x i16> %op2)
  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svsplice,_u16,,)(pg, op1, op2);
}

svuint32_t test_svsplice_u32(svbool_t pg, svuint32_t op1, svuint32_t op2)
{
  // CHECK-LABEL: test_svsplice_u32
  // CHECK: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.splice.nxv4i32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x i32> %op1, <vscale x 4 x i32> %op2)
  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svsplice,_u32,,)(pg, op1, op2);
}

svuint64_t test_svsplice_u64(svbool_t pg, svuint64_t op1, svuint64_t op2)
{
  // CHECK-LABEL: test_svsplice_u64
  // CHECK: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.splice.nxv2i64(<vscale x 2 x i1> %[[PG]], <vscale x 2 x i64> %op1, <vscale x 2 x i64> %op2)
  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svsplice,_u64,,)(pg, op1, op2);
}

svfloat16_t test_svsplice_f16(svbool_t pg, svfloat16_t op1, svfloat16_t op2)
{
  // CHECK-LABEL: test_svsplice_f16
  // CHECK: %[[PG:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x half> @llvm.aarch64.sve.splice.nxv8f16(<vscale x 8 x i1> %[[PG]], <vscale x 8 x half> %op1, <vscale x 8 x half> %op2)
  // CHECK: ret <vscale x 8 x half> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svsplice,_f16,,)(pg, op1, op2);
}

svfloat32_t test_svsplice_f32(svbool_t pg, svfloat32_t op1, svfloat32_t op2)
{
  // CHECK-LABEL: test_svsplice_f32
  // CHECK: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x float> @llvm.aarch64.sve.splice.nxv4f32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x float> %op1, <vscale x 4 x float> %op2)
  // CHECK: ret <vscale x 4 x float> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svsplice,_f32,,)(pg, op1, op2);
}

svfloat64_t test_svsplice_f64(svbool_t pg, svfloat64_t op1, svfloat64_t op2)
{
  // CHECK-LABEL: test_svsplice_f64
  // CHECK: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x double> @llvm.aarch64.sve.splice.nxv2f64(<vscale x 2 x i1> %[[PG]], <vscale x 2 x double> %op1, <vscale x 2 x double> %op2)
  // CHECK: ret <vscale x 2 x double> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svsplice,_f64,,)(pg, op1, op2);
}
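
A minimal usage sketch for svsplice, not part of the patch (the helper name splice_first_n is hypothetical): the predicate picks which leading slice of op1 survives before op2 is appended.

#include <arm_sve.h>

// Concatenate the first n lanes of op1 with the leading lanes of op2.
svint32_t splice_first_n(svint32_t op1, svint32_t op2, int32_t n)
{
  svbool_t pg = svwhilelt_b32_s32(0, n); // predicate covering the first n lanes
  return svsplice_s32(pg, op1, op2);     // active part of op1, then op2
}
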
@ -0,0 +1,99 @@
// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s
// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s

#include <arm_sve.h>

#ifdef SVE_OVERLOADED_FORMS
// A simple used,unused... macro, long enough to represent any SVE builtin.
#define SVE_ACLE_FUNC(A1,A2_UNUSED,A3,A4_UNUSED) A1##A3
#else
#define SVE_ACLE_FUNC(A1,A2,A3,A4) A1##A2##A3##A4
#endif

svint8_t test_svtbl_s8(svint8_t data, svuint8_t indices)
{
  // CHECK-LABEL: test_svtbl_s8
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.tbl.nxv16i8(<vscale x 16 x i8> %data, <vscale x 16 x i8> %indices)
  // CHECK: ret <vscale x 16 x i8> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svtbl,_s8,,)(data, indices);
}

svint16_t test_svtbl_s16(svint16_t data, svuint16_t indices)
{
  // CHECK-LABEL: test_svtbl_s16
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.tbl.nxv8i16(<vscale x 8 x i16> %data, <vscale x 8 x i16> %indices)
  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svtbl,_s16,,)(data, indices);
}

svint32_t test_svtbl_s32(svint32_t data, svuint32_t indices)
{
  // CHECK-LABEL: test_svtbl_s32
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.tbl.nxv4i32(<vscale x 4 x i32> %data, <vscale x 4 x i32> %indices)
  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svtbl,_s32,,)(data, indices);
}

svint64_t test_svtbl_s64(svint64_t data, svuint64_t indices)
{
  // CHECK-LABEL: test_svtbl_s64
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.tbl.nxv2i64(<vscale x 2 x i64> %data, <vscale x 2 x i64> %indices)
  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svtbl,_s64,,)(data, indices);
}

svuint8_t test_svtbl_u8(svuint8_t data, svuint8_t indices)
{
  // CHECK-LABEL: test_svtbl_u8
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.tbl.nxv16i8(<vscale x 16 x i8> %data, <vscale x 16 x i8> %indices)
  // CHECK: ret <vscale x 16 x i8> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svtbl,_u8,,)(data, indices);
}

svuint16_t test_svtbl_u16(svuint16_t data, svuint16_t indices)
{
  // CHECK-LABEL: test_svtbl_u16
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.tbl.nxv8i16(<vscale x 8 x i16> %data, <vscale x 8 x i16> %indices)
  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svtbl,_u16,,)(data, indices);
}

svuint32_t test_svtbl_u32(svuint32_t data, svuint32_t indices)
{
  // CHECK-LABEL: test_svtbl_u32
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.tbl.nxv4i32(<vscale x 4 x i32> %data, <vscale x 4 x i32> %indices)
  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svtbl,_u32,,)(data, indices);
}

svuint64_t test_svtbl_u64(svuint64_t data, svuint64_t indices)
{
  // CHECK-LABEL: test_svtbl_u64
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.tbl.nxv2i64(<vscale x 2 x i64> %data, <vscale x 2 x i64> %indices)
  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svtbl,_u64,,)(data, indices);
}

svfloat16_t test_svtbl_f16(svfloat16_t data, svuint16_t indices)
{
  // CHECK-LABEL: test_svtbl_f16
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x half> @llvm.aarch64.sve.tbl.nxv8f16(<vscale x 8 x half> %data, <vscale x 8 x i16> %indices)
  // CHECK: ret <vscale x 8 x half> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svtbl,_f16,,)(data, indices);
}

svfloat32_t test_svtbl_f32(svfloat32_t data, svuint32_t indices)
{
  // CHECK-LABEL: test_svtbl_f32
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x float> @llvm.aarch64.sve.tbl.nxv4f32(<vscale x 4 x float> %data, <vscale x 4 x i32> %indices)
  // CHECK: ret <vscale x 4 x float> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svtbl,_f32,,)(data, indices);
}

svfloat64_t test_svtbl_f64(svfloat64_t data, svuint64_t indices)
{
  // CHECK-LABEL: test_svtbl_f64
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x double> @llvm.aarch64.sve.tbl.nxv2f64(<vscale x 2 x double> %data, <vscale x 2 x i64> %indices)
  // CHECK: ret <vscale x 2 x double> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svtbl,_f64,,)(data, indices);
}
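
For context, a hedged sketch of svtbl as a general lane shuffle; the helper tbl_identity and its identity index pattern are illustrative assumptions, and real code would compute a meaningful permutation instead:

#include <arm_sve.h>

// result[i] = data[indices[i]]; out-of-range indices produce zero.
svuint8_t tbl_identity(svuint8_t data)
{
  svuint8_t indices = svindex_u8(0, 1); // {0, 1, 2, ...}: identity permutation
  return svtbl_u8(data, indices);
}
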
@ -0,0 +1,140 @@
// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s
// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s

#include <arm_sve.h>

#ifdef SVE_OVERLOADED_FORMS
// A simple used,unused... macro, long enough to represent any SVE builtin.
#define SVE_ACLE_FUNC(A1,A2_UNUSED,A3,A4_UNUSED) A1##A3
#else
#define SVE_ACLE_FUNC(A1,A2,A3,A4) A1##A2##A3##A4
#endif

svint8_t test_svtrn1_s8(svint8_t op1, svint8_t op2)
{
  // CHECK-LABEL: test_svtrn1_s8
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.trn1.nxv16i8(<vscale x 16 x i8> %op1, <vscale x 16 x i8> %op2)
  // CHECK: ret <vscale x 16 x i8> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svtrn1,_s8,,)(op1, op2);
}

svint16_t test_svtrn1_s16(svint16_t op1, svint16_t op2)
{
  // CHECK-LABEL: test_svtrn1_s16
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.trn1.nxv8i16(<vscale x 8 x i16> %op1, <vscale x 8 x i16> %op2)
  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svtrn1,_s16,,)(op1, op2);
}

svint32_t test_svtrn1_s32(svint32_t op1, svint32_t op2)
{
  // CHECK-LABEL: test_svtrn1_s32
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.trn1.nxv4i32(<vscale x 4 x i32> %op1, <vscale x 4 x i32> %op2)
  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svtrn1,_s32,,)(op1, op2);
}

svint64_t test_svtrn1_s64(svint64_t op1, svint64_t op2)
{
  // CHECK-LABEL: test_svtrn1_s64
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.trn1.nxv2i64(<vscale x 2 x i64> %op1, <vscale x 2 x i64> %op2)
  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svtrn1,_s64,,)(op1, op2);
}

svuint8_t test_svtrn1_u8(svuint8_t op1, svuint8_t op2)
{
  // CHECK-LABEL: test_svtrn1_u8
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.trn1.nxv16i8(<vscale x 16 x i8> %op1, <vscale x 16 x i8> %op2)
  // CHECK: ret <vscale x 16 x i8> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svtrn1,_u8,,)(op1, op2);
}

svuint16_t test_svtrn1_u16(svuint16_t op1, svuint16_t op2)
{
  // CHECK-LABEL: test_svtrn1_u16
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.trn1.nxv8i16(<vscale x 8 x i16> %op1, <vscale x 8 x i16> %op2)
  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svtrn1,_u16,,)(op1, op2);
}

svuint32_t test_svtrn1_u32(svuint32_t op1, svuint32_t op2)
{
  // CHECK-LABEL: test_svtrn1_u32
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.trn1.nxv4i32(<vscale x 4 x i32> %op1, <vscale x 4 x i32> %op2)
  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svtrn1,_u32,,)(op1, op2);
}

svuint64_t test_svtrn1_u64(svuint64_t op1, svuint64_t op2)
{
  // CHECK-LABEL: test_svtrn1_u64
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.trn1.nxv2i64(<vscale x 2 x i64> %op1, <vscale x 2 x i64> %op2)
  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svtrn1,_u64,,)(op1, op2);
}

svfloat16_t test_svtrn1_f16(svfloat16_t op1, svfloat16_t op2)
{
  // CHECK-LABEL: test_svtrn1_f16
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x half> @llvm.aarch64.sve.trn1.nxv8f16(<vscale x 8 x half> %op1, <vscale x 8 x half> %op2)
  // CHECK: ret <vscale x 8 x half> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svtrn1,_f16,,)(op1, op2);
}

svfloat32_t test_svtrn1_f32(svfloat32_t op1, svfloat32_t op2)
{
  // CHECK-LABEL: test_svtrn1_f32
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x float> @llvm.aarch64.sve.trn1.nxv4f32(<vscale x 4 x float> %op1, <vscale x 4 x float> %op2)
  // CHECK: ret <vscale x 4 x float> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svtrn1,_f32,,)(op1, op2);
}

svfloat64_t test_svtrn1_f64(svfloat64_t op1, svfloat64_t op2)
{
  // CHECK-LABEL: test_svtrn1_f64
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x double> @llvm.aarch64.sve.trn1.nxv2f64(<vscale x 2 x double> %op1, <vscale x 2 x double> %op2)
  // CHECK: ret <vscale x 2 x double> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svtrn1,_f64,,)(op1, op2);
}

svbool_t test_svtrn1_b8(svbool_t op1, svbool_t op2)
{
  // CHECK-LABEL: test_svtrn1_b8
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.trn1.nxv16i1(<vscale x 16 x i1> %op1, <vscale x 16 x i1> %op2)
  // CHECK: ret <vscale x 16 x i1> %[[INTRINSIC]]
  return svtrn1_b8(op1, op2);
}

svbool_t test_svtrn1_b16(svbool_t op1, svbool_t op2)
{
  // CHECK-LABEL: test_svtrn1_b16
  // CHECK-DAG: %[[OP1:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %op1)
  // CHECK-DAG: %[[OP2:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %op2)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.trn1.nxv8i1(<vscale x 8 x i1> %[[OP1]], <vscale x 8 x i1> %[[OP2]])
  // CHECK: %[[CAST:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv8i1(<vscale x 8 x i1> %[[INTRINSIC]])
  // CHECK: ret <vscale x 16 x i1> %[[CAST]]
  return svtrn1_b16(op1, op2);
}

svbool_t test_svtrn1_b32(svbool_t op1, svbool_t op2)
{
  // CHECK-LABEL: test_svtrn1_b32
  // CHECK-DAG: %[[OP1:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %op1)
  // CHECK-DAG: %[[OP2:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %op2)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.trn1.nxv4i1(<vscale x 4 x i1> %[[OP1]], <vscale x 4 x i1> %[[OP2]])
  // CHECK: %[[CAST:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv4i1(<vscale x 4 x i1> %[[INTRINSIC]])
  // CHECK: ret <vscale x 16 x i1> %[[CAST]]
  return svtrn1_b32(op1, op2);
}

svbool_t test_svtrn1_b64(svbool_t op1, svbool_t op2)
{
  // CHECK-LABEL: test_svtrn1_b64
  // CHECK-DAG: %[[OP1:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %op1)
  // CHECK-DAG: %[[OP2:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %op2)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.trn1.nxv2i1(<vscale x 2 x i1> %[[OP1]], <vscale x 2 x i1> %[[OP2]])
  // CHECK: %[[CAST:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv2i1(<vscale x 2 x i1> %[[INTRINSIC]])
  // CHECK: ret <vscale x 16 x i1> %[[CAST]]
  return svtrn1_b64(op1, op2);
}
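
A small illustrative use of svtrn1 (the helper name trn_even is hypothetical): it interleaves the even-numbered lanes of its two operands.

#include <arm_sve.h>

// result = { op1[0], op2[0], op1[2], op2[2], ... }
svint32_t trn_even(svint32_t op1, svint32_t op2)
{
  return svtrn1_s32(op1, op2);
}
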
@ -0,0 +1,140 @@
// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s
// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s

#include <arm_sve.h>

#ifdef SVE_OVERLOADED_FORMS
// A simple used,unused... macro, long enough to represent any SVE builtin.
#define SVE_ACLE_FUNC(A1,A2_UNUSED,A3,A4_UNUSED) A1##A3
#else
#define SVE_ACLE_FUNC(A1,A2,A3,A4) A1##A2##A3##A4
#endif

svint8_t test_svtrn2_s8(svint8_t op1, svint8_t op2)
{
  // CHECK-LABEL: test_svtrn2_s8
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.trn2.nxv16i8(<vscale x 16 x i8> %op1, <vscale x 16 x i8> %op2)
  // CHECK: ret <vscale x 16 x i8> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svtrn2,_s8,,)(op1, op2);
}

svint16_t test_svtrn2_s16(svint16_t op1, svint16_t op2)
{
  // CHECK-LABEL: test_svtrn2_s16
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.trn2.nxv8i16(<vscale x 8 x i16> %op1, <vscale x 8 x i16> %op2)
  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svtrn2,_s16,,)(op1, op2);
}

svint32_t test_svtrn2_s32(svint32_t op1, svint32_t op2)
{
  // CHECK-LABEL: test_svtrn2_s32
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.trn2.nxv4i32(<vscale x 4 x i32> %op1, <vscale x 4 x i32> %op2)
  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svtrn2,_s32,,)(op1, op2);
}

svint64_t test_svtrn2_s64(svint64_t op1, svint64_t op2)
{
  // CHECK-LABEL: test_svtrn2_s64
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.trn2.nxv2i64(<vscale x 2 x i64> %op1, <vscale x 2 x i64> %op2)
  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svtrn2,_s64,,)(op1, op2);
}

svuint8_t test_svtrn2_u8(svuint8_t op1, svuint8_t op2)
{
  // CHECK-LABEL: test_svtrn2_u8
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.trn2.nxv16i8(<vscale x 16 x i8> %op1, <vscale x 16 x i8> %op2)
  // CHECK: ret <vscale x 16 x i8> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svtrn2,_u8,,)(op1, op2);
}

svuint16_t test_svtrn2_u16(svuint16_t op1, svuint16_t op2)
{
  // CHECK-LABEL: test_svtrn2_u16
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.trn2.nxv8i16(<vscale x 8 x i16> %op1, <vscale x 8 x i16> %op2)
  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svtrn2,_u16,,)(op1, op2);
}

svuint32_t test_svtrn2_u32(svuint32_t op1, svuint32_t op2)
{
  // CHECK-LABEL: test_svtrn2_u32
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.trn2.nxv4i32(<vscale x 4 x i32> %op1, <vscale x 4 x i32> %op2)
  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svtrn2,_u32,,)(op1, op2);
}

svuint64_t test_svtrn2_u64(svuint64_t op1, svuint64_t op2)
{
  // CHECK-LABEL: test_svtrn2_u64
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.trn2.nxv2i64(<vscale x 2 x i64> %op1, <vscale x 2 x i64> %op2)
  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svtrn2,_u64,,)(op1, op2);
}

svfloat16_t test_svtrn2_f16(svfloat16_t op1, svfloat16_t op2)
{
  // CHECK-LABEL: test_svtrn2_f16
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x half> @llvm.aarch64.sve.trn2.nxv8f16(<vscale x 8 x half> %op1, <vscale x 8 x half> %op2)
  // CHECK: ret <vscale x 8 x half> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svtrn2,_f16,,)(op1, op2);
}

svfloat32_t test_svtrn2_f32(svfloat32_t op1, svfloat32_t op2)
{
  // CHECK-LABEL: test_svtrn2_f32
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x float> @llvm.aarch64.sve.trn2.nxv4f32(<vscale x 4 x float> %op1, <vscale x 4 x float> %op2)
  // CHECK: ret <vscale x 4 x float> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svtrn2,_f32,,)(op1, op2);
}

svfloat64_t test_svtrn2_f64(svfloat64_t op1, svfloat64_t op2)
{
  // CHECK-LABEL: test_svtrn2_f64
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x double> @llvm.aarch64.sve.trn2.nxv2f64(<vscale x 2 x double> %op1, <vscale x 2 x double> %op2)
  // CHECK: ret <vscale x 2 x double> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svtrn2,_f64,,)(op1, op2);
}

svbool_t test_svtrn2_b8(svbool_t op1, svbool_t op2)
{
  // CHECK-LABEL: test_svtrn2_b8
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.trn2.nxv16i1(<vscale x 16 x i1> %op1, <vscale x 16 x i1> %op2)
  // CHECK: ret <vscale x 16 x i1> %[[INTRINSIC]]
  return svtrn2_b8(op1, op2);
}

svbool_t test_svtrn2_b16(svbool_t op1, svbool_t op2)
{
  // CHECK-LABEL: test_svtrn2_b16
  // CHECK-DAG: %[[OP1:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %op1)
  // CHECK-DAG: %[[OP2:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %op2)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.trn2.nxv8i1(<vscale x 8 x i1> %[[OP1]], <vscale x 8 x i1> %[[OP2]])
  // CHECK: %[[CAST:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv8i1(<vscale x 8 x i1> %[[INTRINSIC]])
  // CHECK: ret <vscale x 16 x i1> %[[CAST]]
  return svtrn2_b16(op1, op2);
}

svbool_t test_svtrn2_b32(svbool_t op1, svbool_t op2)
{
  // CHECK-LABEL: test_svtrn2_b32
  // CHECK-DAG: %[[OP1:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %op1)
  // CHECK-DAG: %[[OP2:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %op2)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.trn2.nxv4i1(<vscale x 4 x i1> %[[OP1]], <vscale x 4 x i1> %[[OP2]])
  // CHECK: %[[CAST:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv4i1(<vscale x 4 x i1> %[[INTRINSIC]])
  // CHECK: ret <vscale x 16 x i1> %[[CAST]]
  return svtrn2_b32(op1, op2);
}

svbool_t test_svtrn2_b64(svbool_t op1, svbool_t op2)
{
  // CHECK-LABEL: test_svtrn2_b64
  // CHECK-DAG: %[[OP1:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %op1)
  // CHECK-DAG: %[[OP2:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %op2)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.trn2.nxv2i1(<vscale x 2 x i1> %[[OP1]], <vscale x 2 x i1> %[[OP2]])
  // CHECK: %[[CAST:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv2i1(<vscale x 2 x i1> %[[INTRINSIC]])
  // CHECK: ret <vscale x 16 x i1> %[[CAST]]
  return svtrn2_b64(op1, op2);
}
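
The matching sketch for svtrn2 (hypothetical helper trn_odd): the same interleave, but over the odd-numbered lanes.

#include <arm_sve.h>

// result = { op1[1], op2[1], op1[3], op2[3], ... }
svint32_t trn_odd(svint32_t op1, svint32_t op2)
{
  return svtrn2_s32(op1, op2);
}
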
@ -0,0 +1,68 @@
// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s
// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s

#include <arm_sve.h>

#ifdef SVE_OVERLOADED_FORMS
// A simple used,unused... macro, long enough to represent any SVE builtin.
#define SVE_ACLE_FUNC(A1,A2_UNUSED,A3,A4_UNUSED) A1##A3
#else
#define SVE_ACLE_FUNC(A1,A2,A3,A4) A1##A2##A3##A4
#endif

svint16_t test_svunpkhi_s16(svint8_t op)
{
  // CHECK-LABEL: test_svunpkhi_s16
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.sunpkhi.nxv8i16(<vscale x 16 x i8> %op)
  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svunpkhi,_s16,,)(op);
}

svint32_t test_svunpkhi_s32(svint16_t op)
{
  // CHECK-LABEL: test_svunpkhi_s32
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.sunpkhi.nxv4i32(<vscale x 8 x i16> %op)
  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svunpkhi,_s32,,)(op);
}

svint64_t test_svunpkhi_s64(svint32_t op)
{
  // CHECK-LABEL: test_svunpkhi_s64
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.sunpkhi.nxv2i64(<vscale x 4 x i32> %op)
  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svunpkhi,_s64,,)(op);
}

svuint16_t test_svunpkhi_u16(svuint8_t op)
{
  // CHECK-LABEL: test_svunpkhi_u16
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.uunpkhi.nxv8i16(<vscale x 16 x i8> %op)
  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svunpkhi,_u16,,)(op);
}

svuint32_t test_svunpkhi_u32(svuint16_t op)
{
  // CHECK-LABEL: test_svunpkhi_u32
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.uunpkhi.nxv4i32(<vscale x 8 x i16> %op)
  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svunpkhi,_u32,,)(op);
}

svuint64_t test_svunpkhi_u64(svuint32_t op)
{
  // CHECK-LABEL: test_svunpkhi_u64
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.uunpkhi.nxv2i64(<vscale x 4 x i32> %op)
  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svunpkhi,_u64,,)(op);
}

svbool_t test_svunpkhi_b(svbool_t op)
{
  // CHECK-LABEL: test_svunpkhi_b
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.punpkhi.nxv16i1(<vscale x 16 x i1> %op)
  // CHECK: %[[CAST:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv8i1(<vscale x 8 x i1> %[[INTRINSIC]])
  // CHECK: ret <vscale x 16 x i1> %[[CAST]]
  return SVE_ACLE_FUNC(svunpkhi,_b,,)(op);
}
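
A minimal sketch of svunpkhi in application code (the helper name widen_high is hypothetical): it widens the high half of a vector, sign-extending for signed element types.

#include <arm_sve.h>

// Sign-extend the high half of an int16_t vector to int32_t lanes.
svint32_t widen_high(svint16_t op)
{
  return svunpkhi_s32(op);
}
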
@ -0,0 +1,68 @@
// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s
// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s

#include <arm_sve.h>

#ifdef SVE_OVERLOADED_FORMS
// A simple used,unused... macro, long enough to represent any SVE builtin.
#define SVE_ACLE_FUNC(A1,A2_UNUSED,A3,A4_UNUSED) A1##A3
#else
#define SVE_ACLE_FUNC(A1,A2,A3,A4) A1##A2##A3##A4
#endif

svint16_t test_svunpklo_s16(svint8_t op)
{
  // CHECK-LABEL: test_svunpklo_s16
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.sunpklo.nxv8i16(<vscale x 16 x i8> %op)
  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svunpklo,_s16,,)(op);
}

svint32_t test_svunpklo_s32(svint16_t op)
{
  // CHECK-LABEL: test_svunpklo_s32
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.sunpklo.nxv4i32(<vscale x 8 x i16> %op)
  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svunpklo,_s32,,)(op);
}

svint64_t test_svunpklo_s64(svint32_t op)
{
  // CHECK-LABEL: test_svunpklo_s64
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.sunpklo.nxv2i64(<vscale x 4 x i32> %op)
  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svunpklo,_s64,,)(op);
}

svuint16_t test_svunpklo_u16(svuint8_t op)
{
  // CHECK-LABEL: test_svunpklo_u16
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.uunpklo.nxv8i16(<vscale x 16 x i8> %op)
  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svunpklo,_u16,,)(op);
}

svuint32_t test_svunpklo_u32(svuint16_t op)
{
  // CHECK-LABEL: test_svunpklo_u32
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.uunpklo.nxv4i32(<vscale x 8 x i16> %op)
  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svunpklo,_u32,,)(op);
}

svuint64_t test_svunpklo_u64(svuint32_t op)
{
  // CHECK-LABEL: test_svunpklo_u64
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.uunpklo.nxv2i64(<vscale x 4 x i32> %op)
  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svunpklo,_u64,,)(op);
}

svbool_t test_svunpklo_b(svbool_t op)
{
  // CHECK-LABEL: test_svunpklo_b
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.punpklo.nxv16i1(<vscale x 16 x i1> %op)
  // CHECK: %[[CAST:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv8i1(<vscale x 8 x i1> %[[INTRINSIC]])
  // CHECK: ret <vscale x 16 x i1> %[[CAST]]
  return SVE_ACLE_FUNC(svunpklo,_b,,)(op);
}
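
A hedged sketch showing svunpklo and svunpkhi working as a pair (the helper name widen_and_add is hypothetical): together they widen a full vector in two halves.

#include <arm_sve.h>

// Widen an int16_t vector to int32_t and sum the two widened halves.
svint32_t widen_and_add(svint16_t op)
{
  svint32_t lo = svunpklo_s32(op); // low lanes, sign-extended
  svint32_t hi = svunpkhi_s32(op); // high lanes, sign-extended
  return svadd_s32_x(svptrue_b32(), lo, hi);
}
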
@ -0,0 +1,140 @@
// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s
// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s

#include <arm_sve.h>

#ifdef SVE_OVERLOADED_FORMS
// A simple used,unused... macro, long enough to represent any SVE builtin.
#define SVE_ACLE_FUNC(A1,A2_UNUSED,A3,A4_UNUSED) A1##A3
#else
#define SVE_ACLE_FUNC(A1,A2,A3,A4) A1##A2##A3##A4
#endif

svint8_t test_svuzp1_s8(svint8_t op1, svint8_t op2)
{
  // CHECK-LABEL: test_svuzp1_s8
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.uzp1.nxv16i8(<vscale x 16 x i8> %op1, <vscale x 16 x i8> %op2)
  // CHECK: ret <vscale x 16 x i8> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svuzp1,_s8,,)(op1, op2);
}

svint16_t test_svuzp1_s16(svint16_t op1, svint16_t op2)
{
  // CHECK-LABEL: test_svuzp1_s16
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.uzp1.nxv8i16(<vscale x 8 x i16> %op1, <vscale x 8 x i16> %op2)
  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svuzp1,_s16,,)(op1, op2);
}

svint32_t test_svuzp1_s32(svint32_t op1, svint32_t op2)
{
  // CHECK-LABEL: test_svuzp1_s32
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.uzp1.nxv4i32(<vscale x 4 x i32> %op1, <vscale x 4 x i32> %op2)
  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svuzp1,_s32,,)(op1, op2);
}

svint64_t test_svuzp1_s64(svint64_t op1, svint64_t op2)
{
  // CHECK-LABEL: test_svuzp1_s64
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.uzp1.nxv2i64(<vscale x 2 x i64> %op1, <vscale x 2 x i64> %op2)
  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svuzp1,_s64,,)(op1, op2);
}

svuint8_t test_svuzp1_u8(svuint8_t op1, svuint8_t op2)
{
  // CHECK-LABEL: test_svuzp1_u8
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.uzp1.nxv16i8(<vscale x 16 x i8> %op1, <vscale x 16 x i8> %op2)
  // CHECK: ret <vscale x 16 x i8> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svuzp1,_u8,,)(op1, op2);
}

svuint16_t test_svuzp1_u16(svuint16_t op1, svuint16_t op2)
{
  // CHECK-LABEL: test_svuzp1_u16
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.uzp1.nxv8i16(<vscale x 8 x i16> %op1, <vscale x 8 x i16> %op2)
  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svuzp1,_u16,,)(op1, op2);
}

svuint32_t test_svuzp1_u32(svuint32_t op1, svuint32_t op2)
{
  // CHECK-LABEL: test_svuzp1_u32
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.uzp1.nxv4i32(<vscale x 4 x i32> %op1, <vscale x 4 x i32> %op2)
  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svuzp1,_u32,,)(op1, op2);
}

svuint64_t test_svuzp1_u64(svuint64_t op1, svuint64_t op2)
{
  // CHECK-LABEL: test_svuzp1_u64
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.uzp1.nxv2i64(<vscale x 2 x i64> %op1, <vscale x 2 x i64> %op2)
  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svuzp1,_u64,,)(op1, op2);
}

svfloat16_t test_svuzp1_f16(svfloat16_t op1, svfloat16_t op2)
{
  // CHECK-LABEL: test_svuzp1_f16
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x half> @llvm.aarch64.sve.uzp1.nxv8f16(<vscale x 8 x half> %op1, <vscale x 8 x half> %op2)
  // CHECK: ret <vscale x 8 x half> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svuzp1,_f16,,)(op1, op2);
}

svfloat32_t test_svuzp1_f32(svfloat32_t op1, svfloat32_t op2)
{
  // CHECK-LABEL: test_svuzp1_f32
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x float> @llvm.aarch64.sve.uzp1.nxv4f32(<vscale x 4 x float> %op1, <vscale x 4 x float> %op2)
  // CHECK: ret <vscale x 4 x float> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svuzp1,_f32,,)(op1, op2);
}

svfloat64_t test_svuzp1_f64(svfloat64_t op1, svfloat64_t op2)
{
  // CHECK-LABEL: test_svuzp1_f64
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x double> @llvm.aarch64.sve.uzp1.nxv2f64(<vscale x 2 x double> %op1, <vscale x 2 x double> %op2)
  // CHECK: ret <vscale x 2 x double> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svuzp1,_f64,,)(op1, op2);
}

svbool_t test_svuzp1_b8(svbool_t op1, svbool_t op2)
{
  // CHECK-LABEL: test_svuzp1_b8
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.uzp1.nxv16i1(<vscale x 16 x i1> %op1, <vscale x 16 x i1> %op2)
  // CHECK: ret <vscale x 16 x i1> %[[INTRINSIC]]
  return svuzp1_b8(op1, op2);
}

svbool_t test_svuzp1_b16(svbool_t op1, svbool_t op2)
{
  // CHECK-LABEL: test_svuzp1_b16
  // CHECK-DAG: %[[OP1:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %op1)
  // CHECK-DAG: %[[OP2:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %op2)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.uzp1.nxv8i1(<vscale x 8 x i1> %[[OP1]], <vscale x 8 x i1> %[[OP2]])
  // CHECK: %[[CAST:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv8i1(<vscale x 8 x i1> %[[INTRINSIC]])
  // CHECK: ret <vscale x 16 x i1> %[[CAST]]
  return svuzp1_b16(op1, op2);
}

svbool_t test_svuzp1_b32(svbool_t op1, svbool_t op2)
{
  // CHECK-LABEL: test_svuzp1_b32
  // CHECK-DAG: %[[OP1:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %op1)
  // CHECK-DAG: %[[OP2:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %op2)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.uzp1.nxv4i1(<vscale x 4 x i1> %[[OP1]], <vscale x 4 x i1> %[[OP2]])
  // CHECK: %[[CAST:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv4i1(<vscale x 4 x i1> %[[INTRINSIC]])
  // CHECK: ret <vscale x 16 x i1> %[[CAST]]
  return svuzp1_b32(op1, op2);
}

svbool_t test_svuzp1_b64(svbool_t op1, svbool_t op2)
{
  // CHECK-LABEL: test_svuzp1_b64
  // CHECK-DAG: %[[OP1:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %op1)
  // CHECK-DAG: %[[OP2:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %op2)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.uzp1.nxv2i1(<vscale x 2 x i1> %[[OP1]], <vscale x 2 x i1> %[[OP2]])
  // CHECK: %[[CAST:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv2i1(<vscale x 2 x i1> %[[INTRINSIC]])
  // CHECK: ret <vscale x 16 x i1> %[[CAST]]
  return svuzp1_b64(op1, op2);
}
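
A minimal sketch of svuzp1 as a de-interleave step (the helper name uzp_even is hypothetical): it keeps the even-numbered lanes of the concatenation of op1 and op2.

#include <arm_sve.h>

// result = { op1[0], op1[2], ..., op2[0], op2[2], ... },
// e.g. the even-indexed samples of interleaved data.
svfloat32_t uzp_even(svfloat32_t op1, svfloat32_t op2)
{
  return svuzp1_f32(op1, op2);
}
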
@ -0,0 +1,140 @@
// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s
// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s

#include <arm_sve.h>

#ifdef SVE_OVERLOADED_FORMS
// A simple used,unused... macro, long enough to represent any SVE builtin.
#define SVE_ACLE_FUNC(A1,A2_UNUSED,A3,A4_UNUSED) A1##A3
#else
#define SVE_ACLE_FUNC(A1,A2,A3,A4) A1##A2##A3##A4
#endif

svint8_t test_svuzp2_s8(svint8_t op1, svint8_t op2)
{
  // CHECK-LABEL: test_svuzp2_s8
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.uzp2.nxv16i8(<vscale x 16 x i8> %op1, <vscale x 16 x i8> %op2)
  // CHECK: ret <vscale x 16 x i8> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svuzp2,_s8,,)(op1, op2);
}

svint16_t test_svuzp2_s16(svint16_t op1, svint16_t op2)
{
  // CHECK-LABEL: test_svuzp2_s16
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.uzp2.nxv8i16(<vscale x 8 x i16> %op1, <vscale x 8 x i16> %op2)
  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svuzp2,_s16,,)(op1, op2);
}

svint32_t test_svuzp2_s32(svint32_t op1, svint32_t op2)
{
  // CHECK-LABEL: test_svuzp2_s32
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.uzp2.nxv4i32(<vscale x 4 x i32> %op1, <vscale x 4 x i32> %op2)
  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svuzp2,_s32,,)(op1, op2);
}

svint64_t test_svuzp2_s64(svint64_t op1, svint64_t op2)
{
  // CHECK-LABEL: test_svuzp2_s64
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.uzp2.nxv2i64(<vscale x 2 x i64> %op1, <vscale x 2 x i64> %op2)
  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svuzp2,_s64,,)(op1, op2);
}

svuint8_t test_svuzp2_u8(svuint8_t op1, svuint8_t op2)
{
  // CHECK-LABEL: test_svuzp2_u8
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.uzp2.nxv16i8(<vscale x 16 x i8> %op1, <vscale x 16 x i8> %op2)
  // CHECK: ret <vscale x 16 x i8> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svuzp2,_u8,,)(op1, op2);
}

svuint16_t test_svuzp2_u16(svuint16_t op1, svuint16_t op2)
{
  // CHECK-LABEL: test_svuzp2_u16
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.uzp2.nxv8i16(<vscale x 8 x i16> %op1, <vscale x 8 x i16> %op2)
  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svuzp2,_u16,,)(op1, op2);
}

svuint32_t test_svuzp2_u32(svuint32_t op1, svuint32_t op2)
{
  // CHECK-LABEL: test_svuzp2_u32
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.uzp2.nxv4i32(<vscale x 4 x i32> %op1, <vscale x 4 x i32> %op2)
  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svuzp2,_u32,,)(op1, op2);
}

svuint64_t test_svuzp2_u64(svuint64_t op1, svuint64_t op2)
{
  // CHECK-LABEL: test_svuzp2_u64
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.uzp2.nxv2i64(<vscale x 2 x i64> %op1, <vscale x 2 x i64> %op2)
  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svuzp2,_u64,,)(op1, op2);
}

svfloat16_t test_svuzp2_f16(svfloat16_t op1, svfloat16_t op2)
{
  // CHECK-LABEL: test_svuzp2_f16
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x half> @llvm.aarch64.sve.uzp2.nxv8f16(<vscale x 8 x half> %op1, <vscale x 8 x half> %op2)
  // CHECK: ret <vscale x 8 x half> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svuzp2,_f16,,)(op1, op2);
}

svfloat32_t test_svuzp2_f32(svfloat32_t op1, svfloat32_t op2)
{
  // CHECK-LABEL: test_svuzp2_f32
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x float> @llvm.aarch64.sve.uzp2.nxv4f32(<vscale x 4 x float> %op1, <vscale x 4 x float> %op2)
  // CHECK: ret <vscale x 4 x float> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svuzp2,_f32,,)(op1, op2);
}

svfloat64_t test_svuzp2_f64(svfloat64_t op1, svfloat64_t op2)
{
  // CHECK-LABEL: test_svuzp2_f64
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x double> @llvm.aarch64.sve.uzp2.nxv2f64(<vscale x 2 x double> %op1, <vscale x 2 x double> %op2)
  // CHECK: ret <vscale x 2 x double> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svuzp2,_f64,,)(op1, op2);
}

svbool_t test_svuzp2_b8(svbool_t op1, svbool_t op2)
{
  // CHECK-LABEL: test_svuzp2_b8
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.uzp2.nxv16i1(<vscale x 16 x i1> %op1, <vscale x 16 x i1> %op2)
  // CHECK: ret <vscale x 16 x i1> %[[INTRINSIC]]
  return svuzp2_b8(op1, op2);
}

svbool_t test_svuzp2_b16(svbool_t op1, svbool_t op2)
{
  // CHECK-LABEL: test_svuzp2_b16
  // CHECK-DAG: %[[OP1:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %op1)
  // CHECK-DAG: %[[OP2:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %op2)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.uzp2.nxv8i1(<vscale x 8 x i1> %[[OP1]], <vscale x 8 x i1> %[[OP2]])
  // CHECK: %[[CAST:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv8i1(<vscale x 8 x i1> %[[INTRINSIC]])
  // CHECK: ret <vscale x 16 x i1> %[[CAST]]
  return svuzp2_b16(op1, op2);
}

svbool_t test_svuzp2_b32(svbool_t op1, svbool_t op2)
{
  // CHECK-LABEL: test_svuzp2_b32
  // CHECK-DAG: %[[OP1:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %op1)
  // CHECK-DAG: %[[OP2:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %op2)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.uzp2.nxv4i1(<vscale x 4 x i1> %[[OP1]], <vscale x 4 x i1> %[[OP2]])
  // CHECK: %[[CAST:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv4i1(<vscale x 4 x i1> %[[INTRINSIC]])
  // CHECK: ret <vscale x 16 x i1> %[[CAST]]
  return svuzp2_b32(op1, op2);
}

svbool_t test_svuzp2_b64(svbool_t op1, svbool_t op2)
{
  // CHECK-LABEL: test_svuzp2_b64
  // CHECK-DAG: %[[OP1:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %op1)
  // CHECK-DAG: %[[OP2:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %op2)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.uzp2.nxv2i1(<vscale x 2 x i1> %[[OP1]], <vscale x 2 x i1> %[[OP2]])
  // CHECK: %[[CAST:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv2i1(<vscale x 2 x i1> %[[INTRINSIC]])
  // CHECK: ret <vscale x 16 x i1> %[[CAST]]
  return svuzp2_b64(op1, op2);
}
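
The complementary sketch for svuzp2 (hypothetical helper uzp_odd): the same de-interleave, but keeping the odd-numbered lanes.

#include <arm_sve.h>

// result = { op1[1], op1[3], ..., op2[1], op2[3], ... }
svfloat32_t uzp_odd(svfloat32_t op1, svfloat32_t op2)
{
  return svuzp2_f32(op1, op2);
}
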
@ -0,0 +1,140 @@
// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s
// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s

#include <arm_sve.h>

#ifdef SVE_OVERLOADED_FORMS
// A simple used,unused... macro, long enough to represent any SVE builtin.
#define SVE_ACLE_FUNC(A1,A2_UNUSED,A3,A4_UNUSED) A1##A3
#else
#define SVE_ACLE_FUNC(A1,A2,A3,A4) A1##A2##A3##A4
#endif

svint8_t test_svzip1_s8(svint8_t op1, svint8_t op2)
{
  // CHECK-LABEL: test_svzip1_s8
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.zip1.nxv16i8(<vscale x 16 x i8> %op1, <vscale x 16 x i8> %op2)
  // CHECK: ret <vscale x 16 x i8> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svzip1,_s8,,)(op1, op2);
}

svint16_t test_svzip1_s16(svint16_t op1, svint16_t op2)
{
  // CHECK-LABEL: test_svzip1_s16
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.zip1.nxv8i16(<vscale x 8 x i16> %op1, <vscale x 8 x i16> %op2)
  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svzip1,_s16,,)(op1, op2);
}

svint32_t test_svzip1_s32(svint32_t op1, svint32_t op2)
{
  // CHECK-LABEL: test_svzip1_s32
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.zip1.nxv4i32(<vscale x 4 x i32> %op1, <vscale x 4 x i32> %op2)
  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svzip1,_s32,,)(op1, op2);
}

svint64_t test_svzip1_s64(svint64_t op1, svint64_t op2)
{
  // CHECK-LABEL: test_svzip1_s64
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.zip1.nxv2i64(<vscale x 2 x i64> %op1, <vscale x 2 x i64> %op2)
  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svzip1,_s64,,)(op1, op2);
}

svuint8_t test_svzip1_u8(svuint8_t op1, svuint8_t op2)
{
  // CHECK-LABEL: test_svzip1_u8
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.zip1.nxv16i8(<vscale x 16 x i8> %op1, <vscale x 16 x i8> %op2)
  // CHECK: ret <vscale x 16 x i8> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svzip1,_u8,,)(op1, op2);
}

svuint16_t test_svzip1_u16(svuint16_t op1, svuint16_t op2)
{
  // CHECK-LABEL: test_svzip1_u16
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.zip1.nxv8i16(<vscale x 8 x i16> %op1, <vscale x 8 x i16> %op2)
  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svzip1,_u16,,)(op1, op2);
}

svuint32_t test_svzip1_u32(svuint32_t op1, svuint32_t op2)
{
  // CHECK-LABEL: test_svzip1_u32
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.zip1.nxv4i32(<vscale x 4 x i32> %op1, <vscale x 4 x i32> %op2)
  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svzip1,_u32,,)(op1, op2);
}

svuint64_t test_svzip1_u64(svuint64_t op1, svuint64_t op2)
{
  // CHECK-LABEL: test_svzip1_u64
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.zip1.nxv2i64(<vscale x 2 x i64> %op1, <vscale x 2 x i64> %op2)
  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svzip1,_u64,,)(op1, op2);
}

svfloat16_t test_svzip1_f16(svfloat16_t op1, svfloat16_t op2)
{
  // CHECK-LABEL: test_svzip1_f16
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x half> @llvm.aarch64.sve.zip1.nxv8f16(<vscale x 8 x half> %op1, <vscale x 8 x half> %op2)
  // CHECK: ret <vscale x 8 x half> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svzip1,_f16,,)(op1, op2);
}

svfloat32_t test_svzip1_f32(svfloat32_t op1, svfloat32_t op2)
{
  // CHECK-LABEL: test_svzip1_f32
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x float> @llvm.aarch64.sve.zip1.nxv4f32(<vscale x 4 x float> %op1, <vscale x 4 x float> %op2)
  // CHECK: ret <vscale x 4 x float> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svzip1,_f32,,)(op1, op2);
}

svfloat64_t test_svzip1_f64(svfloat64_t op1, svfloat64_t op2)
{
  // CHECK-LABEL: test_svzip1_f64
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x double> @llvm.aarch64.sve.zip1.nxv2f64(<vscale x 2 x double> %op1, <vscale x 2 x double> %op2)
  // CHECK: ret <vscale x 2 x double> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svzip1,_f64,,)(op1, op2);
}

svbool_t test_svzip1_b8(svbool_t op1, svbool_t op2)
{
  // CHECK-LABEL: test_svzip1_b8
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.zip1.nxv16i1(<vscale x 16 x i1> %op1, <vscale x 16 x i1> %op2)
  // CHECK: ret <vscale x 16 x i1> %[[INTRINSIC]]
  return svzip1_b8(op1, op2);
}

svbool_t test_svzip1_b16(svbool_t op1, svbool_t op2)
{
  // CHECK-LABEL: test_svzip1_b16
  // CHECK-DAG: %[[OP1:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %op1)
  // CHECK-DAG: %[[OP2:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %op2)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.zip1.nxv8i1(<vscale x 8 x i1> %[[OP1]], <vscale x 8 x i1> %[[OP2]])
  // CHECK: %[[CAST:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv8i1(<vscale x 8 x i1> %[[INTRINSIC]])
  // CHECK: ret <vscale x 16 x i1> %[[CAST]]
  return svzip1_b16(op1, op2);
}

svbool_t test_svzip1_b32(svbool_t op1, svbool_t op2)
{
  // CHECK-LABEL: test_svzip1_b32
  // CHECK-DAG: %[[OP1:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %op1)
  // CHECK-DAG: %[[OP2:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %op2)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.zip1.nxv4i1(<vscale x 4 x i1> %[[OP1]], <vscale x 4 x i1> %[[OP2]])
  // CHECK: %[[CAST:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv4i1(<vscale x 4 x i1> %[[INTRINSIC]])
  // CHECK: ret <vscale x 16 x i1> %[[CAST]]
  return svzip1_b32(op1, op2);
}

svbool_t test_svzip1_b64(svbool_t op1, svbool_t op2)
{
  // CHECK-LABEL: test_svzip1_b64
  // CHECK-DAG: %[[OP1:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %op1)
  // CHECK-DAG: %[[OP2:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %op2)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.zip1.nxv2i1(<vscale x 2 x i1> %[[OP1]], <vscale x 2 x i1> %[[OP2]])
  // CHECK: %[[CAST:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv2i1(<vscale x 2 x i1> %[[INTRINSIC]])
  // CHECK: ret <vscale x 16 x i1> %[[CAST]]
  return svzip1_b64(op1, op2);
}
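
A minimal sketch of svzip1 as an interleave step (the helper name zip_low is hypothetical): it zips the low halves of its operands, with svzip2 handling the high halves.

#include <arm_sve.h>

// result = { re[0], im[0], re[1], im[1], ... } from the low halves,
// e.g. packing separate real/imaginary vectors into interleaved form.
svfloat32_t zip_low(svfloat32_t re, svfloat32_t im)
{
  return svzip1_f32(re, im);
}
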

@ -0,0 +1,140 @@
// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s
// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s

#include <arm_sve.h>
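
// Note: svzip2 interleaves elements drawn from the high halves of its two
// inputs, complementing svzip1, which interleaves the low halves; each test
// below checks that the builtin lowers to the corresponding
// llvm.aarch64.sve.zip2.* intrinsic.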

#ifdef SVE_OVERLOADED_FORMS
// A simple used,unused... macro, long enough to represent any SVE builtin.
#define SVE_ACLE_FUNC(A1,A2_UNUSED,A3,A4_UNUSED) A1##A3
#else
#define SVE_ACLE_FUNC(A1,A2,A3,A4) A1##A2##A3##A4
#endif
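
// For instance, SVE_ACLE_FUNC(svzip2,_s8,,) expands to the overloaded name
// svzip2 when SVE_OVERLOADED_FORMS is defined and to the full name
// svzip2_s8 otherwise, so a single test body exercises both spellings.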

svint8_t test_svzip2_s8(svint8_t op1, svint8_t op2)
{
  // CHECK-LABEL: test_svzip2_s8
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.zip2.nxv16i8(<vscale x 16 x i8> %op1, <vscale x 16 x i8> %op2)
  // CHECK: ret <vscale x 16 x i8> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svzip2,_s8,,)(op1, op2);
}

svint16_t test_svzip2_s16(svint16_t op1, svint16_t op2)
{
  // CHECK-LABEL: test_svzip2_s16
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.zip2.nxv8i16(<vscale x 8 x i16> %op1, <vscale x 8 x i16> %op2)
  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svzip2,_s16,,)(op1, op2);
}

svint32_t test_svzip2_s32(svint32_t op1, svint32_t op2)
{
  // CHECK-LABEL: test_svzip2_s32
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.zip2.nxv4i32(<vscale x 4 x i32> %op1, <vscale x 4 x i32> %op2)
  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svzip2,_s32,,)(op1, op2);
}

svint64_t test_svzip2_s64(svint64_t op1, svint64_t op2)
{
  // CHECK-LABEL: test_svzip2_s64
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.zip2.nxv2i64(<vscale x 2 x i64> %op1, <vscale x 2 x i64> %op2)
  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svzip2,_s64,,)(op1, op2);
}

svuint8_t test_svzip2_u8(svuint8_t op1, svuint8_t op2)
{
  // CHECK-LABEL: test_svzip2_u8
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.zip2.nxv16i8(<vscale x 16 x i8> %op1, <vscale x 16 x i8> %op2)
  // CHECK: ret <vscale x 16 x i8> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svzip2,_u8,,)(op1, op2);
}

svuint16_t test_svzip2_u16(svuint16_t op1, svuint16_t op2)
{
  // CHECK-LABEL: test_svzip2_u16
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.zip2.nxv8i16(<vscale x 8 x i16> %op1, <vscale x 8 x i16> %op2)
  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svzip2,_u16,,)(op1, op2);
}

svuint32_t test_svzip2_u32(svuint32_t op1, svuint32_t op2)
{
  // CHECK-LABEL: test_svzip2_u32
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.zip2.nxv4i32(<vscale x 4 x i32> %op1, <vscale x 4 x i32> %op2)
  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svzip2,_u32,,)(op1, op2);
}

svuint64_t test_svzip2_u64(svuint64_t op1, svuint64_t op2)
{
  // CHECK-LABEL: test_svzip2_u64
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.zip2.nxv2i64(<vscale x 2 x i64> %op1, <vscale x 2 x i64> %op2)
  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svzip2,_u64,,)(op1, op2);
}

svfloat16_t test_svzip2_f16(svfloat16_t op1, svfloat16_t op2)
{
  // CHECK-LABEL: test_svzip2_f16
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x half> @llvm.aarch64.sve.zip2.nxv8f16(<vscale x 8 x half> %op1, <vscale x 8 x half> %op2)
  // CHECK: ret <vscale x 8 x half> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svzip2,_f16,,)(op1, op2);
}

svfloat32_t test_svzip2_f32(svfloat32_t op1, svfloat32_t op2)
{
  // CHECK-LABEL: test_svzip2_f32
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x float> @llvm.aarch64.sve.zip2.nxv4f32(<vscale x 4 x float> %op1, <vscale x 4 x float> %op2)
  // CHECK: ret <vscale x 4 x float> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svzip2,_f32,,)(op1, op2);
}

svfloat64_t test_svzip2_f64(svfloat64_t op1, svfloat64_t op2)
{
  // CHECK-LABEL: test_svzip2_f64
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x double> @llvm.aarch64.sve.zip2.nxv2f64(<vscale x 2 x double> %op1, <vscale x 2 x double> %op2)
  // CHECK: ret <vscale x 2 x double> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svzip2,_f64,,)(op1, op2);
}

svbool_t test_svzip2_b8(svbool_t op1, svbool_t op2)
{
  // CHECK-LABEL: test_svzip2_b8
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.zip2.nxv16i1(<vscale x 16 x i1> %op1, <vscale x 16 x i1> %op2)
  // CHECK: ret <vscale x 16 x i1> %[[INTRINSIC]]
  return svzip2_b8(op1, op2);
}

svbool_t test_svzip2_b16(svbool_t op1, svbool_t op2)
{
  // CHECK-LABEL: test_svzip2_b16
  // CHECK-DAG: %[[OP1:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %op1)
  // CHECK-DAG: %[[OP2:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %op2)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.zip2.nxv8i1(<vscale x 8 x i1> %[[OP1]], <vscale x 8 x i1> %[[OP2]])
  // CHECK: %[[CAST:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv8i1(<vscale x 8 x i1> %[[INTRINSIC]])
  // CHECK: ret <vscale x 16 x i1> %[[CAST]]
  return svzip2_b16(op1, op2);
}

svbool_t test_svzip2_b32(svbool_t op1, svbool_t op2)
{
  // CHECK-LABEL: test_svzip2_b32
  // CHECK-DAG: %[[OP1:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %op1)
  // CHECK-DAG: %[[OP2:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %op2)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.zip2.nxv4i1(<vscale x 4 x i1> %[[OP1]], <vscale x 4 x i1> %[[OP2]])
  // CHECK: %[[CAST:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv4i1(<vscale x 4 x i1> %[[INTRINSIC]])
  // CHECK: ret <vscale x 16 x i1> %[[CAST]]
  return svzip2_b32(op1, op2);
}

svbool_t test_svzip2_b64(svbool_t op1, svbool_t op2)
{
  // CHECK-LABEL: test_svzip2_b64
  // CHECK-DAG: %[[OP1:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %op1)
  // CHECK-DAG: %[[OP2:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %op2)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.zip2.nxv2i1(<vscale x 2 x i1> %[[OP1]], <vscale x 2 x i1> %[[OP2]])
  // CHECK: %[[CAST:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv2i1(<vscale x 2 x i1> %[[INTRINSIC]])
  // CHECK: ret <vscale x 16 x i1> %[[CAST]]
  return svzip2_b64(op1, op2);
}