[IR] Move vector.insert/vector.extract out of experimental namespace

These intrinsics are now fundamental for SVE code generation and have been present for a year and a half, so move them out of the experimental namespace.

Differential Revision: https://reviews.llvm.org/D127976
commit a83aa33d1b (parent 0b998053db)
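For downstream code that still spells the old names, the change is a pure rename: the intrinsic IDs drop the experimental_ prefix and the mangled IR names change from llvm.experimental.vector.extract.* / llvm.experimental.vector.insert.* to llvm.vector.extract.* / llvm.vector.insert.*. A minimal sketch of what the updated call-building side looks like (the helper name emitLMULTrunc and its parameters are illustrative assumptions, not part of this patch):

    // Sketch only: building a call to the renamed intrinsic through IRBuilder.
    #include "llvm/IR/IRBuilder.h"
    #include "llvm/IR/Intrinsics.h"
    #include "llvm/IR/Module.h"

    using namespace llvm;

    static Value *emitLMULTrunc(IRBuilder<> &Builder, Module &M, Value *Src,
                                Type *ResTy) {
      // Before this patch this was Intrinsic::experimental_vector_extract.
      Function *Decl = Intrinsic::getDeclaration(
          &M, Intrinsic::vector_extract, {ResTy, Src->getType()});
      // Extracts the low part of Src; prints as @llvm.vector.extract.<res>.<src>.
      return Builder.CreateCall(Decl, {Src, Builder.getInt64(0)});
    }

Bitcode that still uses the experimental names is expected to keep working through the usual auto-upgrade path, so only code that names the ID or the mangled string directly needs an edit like the one above.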
@@ -2196,7 +2196,7 @@ let HasMasked = false, HasVL = false, IRName = "" in {
 // C/C++ Operand: VecTy, IR Operand: VecTy, Index
 let Name = "vlmul_trunc_v", OverloadedName = "vlmul_trunc", MaskedPolicy = NonePolicy,
     ManualCodegen = [{ {
-      ID = Intrinsic::experimental_vector_extract;
+      ID = Intrinsic::vector_extract;
       IntrinsicTypes = {ResultType, Ops[0]->getType()};
       Ops.push_back(ConstantInt::get(Int64Ty, 0));
       return Builder.CreateCall(CGM.getIntrinsic(ID, IntrinsicTypes), Ops, "");
@@ -2214,7 +2214,7 @@ let HasMasked = false, HasVL = false, IRName = "" in {
 // C/C++ Operand: SubVecTy, IR Operand: VecTy, SubVecTy, Index
 let Name = "vlmul_ext_v", OverloadedName = "vlmul_ext", MaskedPolicy = NonePolicy,
     ManualCodegen = [{
-      ID = Intrinsic::experimental_vector_insert;
+      ID = Intrinsic::vector_insert;
       IntrinsicTypes = {ResultType, Ops[0]->getType()};
       Ops.push_back(llvm::UndefValue::get(ResultType));
       std::swap(Ops[0], Ops[1]);
@@ -2233,7 +2233,7 @@ let HasMasked = false, HasVL = false, IRName = "" in {
 let Name = "vget_v", MaskedPolicy = NonePolicy,
     ManualCodegen = [{
       {
-        ID = Intrinsic::experimental_vector_extract;
+        ID = Intrinsic::vector_extract;
         auto *VecTy = cast<ScalableVectorType>(ResultType);
         auto *OpVecTy = cast<ScalableVectorType>(Ops[0]->getType());
         // Mask to only valid indices.
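The hunk stops right after the "Mask to only valid indices" comment, so the actual index handling is not shown here. Purely as an illustration of the kind of math involved (clamp the field index, then scale it to the element offset that llvm.vector.extract takes), here is a sketch; every name in it is an assumption, not a quote of the real block:

    // Illustrative sketch of the index handling hinted at by the comment above.
    // (Uses the same LLVM headers as the sketch near the top of this page.)
    static Value *emitVGetIndex(IRBuilder<> &Builder, Value *Index,
                                ScalableVectorType *VecTy,     // result type
                                ScalableVectorType *OpVecTy) { // source type
      unsigned NumFields =
          OpVecTy->getMinNumElements() / VecTy->getMinNumElements();
      Value *Idx = Builder.CreateZExtOrTrunc(Index, Builder.getInt64Ty());
      // Mask to only valid indices (NumFields is a power of two for LMUL groups).
      Idx = Builder.CreateAnd(Idx, Builder.getInt64(NumFields - 1));
      // Scale from "which subvector" to "starting element" for vector.extract.
      return Builder.CreateMul(Idx, Builder.getInt64(VecTy->getMinNumElements()));
    }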
@@ -2256,7 +2256,7 @@ let HasMasked = false, HasVL = false, IRName = "" in {
 let Name = "vset_v", Log2LMUL = [0, 1, 2], MaskedPolicy = NonePolicy,
     ManualCodegen = [{
       {
-        ID = Intrinsic::experimental_vector_insert;
+        ID = Intrinsic::vector_insert;
         IntrinsicTypes = {ResultType, Ops[2]->getType()};
         auto *ResVecTy = cast<ScalableVectorType>(ResultType);
         auto *VecTy = cast<ScalableVectorType>(Ops[2]->getType());
@@ -1285,8 +1285,8 @@ static llvm::Value *CreateCoercedLoad(Address Src, llvm::Type *Ty,
   }
 
   // If coercing a fixed vector to a scalable vector for ABI compatibility, and
-  // the types match, use the llvm.experimental.vector.insert intrinsic to
-  // perform the conversion.
+  // the types match, use the llvm.vector.insert intrinsic to perform the
+  // conversion.
   if (auto *ScalableDst = dyn_cast<llvm::ScalableVectorType>(Ty)) {
     if (auto *FixedSrc = dyn_cast<llvm::FixedVectorType>(SrcTy)) {
       // If we are casting a fixed i8 vector to a scalable 16 x i1 predicate
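The comment keeps describing the same ABI trick; only the intrinsic name changed. As a rough sketch of the coercion it talks about (the helper and its parameter names are illustrative, not the real CreateCoercedLoad code): the fixed-length value is inserted at element 0 of an undef scalable vector of the ABI type, which IRBuilder turns into a llvm.vector.insert call.

    // Sketch of the fixed-to-scalable coercion described in the comment above.
    // (Same LLVM headers as the sketch near the top of this page.)
    static Value *coerceFixedToScalable(IRBuilder<> &Builder, Value *FixedVal,
                                        ScalableVectorType *ABITy) {
      Value *Undef = UndefValue::get(ABITy);
      // Emits @llvm.vector.insert.<abi>.<fixed>(undef, FixedVal, i64 0).
      return Builder.CreateInsertVector(ABITy, Undef, FixedVal,
                                        Builder.getInt64(0));
    }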
@@ -2930,8 +2930,7 @@ void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
       // VLST arguments are coerced to VLATs at the function boundary for
       // ABI consistency. If this is a VLST that was coerced to
       // a VLAT at the function boundary and the types match up, use
-      // llvm.experimental.vector.extract to convert back to the original
-      // VLST.
+      // llvm.vector.extract to convert back to the original VLST.
       if (auto *VecTyTo = dyn_cast<llvm::FixedVectorType>(ConvertType(Ty))) {
         llvm::Value *Coerced = Fn->getArg(FirstIRArg);
         if (auto *VecTyFrom =
@@ -2084,8 +2084,8 @@ Value *ScalarExprEmitter::VisitCastExpr(CastExpr *CE) {
     }
 
     // If Src is a fixed vector and Dst is a scalable vector, and both have the
-    // same element type, use the llvm.experimental.vector.insert intrinsic to
-    // perform the bitcast.
+    // same element type, use the llvm.vector.insert intrinsic to perform the
+    // bitcast.
     if (const auto *FixedSrc = dyn_cast<llvm::FixedVectorType>(SrcTy)) {
      if (const auto *ScalableDst = dyn_cast<llvm::ScalableVectorType>(DstTy)) {
        // If we are casting a fixed i8 vector to a scalable 16 x i1 predicate
@@ -2112,8 +2112,8 @@ Value *ScalarExprEmitter::VisitCastExpr(CastExpr *CE) {
     }
 
     // If Src is a scalable vector and Dst is a fixed vector, and both have the
-    // same element type, use the llvm.experimental.vector.extract intrinsic to
-    // perform the bitcast.
+    // same element type, use the llvm.vector.extract intrinsic to perform the
+    // bitcast.
     if (const auto *ScalableSrc = dyn_cast<llvm::ScalableVectorType>(SrcTy)) {
      if (const auto *FixedDst = dyn_cast<llvm::FixedVectorType>(DstTy)) {
        // If we are casting a scalable 16 x i1 predicate vector to a fixed i8
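Both this hunk and the EmitFunctionProlog hunk above go the other way: the fixed-length value is read back out of the scalable one at offset 0. A minimal sketch with illustrative names only:

    // Sketch of the scalable-to-fixed direction used by the two hunks above.
    // (Same LLVM headers as the sketch near the top of this page.)
    static Value *coerceScalableToFixed(IRBuilder<> &Builder, Value *ScalableVal,
                                        FixedVectorType *FixedTy) {
      // Emits @llvm.vector.extract.<fixed>.<scalable>(ScalableVal, i64 0).
      return Builder.CreateExtractVector(FixedTy, ScalableVal,
                                         Builder.getInt64(0));
    }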
@@ -7,7 +7,7 @@
 
 // CHECK-RV64-LABEL: @test_vget_v_i8m2_i8m1(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.experimental.vector.extract.nxv8i8.nxv16i8(<vscale x 16 x i8> [[SRC:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.vector.extract.nxv8i8.nxv16i8(<vscale x 16 x i8> [[SRC:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
 //
 vint8m1_t test_vget_v_i8m2_i8m1(vint8m2_t src, size_t index) {
@@ -16,7 +16,7 @@ vint8m1_t test_vget_v_i8m2_i8m1(vint8m2_t src, size_t index) {
 
 // CHECK-RV64-LABEL: @test_vget_v_i8m4_i8m1(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.experimental.vector.extract.nxv8i8.nxv32i8(<vscale x 32 x i8> [[SRC:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.vector.extract.nxv8i8.nxv32i8(<vscale x 32 x i8> [[SRC:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
 //
 vint8m1_t test_vget_v_i8m4_i8m1(vint8m4_t src, size_t index) {
@@ -25,7 +25,7 @@ vint8m1_t test_vget_v_i8m4_i8m1(vint8m4_t src, size_t index) {
 
 // CHECK-RV64-LABEL: @test_vget_v_i8m8_i8m1(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.experimental.vector.extract.nxv8i8.nxv64i8(<vscale x 64 x i8> [[SRC:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.vector.extract.nxv8i8.nxv64i8(<vscale x 64 x i8> [[SRC:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
 //
 vint8m1_t test_vget_v_i8m8_i8m1(vint8m8_t src, size_t index) {
@@ -34,7 +34,7 @@ vint8m1_t test_vget_v_i8m8_i8m1(vint8m8_t src, size_t index) {
 
 // CHECK-RV64-LABEL: @test_vget_v_i8m4_i8m2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.experimental.vector.extract.nxv16i8.nxv32i8(<vscale x 32 x i8> [[SRC:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.vector.extract.nxv16i8.nxv32i8(<vscale x 32 x i8> [[SRC:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
 //
 vint8m2_t test_vget_v_i8m4_i8m2(vint8m4_t src, size_t index) {
@@ -43,7 +43,7 @@ vint8m2_t test_vget_v_i8m4_i8m2(vint8m4_t src, size_t index) {
 
 // CHECK-RV64-LABEL: @test_vget_v_i8m8_i8m2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.experimental.vector.extract.nxv16i8.nxv64i8(<vscale x 64 x i8> [[SRC:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.vector.extract.nxv16i8.nxv64i8(<vscale x 64 x i8> [[SRC:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
 //
 vint8m2_t test_vget_v_i8m8_i8m2(vint8m8_t src, size_t index) {
@@ -52,7 +52,7 @@ vint8m2_t test_vget_v_i8m8_i8m2(vint8m8_t src, size_t index) {
 
 // CHECK-RV64-LABEL: @test_vget_v_i8m8_i8m4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.experimental.vector.extract.nxv32i8.nxv64i8(<vscale x 64 x i8> [[SRC:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.vector.extract.nxv32i8.nxv64i8(<vscale x 64 x i8> [[SRC:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
 //
 vint8m4_t test_vget_v_i8m8_i8m4(vint8m8_t src, size_t index) {
@@ -61,7 +61,7 @@ vint8m4_t test_vget_v_i8m8_i8m4(vint8m8_t src, size_t index) {
 
 // CHECK-RV64-LABEL: @test_vget_v_u8m2_u8m1(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.experimental.vector.extract.nxv8i8.nxv16i8(<vscale x 16 x i8> [[SRC:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.vector.extract.nxv8i8.nxv16i8(<vscale x 16 x i8> [[SRC:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
 //
 vuint8m1_t test_vget_v_u8m2_u8m1(vuint8m2_t src, size_t index) {
@@ -70,7 +70,7 @@ vuint8m1_t test_vget_v_u8m2_u8m1(vuint8m2_t src, size_t index) {
 
 // CHECK-RV64-LABEL: @test_vget_v_u8m4_u8m1(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.experimental.vector.extract.nxv8i8.nxv32i8(<vscale x 32 x i8> [[SRC:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.vector.extract.nxv8i8.nxv32i8(<vscale x 32 x i8> [[SRC:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
 //
 vuint8m1_t test_vget_v_u8m4_u8m1(vuint8m4_t src, size_t index) {
@@ -79,7 +79,7 @@ vuint8m1_t test_vget_v_u8m4_u8m1(vuint8m4_t src, size_t index) {
 
 // CHECK-RV64-LABEL: @test_vget_v_u8m8_u8m1(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.experimental.vector.extract.nxv8i8.nxv64i8(<vscale x 64 x i8> [[SRC:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.vector.extract.nxv8i8.nxv64i8(<vscale x 64 x i8> [[SRC:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
 //
 vuint8m1_t test_vget_v_u8m8_u8m1(vuint8m8_t src, size_t index) {
@@ -88,7 +88,7 @@ vuint8m1_t test_vget_v_u8m8_u8m1(vuint8m8_t src, size_t index) {
 
 // CHECK-RV64-LABEL: @test_vget_v_u8m4_u8m2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.experimental.vector.extract.nxv16i8.nxv32i8(<vscale x 32 x i8> [[SRC:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.vector.extract.nxv16i8.nxv32i8(<vscale x 32 x i8> [[SRC:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
 //
 vuint8m2_t test_vget_v_u8m4_u8m2(vuint8m4_t src, size_t index) {
@@ -97,7 +97,7 @@ vuint8m2_t test_vget_v_u8m4_u8m2(vuint8m4_t src, size_t index) {
 
 // CHECK-RV64-LABEL: @test_vget_v_u8m8_u8m2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.experimental.vector.extract.nxv16i8.nxv64i8(<vscale x 64 x i8> [[SRC:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.vector.extract.nxv16i8.nxv64i8(<vscale x 64 x i8> [[SRC:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
 //
 vuint8m2_t test_vget_v_u8m8_u8m2(vuint8m8_t src, size_t index) {
@@ -106,7 +106,7 @@ vuint8m2_t test_vget_v_u8m8_u8m2(vuint8m8_t src, size_t index) {
 
 // CHECK-RV64-LABEL: @test_vget_v_u8m8_u8m4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.experimental.vector.extract.nxv32i8.nxv64i8(<vscale x 64 x i8> [[SRC:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.vector.extract.nxv32i8.nxv64i8(<vscale x 64 x i8> [[SRC:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
 //
 vuint8m4_t test_vget_v_u8m8_u8m4(vuint8m8_t src, size_t index) {
@@ -115,7 +115,7 @@ vuint8m4_t test_vget_v_u8m8_u8m4(vuint8m8_t src, size_t index) {
 
 // CHECK-RV64-LABEL: @test_vget_v_i16m2_i16m1(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.experimental.vector.extract.nxv4i16.nxv8i16(<vscale x 8 x i16> [[SRC:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.vector.extract.nxv4i16.nxv8i16(<vscale x 8 x i16> [[SRC:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
 //
 vint16m1_t test_vget_v_i16m2_i16m1(vint16m2_t src, size_t index) {
@@ -124,7 +124,7 @@ vint16m1_t test_vget_v_i16m2_i16m1(vint16m2_t src, size_t index) {
 
 // CHECK-RV64-LABEL: @test_vget_v_i16m4_i16m1(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.experimental.vector.extract.nxv4i16.nxv16i16(<vscale x 16 x i16> [[SRC:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.vector.extract.nxv4i16.nxv16i16(<vscale x 16 x i16> [[SRC:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
 //
 vint16m1_t test_vget_v_i16m4_i16m1(vint16m4_t src, size_t index) {
@@ -133,7 +133,7 @@ vint16m1_t test_vget_v_i16m4_i16m1(vint16m4_t src, size_t index) {
 
 // CHECK-RV64-LABEL: @test_vget_v_i16m8_i16m1(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.experimental.vector.extract.nxv4i16.nxv32i16(<vscale x 32 x i16> [[SRC:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.vector.extract.nxv4i16.nxv32i16(<vscale x 32 x i16> [[SRC:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
 //
 vint16m1_t test_vget_v_i16m8_i16m1(vint16m8_t src, size_t index) {
@@ -142,7 +142,7 @@ vint16m1_t test_vget_v_i16m8_i16m1(vint16m8_t src, size_t index) {
 
 // CHECK-RV64-LABEL: @test_vget_v_i16m4_i16m2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.experimental.vector.extract.nxv8i16.nxv16i16(<vscale x 16 x i16> [[SRC:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.vector.extract.nxv8i16.nxv16i16(<vscale x 16 x i16> [[SRC:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
 //
 vint16m2_t test_vget_v_i16m4_i16m2(vint16m4_t src, size_t index) {
@@ -151,7 +151,7 @@ vint16m2_t test_vget_v_i16m4_i16m2(vint16m4_t src, size_t index) {
 
 // CHECK-RV64-LABEL: @test_vget_v_i16m8_i16m2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.experimental.vector.extract.nxv8i16.nxv32i16(<vscale x 32 x i16> [[SRC:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.vector.extract.nxv8i16.nxv32i16(<vscale x 32 x i16> [[SRC:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
 //
 vint16m2_t test_vget_v_i16m8_i16m2(vint16m8_t src, size_t index) {
@@ -160,7 +160,7 @@ vint16m2_t test_vget_v_i16m8_i16m2(vint16m8_t src, size_t index) {
 
 // CHECK-RV64-LABEL: @test_vget_v_i16m8_i16m4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.experimental.vector.extract.nxv16i16.nxv32i16(<vscale x 32 x i16> [[SRC:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.vector.extract.nxv16i16.nxv32i16(<vscale x 32 x i16> [[SRC:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
 //
 vint16m4_t test_vget_v_i16m8_i16m4(vint16m8_t src, size_t index) {
@@ -169,7 +169,7 @@ vint16m4_t test_vget_v_i16m8_i16m4(vint16m8_t src, size_t index) {
 
 // CHECK-RV64-LABEL: @test_vget_v_u16m2_u16m1(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.experimental.vector.extract.nxv4i16.nxv8i16(<vscale x 8 x i16> [[SRC:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.vector.extract.nxv4i16.nxv8i16(<vscale x 8 x i16> [[SRC:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
 //
 vuint16m1_t test_vget_v_u16m2_u16m1(vuint16m2_t src, size_t index) {
@@ -178,7 +178,7 @@ vuint16m1_t test_vget_v_u16m2_u16m1(vuint16m2_t src, size_t index) {
 
 // CHECK-RV64-LABEL: @test_vget_v_u16m4_u16m1(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.experimental.vector.extract.nxv4i16.nxv16i16(<vscale x 16 x i16> [[SRC:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.vector.extract.nxv4i16.nxv16i16(<vscale x 16 x i16> [[SRC:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
 //
 vuint16m1_t test_vget_v_u16m4_u16m1(vuint16m4_t src, size_t index) {
@@ -187,7 +187,7 @@ vuint16m1_t test_vget_v_u16m4_u16m1(vuint16m4_t src, size_t index) {
 
 // CHECK-RV64-LABEL: @test_vget_v_u16m8_u16m1(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.experimental.vector.extract.nxv4i16.nxv32i16(<vscale x 32 x i16> [[SRC:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.vector.extract.nxv4i16.nxv32i16(<vscale x 32 x i16> [[SRC:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
 //
 vuint16m1_t test_vget_v_u16m8_u16m1(vuint16m8_t src, size_t index) {
@@ -196,7 +196,7 @@ vuint16m1_t test_vget_v_u16m8_u16m1(vuint16m8_t src, size_t index) {
 
 // CHECK-RV64-LABEL: @test_vget_v_u16m4_u16m2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.experimental.vector.extract.nxv8i16.nxv16i16(<vscale x 16 x i16> [[SRC:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.vector.extract.nxv8i16.nxv16i16(<vscale x 16 x i16> [[SRC:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
 //
 vuint16m2_t test_vget_v_u16m4_u16m2(vuint16m4_t src, size_t index) {
@@ -205,7 +205,7 @@ vuint16m2_t test_vget_v_u16m4_u16m2(vuint16m4_t src, size_t index) {
 
 // CHECK-RV64-LABEL: @test_vget_v_u16m8_u16m2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.experimental.vector.extract.nxv8i16.nxv32i16(<vscale x 32 x i16> [[SRC:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.vector.extract.nxv8i16.nxv32i16(<vscale x 32 x i16> [[SRC:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
 //
 vuint16m2_t test_vget_v_u16m8_u16m2(vuint16m8_t src, size_t index) {
@@ -214,7 +214,7 @@ vuint16m2_t test_vget_v_u16m8_u16m2(vuint16m8_t src, size_t index) {
 
 // CHECK-RV64-LABEL: @test_vget_v_u16m8_u16m4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.experimental.vector.extract.nxv16i16.nxv32i16(<vscale x 32 x i16> [[SRC:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.vector.extract.nxv16i16.nxv32i16(<vscale x 32 x i16> [[SRC:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
 //
 vuint16m4_t test_vget_v_u16m8_u16m4(vuint16m8_t src, size_t index) {
@@ -223,7 +223,7 @@ vuint16m4_t test_vget_v_u16m8_u16m4(vuint16m8_t src, size_t index) {
 
 // CHECK-RV64-LABEL: @test_vget_v_i32m2_i32m1(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.experimental.vector.extract.nxv2i32.nxv4i32(<vscale x 4 x i32> [[SRC:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.vector.extract.nxv2i32.nxv4i32(<vscale x 4 x i32> [[SRC:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
 //
 vint32m1_t test_vget_v_i32m2_i32m1(vint32m2_t src, size_t index) {
@@ -232,7 +232,7 @@ vint32m1_t test_vget_v_i32m2_i32m1(vint32m2_t src, size_t index) {
 
 // CHECK-RV64-LABEL: @test_vget_v_i32m4_i32m1(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.experimental.vector.extract.nxv2i32.nxv8i32(<vscale x 8 x i32> [[SRC:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.vector.extract.nxv2i32.nxv8i32(<vscale x 8 x i32> [[SRC:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
 //
 vint32m1_t test_vget_v_i32m4_i32m1(vint32m4_t src, size_t index) {
@@ -241,7 +241,7 @@ vint32m1_t test_vget_v_i32m4_i32m1(vint32m4_t src, size_t index) {
 
 // CHECK-RV64-LABEL: @test_vget_v_i32m8_i32m1(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.experimental.vector.extract.nxv2i32.nxv16i32(<vscale x 16 x i32> [[SRC:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.vector.extract.nxv2i32.nxv16i32(<vscale x 16 x i32> [[SRC:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
 //
 vint32m1_t test_vget_v_i32m8_i32m1(vint32m8_t src, size_t index) {
@@ -250,7 +250,7 @@ vint32m1_t test_vget_v_i32m8_i32m1(vint32m8_t src, size_t index) {
 
 // CHECK-RV64-LABEL: @test_vget_v_i32m4_i32m2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vector.extract.nxv4i32.nxv8i32(<vscale x 8 x i32> [[SRC:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.vector.extract.nxv4i32.nxv8i32(<vscale x 8 x i32> [[SRC:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
 //
 vint32m2_t test_vget_v_i32m4_i32m2(vint32m4_t src, size_t index) {
@@ -259,7 +259,7 @@ vint32m2_t test_vget_v_i32m4_i32m2(vint32m4_t src, size_t index) {
 
 // CHECK-RV64-LABEL: @test_vget_v_i32m8_i32m2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vector.extract.nxv4i32.nxv16i32(<vscale x 16 x i32> [[SRC:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.vector.extract.nxv4i32.nxv16i32(<vscale x 16 x i32> [[SRC:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
 //
 vint32m2_t test_vget_v_i32m8_i32m2(vint32m8_t src, size_t index) {
@@ -268,7 +268,7 @@ vint32m2_t test_vget_v_i32m8_i32m2(vint32m8_t src, size_t index) {
 
 // CHECK-RV64-LABEL: @test_vget_v_i32m8_i32m4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.experimental.vector.extract.nxv8i32.nxv16i32(<vscale x 16 x i32> [[SRC:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.vector.extract.nxv8i32.nxv16i32(<vscale x 16 x i32> [[SRC:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
 //
 vint32m4_t test_vget_v_i32m8_i32m4(vint32m8_t src, size_t index) {
@@ -277,7 +277,7 @@ vint32m4_t test_vget_v_i32m8_i32m4(vint32m8_t src, size_t index) {
 
 // CHECK-RV64-LABEL: @test_vget_v_u32m2_u32m1(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.experimental.vector.extract.nxv2i32.nxv4i32(<vscale x 4 x i32> [[SRC:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.vector.extract.nxv2i32.nxv4i32(<vscale x 4 x i32> [[SRC:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
 //
 vuint32m1_t test_vget_v_u32m2_u32m1(vuint32m2_t src, size_t index) {
@@ -286,7 +286,7 @@ vuint32m1_t test_vget_v_u32m2_u32m1(vuint32m2_t src, size_t index) {
 
 // CHECK-RV64-LABEL: @test_vget_v_u32m4_u32m1(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.experimental.vector.extract.nxv2i32.nxv8i32(<vscale x 8 x i32> [[SRC:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.vector.extract.nxv2i32.nxv8i32(<vscale x 8 x i32> [[SRC:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
 //
 vuint32m1_t test_vget_v_u32m4_u32m1(vuint32m4_t src, size_t index) {
@@ -295,7 +295,7 @@ vuint32m1_t test_vget_v_u32m4_u32m1(vuint32m4_t src, size_t index) {
 
 // CHECK-RV64-LABEL: @test_vget_v_u32m8_u32m1(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.experimental.vector.extract.nxv2i32.nxv16i32(<vscale x 16 x i32> [[SRC:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.vector.extract.nxv2i32.nxv16i32(<vscale x 16 x i32> [[SRC:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
 //
 vuint32m1_t test_vget_v_u32m8_u32m1(vuint32m8_t src, size_t index) {
@@ -304,7 +304,7 @@ vuint32m1_t test_vget_v_u32m8_u32m1(vuint32m8_t src, size_t index) {
 
 // CHECK-RV64-LABEL: @test_vget_v_u32m4_u32m2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vector.extract.nxv4i32.nxv8i32(<vscale x 8 x i32> [[SRC:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.vector.extract.nxv4i32.nxv8i32(<vscale x 8 x i32> [[SRC:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
 //
 vuint32m2_t test_vget_v_u32m4_u32m2(vuint32m4_t src, size_t index) {
@@ -313,7 +313,7 @@ vuint32m2_t test_vget_v_u32m4_u32m2(vuint32m4_t src, size_t index) {
 
 // CHECK-RV64-LABEL: @test_vget_v_u32m8_u32m2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vector.extract.nxv4i32.nxv16i32(<vscale x 16 x i32> [[SRC:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.vector.extract.nxv4i32.nxv16i32(<vscale x 16 x i32> [[SRC:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
 //
 vuint32m2_t test_vget_v_u32m8_u32m2(vuint32m8_t src, size_t index) {
@@ -322,7 +322,7 @@ vuint32m2_t test_vget_v_u32m8_u32m2(vuint32m8_t src, size_t index) {
 
 // CHECK-RV64-LABEL: @test_vget_v_u32m8_u32m4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.experimental.vector.extract.nxv8i32.nxv16i32(<vscale x 16 x i32> [[SRC:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.vector.extract.nxv8i32.nxv16i32(<vscale x 16 x i32> [[SRC:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
 //
 vuint32m4_t test_vget_v_u32m8_u32m4(vuint32m8_t src, size_t index) {
@@ -331,7 +331,7 @@ vuint32m4_t test_vget_v_u32m8_u32m4(vuint32m8_t src, size_t index) {
 
 // CHECK-RV64-LABEL: @test_vget_v_f32m2_f32m1(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.experimental.vector.extract.nxv2f32.nxv4f32(<vscale x 4 x float> [[SRC:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.vector.extract.nxv2f32.nxv4f32(<vscale x 4 x float> [[SRC:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
 //
 vfloat32m1_t test_vget_v_f32m2_f32m1(vfloat32m2_t src, size_t index) {
@@ -340,7 +340,7 @@ vfloat32m1_t test_vget_v_f32m2_f32m1(vfloat32m2_t src, size_t index) {
 
 // CHECK-RV64-LABEL: @test_vget_v_f32m4_f32m1(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.experimental.vector.extract.nxv2f32.nxv8f32(<vscale x 8 x float> [[SRC:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.vector.extract.nxv2f32.nxv8f32(<vscale x 8 x float> [[SRC:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
 //
 vfloat32m1_t test_vget_v_f32m4_f32m1(vfloat32m4_t src, size_t index) {
@@ -349,7 +349,7 @@ vfloat32m1_t test_vget_v_f32m4_f32m1(vfloat32m4_t src, size_t index) {
 
 // CHECK-RV64-LABEL: @test_vget_v_f32m8_f32m1(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.experimental.vector.extract.nxv2f32.nxv16f32(<vscale x 16 x float> [[SRC:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.vector.extract.nxv2f32.nxv16f32(<vscale x 16 x float> [[SRC:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
 //
 vfloat32m1_t test_vget_v_f32m8_f32m1(vfloat32m8_t src, size_t index) {
@@ -358,7 +358,7 @@ vfloat32m1_t test_vget_v_f32m8_f32m1(vfloat32m8_t src, size_t index) {
 
 // CHECK-RV64-LABEL: @test_vget_v_f32m4_f32m2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.experimental.vector.extract.nxv4f32.nxv8f32(<vscale x 8 x float> [[SRC:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.vector.extract.nxv4f32.nxv8f32(<vscale x 8 x float> [[SRC:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
 //
 vfloat32m2_t test_vget_v_f32m4_f32m2(vfloat32m4_t src, size_t index) {
@@ -367,7 +367,7 @@ vfloat32m2_t test_vget_v_f32m4_f32m2(vfloat32m4_t src, size_t index) {
 
 // CHECK-RV64-LABEL: @test_vget_v_f32m8_f32m2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.experimental.vector.extract.nxv4f32.nxv16f32(<vscale x 16 x float> [[SRC:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.vector.extract.nxv4f32.nxv16f32(<vscale x 16 x float> [[SRC:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
 //
 vfloat32m2_t test_vget_v_f32m8_f32m2(vfloat32m8_t src, size_t index) {
@@ -376,7 +376,7 @@ vfloat32m2_t test_vget_v_f32m8_f32m2(vfloat32m8_t src, size_t index) {
 
 // CHECK-RV64-LABEL: @test_vget_v_f32m8_f32m4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.experimental.vector.extract.nxv8f32.nxv16f32(<vscale x 16 x float> [[SRC:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.vector.extract.nxv8f32.nxv16f32(<vscale x 16 x float> [[SRC:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
 //
 vfloat32m4_t test_vget_v_f32m8_f32m4(vfloat32m8_t src, size_t index) {
@@ -385,7 +385,7 @@ vfloat32m4_t test_vget_v_f32m8_f32m4(vfloat32m8_t src, size_t index) {
 
 // CHECK-RV64-LABEL: @test_vget_v_i64m2_i64m1(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.experimental.vector.extract.nxv1i64.nxv2i64(<vscale x 2 x i64> [[SRC:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.vector.extract.nxv1i64.nxv2i64(<vscale x 2 x i64> [[SRC:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
 //
 vint64m1_t test_vget_v_i64m2_i64m1(vint64m2_t src, size_t index) {
@@ -394,7 +394,7 @@ vint64m1_t test_vget_v_i64m2_i64m1(vint64m2_t src, size_t index) {
 
 // CHECK-RV64-LABEL: @test_vget_v_i64m4_i64m1(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.experimental.vector.extract.nxv1i64.nxv4i64(<vscale x 4 x i64> [[SRC:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.vector.extract.nxv1i64.nxv4i64(<vscale x 4 x i64> [[SRC:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
 //
 vint64m1_t test_vget_v_i64m4_i64m1(vint64m4_t src, size_t index) {
@@ -403,7 +403,7 @@ vint64m1_t test_vget_v_i64m4_i64m1(vint64m4_t src, size_t index) {
 
 // CHECK-RV64-LABEL: @test_vget_v_i64m8_i64m1(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.experimental.vector.extract.nxv1i64.nxv8i64(<vscale x 8 x i64> [[SRC:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.vector.extract.nxv1i64.nxv8i64(<vscale x 8 x i64> [[SRC:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
 //
 vint64m1_t test_vget_v_i64m8_i64m1(vint64m8_t src, size_t index) {
@@ -412,7 +412,7 @@ vint64m1_t test_vget_v_i64m8_i64m1(vint64m8_t src, size_t index) {
 
 // CHECK-RV64-LABEL: @test_vget_v_i64m4_i64m2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.experimental.vector.extract.nxv2i64.nxv4i64(<vscale x 4 x i64> [[SRC:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.vector.extract.nxv2i64.nxv4i64(<vscale x 4 x i64> [[SRC:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
 //
 vint64m2_t test_vget_v_i64m4_i64m2(vint64m4_t src, size_t index) {
@@ -421,7 +421,7 @@ vint64m2_t test_vget_v_i64m4_i64m2(vint64m4_t src, size_t index) {
 
 // CHECK-RV64-LABEL: @test_vget_v_i64m8_i64m2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.experimental.vector.extract.nxv2i64.nxv8i64(<vscale x 8 x i64> [[SRC:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.vector.extract.nxv2i64.nxv8i64(<vscale x 8 x i64> [[SRC:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
 //
 vint64m2_t test_vget_v_i64m8_i64m2(vint64m8_t src, size_t index) {
@@ -430,7 +430,7 @@ vint64m2_t test_vget_v_i64m8_i64m2(vint64m8_t src, size_t index) {
 
 // CHECK-RV64-LABEL: @test_vget_v_i64m8_i64m4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.experimental.vector.extract.nxv4i64.nxv8i64(<vscale x 8 x i64> [[SRC:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.vector.extract.nxv4i64.nxv8i64(<vscale x 8 x i64> [[SRC:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
 //
 vint64m4_t test_vget_v_i64m8_i64m4(vint64m8_t src, size_t index) {
@@ -439,7 +439,7 @@ vint64m4_t test_vget_v_i64m8_i64m4(vint64m8_t src, size_t index) {
 
 // CHECK-RV64-LABEL: @test_vget_v_u64m2_u64m1(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.experimental.vector.extract.nxv1i64.nxv2i64(<vscale x 2 x i64> [[SRC:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.vector.extract.nxv1i64.nxv2i64(<vscale x 2 x i64> [[SRC:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
 //
 vuint64m1_t test_vget_v_u64m2_u64m1(vuint64m2_t src, size_t index) {
@@ -448,7 +448,7 @@ vuint64m1_t test_vget_v_u64m2_u64m1(vuint64m2_t src, size_t index) {
 
 // CHECK-RV64-LABEL: @test_vget_v_u64m4_u64m1(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.experimental.vector.extract.nxv1i64.nxv4i64(<vscale x 4 x i64> [[SRC:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.vector.extract.nxv1i64.nxv4i64(<vscale x 4 x i64> [[SRC:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
 //
 vuint64m1_t test_vget_v_u64m4_u64m1(vuint64m4_t src, size_t index) {
@@ -457,7 +457,7 @@ vuint64m1_t test_vget_v_u64m4_u64m1(vuint64m4_t src, size_t index) {
 
 // CHECK-RV64-LABEL: @test_vget_v_u64m8_u64m1(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.experimental.vector.extract.nxv1i64.nxv8i64(<vscale x 8 x i64> [[SRC:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.vector.extract.nxv1i64.nxv8i64(<vscale x 8 x i64> [[SRC:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
 //
 vuint64m1_t test_vget_v_u64m8_u64m1(vuint64m8_t src, size_t index) {
@@ -466,7 +466,7 @@ vuint64m1_t test_vget_v_u64m8_u64m1(vuint64m8_t src, size_t index) {
 
 // CHECK-RV64-LABEL: @test_vget_v_u64m4_u64m2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.experimental.vector.extract.nxv2i64.nxv4i64(<vscale x 4 x i64> [[SRC:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.vector.extract.nxv2i64.nxv4i64(<vscale x 4 x i64> [[SRC:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
 //
 vuint64m2_t test_vget_v_u64m4_u64m2(vuint64m4_t src, size_t index) {
@@ -475,7 +475,7 @@ vuint64m2_t test_vget_v_u64m4_u64m2(vuint64m4_t src, size_t index) {
 
 // CHECK-RV64-LABEL: @test_vget_v_u64m8_u64m2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.experimental.vector.extract.nxv2i64.nxv8i64(<vscale x 8 x i64> [[SRC:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.vector.extract.nxv2i64.nxv8i64(<vscale x 8 x i64> [[SRC:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
 //
 vuint64m2_t test_vget_v_u64m8_u64m2(vuint64m8_t src, size_t index) {
@@ -484,7 +484,7 @@ vuint64m2_t test_vget_v_u64m8_u64m2(vuint64m8_t src, size_t index) {
 
 // CHECK-RV64-LABEL: @test_vget_v_u64m8_u64m4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.experimental.vector.extract.nxv4i64.nxv8i64(<vscale x 8 x i64> [[SRC:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.vector.extract.nxv4i64.nxv8i64(<vscale x 8 x i64> [[SRC:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
 //
 vuint64m4_t test_vget_v_u64m8_u64m4(vuint64m8_t src, size_t index) {
@@ -493,7 +493,7 @@ vuint64m4_t test_vget_v_u64m8_u64m4(vuint64m8_t src, size_t index) {
 
 // CHECK-RV64-LABEL: @test_vget_v_f64m2_f64m1(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.experimental.vector.extract.nxv1f64.nxv2f64(<vscale x 2 x double> [[SRC:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.vector.extract.nxv1f64.nxv2f64(<vscale x 2 x double> [[SRC:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
 //
 vfloat64m1_t test_vget_v_f64m2_f64m1(vfloat64m2_t src, size_t index) {
@@ -502,7 +502,7 @@ vfloat64m1_t test_vget_v_f64m2_f64m1(vfloat64m2_t src, size_t index) {
 
 // CHECK-RV64-LABEL: @test_vget_v_f64m4_f64m1(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.experimental.vector.extract.nxv1f64.nxv4f64(<vscale x 4 x double> [[SRC:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.vector.extract.nxv1f64.nxv4f64(<vscale x 4 x double> [[SRC:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
 //
 vfloat64m1_t test_vget_v_f64m4_f64m1(vfloat64m4_t src, size_t index) {
@@ -511,7 +511,7 @@ vfloat64m1_t test_vget_v_f64m4_f64m1(vfloat64m4_t src, size_t index) {
 
 // CHECK-RV64-LABEL: @test_vget_v_f64m8_f64m1(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.experimental.vector.extract.nxv1f64.nxv8f64(<vscale x 8 x double> [[SRC:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.vector.extract.nxv1f64.nxv8f64(<vscale x 8 x double> [[SRC:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
 //
 vfloat64m1_t test_vget_v_f64m8_f64m1(vfloat64m8_t src, size_t index) {
@@ -520,7 +520,7 @@ vfloat64m1_t test_vget_v_f64m8_f64m1(vfloat64m8_t src, size_t index) {
 
 // CHECK-RV64-LABEL: @test_vget_v_f64m4_f64m2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.experimental.vector.extract.nxv2f64.nxv4f64(<vscale x 4 x double> [[SRC:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.vector.extract.nxv2f64.nxv4f64(<vscale x 4 x double> [[SRC:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
 //
 vfloat64m2_t test_vget_v_f64m4_f64m2(vfloat64m4_t src, size_t index) {
@@ -529,7 +529,7 @@ vfloat64m2_t test_vget_v_f64m4_f64m2(vfloat64m4_t src, size_t index) {
 
 // CHECK-RV64-LABEL: @test_vget_v_f64m8_f64m2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.experimental.vector.extract.nxv2f64.nxv8f64(<vscale x 8 x double> [[SRC:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.vector.extract.nxv2f64.nxv8f64(<vscale x 8 x double> [[SRC:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
 //
 vfloat64m2_t test_vget_v_f64m8_f64m2(vfloat64m8_t src, size_t index) {
@@ -538,7 +538,7 @@ vfloat64m2_t test_vget_v_f64m8_f64m2(vfloat64m8_t src, size_t index) {
 
 // CHECK-RV64-LABEL: @test_vget_v_f64m8_f64m4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.experimental.vector.extract.nxv4f64.nxv8f64(<vscale x 8 x double> [[SRC:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.vector.extract.nxv4f64.nxv8f64(<vscale x 8 x double> [[SRC:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
 //
 vfloat64m4_t test_vget_v_f64m8_f64m4(vfloat64m8_t src, size_t index) {
(The diff for one additional file in this commit is suppressed by the viewer because it is too large.)
@@ -7,7 +7,7 @@
 
 // CHECK-RV64-LABEL: @test_vset_v_i8m1_i8m2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.experimental.vector.insert.nxv16i8.nxv8i8(<vscale x 16 x i8> [[DEST:%.*]], <vscale x 8 x i8> [[VAL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.vector.insert.nxv16i8.nxv8i8(<vscale x 16 x i8> [[DEST:%.*]], <vscale x 8 x i8> [[VAL:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
 //
 vint8m2_t test_vset_v_i8m1_i8m2(vint8m2_t dest, size_t index, vint8m1_t val) {
@@ -16,7 +16,7 @@ vint8m2_t test_vset_v_i8m1_i8m2(vint8m2_t dest, size_t index, vint8m1_t val) {
 
 // CHECK-RV64-LABEL: @test_vset_v_i8m1_i8m4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.experimental.vector.insert.nxv32i8.nxv8i8(<vscale x 32 x i8> [[DEST:%.*]], <vscale x 8 x i8> [[VAL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.vector.insert.nxv32i8.nxv8i8(<vscale x 32 x i8> [[DEST:%.*]], <vscale x 8 x i8> [[VAL:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
 //
 vint8m4_t test_vset_v_i8m1_i8m4(vint8m4_t dest, size_t index, vint8m1_t val) {
@@ -25,7 +25,7 @@ vint8m4_t test_vset_v_i8m1_i8m4(vint8m4_t dest, size_t index, vint8m1_t val) {
 
 // CHECK-RV64-LABEL: @test_vset_v_i8m2_i8m4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.experimental.vector.insert.nxv32i8.nxv16i8(<vscale x 32 x i8> [[DEST:%.*]], <vscale x 16 x i8> [[VAL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.vector.insert.nxv32i8.nxv16i8(<vscale x 32 x i8> [[DEST:%.*]], <vscale x 16 x i8> [[VAL:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
 //
 vint8m4_t test_vset_v_i8m2_i8m4(vint8m4_t dest, size_t index, vint8m2_t val) {
@@ -34,7 +34,7 @@ vint8m4_t test_vset_v_i8m2_i8m4(vint8m4_t dest, size_t index, vint8m2_t val) {
 
 // CHECK-RV64-LABEL: @test_vset_v_i8m1_i8m8(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.experimental.vector.insert.nxv64i8.nxv8i8(<vscale x 64 x i8> [[DEST:%.*]], <vscale x 8 x i8> [[VAL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.vector.insert.nxv64i8.nxv8i8(<vscale x 64 x i8> [[DEST:%.*]], <vscale x 8 x i8> [[VAL:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
 //
 vint8m8_t test_vset_v_i8m1_i8m8(vint8m8_t dest, size_t index, vint8m1_t val) {
@@ -43,7 +43,7 @@ vint8m8_t test_vset_v_i8m1_i8m8(vint8m8_t dest, size_t index, vint8m1_t val) {
 
 // CHECK-RV64-LABEL: @test_vset_v_i8m2_i8m8(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.experimental.vector.insert.nxv64i8.nxv16i8(<vscale x 64 x i8> [[DEST:%.*]], <vscale x 16 x i8> [[VAL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.vector.insert.nxv64i8.nxv16i8(<vscale x 64 x i8> [[DEST:%.*]], <vscale x 16 x i8> [[VAL:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
 //
 vint8m8_t test_vset_v_i8m2_i8m8(vint8m8_t dest, size_t index, vint8m2_t val) {
@@ -52,7 +52,7 @@ vint8m8_t test_vset_v_i8m2_i8m8(vint8m8_t dest, size_t index, vint8m2_t val) {
 
 // CHECK-RV64-LABEL: @test_vset_v_i8m4_i8m8(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.experimental.vector.insert.nxv64i8.nxv32i8(<vscale x 64 x i8> [[DEST:%.*]], <vscale x 32 x i8> [[VAL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.vector.insert.nxv64i8.nxv32i8(<vscale x 64 x i8> [[DEST:%.*]], <vscale x 32 x i8> [[VAL:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
 //
 vint8m8_t test_vset_v_i8m4_i8m8(vint8m8_t dest, size_t index, vint8m4_t val) {
@@ -61,7 +61,7 @@ vint8m8_t test_vset_v_i8m4_i8m8(vint8m8_t dest, size_t index, vint8m4_t val) {
 
 // CHECK-RV64-LABEL: @test_vset_v_u8m1_u8m2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.experimental.vector.insert.nxv16i8.nxv8i8(<vscale x 16 x i8> [[DEST:%.*]], <vscale x 8 x i8> [[VAL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.vector.insert.nxv16i8.nxv8i8(<vscale x 16 x i8> [[DEST:%.*]], <vscale x 8 x i8> [[VAL:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
 //
 vuint8m2_t test_vset_v_u8m1_u8m2(vuint8m2_t dest, size_t index, vuint8m1_t val) {
@@ -70,7 +70,7 @@ vuint8m2_t test_vset_v_u8m1_u8m2(vuint8m2_t dest, size_t index, vuint8m1_t val)
 
 // CHECK-RV64-LABEL: @test_vset_v_u8m1_u8m4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.experimental.vector.insert.nxv32i8.nxv8i8(<vscale x 32 x i8> [[DEST:%.*]], <vscale x 8 x i8> [[VAL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.vector.insert.nxv32i8.nxv8i8(<vscale x 32 x i8> [[DEST:%.*]], <vscale x 8 x i8> [[VAL:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
 //
 vuint8m4_t test_vset_v_u8m1_u8m4(vuint8m4_t dest, size_t index, vuint8m1_t val) {
@@ -79,7 +79,7 @@ vuint8m4_t test_vset_v_u8m1_u8m4(vuint8m4_t dest, size_t index, vuint8m1_t val)
 
 // CHECK-RV64-LABEL: @test_vset_v_u8m2_u8m4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.experimental.vector.insert.nxv32i8.nxv16i8(<vscale x 32 x i8> [[DEST:%.*]], <vscale x 16 x i8> [[VAL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.vector.insert.nxv32i8.nxv16i8(<vscale x 32 x i8> [[DEST:%.*]], <vscale x 16 x i8> [[VAL:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
 //
 vuint8m4_t test_vset_v_u8m2_u8m4(vuint8m4_t dest, size_t index, vuint8m2_t val) {
@@ -88,7 +88,7 @@ vuint8m4_t test_vset_v_u8m2_u8m4(vuint8m4_t dest, size_t index, vuint8m2_t val)
 
 // CHECK-RV64-LABEL: @test_vset_v_u8m1_u8m8(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.experimental.vector.insert.nxv64i8.nxv8i8(<vscale x 64 x i8> [[DEST:%.*]], <vscale x 8 x i8> [[VAL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.vector.insert.nxv64i8.nxv8i8(<vscale x 64 x i8> [[DEST:%.*]], <vscale x 8 x i8> [[VAL:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
 //
 vuint8m8_t test_vset_v_u8m1_u8m8(vuint8m8_t dest, size_t index, vuint8m1_t val) {
@@ -97,7 +97,7 @@ vuint8m8_t test_vset_v_u8m1_u8m8(vuint8m8_t dest, size_t index, vuint8m1_t val)
 
 // CHECK-RV64-LABEL: @test_vset_v_u8m2_u8m8(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.experimental.vector.insert.nxv64i8.nxv16i8(<vscale x 64 x i8> [[DEST:%.*]], <vscale x 16 x i8> [[VAL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.vector.insert.nxv64i8.nxv16i8(<vscale x 64 x i8> [[DEST:%.*]], <vscale x 16 x i8> [[VAL:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
 //
 vuint8m8_t test_vset_v_u8m2_u8m8(vuint8m8_t dest, size_t index, vuint8m2_t val) {
@@ -106,7 +106,7 @@ vuint8m8_t test_vset_v_u8m2_u8m8(vuint8m8_t dest, size_t index, vuint8m2_t val)
 
 // CHECK-RV64-LABEL: @test_vset_v_u8m4_u8m8(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.experimental.vector.insert.nxv64i8.nxv32i8(<vscale x 64 x i8> [[DEST:%.*]], <vscale x 32 x i8> [[VAL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.vector.insert.nxv64i8.nxv32i8(<vscale x 64 x i8> [[DEST:%.*]], <vscale x 32 x i8> [[VAL:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
 //
 vuint8m8_t test_vset_v_u8m4_u8m8(vuint8m8_t dest, size_t index, vuint8m4_t val) {
@@ -115,7 +115,7 @@ vuint8m8_t test_vset_v_u8m4_u8m8(vuint8m8_t dest, size_t index, vuint8m4_t val)
 
 // CHECK-RV64-LABEL: @test_vset_v_i16m1_i16m2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.experimental.vector.insert.nxv8i16.nxv4i16(<vscale x 8 x i16> [[DEST:%.*]], <vscale x 4 x i16> [[VAL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.vector.insert.nxv8i16.nxv4i16(<vscale x 8 x i16> [[DEST:%.*]], <vscale x 4 x i16> [[VAL:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
 //
 vint16m2_t test_vset_v_i16m1_i16m2(vint16m2_t dest, size_t index, vint16m1_t val) {
@@ -124,7 +124,7 @@ vint16m2_t test_vset_v_i16m1_i16m2(vint16m2_t dest, size_t index, vint16m1_t val
 
 // CHECK-RV64-LABEL: @test_vset_v_i16m1_i16m4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.experimental.vector.insert.nxv16i16.nxv4i16(<vscale x 16 x i16> [[DEST:%.*]], <vscale x 4 x i16> [[VAL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.vector.insert.nxv16i16.nxv4i16(<vscale x 16 x i16> [[DEST:%.*]], <vscale x 4 x i16> [[VAL:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
 //
 vint16m4_t test_vset_v_i16m1_i16m4(vint16m4_t dest, size_t index, vint16m1_t val) {
@@ -133,7 +133,7 @@ vint16m4_t test_vset_v_i16m1_i16m4(vint16m4_t dest, size_t index, vint16m1_t val
 
 // CHECK-RV64-LABEL: @test_vset_v_i16m2_i16m4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.experimental.vector.insert.nxv16i16.nxv8i16(<vscale x 16 x i16> [[DEST:%.*]], <vscale x 8 x i16> [[VAL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.vector.insert.nxv16i16.nxv8i16(<vscale x 16 x i16> [[DEST:%.*]], <vscale x 8 x i16> [[VAL:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
 //
 vint16m4_t test_vset_v_i16m2_i16m4(vint16m4_t dest, size_t index, vint16m2_t val) {
@@ -142,7 +142,7 @@ vint16m4_t test_vset_v_i16m2_i16m4(vint16m4_t dest, size_t index, vint16m2_t val
 
 // CHECK-RV64-LABEL: @test_vset_v_i16m1_i16m8(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.experimental.vector.insert.nxv32i16.nxv4i16(<vscale x 32 x i16> [[DEST:%.*]], <vscale x 4 x i16> [[VAL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.vector.insert.nxv32i16.nxv4i16(<vscale x 32 x i16> [[DEST:%.*]], <vscale x 4 x i16> [[VAL:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
 //
 vint16m8_t test_vset_v_i16m1_i16m8(vint16m8_t dest, size_t index, vint16m1_t val) {
@@ -151,7 +151,7 @@ vint16m8_t test_vset_v_i16m1_i16m8(vint16m8_t dest, size_t index, vint16m1_t val
 
 // CHECK-RV64-LABEL: @test_vset_v_i16m2_i16m8(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.experimental.vector.insert.nxv32i16.nxv8i16(<vscale x 32 x i16> [[DEST:%.*]], <vscale x 8 x i16> [[VAL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.vector.insert.nxv32i16.nxv8i16(<vscale x 32 x i16> [[DEST:%.*]], <vscale x 8 x i16> [[VAL:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
 //
 vint16m8_t test_vset_v_i16m2_i16m8(vint16m8_t dest, size_t index, vint16m2_t val) {
@@ -160,7 +160,7 @@ vint16m8_t test_vset_v_i16m2_i16m8(vint16m8_t dest, size_t index, vint16m2_t val
 
 // CHECK-RV64-LABEL: @test_vset_v_i16m4_i16m8(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.experimental.vector.insert.nxv32i16.nxv16i16(<vscale x 32 x i16> [[DEST:%.*]], <vscale x 16 x i16> [[VAL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.vector.insert.nxv32i16.nxv16i16(<vscale x 32 x i16> [[DEST:%.*]], <vscale x 16 x i16> [[VAL:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
 //
 vint16m8_t test_vset_v_i16m4_i16m8(vint16m8_t dest, size_t index, vint16m4_t val) {
@@ -169,7 +169,7 @@ vint16m8_t test_vset_v_i16m4_i16m8(vint16m8_t dest, size_t index, vint16m4_t val
 
 // CHECK-RV64-LABEL: @test_vset_v_u16m1_u16m2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.experimental.vector.insert.nxv8i16.nxv4i16(<vscale x 8 x i16> [[DEST:%.*]], <vscale x 4 x i16> [[VAL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.vector.insert.nxv8i16.nxv4i16(<vscale x 8 x i16> [[DEST:%.*]], <vscale x 4 x i16> [[VAL:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
 //
 vuint16m2_t test_vset_v_u16m1_u16m2(vuint16m2_t dest, size_t index, vuint16m1_t val) {
@@ -178,7 +178,7 @@ vuint16m2_t test_vset_v_u16m1_u16m2(vuint16m2_t dest, size_t index, vuint16m1_t
 
 // CHECK-RV64-LABEL: @test_vset_v_u16m1_u16m4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.experimental.vector.insert.nxv16i16.nxv4i16(<vscale x 16 x i16> [[DEST:%.*]], <vscale x 4 x i16> [[VAL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.vector.insert.nxv16i16.nxv4i16(<vscale x 16 x i16> [[DEST:%.*]], <vscale x 4 x i16> [[VAL:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
 //
 vuint16m4_t test_vset_v_u16m1_u16m4(vuint16m4_t dest, size_t index, vuint16m1_t val) {
@@ -187,7 +187,7 @@ vuint16m4_t test_vset_v_u16m1_u16m4(vuint16m4_t dest, size_t index, vuint16m1_t
 
 // CHECK-RV64-LABEL: @test_vset_v_u16m2_u16m4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.experimental.vector.insert.nxv16i16.nxv8i16(<vscale x 16 x i16> [[DEST:%.*]], <vscale x 8 x i16> [[VAL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.vector.insert.nxv16i16.nxv8i16(<vscale x 16 x i16> [[DEST:%.*]], <vscale x 8 x i16> [[VAL:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
 //
 vuint16m4_t test_vset_v_u16m2_u16m4(vuint16m4_t dest, size_t index, vuint16m2_t val) {
@@ -196,7 +196,7 @@ vuint16m4_t test_vset_v_u16m2_u16m4(vuint16m4_t dest, size_t index, vuint16m2_t
 
 // CHECK-RV64-LABEL: @test_vset_v_u16m1_u16m8(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.experimental.vector.insert.nxv32i16.nxv4i16(<vscale x 32 x i16> [[DEST:%.*]], <vscale x 4 x i16> [[VAL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.vector.insert.nxv32i16.nxv4i16(<vscale x 32 x i16> [[DEST:%.*]], <vscale x 4 x i16> [[VAL:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
 //
 vuint16m8_t test_vset_v_u16m1_u16m8(vuint16m8_t dest, size_t index, vuint16m1_t val) {
@@ -205,7 +205,7 @@ vuint16m8_t test_vset_v_u16m1_u16m8(vuint16m8_t dest, size_t index, vuint16m1_t
 
 // CHECK-RV64-LABEL: @test_vset_v_u16m2_u16m8(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.experimental.vector.insert.nxv32i16.nxv8i16(<vscale x 32 x i16> [[DEST:%.*]], <vscale x 8 x i16> [[VAL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.vector.insert.nxv32i16.nxv8i16(<vscale x 32 x i16> [[DEST:%.*]], <vscale x 8 x i16> [[VAL:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
 //
 vuint16m8_t test_vset_v_u16m2_u16m8(vuint16m8_t dest, size_t index, vuint16m2_t val) {
@@ -214,7 +214,7 @@ vuint16m8_t test_vset_v_u16m2_u16m8(vuint16m8_t dest, size_t index, vuint16m2_t
 
 // CHECK-RV64-LABEL: @test_vset_v_u16m4_u16m8(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.experimental.vector.insert.nxv32i16.nxv16i16(<vscale x 32 x i16> [[DEST:%.*]], <vscale x 16 x i16> [[VAL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.vector.insert.nxv32i16.nxv16i16(<vscale x 32 x i16> [[DEST:%.*]], <vscale x 16 x i16> [[VAL:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
 //
 vuint16m8_t test_vset_v_u16m4_u16m8(vuint16m8_t dest, size_t index, vuint16m4_t val) {
@ -223,7 +223,7 @@ vuint16m8_t test_vset_v_u16m4_u16m8(vuint16m8_t dest, size_t index, vuint16m4_t
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vset_v_i32m1_i32m2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vector.insert.nxv4i32.nxv2i32(<vscale x 4 x i32> [[DEST:%.*]], <vscale x 2 x i32> [[VAL:%.*]], i64 0)
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.vector.insert.nxv4i32.nxv2i32(<vscale x 4 x i32> [[DEST:%.*]], <vscale x 2 x i32> [[VAL:%.*]], i64 0)
|
||||
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
|
||||
//
|
||||
vint32m2_t test_vset_v_i32m1_i32m2(vint32m2_t dest, size_t index, vint32m1_t val) {
|
||||
|
@ -232,7 +232,7 @@ vint32m2_t test_vset_v_i32m1_i32m2(vint32m2_t dest, size_t index, vint32m1_t val
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vset_v_i32m1_i32m4(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.experimental.vector.insert.nxv8i32.nxv2i32(<vscale x 8 x i32> [[DEST:%.*]], <vscale x 2 x i32> [[VAL:%.*]], i64 0)
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.vector.insert.nxv8i32.nxv2i32(<vscale x 8 x i32> [[DEST:%.*]], <vscale x 2 x i32> [[VAL:%.*]], i64 0)
|
||||
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
|
||||
//
|
||||
vint32m4_t test_vset_v_i32m1_i32m4(vint32m4_t dest, size_t index, vint32m1_t val) {
|
||||
|
@ -241,7 +241,7 @@ vint32m4_t test_vset_v_i32m1_i32m4(vint32m4_t dest, size_t index, vint32m1_t val
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vset_v_i32m2_i32m4(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.experimental.vector.insert.nxv8i32.nxv4i32(<vscale x 8 x i32> [[DEST:%.*]], <vscale x 4 x i32> [[VAL:%.*]], i64 0)
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.vector.insert.nxv8i32.nxv4i32(<vscale x 8 x i32> [[DEST:%.*]], <vscale x 4 x i32> [[VAL:%.*]], i64 0)
|
||||
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
|
||||
//
|
||||
vint32m4_t test_vset_v_i32m2_i32m4(vint32m4_t dest, size_t index, vint32m2_t val) {
|
||||
|
@ -250,7 +250,7 @@ vint32m4_t test_vset_v_i32m2_i32m4(vint32m4_t dest, size_t index, vint32m2_t val
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vset_v_i32m1_i32m8(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.experimental.vector.insert.nxv16i32.nxv2i32(<vscale x 16 x i32> [[DEST:%.*]], <vscale x 2 x i32> [[VAL:%.*]], i64 0)
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.vector.insert.nxv16i32.nxv2i32(<vscale x 16 x i32> [[DEST:%.*]], <vscale x 2 x i32> [[VAL:%.*]], i64 0)
|
||||
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
|
||||
//
|
||||
vint32m8_t test_vset_v_i32m1_i32m8(vint32m8_t dest, size_t index, vint32m1_t val) {
|
||||
|
@ -259,7 +259,7 @@ vint32m8_t test_vset_v_i32m1_i32m8(vint32m8_t dest, size_t index, vint32m1_t val
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vset_v_i32m2_i32m8(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.experimental.vector.insert.nxv16i32.nxv4i32(<vscale x 16 x i32> [[DEST:%.*]], <vscale x 4 x i32> [[VAL:%.*]], i64 0)
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.vector.insert.nxv16i32.nxv4i32(<vscale x 16 x i32> [[DEST:%.*]], <vscale x 4 x i32> [[VAL:%.*]], i64 0)
|
||||
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
|
||||
//
|
||||
vint32m8_t test_vset_v_i32m2_i32m8(vint32m8_t dest, size_t index, vint32m2_t val) {
|
||||
|
@ -268,7 +268,7 @@ vint32m8_t test_vset_v_i32m2_i32m8(vint32m8_t dest, size_t index, vint32m2_t val
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vset_v_i32m4_i32m8(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.experimental.vector.insert.nxv16i32.nxv8i32(<vscale x 16 x i32> [[DEST:%.*]], <vscale x 8 x i32> [[VAL:%.*]], i64 0)
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.vector.insert.nxv16i32.nxv8i32(<vscale x 16 x i32> [[DEST:%.*]], <vscale x 8 x i32> [[VAL:%.*]], i64 0)
|
||||
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
|
||||
//
|
||||
vint32m8_t test_vset_v_i32m4_i32m8(vint32m8_t dest, size_t index, vint32m4_t val) {
|
||||
|
@ -277,7 +277,7 @@ vint32m8_t test_vset_v_i32m4_i32m8(vint32m8_t dest, size_t index, vint32m4_t val
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vset_v_u32m1_u32m2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vector.insert.nxv4i32.nxv2i32(<vscale x 4 x i32> [[DEST:%.*]], <vscale x 2 x i32> [[VAL:%.*]], i64 0)
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.vector.insert.nxv4i32.nxv2i32(<vscale x 4 x i32> [[DEST:%.*]], <vscale x 2 x i32> [[VAL:%.*]], i64 0)
|
||||
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
|
||||
//
|
||||
vuint32m2_t test_vset_v_u32m1_u32m2(vuint32m2_t dest, size_t index, vuint32m1_t val) {
|
||||
|
@ -286,7 +286,7 @@ vuint32m2_t test_vset_v_u32m1_u32m2(vuint32m2_t dest, size_t index, vuint32m1_t
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vset_v_u32m1_u32m4(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.experimental.vector.insert.nxv8i32.nxv2i32(<vscale x 8 x i32> [[DEST:%.*]], <vscale x 2 x i32> [[VAL:%.*]], i64 0)
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.vector.insert.nxv8i32.nxv2i32(<vscale x 8 x i32> [[DEST:%.*]], <vscale x 2 x i32> [[VAL:%.*]], i64 0)
|
||||
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
|
||||
//
|
||||
vuint32m4_t test_vset_v_u32m1_u32m4(vuint32m4_t dest, size_t index, vuint32m1_t val) {
|
||||
|
@ -295,7 +295,7 @@ vuint32m4_t test_vset_v_u32m1_u32m4(vuint32m4_t dest, size_t index, vuint32m1_t
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vset_v_u32m2_u32m4(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.experimental.vector.insert.nxv8i32.nxv4i32(<vscale x 8 x i32> [[DEST:%.*]], <vscale x 4 x i32> [[VAL:%.*]], i64 0)
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.vector.insert.nxv8i32.nxv4i32(<vscale x 8 x i32> [[DEST:%.*]], <vscale x 4 x i32> [[VAL:%.*]], i64 0)
|
||||
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
|
||||
//
|
||||
vuint32m4_t test_vset_v_u32m2_u32m4(vuint32m4_t dest, size_t index, vuint32m2_t val) {
|
||||
|
@ -304,7 +304,7 @@ vuint32m4_t test_vset_v_u32m2_u32m4(vuint32m4_t dest, size_t index, vuint32m2_t
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vset_v_u32m1_u32m8(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.experimental.vector.insert.nxv16i32.nxv2i32(<vscale x 16 x i32> [[DEST:%.*]], <vscale x 2 x i32> [[VAL:%.*]], i64 0)
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.vector.insert.nxv16i32.nxv2i32(<vscale x 16 x i32> [[DEST:%.*]], <vscale x 2 x i32> [[VAL:%.*]], i64 0)
|
||||
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
|
||||
//
|
||||
vuint32m8_t test_vset_v_u32m1_u32m8(vuint32m8_t dest, size_t index, vuint32m1_t val) {
|
||||
|
@ -313,7 +313,7 @@ vuint32m8_t test_vset_v_u32m1_u32m8(vuint32m8_t dest, size_t index, vuint32m1_t
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vset_v_u32m2_u32m8(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.experimental.vector.insert.nxv16i32.nxv4i32(<vscale x 16 x i32> [[DEST:%.*]], <vscale x 4 x i32> [[VAL:%.*]], i64 0)
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.vector.insert.nxv16i32.nxv4i32(<vscale x 16 x i32> [[DEST:%.*]], <vscale x 4 x i32> [[VAL:%.*]], i64 0)
|
||||
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
|
||||
//
|
||||
vuint32m8_t test_vset_v_u32m2_u32m8(vuint32m8_t dest, size_t index, vuint32m2_t val) {
|
||||
|
@ -322,7 +322,7 @@ vuint32m8_t test_vset_v_u32m2_u32m8(vuint32m8_t dest, size_t index, vuint32m2_t
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vset_v_u32m4_u32m8(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.experimental.vector.insert.nxv16i32.nxv8i32(<vscale x 16 x i32> [[DEST:%.*]], <vscale x 8 x i32> [[VAL:%.*]], i64 0)
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.vector.insert.nxv16i32.nxv8i32(<vscale x 16 x i32> [[DEST:%.*]], <vscale x 8 x i32> [[VAL:%.*]], i64 0)
|
||||
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
|
||||
//
|
||||
vuint32m8_t test_vset_v_u32m4_u32m8(vuint32m8_t dest, size_t index, vuint32m4_t val) {
|
||||
|
@ -331,7 +331,7 @@ vuint32m8_t test_vset_v_u32m4_u32m8(vuint32m8_t dest, size_t index, vuint32m4_t
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vset_v_f32m1_f32m2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.experimental.vector.insert.nxv4f32.nxv2f32(<vscale x 4 x float> [[DEST:%.*]], <vscale x 2 x float> [[VAL:%.*]], i64 0)
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.vector.insert.nxv4f32.nxv2f32(<vscale x 4 x float> [[DEST:%.*]], <vscale x 2 x float> [[VAL:%.*]], i64 0)
|
||||
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
|
||||
//
|
||||
vfloat32m2_t test_vset_v_f32m1_f32m2(vfloat32m2_t dest, size_t index, vfloat32m1_t val) {
|
||||
|
@ -340,7 +340,7 @@ vfloat32m2_t test_vset_v_f32m1_f32m2(vfloat32m2_t dest, size_t index, vfloat32m1
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vset_v_f32m1_f32m4(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.experimental.vector.insert.nxv8f32.nxv2f32(<vscale x 8 x float> [[DEST:%.*]], <vscale x 2 x float> [[VAL:%.*]], i64 0)
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.vector.insert.nxv8f32.nxv2f32(<vscale x 8 x float> [[DEST:%.*]], <vscale x 2 x float> [[VAL:%.*]], i64 0)
|
||||
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
|
||||
//
|
||||
vfloat32m4_t test_vset_v_f32m1_f32m4(vfloat32m4_t dest, size_t index, vfloat32m1_t val) {
|
||||
|
@ -349,7 +349,7 @@ vfloat32m4_t test_vset_v_f32m1_f32m4(vfloat32m4_t dest, size_t index, vfloat32m1
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vset_v_f32m2_f32m4(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.experimental.vector.insert.nxv8f32.nxv4f32(<vscale x 8 x float> [[DEST:%.*]], <vscale x 4 x float> [[VAL:%.*]], i64 0)
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.vector.insert.nxv8f32.nxv4f32(<vscale x 8 x float> [[DEST:%.*]], <vscale x 4 x float> [[VAL:%.*]], i64 0)
|
||||
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
|
||||
//
|
||||
vfloat32m4_t test_vset_v_f32m2_f32m4(vfloat32m4_t dest, size_t index, vfloat32m2_t val) {
|
||||
|
@ -358,7 +358,7 @@ vfloat32m4_t test_vset_v_f32m2_f32m4(vfloat32m4_t dest, size_t index, vfloat32m2
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vset_v_f32m1_f32m8(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.experimental.vector.insert.nxv16f32.nxv2f32(<vscale x 16 x float> [[DEST:%.*]], <vscale x 2 x float> [[VAL:%.*]], i64 0)
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.vector.insert.nxv16f32.nxv2f32(<vscale x 16 x float> [[DEST:%.*]], <vscale x 2 x float> [[VAL:%.*]], i64 0)
|
||||
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
|
||||
//
|
||||
vfloat32m8_t test_vset_v_f32m1_f32m8(vfloat32m8_t dest, size_t index, vfloat32m1_t val) {
|
||||
|
@ -367,7 +367,7 @@ vfloat32m8_t test_vset_v_f32m1_f32m8(vfloat32m8_t dest, size_t index, vfloat32m1
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vset_v_f32m2_f32m8(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.experimental.vector.insert.nxv16f32.nxv4f32(<vscale x 16 x float> [[DEST:%.*]], <vscale x 4 x float> [[VAL:%.*]], i64 0)
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.vector.insert.nxv16f32.nxv4f32(<vscale x 16 x float> [[DEST:%.*]], <vscale x 4 x float> [[VAL:%.*]], i64 0)
|
||||
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
|
||||
//
|
||||
vfloat32m8_t test_vset_v_f32m2_f32m8(vfloat32m8_t dest, size_t index, vfloat32m2_t val) {
|
||||
|
@ -376,7 +376,7 @@ vfloat32m8_t test_vset_v_f32m2_f32m8(vfloat32m8_t dest, size_t index, vfloat32m2
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vset_v_f32m4_f32m8(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.experimental.vector.insert.nxv16f32.nxv8f32(<vscale x 16 x float> [[DEST:%.*]], <vscale x 8 x float> [[VAL:%.*]], i64 0)
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.vector.insert.nxv16f32.nxv8f32(<vscale x 16 x float> [[DEST:%.*]], <vscale x 8 x float> [[VAL:%.*]], i64 0)
|
||||
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
|
||||
//
|
||||
vfloat32m8_t test_vset_v_f32m4_f32m8(vfloat32m8_t dest, size_t index, vfloat32m4_t val) {
|
||||
|
@ -385,7 +385,7 @@ vfloat32m8_t test_vset_v_f32m4_f32m8(vfloat32m8_t dest, size_t index, vfloat32m4
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vset_v_i64m1_i64m2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.experimental.vector.insert.nxv2i64.nxv1i64(<vscale x 2 x i64> [[DEST:%.*]], <vscale x 1 x i64> [[VAL:%.*]], i64 0)
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.vector.insert.nxv2i64.nxv1i64(<vscale x 2 x i64> [[DEST:%.*]], <vscale x 1 x i64> [[VAL:%.*]], i64 0)
|
||||
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
|
||||
//
|
||||
vint64m2_t test_vset_v_i64m1_i64m2(vint64m2_t dest, size_t index, vint64m1_t val) {
|
||||
|
@ -394,7 +394,7 @@ vint64m2_t test_vset_v_i64m1_i64m2(vint64m2_t dest, size_t index, vint64m1_t val
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vset_v_i64m1_i64m4(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.experimental.vector.insert.nxv4i64.nxv1i64(<vscale x 4 x i64> [[DEST:%.*]], <vscale x 1 x i64> [[VAL:%.*]], i64 0)
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.vector.insert.nxv4i64.nxv1i64(<vscale x 4 x i64> [[DEST:%.*]], <vscale x 1 x i64> [[VAL:%.*]], i64 0)
|
||||
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
|
||||
//
|
||||
vint64m4_t test_vset_v_i64m1_i64m4(vint64m4_t dest, size_t index, vint64m1_t val) {
|
||||
|
@ -403,7 +403,7 @@ vint64m4_t test_vset_v_i64m1_i64m4(vint64m4_t dest, size_t index, vint64m1_t val
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vset_v_i64m2_i64m4(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.experimental.vector.insert.nxv4i64.nxv2i64(<vscale x 4 x i64> [[DEST:%.*]], <vscale x 2 x i64> [[VAL:%.*]], i64 0)
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.vector.insert.nxv4i64.nxv2i64(<vscale x 4 x i64> [[DEST:%.*]], <vscale x 2 x i64> [[VAL:%.*]], i64 0)
|
||||
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
|
||||
//
|
||||
vint64m4_t test_vset_v_i64m2_i64m4(vint64m4_t dest, size_t index, vint64m2_t val) {
|
||||
|
@ -412,7 +412,7 @@ vint64m4_t test_vset_v_i64m2_i64m4(vint64m4_t dest, size_t index, vint64m2_t val
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vset_v_i64m1_i64m8(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.experimental.vector.insert.nxv8i64.nxv1i64(<vscale x 8 x i64> [[DEST:%.*]], <vscale x 1 x i64> [[VAL:%.*]], i64 0)
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.vector.insert.nxv8i64.nxv1i64(<vscale x 8 x i64> [[DEST:%.*]], <vscale x 1 x i64> [[VAL:%.*]], i64 0)
|
||||
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
|
||||
//
|
||||
vint64m8_t test_vset_v_i64m1_i64m8(vint64m8_t dest, size_t index, vint64m1_t val) {
|
||||
|
@ -421,7 +421,7 @@ vint64m8_t test_vset_v_i64m1_i64m8(vint64m8_t dest, size_t index, vint64m1_t val
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vset_v_i64m2_i64m8(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.experimental.vector.insert.nxv8i64.nxv2i64(<vscale x 8 x i64> [[DEST:%.*]], <vscale x 2 x i64> [[VAL:%.*]], i64 0)
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.vector.insert.nxv8i64.nxv2i64(<vscale x 8 x i64> [[DEST:%.*]], <vscale x 2 x i64> [[VAL:%.*]], i64 0)
|
||||
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
|
||||
//
|
||||
vint64m8_t test_vset_v_i64m2_i64m8(vint64m8_t dest, size_t index, vint64m2_t val) {
|
||||
|
@ -430,7 +430,7 @@ vint64m8_t test_vset_v_i64m2_i64m8(vint64m8_t dest, size_t index, vint64m2_t val
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vset_v_i64m4_i64m8(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.experimental.vector.insert.nxv8i64.nxv4i64(<vscale x 8 x i64> [[DEST:%.*]], <vscale x 4 x i64> [[VAL:%.*]], i64 0)
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.vector.insert.nxv8i64.nxv4i64(<vscale x 8 x i64> [[DEST:%.*]], <vscale x 4 x i64> [[VAL:%.*]], i64 0)
|
||||
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
|
||||
//
|
||||
vint64m8_t test_vset_v_i64m4_i64m8(vint64m8_t dest, size_t index, vint64m4_t val) {
|
||||
|
@ -439,7 +439,7 @@ vint64m8_t test_vset_v_i64m4_i64m8(vint64m8_t dest, size_t index, vint64m4_t val
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vset_v_u64m1_u64m2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.experimental.vector.insert.nxv2i64.nxv1i64(<vscale x 2 x i64> [[DEST:%.*]], <vscale x 1 x i64> [[VAL:%.*]], i64 0)
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.vector.insert.nxv2i64.nxv1i64(<vscale x 2 x i64> [[DEST:%.*]], <vscale x 1 x i64> [[VAL:%.*]], i64 0)
|
||||
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
|
||||
//
|
||||
vuint64m2_t test_vset_v_u64m1_u64m2(vuint64m2_t dest, size_t index, vuint64m1_t val) {
|
||||
|
@ -448,7 +448,7 @@ vuint64m2_t test_vset_v_u64m1_u64m2(vuint64m2_t dest, size_t index, vuint64m1_t
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vset_v_u64m1_u64m4(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.experimental.vector.insert.nxv4i64.nxv1i64(<vscale x 4 x i64> [[DEST:%.*]], <vscale x 1 x i64> [[VAL:%.*]], i64 0)
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.vector.insert.nxv4i64.nxv1i64(<vscale x 4 x i64> [[DEST:%.*]], <vscale x 1 x i64> [[VAL:%.*]], i64 0)
|
||||
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
|
||||
//
|
||||
vuint64m4_t test_vset_v_u64m1_u64m4(vuint64m4_t dest, size_t index, vuint64m1_t val) {
|
||||
|
@ -457,7 +457,7 @@ vuint64m4_t test_vset_v_u64m1_u64m4(vuint64m4_t dest, size_t index, vuint64m1_t
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vset_v_u64m2_u64m4(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.experimental.vector.insert.nxv4i64.nxv2i64(<vscale x 4 x i64> [[DEST:%.*]], <vscale x 2 x i64> [[VAL:%.*]], i64 0)
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.vector.insert.nxv4i64.nxv2i64(<vscale x 4 x i64> [[DEST:%.*]], <vscale x 2 x i64> [[VAL:%.*]], i64 0)
|
||||
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
|
||||
//
|
||||
vuint64m4_t test_vset_v_u64m2_u64m4(vuint64m4_t dest, size_t index, vuint64m2_t val) {
|
||||
|
@ -466,7 +466,7 @@ vuint64m4_t test_vset_v_u64m2_u64m4(vuint64m4_t dest, size_t index, vuint64m2_t
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vset_v_u64m1_u64m8(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.experimental.vector.insert.nxv8i64.nxv1i64(<vscale x 8 x i64> [[DEST:%.*]], <vscale x 1 x i64> [[VAL:%.*]], i64 0)
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.vector.insert.nxv8i64.nxv1i64(<vscale x 8 x i64> [[DEST:%.*]], <vscale x 1 x i64> [[VAL:%.*]], i64 0)
|
||||
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
|
||||
//
|
||||
vuint64m8_t test_vset_v_u64m1_u64m8(vuint64m8_t dest, size_t index, vuint64m1_t val) {
|
||||
|
@ -475,7 +475,7 @@ vuint64m8_t test_vset_v_u64m1_u64m8(vuint64m8_t dest, size_t index, vuint64m1_t
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vset_v_u64m2_u64m8(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.experimental.vector.insert.nxv8i64.nxv2i64(<vscale x 8 x i64> [[DEST:%.*]], <vscale x 2 x i64> [[VAL:%.*]], i64 0)
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.vector.insert.nxv8i64.nxv2i64(<vscale x 8 x i64> [[DEST:%.*]], <vscale x 2 x i64> [[VAL:%.*]], i64 0)
|
||||
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
|
||||
//
|
||||
vuint64m8_t test_vset_v_u64m2_u64m8(vuint64m8_t dest, size_t index, vuint64m2_t val) {
|
||||
|
@ -484,7 +484,7 @@ vuint64m8_t test_vset_v_u64m2_u64m8(vuint64m8_t dest, size_t index, vuint64m2_t
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vset_v_u64m4_u64m8(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.experimental.vector.insert.nxv8i64.nxv4i64(<vscale x 8 x i64> [[DEST:%.*]], <vscale x 4 x i64> [[VAL:%.*]], i64 0)
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.vector.insert.nxv8i64.nxv4i64(<vscale x 8 x i64> [[DEST:%.*]], <vscale x 4 x i64> [[VAL:%.*]], i64 0)
|
||||
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
|
||||
//
|
||||
vuint64m8_t test_vset_v_u64m4_u64m8(vuint64m8_t dest, size_t index, vuint64m4_t val) {
|
||||
|
@ -493,7 +493,7 @@ vuint64m8_t test_vset_v_u64m4_u64m8(vuint64m8_t dest, size_t index, vuint64m4_t
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vset_v_f64m1_f64m2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.experimental.vector.insert.nxv2f64.nxv1f64(<vscale x 2 x double> [[DEST:%.*]], <vscale x 1 x double> [[VAL:%.*]], i64 0)
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.vector.insert.nxv2f64.nxv1f64(<vscale x 2 x double> [[DEST:%.*]], <vscale x 1 x double> [[VAL:%.*]], i64 0)
|
||||
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
|
||||
//
|
||||
vfloat64m2_t test_vset_v_f64m1_f64m2(vfloat64m2_t dest, size_t index, vfloat64m1_t val) {
|
||||
|
@ -502,7 +502,7 @@ vfloat64m2_t test_vset_v_f64m1_f64m2(vfloat64m2_t dest, size_t index, vfloat64m1
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vset_v_f64m1_f64m4(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.experimental.vector.insert.nxv4f64.nxv1f64(<vscale x 4 x double> [[DEST:%.*]], <vscale x 1 x double> [[VAL:%.*]], i64 0)
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.vector.insert.nxv4f64.nxv1f64(<vscale x 4 x double> [[DEST:%.*]], <vscale x 1 x double> [[VAL:%.*]], i64 0)
|
||||
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
|
||||
//
|
||||
vfloat64m4_t test_vset_v_f64m1_f64m4(vfloat64m4_t dest, size_t index, vfloat64m1_t val) {
|
||||
|
@ -511,7 +511,7 @@ vfloat64m4_t test_vset_v_f64m1_f64m4(vfloat64m4_t dest, size_t index, vfloat64m1
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vset_v_f64m2_f64m4(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.experimental.vector.insert.nxv4f64.nxv2f64(<vscale x 4 x double> [[DEST:%.*]], <vscale x 2 x double> [[VAL:%.*]], i64 0)
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.vector.insert.nxv4f64.nxv2f64(<vscale x 4 x double> [[DEST:%.*]], <vscale x 2 x double> [[VAL:%.*]], i64 0)
|
||||
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
|
||||
//
|
||||
vfloat64m4_t test_vset_v_f64m2_f64m4(vfloat64m4_t dest, size_t index, vfloat64m2_t val) {
|
||||
|
@ -520,7 +520,7 @@ vfloat64m4_t test_vset_v_f64m2_f64m4(vfloat64m4_t dest, size_t index, vfloat64m2
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vset_v_f64m1_f64m8(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.experimental.vector.insert.nxv8f64.nxv1f64(<vscale x 8 x double> [[DEST:%.*]], <vscale x 1 x double> [[VAL:%.*]], i64 0)
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.vector.insert.nxv8f64.nxv1f64(<vscale x 8 x double> [[DEST:%.*]], <vscale x 1 x double> [[VAL:%.*]], i64 0)
|
||||
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
|
||||
//
|
||||
vfloat64m8_t test_vset_v_f64m1_f64m8(vfloat64m8_t dest, size_t index, vfloat64m1_t val) {
|
||||
|
@ -529,7 +529,7 @@ vfloat64m8_t test_vset_v_f64m1_f64m8(vfloat64m8_t dest, size_t index, vfloat64m1
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vset_v_f64m2_f64m8(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.experimental.vector.insert.nxv8f64.nxv2f64(<vscale x 8 x double> [[DEST:%.*]], <vscale x 2 x double> [[VAL:%.*]], i64 0)
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.vector.insert.nxv8f64.nxv2f64(<vscale x 8 x double> [[DEST:%.*]], <vscale x 2 x double> [[VAL:%.*]], i64 0)
|
||||
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
|
||||
//
|
||||
vfloat64m8_t test_vset_v_f64m2_f64m8(vfloat64m8_t dest, size_t index, vfloat64m2_t val) {
|
||||
|
@ -538,7 +538,7 @@ vfloat64m8_t test_vset_v_f64m2_f64m8(vfloat64m8_t dest, size_t index, vfloat64m2
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vset_v_f64m4_f64m8(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.experimental.vector.insert.nxv8f64.nxv4f64(<vscale x 8 x double> [[DEST:%.*]], <vscale x 4 x double> [[VAL:%.*]], i64 0)
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.vector.insert.nxv8f64.nxv4f64(<vscale x 8 x double> [[DEST:%.*]], <vscale x 4 x double> [[VAL:%.*]], i64 0)
|
||||
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
|
||||
//
|
||||
vfloat64m8_t test_vset_v_f64m4_f64m8(vfloat64m8_t dest, size_t index, vfloat64m4_t val) {
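For context, the change exercised by the vset tests above is purely a rename of the underlying IR intrinsic; the C-level builtins and their operands are unchanged. Below is a minimal sketch of the kind of source these CHECK lines correspond to, assuming the unprefixed vset_v spelling these tests use and a constant sub-vector index of 0; the function name and body are reconstructed for illustration, not copied from the test file.

#include <riscv_vector.h>

// Inserts an m4 register group into an m8 group at sub-vector index 0.
// Before this commit Clang emitted @llvm.experimental.vector.insert.nxv8f64.nxv4f64
// for this call; after the rename it emits @llvm.vector.insert.nxv8f64.nxv4f64
// with identical operands, exactly as the paired CHECK lines above show.
vfloat64m8_t set_f64m4_into_f64m8(vfloat64m8_t dest, vfloat64m4_t val) {
  return vset_v_f64m4_f64m8(dest, 0, val);
}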
@@ -12,7 +12,7 @@ constexpr int foo() { return 1; }
// CHECK-RV64-LABEL: @_Z21test_vget_v_i8m2_i8m1u14__rvv_int8m2_t
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.experimental.vector.extract.nxv8i8.nxv16i8(<vscale x 16 x i8> [[SRC:%.*]], i64 8)
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.vector.extract.nxv8i8.nxv16i8(<vscale x 16 x i8> [[SRC:%.*]], i64 8)
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vget_v_i8m2_i8m1(vint8m2_t src) {
@@ -21,7 +21,7 @@ vint8m1_t test_vget_v_i8m2_i8m1(vint8m2_t src) {
// CHECK-RV64-LABEL: @_Z21test_vset_v_i8m1_i8m2u14__rvv_int8m2_tu14__rvv_int8m1_t
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.experimental.vector.insert.nxv16i8.nxv8i8(<vscale x 16 x i8> [[DEST:%.*]], <vscale x 8 x i8> [[VAL:%.*]], i64 8)
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.vector.insert.nxv16i8.nxv8i8(<vscale x 16 x i8> [[DEST:%.*]], <vscale x 8 x i8> [[VAL:%.*]], i64 8)
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vset_v_i8m1_i8m2(vint8m2_t dest, vint8m1_t val) {
@@ -8,7 +8,7 @@
// CHECK-RV64-LABEL: @test_vget_v_i8m2_i8m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.experimental.vector.extract.nxv8i8.nxv16i8(<vscale x 16 x i8> [[SRC:%.*]], i64 8)
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.vector.extract.nxv8i8.nxv16i8(<vscale x 16 x i8> [[SRC:%.*]], i64 8)
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vget_v_i8m2_i8m1(vint8m2_t src) {
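The extract direction is symmetric: only the intrinsic name in the CHECK lines changes. A hedged sketch of a call matching the vget test above, assuming the unprefixed vget_v spelling; the i64 8 offset in the IR is sub-vector index 1 times the 8 minimum elements of an i8 m1 fragment. The function name is hypothetical.

#include <riscv_vector.h>

// Extracts the upper m1 half (sub-vector index 1) of an m2 register group.
// Before this commit the call lowered to @llvm.experimental.vector.extract.nxv8i8.nxv16i8;
// it now lowers to @llvm.vector.extract.nxv8i8.nxv16i8 with the same i64 8 index operand.
vint8m1_t get_upper_half_i8m2(vint8m2_t src) {
  return vget_v_i8m2_i8m1(src, 1);
}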
@@ -17,7 +17,7 @@ vint8m1_t test_vget_v_i8m2_i8m1(vint8m2_t src) {
// CHECK-RV64-LABEL: @test_vget_v_i8m4_i8m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.experimental.vector.extract.nxv8i8.nxv32i8(<vscale x 32 x i8> [[SRC:%.*]], i64 16)
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.vector.extract.nxv8i8.nxv32i8(<vscale x 32 x i8> [[SRC:%.*]], i64 16)
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vget_v_i8m4_i8m1(vint8m4_t src) {
|
||||
|
@ -26,7 +26,7 @@ vint8m1_t test_vget_v_i8m4_i8m1(vint8m4_t src) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vget_v_i8m4_i8m2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.experimental.vector.extract.nxv16i8.nxv32i8(<vscale x 32 x i8> [[SRC:%.*]], i64 16)
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.vector.extract.nxv16i8.nxv32i8(<vscale x 32 x i8> [[SRC:%.*]], i64 16)
|
||||
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
|
||||
//
|
||||
vint8m2_t test_vget_v_i8m4_i8m2(vint8m4_t src) {
|
||||
|
@ -35,7 +35,7 @@ vint8m2_t test_vget_v_i8m4_i8m2(vint8m4_t src) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vget_v_i8m8_i8m1(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.experimental.vector.extract.nxv8i8.nxv64i8(<vscale x 64 x i8> [[SRC:%.*]], i64 48)
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.vector.extract.nxv8i8.nxv64i8(<vscale x 64 x i8> [[SRC:%.*]], i64 48)
|
||||
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
|
||||
//
|
||||
vint8m1_t test_vget_v_i8m8_i8m1(vint8m8_t src) {
|
||||
|
@ -44,7 +44,7 @@ vint8m1_t test_vget_v_i8m8_i8m1(vint8m8_t src) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vget_v_i8m8_i8m2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.experimental.vector.extract.nxv16i8.nxv64i8(<vscale x 64 x i8> [[SRC:%.*]], i64 48)
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.vector.extract.nxv16i8.nxv64i8(<vscale x 64 x i8> [[SRC:%.*]], i64 48)
|
||||
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
|
||||
//
|
||||
vint8m2_t test_vget_v_i8m8_i8m2(vint8m8_t src) {
|
||||
|
@ -53,7 +53,7 @@ vint8m2_t test_vget_v_i8m8_i8m2(vint8m8_t src) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vget_v_i8m8_i8m4(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.experimental.vector.extract.nxv32i8.nxv64i8(<vscale x 64 x i8> [[SRC:%.*]], i64 32)
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.vector.extract.nxv32i8.nxv64i8(<vscale x 64 x i8> [[SRC:%.*]], i64 32)
|
||||
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
|
||||
//
|
||||
vint8m4_t test_vget_v_i8m8_i8m4(vint8m8_t src) {
|
||||
|
@ -62,7 +62,7 @@ vint8m4_t test_vget_v_i8m8_i8m4(vint8m8_t src) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vget_v_i16m2_i16m1(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.experimental.vector.extract.nxv4i16.nxv8i16(<vscale x 8 x i16> [[SRC:%.*]], i64 4)
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.vector.extract.nxv4i16.nxv8i16(<vscale x 8 x i16> [[SRC:%.*]], i64 4)
|
||||
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
|
||||
//
|
||||
vint16m1_t test_vget_v_i16m2_i16m1(vint16m2_t src) {
|
||||
|
@ -71,7 +71,7 @@ vint16m1_t test_vget_v_i16m2_i16m1(vint16m2_t src) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vget_v_i16m4_i16m1(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.experimental.vector.extract.nxv4i16.nxv16i16(<vscale x 16 x i16> [[SRC:%.*]], i64 8)
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.vector.extract.nxv4i16.nxv16i16(<vscale x 16 x i16> [[SRC:%.*]], i64 8)
|
||||
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
|
||||
//
|
||||
vint16m1_t test_vget_v_i16m4_i16m1(vint16m4_t src) {
|
||||
|
@ -80,7 +80,7 @@ vint16m1_t test_vget_v_i16m4_i16m1(vint16m4_t src) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vget_v_i16m4_i16m2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.experimental.vector.extract.nxv8i16.nxv16i16(<vscale x 16 x i16> [[SRC:%.*]], i64 8)
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.vector.extract.nxv8i16.nxv16i16(<vscale x 16 x i16> [[SRC:%.*]], i64 8)
|
||||
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
|
||||
//
|
||||
vint16m2_t test_vget_v_i16m4_i16m2(vint16m4_t src) {
|
||||
|
@ -89,7 +89,7 @@ vint16m2_t test_vget_v_i16m4_i16m2(vint16m4_t src) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vget_v_i16m8_i16m1(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.experimental.vector.extract.nxv4i16.nxv32i16(<vscale x 32 x i16> [[SRC:%.*]], i64 24)
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.vector.extract.nxv4i16.nxv32i16(<vscale x 32 x i16> [[SRC:%.*]], i64 24)
|
||||
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
|
||||
//
|
||||
vint16m1_t test_vget_v_i16m8_i16m1(vint16m8_t src) {
|
||||
|
@ -98,7 +98,7 @@ vint16m1_t test_vget_v_i16m8_i16m1(vint16m8_t src) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vget_v_i16m8_i16m2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.experimental.vector.extract.nxv8i16.nxv32i16(<vscale x 32 x i16> [[SRC:%.*]], i64 24)
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.vector.extract.nxv8i16.nxv32i16(<vscale x 32 x i16> [[SRC:%.*]], i64 24)
|
||||
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
|
||||
//
|
||||
vint16m2_t test_vget_v_i16m8_i16m2(vint16m8_t src) {
|
||||
|
@ -107,7 +107,7 @@ vint16m2_t test_vget_v_i16m8_i16m2(vint16m8_t src) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vget_v_i16m8_i16m4(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.experimental.vector.extract.nxv16i16.nxv32i16(<vscale x 32 x i16> [[SRC:%.*]], i64 16)
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.vector.extract.nxv16i16.nxv32i16(<vscale x 32 x i16> [[SRC:%.*]], i64 16)
|
||||
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
|
||||
//
|
||||
vint16m4_t test_vget_v_i16m8_i16m4(vint16m8_t src) {
|
||||
|
@ -116,7 +116,7 @@ vint16m4_t test_vget_v_i16m8_i16m4(vint16m8_t src) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vget_v_i32m2_i32m1(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.experimental.vector.extract.nxv2i32.nxv4i32(<vscale x 4 x i32> [[SRC:%.*]], i64 2)
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.vector.extract.nxv2i32.nxv4i32(<vscale x 4 x i32> [[SRC:%.*]], i64 2)
|
||||
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
|
||||
//
|
||||
vint32m1_t test_vget_v_i32m2_i32m1(vint32m2_t src) {
|
||||
|
@ -125,7 +125,7 @@ vint32m1_t test_vget_v_i32m2_i32m1(vint32m2_t src) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vget_v_i32m4_i32m1(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.experimental.vector.extract.nxv2i32.nxv8i32(<vscale x 8 x i32> [[SRC:%.*]], i64 4)
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.vector.extract.nxv2i32.nxv8i32(<vscale x 8 x i32> [[SRC:%.*]], i64 4)
|
||||
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
|
||||
//
|
||||
vint32m1_t test_vget_v_i32m4_i32m1(vint32m4_t src) {
|
||||
|
@ -134,7 +134,7 @@ vint32m1_t test_vget_v_i32m4_i32m1(vint32m4_t src) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vget_v_i32m4_i32m2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vector.extract.nxv4i32.nxv8i32(<vscale x 8 x i32> [[SRC:%.*]], i64 4)
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.vector.extract.nxv4i32.nxv8i32(<vscale x 8 x i32> [[SRC:%.*]], i64 4)
|
||||
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
|
||||
//
|
||||
vint32m2_t test_vget_v_i32m4_i32m2(vint32m4_t src) {
|
||||
|
@ -143,7 +143,7 @@ vint32m2_t test_vget_v_i32m4_i32m2(vint32m4_t src) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vget_v_i32m8_i32m1(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.experimental.vector.extract.nxv2i32.nxv16i32(<vscale x 16 x i32> [[SRC:%.*]], i64 12)
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.vector.extract.nxv2i32.nxv16i32(<vscale x 16 x i32> [[SRC:%.*]], i64 12)
|
||||
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
|
||||
//
|
||||
vint32m1_t test_vget_v_i32m8_i32m1(vint32m8_t src) {
|
||||
|
@ -152,7 +152,7 @@ vint32m1_t test_vget_v_i32m8_i32m1(vint32m8_t src) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vget_v_i32m8_i32m2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vector.extract.nxv4i32.nxv16i32(<vscale x 16 x i32> [[SRC:%.*]], i64 12)
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.vector.extract.nxv4i32.nxv16i32(<vscale x 16 x i32> [[SRC:%.*]], i64 12)
|
||||
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
|
||||
//
|
||||
vint32m2_t test_vget_v_i32m8_i32m2(vint32m8_t src) {
|
||||
|
@ -161,7 +161,7 @@ vint32m2_t test_vget_v_i32m8_i32m2(vint32m8_t src) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vget_v_i32m8_i32m4(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.experimental.vector.extract.nxv8i32.nxv16i32(<vscale x 16 x i32> [[SRC:%.*]], i64 8)
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.vector.extract.nxv8i32.nxv16i32(<vscale x 16 x i32> [[SRC:%.*]], i64 8)
|
||||
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
|
||||
//
|
||||
vint32m4_t test_vget_v_i32m8_i32m4(vint32m8_t src) {
|
||||
|
@ -170,7 +170,7 @@ vint32m4_t test_vget_v_i32m8_i32m4(vint32m8_t src) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vget_v_i64m2_i64m1(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.experimental.vector.extract.nxv1i64.nxv2i64(<vscale x 2 x i64> [[SRC:%.*]], i64 1)
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.vector.extract.nxv1i64.nxv2i64(<vscale x 2 x i64> [[SRC:%.*]], i64 1)
|
||||
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
|
||||
//
|
||||
vint64m1_t test_vget_v_i64m2_i64m1(vint64m2_t src) {
|
||||
|
@ -179,7 +179,7 @@ vint64m1_t test_vget_v_i64m2_i64m1(vint64m2_t src) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vget_v_i64m4_i64m1(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.experimental.vector.extract.nxv1i64.nxv4i64(<vscale x 4 x i64> [[SRC:%.*]], i64 2)
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.vector.extract.nxv1i64.nxv4i64(<vscale x 4 x i64> [[SRC:%.*]], i64 2)
|
||||
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
|
||||
//
|
||||
vint64m1_t test_vget_v_i64m4_i64m1(vint64m4_t src) {
|
||||
|
@ -188,7 +188,7 @@ vint64m1_t test_vget_v_i64m4_i64m1(vint64m4_t src) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vget_v_i64m4_i64m2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.experimental.vector.extract.nxv2i64.nxv4i64(<vscale x 4 x i64> [[SRC:%.*]], i64 2)
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.vector.extract.nxv2i64.nxv4i64(<vscale x 4 x i64> [[SRC:%.*]], i64 2)
|
||||
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
|
||||
//
|
||||
vint64m2_t test_vget_v_i64m4_i64m2(vint64m4_t src) {
|
||||
|
@ -197,7 +197,7 @@ vint64m2_t test_vget_v_i64m4_i64m2(vint64m4_t src) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vget_v_i64m8_i64m1(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.experimental.vector.extract.nxv1i64.nxv8i64(<vscale x 8 x i64> [[SRC:%.*]], i64 6)
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.vector.extract.nxv1i64.nxv8i64(<vscale x 8 x i64> [[SRC:%.*]], i64 6)
|
||||
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
|
||||
//
|
||||
vint64m1_t test_vget_v_i64m8_i64m1(vint64m8_t src) {
|
||||
|
@ -206,7 +206,7 @@ vint64m1_t test_vget_v_i64m8_i64m1(vint64m8_t src) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vget_v_i64m8_i64m2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.experimental.vector.extract.nxv2i64.nxv8i64(<vscale x 8 x i64> [[SRC:%.*]], i64 6)
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.vector.extract.nxv2i64.nxv8i64(<vscale x 8 x i64> [[SRC:%.*]], i64 6)
|
||||
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
|
||||
//
|
||||
vint64m2_t test_vget_v_i64m8_i64m2(vint64m8_t src) {
|
||||
|
@ -215,7 +215,7 @@ vint64m2_t test_vget_v_i64m8_i64m2(vint64m8_t src) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vget_v_i64m8_i64m4(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.experimental.vector.extract.nxv4i64.nxv8i64(<vscale x 8 x i64> [[SRC:%.*]], i64 4)
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.vector.extract.nxv4i64.nxv8i64(<vscale x 8 x i64> [[SRC:%.*]], i64 4)
|
||||
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
|
||||
//
|
||||
vint64m4_t test_vget_v_i64m8_i64m4(vint64m8_t src) {
|
||||
|
@ -224,7 +224,7 @@ vint64m4_t test_vget_v_i64m8_i64m4(vint64m8_t src) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vget_v_u8m2_u8m1(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.experimental.vector.extract.nxv8i8.nxv16i8(<vscale x 16 x i8> [[SRC:%.*]], i64 8)
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.vector.extract.nxv8i8.nxv16i8(<vscale x 16 x i8> [[SRC:%.*]], i64 8)
|
||||
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
|
||||
//
|
||||
vuint8m1_t test_vget_v_u8m2_u8m1(vuint8m2_t src) {
|
||||
|
@ -233,7 +233,7 @@ vuint8m1_t test_vget_v_u8m2_u8m1(vuint8m2_t src) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vget_v_u8m4_u8m1(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.experimental.vector.extract.nxv8i8.nxv32i8(<vscale x 32 x i8> [[SRC:%.*]], i64 16)
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.vector.extract.nxv8i8.nxv32i8(<vscale x 32 x i8> [[SRC:%.*]], i64 16)
|
||||
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
|
||||
//
|
||||
vuint8m1_t test_vget_v_u8m4_u8m1(vuint8m4_t src) {
|
||||
|
@ -242,7 +242,7 @@ vuint8m1_t test_vget_v_u8m4_u8m1(vuint8m4_t src) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vget_v_u8m4_u8m2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.experimental.vector.extract.nxv16i8.nxv32i8(<vscale x 32 x i8> [[SRC:%.*]], i64 16)
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.vector.extract.nxv16i8.nxv32i8(<vscale x 32 x i8> [[SRC:%.*]], i64 16)
|
||||
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
|
||||
//
|
||||
vuint8m2_t test_vget_v_u8m4_u8m2(vuint8m4_t src) {
|
||||
|
@ -251,7 +251,7 @@ vuint8m2_t test_vget_v_u8m4_u8m2(vuint8m4_t src) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vget_v_u8m8_u8m1(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.experimental.vector.extract.nxv8i8.nxv64i8(<vscale x 64 x i8> [[SRC:%.*]], i64 48)
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.vector.extract.nxv8i8.nxv64i8(<vscale x 64 x i8> [[SRC:%.*]], i64 48)
|
||||
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
|
||||
//
|
||||
vuint8m1_t test_vget_v_u8m8_u8m1(vuint8m8_t src) {
|
||||
|
@ -260,7 +260,7 @@ vuint8m1_t test_vget_v_u8m8_u8m1(vuint8m8_t src) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vget_v_u8m8_u8m2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.experimental.vector.extract.nxv16i8.nxv64i8(<vscale x 64 x i8> [[SRC:%.*]], i64 48)
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.vector.extract.nxv16i8.nxv64i8(<vscale x 64 x i8> [[SRC:%.*]], i64 48)
|
||||
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
|
||||
//
|
||||
vuint8m2_t test_vget_v_u8m8_u8m2(vuint8m8_t src) {
|
||||
|
@ -269,7 +269,7 @@ vuint8m2_t test_vget_v_u8m8_u8m2(vuint8m8_t src) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vget_v_u8m8_u8m4(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.experimental.vector.extract.nxv32i8.nxv64i8(<vscale x 64 x i8> [[SRC:%.*]], i64 32)
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.vector.extract.nxv32i8.nxv64i8(<vscale x 64 x i8> [[SRC:%.*]], i64 32)
|
||||
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
|
||||
//
|
||||
vuint8m4_t test_vget_v_u8m8_u8m4(vuint8m8_t src) {
|
||||
|
@ -278,7 +278,7 @@ vuint8m4_t test_vget_v_u8m8_u8m4(vuint8m8_t src) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vget_v_u16m2_u16m1(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.experimental.vector.extract.nxv4i16.nxv8i16(<vscale x 8 x i16> [[SRC:%.*]], i64 4)
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.vector.extract.nxv4i16.nxv8i16(<vscale x 8 x i16> [[SRC:%.*]], i64 4)
|
||||
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
|
||||
//
|
||||
vuint16m1_t test_vget_v_u16m2_u16m1(vuint16m2_t src) {
|
||||
|
@ -287,7 +287,7 @@ vuint16m1_t test_vget_v_u16m2_u16m1(vuint16m2_t src) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vget_v_u16m4_u16m1(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.experimental.vector.extract.nxv4i16.nxv16i16(<vscale x 16 x i16> [[SRC:%.*]], i64 8)
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.vector.extract.nxv4i16.nxv16i16(<vscale x 16 x i16> [[SRC:%.*]], i64 8)
|
||||
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
|
||||
//
|
||||
vuint16m1_t test_vget_v_u16m4_u16m1(vuint16m4_t src) {
|
||||
|
@ -296,7 +296,7 @@ vuint16m1_t test_vget_v_u16m4_u16m1(vuint16m4_t src) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vget_v_u16m4_u16m2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.experimental.vector.extract.nxv8i16.nxv16i16(<vscale x 16 x i16> [[SRC:%.*]], i64 8)
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.vector.extract.nxv8i16.nxv16i16(<vscale x 16 x i16> [[SRC:%.*]], i64 8)
|
||||
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
|
||||
//
|
||||
vuint16m2_t test_vget_v_u16m4_u16m2(vuint16m4_t src) {
|
||||
|
@ -305,7 +305,7 @@ vuint16m2_t test_vget_v_u16m4_u16m2(vuint16m4_t src) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vget_v_u16m8_u16m1(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.experimental.vector.extract.nxv4i16.nxv32i16(<vscale x 32 x i16> [[SRC:%.*]], i64 24)
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.vector.extract.nxv4i16.nxv32i16(<vscale x 32 x i16> [[SRC:%.*]], i64 24)
|
||||
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
|
||||
//
|
||||
vuint16m1_t test_vget_v_u16m8_u16m1(vuint16m8_t src) {
|
||||
|
@ -314,7 +314,7 @@ vuint16m1_t test_vget_v_u16m8_u16m1(vuint16m8_t src) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vget_v_u16m8_u16m2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.experimental.vector.extract.nxv8i16.nxv32i16(<vscale x 32 x i16> [[SRC:%.*]], i64 24)
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.vector.extract.nxv8i16.nxv32i16(<vscale x 32 x i16> [[SRC:%.*]], i64 24)
|
||||
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
|
||||
//
|
||||
vuint16m2_t test_vget_v_u16m8_u16m2(vuint16m8_t src) {
|
||||
|
@ -323,7 +323,7 @@ vuint16m2_t test_vget_v_u16m8_u16m2(vuint16m8_t src) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vget_v_u16m8_u16m4(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.experimental.vector.extract.nxv16i16.nxv32i16(<vscale x 32 x i16> [[SRC:%.*]], i64 16)
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.vector.extract.nxv16i16.nxv32i16(<vscale x 32 x i16> [[SRC:%.*]], i64 16)
|
||||
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
|
||||
//
|
||||
vuint16m4_t test_vget_v_u16m8_u16m4(vuint16m8_t src) {
|
||||
|
@ -332,7 +332,7 @@ vuint16m4_t test_vget_v_u16m8_u16m4(vuint16m8_t src) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vget_v_u32m2_u32m1(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.experimental.vector.extract.nxv2i32.nxv4i32(<vscale x 4 x i32> [[SRC:%.*]], i64 2)
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.vector.extract.nxv2i32.nxv4i32(<vscale x 4 x i32> [[SRC:%.*]], i64 2)
|
||||
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
|
||||
//
|
||||
vuint32m1_t test_vget_v_u32m2_u32m1(vuint32m2_t src) {
|
||||
|
@ -341,7 +341,7 @@ vuint32m1_t test_vget_v_u32m2_u32m1(vuint32m2_t src) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vget_v_u32m4_u32m1(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.experimental.vector.extract.nxv2i32.nxv8i32(<vscale x 8 x i32> [[SRC:%.*]], i64 4)
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.vector.extract.nxv2i32.nxv8i32(<vscale x 8 x i32> [[SRC:%.*]], i64 4)
|
||||
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
|
||||
//
|
||||
vuint32m1_t test_vget_v_u32m4_u32m1(vuint32m4_t src) {
|
||||
|
@ -350,7 +350,7 @@ vuint32m1_t test_vget_v_u32m4_u32m1(vuint32m4_t src) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vget_v_u32m4_u32m2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vector.extract.nxv4i32.nxv8i32(<vscale x 8 x i32> [[SRC:%.*]], i64 4)
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.vector.extract.nxv4i32.nxv8i32(<vscale x 8 x i32> [[SRC:%.*]], i64 4)
|
||||
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
|
||||
//
|
||||
vuint32m2_t test_vget_v_u32m4_u32m2(vuint32m4_t src) {
|
||||
|
@ -359,7 +359,7 @@ vuint32m2_t test_vget_v_u32m4_u32m2(vuint32m4_t src) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vget_v_u32m8_u32m1(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.experimental.vector.extract.nxv2i32.nxv16i32(<vscale x 16 x i32> [[SRC:%.*]], i64 12)
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.vector.extract.nxv2i32.nxv16i32(<vscale x 16 x i32> [[SRC:%.*]], i64 12)
|
||||
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
|
||||
//
|
||||
vuint32m1_t test_vget_v_u32m8_u32m1(vuint32m8_t src) {
|
||||
|
@ -368,7 +368,7 @@ vuint32m1_t test_vget_v_u32m8_u32m1(vuint32m8_t src) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vget_v_u32m8_u32m2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vector.extract.nxv4i32.nxv16i32(<vscale x 16 x i32> [[SRC:%.*]], i64 12)
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.vector.extract.nxv4i32.nxv16i32(<vscale x 16 x i32> [[SRC:%.*]], i64 12)
|
||||
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
|
||||
//
|
||||
vuint32m2_t test_vget_v_u32m8_u32m2(vuint32m8_t src) {
|
||||
|
@ -377,7 +377,7 @@ vuint32m2_t test_vget_v_u32m8_u32m2(vuint32m8_t src) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vget_v_u32m8_u32m4(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.experimental.vector.extract.nxv8i32.nxv16i32(<vscale x 16 x i32> [[SRC:%.*]], i64 8)
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.vector.extract.nxv8i32.nxv16i32(<vscale x 16 x i32> [[SRC:%.*]], i64 8)
|
||||
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
|
||||
//
|
||||
vuint32m4_t test_vget_v_u32m8_u32m4(vuint32m8_t src) {
|
||||
|
@ -386,7 +386,7 @@ vuint32m4_t test_vget_v_u32m8_u32m4(vuint32m8_t src) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vget_v_u64m2_u64m1(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.experimental.vector.extract.nxv1i64.nxv2i64(<vscale x 2 x i64> [[SRC:%.*]], i64 1)
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.vector.extract.nxv1i64.nxv2i64(<vscale x 2 x i64> [[SRC:%.*]], i64 1)
|
||||
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
|
||||
//
|
||||
vuint64m1_t test_vget_v_u64m2_u64m1(vuint64m2_t src) {
|
||||
|
@ -395,7 +395,7 @@ vuint64m1_t test_vget_v_u64m2_u64m1(vuint64m2_t src) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vget_v_u64m4_u64m1(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.experimental.vector.extract.nxv1i64.nxv4i64(<vscale x 4 x i64> [[SRC:%.*]], i64 2)
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.vector.extract.nxv1i64.nxv4i64(<vscale x 4 x i64> [[SRC:%.*]], i64 2)
|
||||
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
|
||||
//
|
||||
vuint64m1_t test_vget_v_u64m4_u64m1(vuint64m4_t src) {
|
||||
|
@ -404,7 +404,7 @@ vuint64m1_t test_vget_v_u64m4_u64m1(vuint64m4_t src) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vget_v_u64m4_u64m2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.experimental.vector.extract.nxv2i64.nxv4i64(<vscale x 4 x i64> [[SRC:%.*]], i64 2)
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.vector.extract.nxv2i64.nxv4i64(<vscale x 4 x i64> [[SRC:%.*]], i64 2)
|
||||
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
|
||||
//
|
||||
vuint64m2_t test_vget_v_u64m4_u64m2(vuint64m4_t src) {
|
||||
|
@ -413,7 +413,7 @@ vuint64m2_t test_vget_v_u64m4_u64m2(vuint64m4_t src) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vget_v_u64m8_u64m1(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.experimental.vector.extract.nxv1i64.nxv8i64(<vscale x 8 x i64> [[SRC:%.*]], i64 6)
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.vector.extract.nxv1i64.nxv8i64(<vscale x 8 x i64> [[SRC:%.*]], i64 6)
|
||||
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
|
||||
//
|
||||
vuint64m1_t test_vget_v_u64m8_u64m1(vuint64m8_t src) {
|
||||
|
@ -422,7 +422,7 @@ vuint64m1_t test_vget_v_u64m8_u64m1(vuint64m8_t src) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vget_v_u64m8_u64m2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.experimental.vector.extract.nxv2i64.nxv8i64(<vscale x 8 x i64> [[SRC:%.*]], i64 6)
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.vector.extract.nxv2i64.nxv8i64(<vscale x 8 x i64> [[SRC:%.*]], i64 6)
|
||||
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
|
||||
//
|
||||
vuint64m2_t test_vget_v_u64m8_u64m2(vuint64m8_t src) {
|
||||
|
@ -431,7 +431,7 @@ vuint64m2_t test_vget_v_u64m8_u64m2(vuint64m8_t src) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vget_v_u64m8_u64m4(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.experimental.vector.extract.nxv4i64.nxv8i64(<vscale x 8 x i64> [[SRC:%.*]], i64 4)
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.vector.extract.nxv4i64.nxv8i64(<vscale x 8 x i64> [[SRC:%.*]], i64 4)
|
||||
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
|
||||
//
|
||||
vuint64m4_t test_vget_v_u64m8_u64m4(vuint64m8_t src) {
|
||||
|
@ -440,7 +440,7 @@ vuint64m4_t test_vget_v_u64m8_u64m4(vuint64m8_t src) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vget_v_f32m2_f32m1(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.experimental.vector.extract.nxv2f32.nxv4f32(<vscale x 4 x float> [[SRC:%.*]], i64 2)
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.vector.extract.nxv2f32.nxv4f32(<vscale x 4 x float> [[SRC:%.*]], i64 2)
|
||||
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
|
||||
//
|
||||
vfloat32m1_t test_vget_v_f32m2_f32m1(vfloat32m2_t src) {
|
||||
|
@ -449,7 +449,7 @@ vfloat32m1_t test_vget_v_f32m2_f32m1(vfloat32m2_t src) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vget_v_f32m4_f32m1(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.experimental.vector.extract.nxv2f32.nxv8f32(<vscale x 8 x float> [[SRC:%.*]], i64 4)
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.vector.extract.nxv2f32.nxv8f32(<vscale x 8 x float> [[SRC:%.*]], i64 4)
|
||||
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
|
||||
//
|
||||
vfloat32m1_t test_vget_v_f32m4_f32m1(vfloat32m4_t src) {
|
||||
|
@ -458,7 +458,7 @@ vfloat32m1_t test_vget_v_f32m4_f32m1(vfloat32m4_t src) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vget_v_f32m4_f32m2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.experimental.vector.extract.nxv4f32.nxv8f32(<vscale x 8 x float> [[SRC:%.*]], i64 4)
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.vector.extract.nxv4f32.nxv8f32(<vscale x 8 x float> [[SRC:%.*]], i64 4)
|
||||
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
|
||||
//
|
||||
vfloat32m2_t test_vget_v_f32m4_f32m2(vfloat32m4_t src) {
|
||||
|
@ -467,7 +467,7 @@ vfloat32m2_t test_vget_v_f32m4_f32m2(vfloat32m4_t src) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vget_v_f32m8_f32m1(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.experimental.vector.extract.nxv2f32.nxv16f32(<vscale x 16 x float> [[SRC:%.*]], i64 12)
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.vector.extract.nxv2f32.nxv16f32(<vscale x 16 x float> [[SRC:%.*]], i64 12)
|
||||
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
|
||||
//
|
||||
vfloat32m1_t test_vget_v_f32m8_f32m1(vfloat32m8_t src) {
|
||||
|
@ -476,7 +476,7 @@ vfloat32m1_t test_vget_v_f32m8_f32m1(vfloat32m8_t src) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vget_v_f32m8_f32m2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.experimental.vector.extract.nxv4f32.nxv16f32(<vscale x 16 x float> [[SRC:%.*]], i64 12)
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.vector.extract.nxv4f32.nxv16f32(<vscale x 16 x float> [[SRC:%.*]], i64 12)
|
||||
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
|
||||
//
|
||||
vfloat32m2_t test_vget_v_f32m8_f32m2(vfloat32m8_t src) {
|
||||
|
@ -485,7 +485,7 @@ vfloat32m2_t test_vget_v_f32m8_f32m2(vfloat32m8_t src) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vget_v_f32m8_f32m4(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.experimental.vector.extract.nxv8f32.nxv16f32(<vscale x 16 x float> [[SRC:%.*]], i64 8)
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.vector.extract.nxv8f32.nxv16f32(<vscale x 16 x float> [[SRC:%.*]], i64 8)
|
||||
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
|
||||
//
|
||||
vfloat32m4_t test_vget_v_f32m8_f32m4(vfloat32m8_t src) {
|
||||
|
@ -494,7 +494,7 @@ vfloat32m4_t test_vget_v_f32m8_f32m4(vfloat32m8_t src) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vget_v_f64m2_f64m1(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.experimental.vector.extract.nxv1f64.nxv2f64(<vscale x 2 x double> [[SRC:%.*]], i64 1)
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.vector.extract.nxv1f64.nxv2f64(<vscale x 2 x double> [[SRC:%.*]], i64 1)
|
||||
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
|
||||
//
|
||||
vfloat64m1_t test_vget_v_f64m2_f64m1(vfloat64m2_t src) {
|
||||
|
@ -503,7 +503,7 @@ vfloat64m1_t test_vget_v_f64m2_f64m1(vfloat64m2_t src) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vget_v_f64m4_f64m1(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.experimental.vector.extract.nxv1f64.nxv4f64(<vscale x 4 x double> [[SRC:%.*]], i64 2)
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.vector.extract.nxv1f64.nxv4f64(<vscale x 4 x double> [[SRC:%.*]], i64 2)
|
||||
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
|
||||
//
|
||||
vfloat64m1_t test_vget_v_f64m4_f64m1(vfloat64m4_t src) {
|
||||
|
@ -512,7 +512,7 @@ vfloat64m1_t test_vget_v_f64m4_f64m1(vfloat64m4_t src) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vget_v_f64m4_f64m2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.experimental.vector.extract.nxv2f64.nxv4f64(<vscale x 4 x double> [[SRC:%.*]], i64 2)
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.vector.extract.nxv2f64.nxv4f64(<vscale x 4 x double> [[SRC:%.*]], i64 2)
|
||||
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
|
||||
//
|
||||
vfloat64m2_t test_vget_v_f64m4_f64m2(vfloat64m4_t src) {
|
||||
|
@ -521,7 +521,7 @@ vfloat64m2_t test_vget_v_f64m4_f64m2(vfloat64m4_t src) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vget_v_f64m8_f64m1(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.experimental.vector.extract.nxv1f64.nxv8f64(<vscale x 8 x double> [[SRC:%.*]], i64 6)
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.vector.extract.nxv1f64.nxv8f64(<vscale x 8 x double> [[SRC:%.*]], i64 6)
|
||||
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
|
||||
//
|
||||
vfloat64m1_t test_vget_v_f64m8_f64m1(vfloat64m8_t src) {
|
||||
|
@ -530,7 +530,7 @@ vfloat64m1_t test_vget_v_f64m8_f64m1(vfloat64m8_t src) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vget_v_f64m8_f64m2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.experimental.vector.extract.nxv2f64.nxv8f64(<vscale x 8 x double> [[SRC:%.*]], i64 6)
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.vector.extract.nxv2f64.nxv8f64(<vscale x 8 x double> [[SRC:%.*]], i64 6)
|
||||
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
|
||||
//
|
||||
vfloat64m2_t test_vget_v_f64m8_f64m2(vfloat64m8_t src) {
|
||||
|
@ -539,7 +539,7 @@ vfloat64m2_t test_vget_v_f64m8_f64m2(vfloat64m8_t src) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vget_v_f64m8_f64m4(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.experimental.vector.extract.nxv4f64.nxv8f64(<vscale x 8 x double> [[SRC:%.*]], i64 4)
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.vector.extract.nxv4f64.nxv8f64(<vscale x 8 x double> [[SRC:%.*]], i64 4)
|
||||
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
|
||||
//
|
||||
vfloat64m4_t test_vget_v_f64m8_f64m4(vfloat64m8_t src) {
|
||||
|
@ -548,7 +548,7 @@ vfloat64m4_t test_vget_v_f64m8_f64m4(vfloat64m8_t src) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vget_v_f16m2_f16m1(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.experimental.vector.extract.nxv4f16.nxv8f16(<vscale x 8 x half> [[SRC:%.*]], i64 0)
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.vector.extract.nxv4f16.nxv8f16(<vscale x 8 x half> [[SRC:%.*]], i64 0)
|
||||
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
|
||||
//
|
||||
vfloat16m1_t test_vget_v_f16m2_f16m1 (vfloat16m2_t src) {
|
||||
|
@ -557,7 +557,7 @@ vfloat16m1_t test_vget_v_f16m2_f16m1 (vfloat16m2_t src) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vget_v_f16m4_f16m1(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.experimental.vector.extract.nxv4f16.nxv16f16(<vscale x 16 x half> [[SRC:%.*]], i64 0)
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.vector.extract.nxv4f16.nxv16f16(<vscale x 16 x half> [[SRC:%.*]], i64 0)
|
||||
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
|
||||
//
|
||||
vfloat16m1_t test_vget_v_f16m4_f16m1 (vfloat16m4_t src) {
|
||||
|
@ -566,7 +566,7 @@ vfloat16m1_t test_vget_v_f16m4_f16m1 (vfloat16m4_t src) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vget_v_f16m8_f16m1(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.experimental.vector.extract.nxv4f16.nxv32f16(<vscale x 32 x half> [[SRC:%.*]], i64 0)
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.vector.extract.nxv4f16.nxv32f16(<vscale x 32 x half> [[SRC:%.*]], i64 0)
|
||||
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
|
||||
//
|
||||
vfloat16m1_t test_vget_v_f16m8_f16m1 (vfloat16m8_t src) {
|
||||
|
@ -575,7 +575,7 @@ vfloat16m1_t test_vget_v_f16m8_f16m1 (vfloat16m8_t src) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vget_v_f16m4_f16m2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.experimental.vector.extract.nxv8f16.nxv16f16(<vscale x 16 x half> [[SRC:%.*]], i64 0)
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.vector.extract.nxv8f16.nxv16f16(<vscale x 16 x half> [[SRC:%.*]], i64 0)
|
||||
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
|
||||
//
|
||||
vfloat16m2_t test_vget_v_f16m4_f16m2 (vfloat16m4_t src) {
|
||||
|
@ -584,7 +584,7 @@ vfloat16m2_t test_vget_v_f16m4_f16m2 (vfloat16m4_t src) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vget_v_f16m8_f16m2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.experimental.vector.extract.nxv8f16.nxv32f16(<vscale x 32 x half> [[SRC:%.*]], i64 0)
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.vector.extract.nxv8f16.nxv32f16(<vscale x 32 x half> [[SRC:%.*]], i64 0)
|
||||
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
|
||||
//
|
||||
vfloat16m2_t test_vget_v_f16m8_f16m2 (vfloat16m8_t src) {
|
||||
|
@@ -593,7 +593,7 @@ vfloat16m2_t test_vget_v_f16m8_f16m2 (vfloat16m8_t src) {

// CHECK-RV64-LABEL: @test_vget_v_f16m8_f16m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.experimental.vector.extract.nxv16f16.nxv32f16(<vscale x 32 x half> [[SRC:%.*]], i64 0)
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.vector.extract.nxv16f16.nxv32f16(<vscale x 32 x half> [[SRC:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vget_v_f16m8_f16m4 (vfloat16m8_t src) {
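Every vget hunk above makes the same one-line change: the call keeps its operands and index, and only the intrinsic name loses the experimental. prefix. For reference, a minimal hand-written IR sketch of the extract pattern these tests check; the function name and body here are illustrative only and are not part of this diff, and it assumes the usual LMUL scaling where the index operand is the register-group index times the minimum element count of the result type.

; Extract the third m1 group (group index 2) from an m4 value: the index
; operand is 2 * 2 (minimum element count of the nxv2i32 result), i.e. the
; "i64 4" seen in the CHECK lines above.
define <vscale x 2 x i32> @vget_u32m4_u32m1_example(<vscale x 8 x i32> %src) {
entry:
  %sub = call <vscale x 2 x i32> @llvm.vector.extract.nxv2i32.nxv8i32(<vscale x 8 x i32> %src, i64 4)
  ret <vscale x 2 x i32> %sub
}

declare <vscale x 2 x i32> @llvm.vector.extract.nxv2i32.nxv8i32(<vscale x 8 x i32>, i64)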
File diff suppressed because it is too large.
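The vset tests in the next file exercise the mirror-image change for llvm.vector.insert. A matching illustrative sketch (again not taken from the diff, and under the same scaling assumption) for inserting an m1 register group into the upper half of an m2 value:

; Insert an m1 group at group index 1 of an m2 value: the index operand is
; 1 * 8 (minimum element count of the nxv8i8 subvector), i.e. the "i64 8"
; in the first vset hunk below.
define <vscale x 16 x i8> @vset_i8m1_i8m2_example(<vscale x 16 x i8> %dest, <vscale x 8 x i8> %val) {
entry:
  %res = call <vscale x 16 x i8> @llvm.vector.insert.nxv16i8.nxv8i8(<vscale x 16 x i8> %dest, <vscale x 8 x i8> %val, i64 8)
  ret <vscale x 16 x i8> %res
}

declare <vscale x 16 x i8> @llvm.vector.insert.nxv16i8.nxv8i8(<vscale x 16 x i8>, <vscale x 8 x i8>, i64)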
@@ -8,7 +8,7 @@

// CHECK-RV64-LABEL: @test_vset_v_i8m1_i8m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.experimental.vector.insert.nxv16i8.nxv8i8(<vscale x 16 x i8> [[DEST:%.*]], <vscale x 8 x i8> [[VAL:%.*]], i64 8)
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.vector.insert.nxv16i8.nxv8i8(<vscale x 16 x i8> [[DEST:%.*]], <vscale x 8 x i8> [[VAL:%.*]], i64 8)
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vset_v_i8m1_i8m2(vint8m2_t dest, vint8m1_t val) {
@@ -17,7 +17,7 @@ vint8m2_t test_vset_v_i8m1_i8m2(vint8m2_t dest, vint8m1_t val) {

// CHECK-RV64-LABEL: @test_vset_v_i8m1_i8m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.experimental.vector.insert.nxv32i8.nxv8i8(<vscale x 32 x i8> [[DEST:%.*]], <vscale x 8 x i8> [[VAL:%.*]], i64 24)
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.vector.insert.nxv32i8.nxv8i8(<vscale x 32 x i8> [[DEST:%.*]], <vscale x 8 x i8> [[VAL:%.*]], i64 24)
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vset_v_i8m1_i8m4(vint8m4_t dest, vint8m1_t val) {
@ -26,7 +26,7 @@ vint8m4_t test_vset_v_i8m1_i8m4(vint8m4_t dest, vint8m1_t val) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vset_v_i8m2_i8m4(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.experimental.vector.insert.nxv32i8.nxv16i8(<vscale x 32 x i8> [[DEST:%.*]], <vscale x 16 x i8> [[VAL:%.*]], i64 16)
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.vector.insert.nxv32i8.nxv16i8(<vscale x 32 x i8> [[DEST:%.*]], <vscale x 16 x i8> [[VAL:%.*]], i64 16)
|
||||
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
|
||||
//
|
||||
vint8m4_t test_vset_v_i8m2_i8m4(vint8m4_t dest, vint8m2_t val) {
|
||||
|
@ -35,7 +35,7 @@ vint8m4_t test_vset_v_i8m2_i8m4(vint8m4_t dest, vint8m2_t val) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vset_v_i8m1_i8m8(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.experimental.vector.insert.nxv64i8.nxv8i8(<vscale x 64 x i8> [[DEST:%.*]], <vscale x 8 x i8> [[VAL:%.*]], i64 56)
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.vector.insert.nxv64i8.nxv8i8(<vscale x 64 x i8> [[DEST:%.*]], <vscale x 8 x i8> [[VAL:%.*]], i64 56)
|
||||
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
|
||||
//
|
||||
vint8m8_t test_vset_v_i8m1_i8m8(vint8m8_t dest, vint8m1_t val) {
|
||||
|
@ -44,7 +44,7 @@ vint8m8_t test_vset_v_i8m1_i8m8(vint8m8_t dest, vint8m1_t val) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vset_v_i8m2_i8m8(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.experimental.vector.insert.nxv64i8.nxv16i8(<vscale x 64 x i8> [[DEST:%.*]], <vscale x 16 x i8> [[VAL:%.*]], i64 32)
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.vector.insert.nxv64i8.nxv16i8(<vscale x 64 x i8> [[DEST:%.*]], <vscale x 16 x i8> [[VAL:%.*]], i64 32)
|
||||
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
|
||||
//
|
||||
vint8m8_t test_vset_v_i8m2_i8m8(vint8m8_t dest, vint8m2_t val) {
|
||||
|
@ -53,7 +53,7 @@ vint8m8_t test_vset_v_i8m2_i8m8(vint8m8_t dest, vint8m2_t val) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vset_v_i8m4_i8m8(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.experimental.vector.insert.nxv64i8.nxv32i8(<vscale x 64 x i8> [[DEST:%.*]], <vscale x 32 x i8> [[VAL:%.*]], i64 32)
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.vector.insert.nxv64i8.nxv32i8(<vscale x 64 x i8> [[DEST:%.*]], <vscale x 32 x i8> [[VAL:%.*]], i64 32)
|
||||
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
|
||||
//
|
||||
vint8m8_t test_vset_v_i8m4_i8m8(vint8m8_t dest, vint8m4_t val) {
|
||||
|
@ -62,7 +62,7 @@ vint8m8_t test_vset_v_i8m4_i8m8(vint8m8_t dest, vint8m4_t val) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vset_v_i16m1_i16m2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.experimental.vector.insert.nxv8i16.nxv4i16(<vscale x 8 x i16> [[DEST:%.*]], <vscale x 4 x i16> [[VAL:%.*]], i64 4)
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.vector.insert.nxv8i16.nxv4i16(<vscale x 8 x i16> [[DEST:%.*]], <vscale x 4 x i16> [[VAL:%.*]], i64 4)
|
||||
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
|
||||
//
|
||||
vint16m2_t test_vset_v_i16m1_i16m2(vint16m2_t dest, vint16m1_t val) {
|
||||
|
@ -71,7 +71,7 @@ vint16m2_t test_vset_v_i16m1_i16m2(vint16m2_t dest, vint16m1_t val) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vset_v_i16m1_i16m4(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.experimental.vector.insert.nxv16i16.nxv4i16(<vscale x 16 x i16> [[DEST:%.*]], <vscale x 4 x i16> [[VAL:%.*]], i64 12)
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.vector.insert.nxv16i16.nxv4i16(<vscale x 16 x i16> [[DEST:%.*]], <vscale x 4 x i16> [[VAL:%.*]], i64 12)
|
||||
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
|
||||
//
|
||||
vint16m4_t test_vset_v_i16m1_i16m4(vint16m4_t dest, vint16m1_t val) {
|
||||
|
@ -80,7 +80,7 @@ vint16m4_t test_vset_v_i16m1_i16m4(vint16m4_t dest, vint16m1_t val) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vset_v_i16m2_i16m4(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.experimental.vector.insert.nxv16i16.nxv8i16(<vscale x 16 x i16> [[DEST:%.*]], <vscale x 8 x i16> [[VAL:%.*]], i64 8)
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.vector.insert.nxv16i16.nxv8i16(<vscale x 16 x i16> [[DEST:%.*]], <vscale x 8 x i16> [[VAL:%.*]], i64 8)
|
||||
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
|
||||
//
|
||||
vint16m4_t test_vset_v_i16m2_i16m4(vint16m4_t dest, vint16m2_t val) {
|
||||
|
@ -89,7 +89,7 @@ vint16m4_t test_vset_v_i16m2_i16m4(vint16m4_t dest, vint16m2_t val) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vset_v_i16m1_i16m8(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.experimental.vector.insert.nxv32i16.nxv4i16(<vscale x 32 x i16> [[DEST:%.*]], <vscale x 4 x i16> [[VAL:%.*]], i64 28)
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.vector.insert.nxv32i16.nxv4i16(<vscale x 32 x i16> [[DEST:%.*]], <vscale x 4 x i16> [[VAL:%.*]], i64 28)
|
||||
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
|
||||
//
|
||||
vint16m8_t test_vset_v_i16m1_i16m8(vint16m8_t dest, vint16m1_t val) {
|
||||
|
@ -98,7 +98,7 @@ vint16m8_t test_vset_v_i16m1_i16m8(vint16m8_t dest, vint16m1_t val) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vset_v_i16m2_i16m8(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.experimental.vector.insert.nxv32i16.nxv8i16(<vscale x 32 x i16> [[DEST:%.*]], <vscale x 8 x i16> [[VAL:%.*]], i64 16)
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.vector.insert.nxv32i16.nxv8i16(<vscale x 32 x i16> [[DEST:%.*]], <vscale x 8 x i16> [[VAL:%.*]], i64 16)
|
||||
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
|
||||
//
|
||||
vint16m8_t test_vset_v_i16m2_i16m8(vint16m8_t dest, vint16m2_t val) {
|
||||
|
@ -107,7 +107,7 @@ vint16m8_t test_vset_v_i16m2_i16m8(vint16m8_t dest, vint16m2_t val) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vset_v_i16m4_i16m8(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.experimental.vector.insert.nxv32i16.nxv16i16(<vscale x 32 x i16> [[DEST:%.*]], <vscale x 16 x i16> [[VAL:%.*]], i64 16)
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.vector.insert.nxv32i16.nxv16i16(<vscale x 32 x i16> [[DEST:%.*]], <vscale x 16 x i16> [[VAL:%.*]], i64 16)
|
||||
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
|
||||
//
|
||||
vint16m8_t test_vset_v_i16m4_i16m8(vint16m8_t dest, vint16m4_t val) {
|
||||
|
@ -116,7 +116,7 @@ vint16m8_t test_vset_v_i16m4_i16m8(vint16m8_t dest, vint16m4_t val) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vset_v_i32m1_i32m2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vector.insert.nxv4i32.nxv2i32(<vscale x 4 x i32> [[DEST:%.*]], <vscale x 2 x i32> [[VAL:%.*]], i64 2)
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.vector.insert.nxv4i32.nxv2i32(<vscale x 4 x i32> [[DEST:%.*]], <vscale x 2 x i32> [[VAL:%.*]], i64 2)
|
||||
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
|
||||
//
|
||||
vint32m2_t test_vset_v_i32m1_i32m2(vint32m2_t dest, vint32m1_t val) {
|
||||
|
@ -125,7 +125,7 @@ vint32m2_t test_vset_v_i32m1_i32m2(vint32m2_t dest, vint32m1_t val) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vset_v_i32m1_i32m4(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.experimental.vector.insert.nxv8i32.nxv2i32(<vscale x 8 x i32> [[DEST:%.*]], <vscale x 2 x i32> [[VAL:%.*]], i64 6)
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.vector.insert.nxv8i32.nxv2i32(<vscale x 8 x i32> [[DEST:%.*]], <vscale x 2 x i32> [[VAL:%.*]], i64 6)
|
||||
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
|
||||
//
|
||||
vint32m4_t test_vset_v_i32m1_i32m4(vint32m4_t dest, vint32m1_t val) {
|
||||
|
@ -134,7 +134,7 @@ vint32m4_t test_vset_v_i32m1_i32m4(vint32m4_t dest, vint32m1_t val) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vset_v_i32m2_i32m4(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.experimental.vector.insert.nxv8i32.nxv4i32(<vscale x 8 x i32> [[DEST:%.*]], <vscale x 4 x i32> [[VAL:%.*]], i64 4)
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.vector.insert.nxv8i32.nxv4i32(<vscale x 8 x i32> [[DEST:%.*]], <vscale x 4 x i32> [[VAL:%.*]], i64 4)
|
||||
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
|
||||
//
|
||||
vint32m4_t test_vset_v_i32m2_i32m4(vint32m4_t dest, vint32m2_t val) {
|
||||
|
@ -143,7 +143,7 @@ vint32m4_t test_vset_v_i32m2_i32m4(vint32m4_t dest, vint32m2_t val) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vset_v_i32m1_i32m8(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.experimental.vector.insert.nxv16i32.nxv2i32(<vscale x 16 x i32> [[DEST:%.*]], <vscale x 2 x i32> [[VAL:%.*]], i64 14)
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.vector.insert.nxv16i32.nxv2i32(<vscale x 16 x i32> [[DEST:%.*]], <vscale x 2 x i32> [[VAL:%.*]], i64 14)
|
||||
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
|
||||
//
|
||||
vint32m8_t test_vset_v_i32m1_i32m8(vint32m8_t dest, vint32m1_t val) {
|
||||
|
@ -152,7 +152,7 @@ vint32m8_t test_vset_v_i32m1_i32m8(vint32m8_t dest, vint32m1_t val) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vset_v_i32m2_i32m8(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.experimental.vector.insert.nxv16i32.nxv4i32(<vscale x 16 x i32> [[DEST:%.*]], <vscale x 4 x i32> [[VAL:%.*]], i64 8)
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.vector.insert.nxv16i32.nxv4i32(<vscale x 16 x i32> [[DEST:%.*]], <vscale x 4 x i32> [[VAL:%.*]], i64 8)
|
||||
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
|
||||
//
|
||||
vint32m8_t test_vset_v_i32m2_i32m8(vint32m8_t dest, vint32m2_t val) {
|
||||
|
@ -161,7 +161,7 @@ vint32m8_t test_vset_v_i32m2_i32m8(vint32m8_t dest, vint32m2_t val) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vset_v_i32m4_i32m8(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.experimental.vector.insert.nxv16i32.nxv8i32(<vscale x 16 x i32> [[DEST:%.*]], <vscale x 8 x i32> [[VAL:%.*]], i64 8)
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.vector.insert.nxv16i32.nxv8i32(<vscale x 16 x i32> [[DEST:%.*]], <vscale x 8 x i32> [[VAL:%.*]], i64 8)
|
||||
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
|
||||
//
|
||||
vint32m8_t test_vset_v_i32m4_i32m8(vint32m8_t dest, vint32m4_t val) {
|
||||
|
@ -170,7 +170,7 @@ vint32m8_t test_vset_v_i32m4_i32m8(vint32m8_t dest, vint32m4_t val) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vset_v_i64m1_i64m2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.experimental.vector.insert.nxv2i64.nxv1i64(<vscale x 2 x i64> [[DEST:%.*]], <vscale x 1 x i64> [[VAL:%.*]], i64 1)
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.vector.insert.nxv2i64.nxv1i64(<vscale x 2 x i64> [[DEST:%.*]], <vscale x 1 x i64> [[VAL:%.*]], i64 1)
|
||||
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
|
||||
//
|
||||
vint64m2_t test_vset_v_i64m1_i64m2(vint64m2_t dest, vint64m1_t val) {
|
||||
|
@ -179,7 +179,7 @@ vint64m2_t test_vset_v_i64m1_i64m2(vint64m2_t dest, vint64m1_t val) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vset_v_i64m1_i64m4(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.experimental.vector.insert.nxv4i64.nxv1i64(<vscale x 4 x i64> [[DEST:%.*]], <vscale x 1 x i64> [[VAL:%.*]], i64 3)
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.vector.insert.nxv4i64.nxv1i64(<vscale x 4 x i64> [[DEST:%.*]], <vscale x 1 x i64> [[VAL:%.*]], i64 3)
|
||||
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
|
||||
//
|
||||
vint64m4_t test_vset_v_i64m1_i64m4(vint64m4_t dest, vint64m1_t val) {
|
||||
|
@ -188,7 +188,7 @@ vint64m4_t test_vset_v_i64m1_i64m4(vint64m4_t dest, vint64m1_t val) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vset_v_i64m2_i64m4(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.experimental.vector.insert.nxv4i64.nxv2i64(<vscale x 4 x i64> [[DEST:%.*]], <vscale x 2 x i64> [[VAL:%.*]], i64 2)
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.vector.insert.nxv4i64.nxv2i64(<vscale x 4 x i64> [[DEST:%.*]], <vscale x 2 x i64> [[VAL:%.*]], i64 2)
|
||||
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
|
||||
//
|
||||
vint64m4_t test_vset_v_i64m2_i64m4(vint64m4_t dest, vint64m2_t val) {
|
||||
|
@ -197,7 +197,7 @@ vint64m4_t test_vset_v_i64m2_i64m4(vint64m4_t dest, vint64m2_t val) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vset_v_i64m1_i64m8(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.experimental.vector.insert.nxv8i64.nxv1i64(<vscale x 8 x i64> [[DEST:%.*]], <vscale x 1 x i64> [[VAL:%.*]], i64 7)
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.vector.insert.nxv8i64.nxv1i64(<vscale x 8 x i64> [[DEST:%.*]], <vscale x 1 x i64> [[VAL:%.*]], i64 7)
|
||||
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
|
||||
//
|
||||
vint64m8_t test_vset_v_i64m1_i64m8(vint64m8_t dest, vint64m1_t val) {
|
||||
|
@ -206,7 +206,7 @@ vint64m8_t test_vset_v_i64m1_i64m8(vint64m8_t dest, vint64m1_t val) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vset_v_i64m2_i64m8(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.experimental.vector.insert.nxv8i64.nxv2i64(<vscale x 8 x i64> [[DEST:%.*]], <vscale x 2 x i64> [[VAL:%.*]], i64 4)
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.vector.insert.nxv8i64.nxv2i64(<vscale x 8 x i64> [[DEST:%.*]], <vscale x 2 x i64> [[VAL:%.*]], i64 4)
|
||||
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
|
||||
//
|
||||
vint64m8_t test_vset_v_i64m2_i64m8(vint64m8_t dest, vint64m2_t val) {
|
||||
|
@ -215,7 +215,7 @@ vint64m8_t test_vset_v_i64m2_i64m8(vint64m8_t dest, vint64m2_t val) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vset_v_i64m4_i64m8(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.experimental.vector.insert.nxv8i64.nxv4i64(<vscale x 8 x i64> [[DEST:%.*]], <vscale x 4 x i64> [[VAL:%.*]], i64 4)
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.vector.insert.nxv8i64.nxv4i64(<vscale x 8 x i64> [[DEST:%.*]], <vscale x 4 x i64> [[VAL:%.*]], i64 4)
|
||||
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
|
||||
//
|
||||
vint64m8_t test_vset_v_i64m4_i64m8(vint64m8_t dest, vint64m4_t val) {
|
||||
|
@ -224,7 +224,7 @@ vint64m8_t test_vset_v_i64m4_i64m8(vint64m8_t dest, vint64m4_t val) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vset_v_u8m1_u8m2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.experimental.vector.insert.nxv16i8.nxv8i8(<vscale x 16 x i8> [[DEST:%.*]], <vscale x 8 x i8> [[VAL:%.*]], i64 8)
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.vector.insert.nxv16i8.nxv8i8(<vscale x 16 x i8> [[DEST:%.*]], <vscale x 8 x i8> [[VAL:%.*]], i64 8)
|
||||
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
|
||||
//
|
||||
vuint8m2_t test_vset_v_u8m1_u8m2(vuint8m2_t dest, vuint8m1_t val) {
|
||||
|
@ -233,7 +233,7 @@ vuint8m2_t test_vset_v_u8m1_u8m2(vuint8m2_t dest, vuint8m1_t val) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vset_v_u8m1_u8m4(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.experimental.vector.insert.nxv32i8.nxv8i8(<vscale x 32 x i8> [[DEST:%.*]], <vscale x 8 x i8> [[VAL:%.*]], i64 24)
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.vector.insert.nxv32i8.nxv8i8(<vscale x 32 x i8> [[DEST:%.*]], <vscale x 8 x i8> [[VAL:%.*]], i64 24)
|
||||
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
|
||||
//
|
||||
vuint8m4_t test_vset_v_u8m1_u8m4(vuint8m4_t dest, vuint8m1_t val) {
|
||||
|
@ -242,7 +242,7 @@ vuint8m4_t test_vset_v_u8m1_u8m4(vuint8m4_t dest, vuint8m1_t val) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vset_v_u8m2_u8m4(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.experimental.vector.insert.nxv32i8.nxv16i8(<vscale x 32 x i8> [[DEST:%.*]], <vscale x 16 x i8> [[VAL:%.*]], i64 16)
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.vector.insert.nxv32i8.nxv16i8(<vscale x 32 x i8> [[DEST:%.*]], <vscale x 16 x i8> [[VAL:%.*]], i64 16)
|
||||
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
|
||||
//
|
||||
vuint8m4_t test_vset_v_u8m2_u8m4(vuint8m4_t dest, vuint8m2_t val) {
|
||||
|
@ -251,7 +251,7 @@ vuint8m4_t test_vset_v_u8m2_u8m4(vuint8m4_t dest, vuint8m2_t val) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vset_v_u8m1_u8m8(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.experimental.vector.insert.nxv64i8.nxv8i8(<vscale x 64 x i8> [[DEST:%.*]], <vscale x 8 x i8> [[VAL:%.*]], i64 56)
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.vector.insert.nxv64i8.nxv8i8(<vscale x 64 x i8> [[DEST:%.*]], <vscale x 8 x i8> [[VAL:%.*]], i64 56)
|
||||
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
|
||||
//
|
||||
vuint8m8_t test_vset_v_u8m1_u8m8(vuint8m8_t dest, vuint8m1_t val) {
|
||||
|
@ -260,7 +260,7 @@ vuint8m8_t test_vset_v_u8m1_u8m8(vuint8m8_t dest, vuint8m1_t val) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vset_v_u8m2_u8m8(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.experimental.vector.insert.nxv64i8.nxv16i8(<vscale x 64 x i8> [[DEST:%.*]], <vscale x 16 x i8> [[VAL:%.*]], i64 32)
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.vector.insert.nxv64i8.nxv16i8(<vscale x 64 x i8> [[DEST:%.*]], <vscale x 16 x i8> [[VAL:%.*]], i64 32)
|
||||
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
|
||||
//
|
||||
vuint8m8_t test_vset_v_u8m2_u8m8(vuint8m8_t dest, vuint8m2_t val) {
|
||||
|
@ -269,7 +269,7 @@ vuint8m8_t test_vset_v_u8m2_u8m8(vuint8m8_t dest, vuint8m2_t val) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vset_v_u8m4_u8m8(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.experimental.vector.insert.nxv64i8.nxv32i8(<vscale x 64 x i8> [[DEST:%.*]], <vscale x 32 x i8> [[VAL:%.*]], i64 32)
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.vector.insert.nxv64i8.nxv32i8(<vscale x 64 x i8> [[DEST:%.*]], <vscale x 32 x i8> [[VAL:%.*]], i64 32)
|
||||
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
|
||||
//
|
||||
vuint8m8_t test_vset_v_u8m4_u8m8(vuint8m8_t dest, vuint8m4_t val) {
|
||||
|
@ -278,7 +278,7 @@ vuint8m8_t test_vset_v_u8m4_u8m8(vuint8m8_t dest, vuint8m4_t val) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vset_v_u16m1_u16m2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.experimental.vector.insert.nxv8i16.nxv4i16(<vscale x 8 x i16> [[DEST:%.*]], <vscale x 4 x i16> [[VAL:%.*]], i64 4)
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.vector.insert.nxv8i16.nxv4i16(<vscale x 8 x i16> [[DEST:%.*]], <vscale x 4 x i16> [[VAL:%.*]], i64 4)
|
||||
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
|
||||
//
|
||||
vuint16m2_t test_vset_v_u16m1_u16m2(vuint16m2_t dest, vuint16m1_t val) {
|
||||
|
@ -287,7 +287,7 @@ vuint16m2_t test_vset_v_u16m1_u16m2(vuint16m2_t dest, vuint16m1_t val) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vset_v_u16m1_u16m4(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.experimental.vector.insert.nxv16i16.nxv4i16(<vscale x 16 x i16> [[DEST:%.*]], <vscale x 4 x i16> [[VAL:%.*]], i64 12)
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.vector.insert.nxv16i16.nxv4i16(<vscale x 16 x i16> [[DEST:%.*]], <vscale x 4 x i16> [[VAL:%.*]], i64 12)
|
||||
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
|
||||
//
|
||||
vuint16m4_t test_vset_v_u16m1_u16m4(vuint16m4_t dest, vuint16m1_t val) {
|
||||
|
@ -296,7 +296,7 @@ vuint16m4_t test_vset_v_u16m1_u16m4(vuint16m4_t dest, vuint16m1_t val) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vset_v_u16m2_u16m4(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.experimental.vector.insert.nxv16i16.nxv8i16(<vscale x 16 x i16> [[DEST:%.*]], <vscale x 8 x i16> [[VAL:%.*]], i64 8)
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.vector.insert.nxv16i16.nxv8i16(<vscale x 16 x i16> [[DEST:%.*]], <vscale x 8 x i16> [[VAL:%.*]], i64 8)
|
||||
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
|
||||
//
|
||||
vuint16m4_t test_vset_v_u16m2_u16m4(vuint16m4_t dest, vuint16m2_t val) {
|
||||
|
@ -305,7 +305,7 @@ vuint16m4_t test_vset_v_u16m2_u16m4(vuint16m4_t dest, vuint16m2_t val) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vset_v_u16m1_u16m8(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.experimental.vector.insert.nxv32i16.nxv4i16(<vscale x 32 x i16> [[DEST:%.*]], <vscale x 4 x i16> [[VAL:%.*]], i64 28)
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.vector.insert.nxv32i16.nxv4i16(<vscale x 32 x i16> [[DEST:%.*]], <vscale x 4 x i16> [[VAL:%.*]], i64 28)
|
||||
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
|
||||
//
|
||||
vuint16m8_t test_vset_v_u16m1_u16m8(vuint16m8_t dest, vuint16m1_t val) {
|
||||
|
@ -314,7 +314,7 @@ vuint16m8_t test_vset_v_u16m1_u16m8(vuint16m8_t dest, vuint16m1_t val) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vset_v_u16m2_u16m8(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.experimental.vector.insert.nxv32i16.nxv8i16(<vscale x 32 x i16> [[DEST:%.*]], <vscale x 8 x i16> [[VAL:%.*]], i64 16)
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.vector.insert.nxv32i16.nxv8i16(<vscale x 32 x i16> [[DEST:%.*]], <vscale x 8 x i16> [[VAL:%.*]], i64 16)
|
||||
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
|
||||
//
|
||||
vuint16m8_t test_vset_v_u16m2_u16m8(vuint16m8_t dest, vuint16m2_t val) {
|
||||
|
@ -323,7 +323,7 @@ vuint16m8_t test_vset_v_u16m2_u16m8(vuint16m8_t dest, vuint16m2_t val) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vset_v_u16m4_u16m8(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.experimental.vector.insert.nxv32i16.nxv16i16(<vscale x 32 x i16> [[DEST:%.*]], <vscale x 16 x i16> [[VAL:%.*]], i64 16)
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.vector.insert.nxv32i16.nxv16i16(<vscale x 32 x i16> [[DEST:%.*]], <vscale x 16 x i16> [[VAL:%.*]], i64 16)
|
||||
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
|
||||
//
|
||||
vuint16m8_t test_vset_v_u16m4_u16m8(vuint16m8_t dest, vuint16m4_t val) {
|
||||
|
@ -332,7 +332,7 @@ vuint16m8_t test_vset_v_u16m4_u16m8(vuint16m8_t dest, vuint16m4_t val) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vset_v_u32m1_u32m2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vector.insert.nxv4i32.nxv2i32(<vscale x 4 x i32> [[DEST:%.*]], <vscale x 2 x i32> [[VAL:%.*]], i64 2)
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.vector.insert.nxv4i32.nxv2i32(<vscale x 4 x i32> [[DEST:%.*]], <vscale x 2 x i32> [[VAL:%.*]], i64 2)
|
||||
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
|
||||
//
|
||||
vuint32m2_t test_vset_v_u32m1_u32m2(vuint32m2_t dest, vuint32m1_t val) {
|
||||
|
@ -341,7 +341,7 @@ vuint32m2_t test_vset_v_u32m1_u32m2(vuint32m2_t dest, vuint32m1_t val) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vset_v_u32m1_u32m4(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.experimental.vector.insert.nxv8i32.nxv2i32(<vscale x 8 x i32> [[DEST:%.*]], <vscale x 2 x i32> [[VAL:%.*]], i64 6)
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.vector.insert.nxv8i32.nxv2i32(<vscale x 8 x i32> [[DEST:%.*]], <vscale x 2 x i32> [[VAL:%.*]], i64 6)
|
||||
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
|
||||
//
|
||||
vuint32m4_t test_vset_v_u32m1_u32m4(vuint32m4_t dest, vuint32m1_t val) {
|
||||
|
@ -350,7 +350,7 @@ vuint32m4_t test_vset_v_u32m1_u32m4(vuint32m4_t dest, vuint32m1_t val) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vset_v_u32m2_u32m4(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.experimental.vector.insert.nxv8i32.nxv4i32(<vscale x 8 x i32> [[DEST:%.*]], <vscale x 4 x i32> [[VAL:%.*]], i64 4)
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.vector.insert.nxv8i32.nxv4i32(<vscale x 8 x i32> [[DEST:%.*]], <vscale x 4 x i32> [[VAL:%.*]], i64 4)
|
||||
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
|
||||
//
|
||||
vuint32m4_t test_vset_v_u32m2_u32m4(vuint32m4_t dest, vuint32m2_t val) {
|
||||
|
@ -359,7 +359,7 @@ vuint32m4_t test_vset_v_u32m2_u32m4(vuint32m4_t dest, vuint32m2_t val) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vset_v_u32m1_u32m8(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.experimental.vector.insert.nxv16i32.nxv2i32(<vscale x 16 x i32> [[DEST:%.*]], <vscale x 2 x i32> [[VAL:%.*]], i64 14)
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.vector.insert.nxv16i32.nxv2i32(<vscale x 16 x i32> [[DEST:%.*]], <vscale x 2 x i32> [[VAL:%.*]], i64 14)
|
||||
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
|
||||
//
|
||||
vuint32m8_t test_vset_v_u32m1_u32m8(vuint32m8_t dest, vuint32m1_t val) {
|
||||
|
@ -368,7 +368,7 @@ vuint32m8_t test_vset_v_u32m1_u32m8(vuint32m8_t dest, vuint32m1_t val) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vset_v_u32m2_u32m8(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.experimental.vector.insert.nxv16i32.nxv4i32(<vscale x 16 x i32> [[DEST:%.*]], <vscale x 4 x i32> [[VAL:%.*]], i64 8)
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.vector.insert.nxv16i32.nxv4i32(<vscale x 16 x i32> [[DEST:%.*]], <vscale x 4 x i32> [[VAL:%.*]], i64 8)
|
||||
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
|
||||
//
|
||||
vuint32m8_t test_vset_v_u32m2_u32m8(vuint32m8_t dest, vuint32m2_t val) {
|
||||
|
@ -377,7 +377,7 @@ vuint32m8_t test_vset_v_u32m2_u32m8(vuint32m8_t dest, vuint32m2_t val) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vset_v_u32m4_u32m8(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.experimental.vector.insert.nxv16i32.nxv8i32(<vscale x 16 x i32> [[DEST:%.*]], <vscale x 8 x i32> [[VAL:%.*]], i64 8)
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.vector.insert.nxv16i32.nxv8i32(<vscale x 16 x i32> [[DEST:%.*]], <vscale x 8 x i32> [[VAL:%.*]], i64 8)
|
||||
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
|
||||
//
|
||||
vuint32m8_t test_vset_v_u32m4_u32m8(vuint32m8_t dest, vuint32m4_t val) {
|
||||
|
@ -386,7 +386,7 @@ vuint32m8_t test_vset_v_u32m4_u32m8(vuint32m8_t dest, vuint32m4_t val) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vset_v_u64m1_u64m2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.experimental.vector.insert.nxv2i64.nxv1i64(<vscale x 2 x i64> [[DEST:%.*]], <vscale x 1 x i64> [[VAL:%.*]], i64 1)
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.vector.insert.nxv2i64.nxv1i64(<vscale x 2 x i64> [[DEST:%.*]], <vscale x 1 x i64> [[VAL:%.*]], i64 1)
|
||||
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
|
||||
//
|
||||
vuint64m2_t test_vset_v_u64m1_u64m2(vuint64m2_t dest, vuint64m1_t val) {
|
||||
|
@ -395,7 +395,7 @@ vuint64m2_t test_vset_v_u64m1_u64m2(vuint64m2_t dest, vuint64m1_t val) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vset_v_u64m1_u64m4(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.experimental.vector.insert.nxv4i64.nxv1i64(<vscale x 4 x i64> [[DEST:%.*]], <vscale x 1 x i64> [[VAL:%.*]], i64 3)
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.vector.insert.nxv4i64.nxv1i64(<vscale x 4 x i64> [[DEST:%.*]], <vscale x 1 x i64> [[VAL:%.*]], i64 3)
|
||||
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
|
||||
//
|
||||
vuint64m4_t test_vset_v_u64m1_u64m4(vuint64m4_t dest, vuint64m1_t val) {
|
||||
|
@ -404,7 +404,7 @@ vuint64m4_t test_vset_v_u64m1_u64m4(vuint64m4_t dest, vuint64m1_t val) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vset_v_u64m2_u64m4(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.experimental.vector.insert.nxv4i64.nxv2i64(<vscale x 4 x i64> [[DEST:%.*]], <vscale x 2 x i64> [[VAL:%.*]], i64 2)
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.vector.insert.nxv4i64.nxv2i64(<vscale x 4 x i64> [[DEST:%.*]], <vscale x 2 x i64> [[VAL:%.*]], i64 2)
|
||||
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
|
||||
//
|
||||
vuint64m4_t test_vset_v_u64m2_u64m4(vuint64m4_t dest, vuint64m2_t val) {
|
||||
|
@ -413,7 +413,7 @@ vuint64m4_t test_vset_v_u64m2_u64m4(vuint64m4_t dest, vuint64m2_t val) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vset_v_u64m1_u64m8(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.experimental.vector.insert.nxv8i64.nxv1i64(<vscale x 8 x i64> [[DEST:%.*]], <vscale x 1 x i64> [[VAL:%.*]], i64 7)
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.vector.insert.nxv8i64.nxv1i64(<vscale x 8 x i64> [[DEST:%.*]], <vscale x 1 x i64> [[VAL:%.*]], i64 7)
|
||||
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
|
||||
//
|
||||
vuint64m8_t test_vset_v_u64m1_u64m8(vuint64m8_t dest, vuint64m1_t val) {
|
||||
|
@ -422,7 +422,7 @@ vuint64m8_t test_vset_v_u64m1_u64m8(vuint64m8_t dest, vuint64m1_t val) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vset_v_u64m2_u64m8(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.experimental.vector.insert.nxv8i64.nxv2i64(<vscale x 8 x i64> [[DEST:%.*]], <vscale x 2 x i64> [[VAL:%.*]], i64 4)
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.vector.insert.nxv8i64.nxv2i64(<vscale x 8 x i64> [[DEST:%.*]], <vscale x 2 x i64> [[VAL:%.*]], i64 4)
|
||||
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
|
||||
//
|
||||
vuint64m8_t test_vset_v_u64m2_u64m8(vuint64m8_t dest, vuint64m2_t val) {
|
||||
|
@ -431,7 +431,7 @@ vuint64m8_t test_vset_v_u64m2_u64m8(vuint64m8_t dest, vuint64m2_t val) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vset_v_u64m4_u64m8(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.experimental.vector.insert.nxv8i64.nxv4i64(<vscale x 8 x i64> [[DEST:%.*]], <vscale x 4 x i64> [[VAL:%.*]], i64 4)
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.vector.insert.nxv8i64.nxv4i64(<vscale x 8 x i64> [[DEST:%.*]], <vscale x 4 x i64> [[VAL:%.*]], i64 4)
|
||||
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
|
||||
//
|
||||
vuint64m8_t test_vset_v_u64m4_u64m8(vuint64m8_t dest, vuint64m4_t val) {
|
||||
|
@ -440,7 +440,7 @@ vuint64m8_t test_vset_v_u64m4_u64m8(vuint64m8_t dest, vuint64m4_t val) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vset_v_f32m1_f32m2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.experimental.vector.insert.nxv4f32.nxv2f32(<vscale x 4 x float> [[DEST:%.*]], <vscale x 2 x float> [[VAL:%.*]], i64 2)
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.vector.insert.nxv4f32.nxv2f32(<vscale x 4 x float> [[DEST:%.*]], <vscale x 2 x float> [[VAL:%.*]], i64 2)
|
||||
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
|
||||
//
|
||||
vfloat32m2_t test_vset_v_f32m1_f32m2(vfloat32m2_t dest, vfloat32m1_t val) {
|
||||
|
@ -449,7 +449,7 @@ vfloat32m2_t test_vset_v_f32m1_f32m2(vfloat32m2_t dest, vfloat32m1_t val) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vset_v_f32m1_f32m4(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.experimental.vector.insert.nxv8f32.nxv2f32(<vscale x 8 x float> [[DEST:%.*]], <vscale x 2 x float> [[VAL:%.*]], i64 6)
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.vector.insert.nxv8f32.nxv2f32(<vscale x 8 x float> [[DEST:%.*]], <vscale x 2 x float> [[VAL:%.*]], i64 6)
|
||||
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
|
||||
//
|
||||
vfloat32m4_t test_vset_v_f32m1_f32m4(vfloat32m4_t dest, vfloat32m1_t val) {
|
||||
|
@ -458,7 +458,7 @@ vfloat32m4_t test_vset_v_f32m1_f32m4(vfloat32m4_t dest, vfloat32m1_t val) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vset_v_f32m2_f32m4(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.experimental.vector.insert.nxv8f32.nxv4f32(<vscale x 8 x float> [[DEST:%.*]], <vscale x 4 x float> [[VAL:%.*]], i64 4)
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.vector.insert.nxv8f32.nxv4f32(<vscale x 8 x float> [[DEST:%.*]], <vscale x 4 x float> [[VAL:%.*]], i64 4)
|
||||
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
|
||||
//
|
||||
vfloat32m4_t test_vset_v_f32m2_f32m4(vfloat32m4_t dest, vfloat32m2_t val) {
|
||||
|
@ -467,7 +467,7 @@ vfloat32m4_t test_vset_v_f32m2_f32m4(vfloat32m4_t dest, vfloat32m2_t val) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vset_v_f32m1_f32m8(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.experimental.vector.insert.nxv16f32.nxv2f32(<vscale x 16 x float> [[DEST:%.*]], <vscale x 2 x float> [[VAL:%.*]], i64 14)
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.vector.insert.nxv16f32.nxv2f32(<vscale x 16 x float> [[DEST:%.*]], <vscale x 2 x float> [[VAL:%.*]], i64 14)
|
||||
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
|
||||
//
|
||||
vfloat32m8_t test_vset_v_f32m1_f32m8(vfloat32m8_t dest, vfloat32m1_t val) {
|
||||
|
@ -476,7 +476,7 @@ vfloat32m8_t test_vset_v_f32m1_f32m8(vfloat32m8_t dest, vfloat32m1_t val) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vset_v_f32m2_f32m8(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.experimental.vector.insert.nxv16f32.nxv4f32(<vscale x 16 x float> [[DEST:%.*]], <vscale x 4 x float> [[VAL:%.*]], i64 8)
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.vector.insert.nxv16f32.nxv4f32(<vscale x 16 x float> [[DEST:%.*]], <vscale x 4 x float> [[VAL:%.*]], i64 8)
|
||||
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
|
||||
//
|
||||
vfloat32m8_t test_vset_v_f32m2_f32m8(vfloat32m8_t dest, vfloat32m2_t val) {
|
||||
|
@ -485,7 +485,7 @@ vfloat32m8_t test_vset_v_f32m2_f32m8(vfloat32m8_t dest, vfloat32m2_t val) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vset_v_f32m4_f32m8(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.experimental.vector.insert.nxv16f32.nxv8f32(<vscale x 16 x float> [[DEST:%.*]], <vscale x 8 x float> [[VAL:%.*]], i64 8)
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.vector.insert.nxv16f32.nxv8f32(<vscale x 16 x float> [[DEST:%.*]], <vscale x 8 x float> [[VAL:%.*]], i64 8)
|
||||
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
|
||||
//
|
||||
vfloat32m8_t test_vset_v_f32m4_f32m8(vfloat32m8_t dest, vfloat32m4_t val) {
|
||||
|
@ -494,7 +494,7 @@ vfloat32m8_t test_vset_v_f32m4_f32m8(vfloat32m8_t dest, vfloat32m4_t val) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vset_v_f64m1_f64m2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.experimental.vector.insert.nxv2f64.nxv1f64(<vscale x 2 x double> [[DEST:%.*]], <vscale x 1 x double> [[VAL:%.*]], i64 1)
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.vector.insert.nxv2f64.nxv1f64(<vscale x 2 x double> [[DEST:%.*]], <vscale x 1 x double> [[VAL:%.*]], i64 1)
|
||||
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
|
||||
//
|
||||
vfloat64m2_t test_vset_v_f64m1_f64m2(vfloat64m2_t dest, vfloat64m1_t val) {
|
||||
|
@ -503,7 +503,7 @@ vfloat64m2_t test_vset_v_f64m1_f64m2(vfloat64m2_t dest, vfloat64m1_t val) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vset_v_f64m1_f64m4(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.experimental.vector.insert.nxv4f64.nxv1f64(<vscale x 4 x double> [[DEST:%.*]], <vscale x 1 x double> [[VAL:%.*]], i64 3)
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.vector.insert.nxv4f64.nxv1f64(<vscale x 4 x double> [[DEST:%.*]], <vscale x 1 x double> [[VAL:%.*]], i64 3)
|
||||
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
|
||||
//
|
||||
vfloat64m4_t test_vset_v_f64m1_f64m4(vfloat64m4_t dest, vfloat64m1_t val) {
|
||||
|
@ -512,7 +512,7 @@ vfloat64m4_t test_vset_v_f64m1_f64m4(vfloat64m4_t dest, vfloat64m1_t val) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vset_v_f64m2_f64m4(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.experimental.vector.insert.nxv4f64.nxv2f64(<vscale x 4 x double> [[DEST:%.*]], <vscale x 2 x double> [[VAL:%.*]], i64 2)
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.vector.insert.nxv4f64.nxv2f64(<vscale x 4 x double> [[DEST:%.*]], <vscale x 2 x double> [[VAL:%.*]], i64 2)
|
||||
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
|
||||
//
|
||||
vfloat64m4_t test_vset_v_f64m2_f64m4(vfloat64m4_t dest, vfloat64m2_t val) {
|
||||
|
@ -521,7 +521,7 @@ vfloat64m4_t test_vset_v_f64m2_f64m4(vfloat64m4_t dest, vfloat64m2_t val) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vset_v_f64m1_f64m8(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.experimental.vector.insert.nxv8f64.nxv1f64(<vscale x 8 x double> [[DEST:%.*]], <vscale x 1 x double> [[VAL:%.*]], i64 7)
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.vector.insert.nxv8f64.nxv1f64(<vscale x 8 x double> [[DEST:%.*]], <vscale x 1 x double> [[VAL:%.*]], i64 7)
|
||||
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
|
||||
//
|
||||
vfloat64m8_t test_vset_v_f64m1_f64m8(vfloat64m8_t dest, vfloat64m1_t val) {
|
||||
|
@ -530,7 +530,7 @@ vfloat64m8_t test_vset_v_f64m1_f64m8(vfloat64m8_t dest, vfloat64m1_t val) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vset_v_f64m2_f64m8(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.experimental.vector.insert.nxv8f64.nxv2f64(<vscale x 8 x double> [[DEST:%.*]], <vscale x 2 x double> [[VAL:%.*]], i64 4)
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.vector.insert.nxv8f64.nxv2f64(<vscale x 8 x double> [[DEST:%.*]], <vscale x 2 x double> [[VAL:%.*]], i64 4)
|
||||
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
|
||||
//
|
||||
vfloat64m8_t test_vset_v_f64m2_f64m8(vfloat64m8_t dest, vfloat64m2_t val) {
|
||||
|
@ -539,7 +539,7 @@ vfloat64m8_t test_vset_v_f64m2_f64m8(vfloat64m8_t dest, vfloat64m2_t val) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vset_v_f64m4_f64m8(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.experimental.vector.insert.nxv8f64.nxv4f64(<vscale x 8 x double> [[DEST:%.*]], <vscale x 4 x double> [[VAL:%.*]], i64 4)
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.vector.insert.nxv8f64.nxv4f64(<vscale x 8 x double> [[DEST:%.*]], <vscale x 4 x double> [[VAL:%.*]], i64 4)
|
||||
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
|
||||
//
|
||||
vfloat64m8_t test_vset_v_f64m4_f64m8(vfloat64m8_t dest, vfloat64m4_t val) {
|
||||
|
@ -548,7 +548,7 @@ vfloat64m8_t test_vset_v_f64m4_f64m8(vfloat64m8_t dest, vfloat64m4_t val) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vset_v_f16m1_f16m2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.experimental.vector.insert.nxv8f16.nxv4f16(<vscale x 8 x half> [[DEST:%.*]], <vscale x 4 x half> [[VAL:%.*]], i64 0)
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.vector.insert.nxv8f16.nxv4f16(<vscale x 8 x half> [[DEST:%.*]], <vscale x 4 x half> [[VAL:%.*]], i64 0)
|
||||
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
|
||||
//
|
||||
vfloat16m2_t test_vset_v_f16m1_f16m2 (vfloat16m2_t dest, vfloat16m1_t val) {
|
||||
|
@ -557,7 +557,7 @@ vfloat16m2_t test_vset_v_f16m1_f16m2 (vfloat16m2_t dest, vfloat16m1_t val) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vset_v_f16m1_f16m4(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.experimental.vector.insert.nxv16f16.nxv4f16(<vscale x 16 x half> [[DEST:%.*]], <vscale x 4 x half> [[VAL:%.*]], i64 0)
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.vector.insert.nxv16f16.nxv4f16(<vscale x 16 x half> [[DEST:%.*]], <vscale x 4 x half> [[VAL:%.*]], i64 0)
|
||||
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
|
||||
//
|
||||
vfloat16m4_t test_vset_v_f16m1_f16m4 (vfloat16m4_t dest, vfloat16m1_t val) {
|
||||
|
@ -566,7 +566,7 @@ vfloat16m4_t test_vset_v_f16m1_f16m4 (vfloat16m4_t dest, vfloat16m1_t val) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vset_v_f16m2_f16m4(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.experimental.vector.insert.nxv16f16.nxv8f16(<vscale x 16 x half> [[DEST:%.*]], <vscale x 8 x half> [[VAL:%.*]], i64 0)
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.vector.insert.nxv16f16.nxv8f16(<vscale x 16 x half> [[DEST:%.*]], <vscale x 8 x half> [[VAL:%.*]], i64 0)
|
||||
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
|
||||
//
|
||||
vfloat16m4_t test_vset_v_f16m2_f16m4 (vfloat16m4_t dest, vfloat16m2_t val) {
|
||||
|
@ -575,7 +575,7 @@ vfloat16m4_t test_vset_v_f16m2_f16m4 (vfloat16m4_t dest, vfloat16m2_t val) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vset_v_f16m1_f16m8(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.experimental.vector.insert.nxv32f16.nxv4f16(<vscale x 32 x half> [[DEST:%.*]], <vscale x 4 x half> [[VAL:%.*]], i64 0)
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.vector.insert.nxv32f16.nxv4f16(<vscale x 32 x half> [[DEST:%.*]], <vscale x 4 x half> [[VAL:%.*]], i64 0)
|
||||
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
|
||||
//
|
||||
vfloat16m8_t test_vset_v_f16m1_f16m8 (vfloat16m8_t dest, vfloat16m1_t val) {
|
||||
|
@ -584,7 +584,7 @@ vfloat16m8_t test_vset_v_f16m1_f16m8 (vfloat16m8_t dest, vfloat16m1_t val) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vset_v_f16m2_f16m8(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.experimental.vector.insert.nxv32f16.nxv8f16(<vscale x 32 x half> [[DEST:%.*]], <vscale x 8 x half> [[VAL:%.*]], i64 0)
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.vector.insert.nxv32f16.nxv8f16(<vscale x 32 x half> [[DEST:%.*]], <vscale x 8 x half> [[VAL:%.*]], i64 0)
|
||||
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
|
||||
//
|
||||
vfloat16m8_t test_vset_v_f16m2_f16m8 (vfloat16m8_t dest, vfloat16m2_t val) {
|
||||
|
@ -593,7 +593,7 @@ vfloat16m8_t test_vset_v_f16m2_f16m8 (vfloat16m8_t dest, vfloat16m2_t val) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vset_v_f16m4_f16m8(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.experimental.vector.insert.nxv32f16.nxv16f16(<vscale x 32 x half> [[DEST:%.*]], <vscale x 16 x half> [[VAL:%.*]], i64 0)
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.vector.insert.nxv32f16.nxv16f16(<vscale x 32 x half> [[DEST:%.*]], <vscale x 16 x half> [[VAL:%.*]], i64 0)
|
||||
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
|
||||
//
|
||||
vfloat16m8_t test_vset_v_f16m4_f16m8 (vfloat16m8_t dest, vfloat16m4_t val) {
|
||||
|
|
|
@ -53,9 +53,9 @@ typedef int8_t vec_int8 __attribute__((vector_size(N / 8)));
|
|||
// CHECK128-LABEL: define{{.*}} <16 x i8> @f2(<16 x i8> noundef %x)
|
||||
// CHECK128-NEXT: entry:
|
||||
// CHECK128-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.ptrue.nxv16i1(i32 31)
|
||||
// CHECK128-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 16 x i8> @llvm.experimental.vector.insert.nxv16i8.v16i8(<vscale x 16 x i8> undef, <16 x i8> [[X:%.*]], i64 0)
|
||||
// CHECK128-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 16 x i8> @llvm.vector.insert.nxv16i8.v16i8(<vscale x 16 x i8> undef, <16 x i8> [[X:%.*]], i64 0)
|
||||
// CHECK128-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.asrd.nxv16i8(<vscale x 16 x i1> [[TMP0]], <vscale x 16 x i8> [[CASTSCALABLESVE]], i32 1)
|
||||
// CHECK128-NEXT: [[CASTFIXEDSVE:%.*]] = call <16 x i8> @llvm.experimental.vector.extract.v16i8.nxv16i8(<vscale x 16 x i8> [[TMP1]], i64 0)
|
||||
// CHECK128-NEXT: [[CASTFIXEDSVE:%.*]] = call <16 x i8> @llvm.vector.extract.v16i8.nxv16i8(<vscale x 16 x i8> [[TMP1]], i64 0)
|
||||
// CHECK128-NEXT: ret <16 x i8> [[CASTFIXEDSVE]]
|
||||
|
||||
// CHECK-LABEL: define{{.*}} void @f2(
|
||||
|
@ -63,9 +63,9 @@ typedef int8_t vec_int8 __attribute__((vector_size(N / 8)));
|
|||
// CHECK-NEXT: entry:
|
||||
// CHECK-NEXT: [[X:%.*]] = load <[[#div(VBITS,8)]] x i8>, <[[#div(VBITS,8)]] x i8>* [[TMP0:%.*]], align 16, [[TBAA6:!tbaa !.*]]
|
||||
// CHECK-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.ptrue.nxv16i1(i32 31)
|
||||
// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 16 x i8> @llvm.experimental.vector.insert.nxv16i8.v[[#div(VBITS,8)]]i8(<vscale x 16 x i8> undef, <[[#div(VBITS,8)]] x i8> [[X]], i64 0)
|
||||
// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 16 x i8> @llvm.vector.insert.nxv16i8.v[[#div(VBITS,8)]]i8(<vscale x 16 x i8> undef, <[[#div(VBITS,8)]] x i8> [[X]], i64 0)
|
||||
// CHECK-NEXT: [[TMP2:%.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.asrd.nxv16i8(<vscale x 16 x i1> [[TMP1]], <vscale x 16 x i8> [[CASTSCALABLESVE]], i32 1)
|
||||
// CHECK-NEXT: [[CASTFIXEDSVE:%.*]] = call <[[#div(VBITS,8)]] x i8> @llvm.experimental.vector.extract.v[[#div(VBITS,8)]]i8.nxv16i8(<vscale x 16 x i8> [[TMP2]], i64 0)
|
||||
// CHECK-NEXT: [[CASTFIXEDSVE:%.*]] = call <[[#div(VBITS,8)]] x i8> @llvm.vector.extract.v[[#div(VBITS,8)]]i8.nxv16i8(<vscale x 16 x i8> [[TMP2]], i64 0)
|
||||
// CHECK-NEXT: store <[[#div(VBITS,8)]] x i8> [[CASTFIXEDSVE]], <[[#div(VBITS,8)]] x i8>* [[AGG_RESULT:%.*]], align 16, [[TBAA6]]
|
||||
// CHECK-NEXT: ret void
|
||||
vec_int8 f2(vec_int8 x) { return svasrd_x(svptrue_b8(), x, 1); }
|
||||
|
@ -80,14 +80,14 @@ typedef svint8_t vec2 __attribute__((arm_sve_vector_bits(N)));
|
|||
|
||||
// CHECK128-LABEL: define{{.*}} void @g(<vscale x 16 x i8> noundef %x.coerce)
|
||||
// CHECK128-NEXT: entry:
|
||||
// CHECK128-NEXT: [[X:%.*]] = call <16 x i8> @llvm.experimental.vector.extract.v16i8.nxv16i8(<vscale x 16 x i8> [[X_COERCE:%.*]], i64 0)
|
||||
// CHECK128-NEXT: [[X:%.*]] = call <16 x i8> @llvm.vector.extract.v16i8.nxv16i8(<vscale x 16 x i8> [[X_COERCE:%.*]], i64 0)
|
||||
// CHECK128-NEXT: call void @f3(<16 x i8> noundef [[X]]) [[ATTR5:#.*]]
|
||||
// CHECK128-NEXT: ret void
|
||||
|
||||
// CHECK-LABEL: define{{.*}} void @g(<vscale x 16 x i8> noundef %x.coerce)
|
||||
// CHECK-NEXT: entry:
|
||||
// CHECK-NEXT: [[INDIRECT_ARG_TEMP:%.*]] = alloca <[[#div(VBITS,8)]] x i8>, align 16
|
||||
// CHECK-NEXT: [[X:%.*]] = call <[[#div(VBITS,8)]] x i8> @llvm.experimental.vector.extract.v[[#div(VBITS,8)]]i8.nxv16i8(<vscale x 16 x i8> [[X_COERCE:%.*]], i64 0)
|
||||
// CHECK-NEXT: [[X:%.*]] = call <[[#div(VBITS,8)]] x i8> @llvm.vector.extract.v[[#div(VBITS,8)]]i8.nxv16i8(<vscale x 16 x i8> [[X_COERCE:%.*]], i64 0)
|
||||
// CHECK-NEXT: store <[[#div(VBITS,8)]] x i8> [[X]], <[[#div(VBITS,8)]] x i8>* [[INDIRECT_ARG_TEMP]], align 16, [[TBAA6]]
|
||||
// CHECK-NEXT: call void @f3(<[[#div(VBITS,8)]] x i8>* noundef nonnull [[INDIRECT_ARG_TEMP]]) [[ATTR5:#.*]]
|
||||
// CHECK-NEXT: ret void
|
||||
|
|
|
@ -49,10 +49,10 @@ void test02() {
|
|||
// CHECK-SAME: [[#VBITS]]
|
||||
// CHECK-SAME: EES_(<vscale x 4 x i32> noundef %x.coerce, <vscale x 4 x i32> noundef %y.coerce)
|
||||
// CHECK-NEXT: entry:
|
||||
// CHECK-NEXT: [[X:%.*]] = call <[[#div(VBITS, 32)]] x i32> @llvm.experimental.vector.extract.v[[#div(VBITS, 32)]]i32.nxv4i32(<vscale x 4 x i32> [[X_COERCE:%.*]], i64 0)
|
||||
// CHECK-NEXT: [[Y:%.*]] = call <[[#div(VBITS, 32)]] x i32> @llvm.experimental.vector.extract.v[[#div(VBITS, 32)]]i32.nxv4i32(<vscale x 4 x i32> [[X_COERCE1:%.*]], i64 0)
|
||||
// CHECK-NEXT: [[X:%.*]] = call <[[#div(VBITS, 32)]] x i32> @llvm.vector.extract.v[[#div(VBITS, 32)]]i32.nxv4i32(<vscale x 4 x i32> [[X_COERCE:%.*]], i64 0)
|
||||
// CHECK-NEXT: [[Y:%.*]] = call <[[#div(VBITS, 32)]] x i32> @llvm.vector.extract.v[[#div(VBITS, 32)]]i32.nxv4i32(<vscale x 4 x i32> [[X_COERCE1:%.*]], i64 0)
|
||||
// CHECK-NEXT: [[ADD:%.*]] = add <[[#div(VBITS, 32)]] x i32> [[Y]], [[X]]
|
||||
// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vector.insert.nxv4i32.v[[#div(VBITS, 32)]]i32(<vscale x 4 x i32> undef, <[[#div(VBITS, 32)]] x i32> [[ADD]], i64 0)
|
||||
// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 4 x i32> @llvm.vector.insert.nxv4i32.v[[#div(VBITS, 32)]]i32(<vscale x 4 x i32> undef, <[[#div(VBITS, 32)]] x i32> [[ADD]], i64 0)
|
||||
// CHECK-NEXT: ret <vscale x 4 x i32> [[CASTSCALABLESVE]]
|
||||
typedef svint32_t vec __attribute__((arm_sve_vector_bits(N)));
|
||||
auto f(vec x, vec y) { return x + y; } // Returns a vec.
|
||||
|
@ -68,11 +68,11 @@ typedef svint16_t vec2 __attribute__((arm_sve_vector_bits(N)));
|
|||
// CHECK-SAME: [[#VBITS]]
|
||||
// CHECK-SAME: EE(<vscale x 8 x i16> noundef %x.coerce)
|
||||
// CHECK-NEXT: entry:
|
||||
// CHECK128-NEXT: [[X:%.*]] = call <8 x i16> @llvm.experimental.vector.extract.v8i16.nxv8i16(<vscale x 8 x i16> [[X_COERCE:%.*]], i64 0)
|
||||
// CHECK128-NEXT: [[X:%.*]] = call <8 x i16> @llvm.vector.extract.v8i16.nxv8i16(<vscale x 8 x i16> [[X_COERCE:%.*]], i64 0)
|
||||
// CHECK128-NEXT: call void @_Z1fDv8_s(<8 x i16> noundef [[X]]) [[ATTR5:#.*]]
|
||||
// CHECK128-NEXT: ret void
|
||||
// CHECKWIDE-NEXT: [[INDIRECT_ARG_TEMP:%.*]] = alloca <[[#div(VBITS, 16)]] x i16>, align 16
|
||||
// CHECKWIDE-NEXT: [[X:%.*]] = call <[[#div(VBITS, 16)]] x i16> @llvm.experimental.vector.extract.v[[#div(VBITS, 16)]]i16.nxv8i16(<vscale x 8 x i16> [[X_COERCE:%.*]], i64 0)
|
||||
// CHECKWIDE-NEXT: [[X:%.*]] = call <[[#div(VBITS, 16)]] x i16> @llvm.vector.extract.v[[#div(VBITS, 16)]]i16.nxv8i16(<vscale x 8 x i16> [[X_COERCE:%.*]], i64 0)
|
||||
// CHECKWIDE-NEXT: store <[[#div(VBITS, 16)]] x i16> [[X]], <[[#div(VBITS, 16)]] x i16>* [[INDIRECT_ARG_TEMP]], align 16, [[TBAA6:!tbaa !.*]]
|
||||
// CHECKWIDE-NEXT: call void @_Z1fDv[[#div(VBITS, 16)]]_s(<[[#div(VBITS, 16)]] x i16>* noundef nonnull [[INDIRECT_ARG_TEMP]]) [[ATTR5:#.*]]
|
||||
// CHECKWIDE-NEXT: ret void
|
||||
|
|
|
@ -39,7 +39,7 @@ svbfloat16_t test_svdupq_lane_bf16(svbfloat16_t data, uint64_t index) {
|
|||
// CHECK-NEXT: [[TMP5:%.*]] = insertelement <8 x bfloat> [[TMP4]], bfloat [[X5:%.*]], i64 5
|
||||
// CHECK-NEXT: [[TMP6:%.*]] = insertelement <8 x bfloat> [[TMP5]], bfloat [[X6:%.*]], i64 6
|
||||
// CHECK-NEXT: [[TMP7:%.*]] = insertelement <8 x bfloat> [[TMP6]], bfloat [[X7:%.*]], i64 7
|
||||
// CHECK-NEXT: [[TMP8:%.*]] = call <vscale x 8 x bfloat> @llvm.experimental.vector.insert.nxv8bf16.v8bf16(<vscale x 8 x bfloat> undef, <8 x bfloat> [[TMP7]], i64 0)
|
||||
// CHECK-NEXT: [[TMP8:%.*]] = call <vscale x 8 x bfloat> @llvm.vector.insert.nxv8bf16.v8bf16(<vscale x 8 x bfloat> undef, <8 x bfloat> [[TMP7]], i64 0)
|
||||
// CHECK-NEXT: [[TMP9:%.*]] = call <vscale x 8 x bfloat> @llvm.aarch64.sve.dupq.lane.nxv8bf16(<vscale x 8 x bfloat> [[TMP8]], i64 0)
|
||||
// CHECK-NEXT: ret <vscale x 8 x bfloat> [[TMP9]]
|
||||
//
|
||||
|
@ -53,7 +53,7 @@ svbfloat16_t test_svdupq_lane_bf16(svbfloat16_t data, uint64_t index) {
|
|||
// CPP-CHECK-NEXT: [[TMP5:%.*]] = insertelement <8 x bfloat> [[TMP4]], bfloat [[X5:%.*]], i64 5
|
||||
// CPP-CHECK-NEXT: [[TMP6:%.*]] = insertelement <8 x bfloat> [[TMP5]], bfloat [[X6:%.*]], i64 6
|
||||
// CPP-CHECK-NEXT: [[TMP7:%.*]] = insertelement <8 x bfloat> [[TMP6]], bfloat [[X7:%.*]], i64 7
|
||||
// CPP-CHECK-NEXT: [[TMP8:%.*]] = call <vscale x 8 x bfloat> @llvm.experimental.vector.insert.nxv8bf16.v8bf16(<vscale x 8 x bfloat> undef, <8 x bfloat> [[TMP7]], i64 0)
|
||||
// CPP-CHECK-NEXT: [[TMP8:%.*]] = call <vscale x 8 x bfloat> @llvm.vector.insert.nxv8bf16.v8bf16(<vscale x 8 x bfloat> undef, <8 x bfloat> [[TMP7]], i64 0)
|
||||
// CPP-CHECK-NEXT: [[TMP9:%.*]] = call <vscale x 8 x bfloat> @llvm.aarch64.sve.dupq.lane.nxv8bf16(<vscale x 8 x bfloat> [[TMP8]], i64 0)
|
||||
// CPP-CHECK-NEXT: ret <vscale x 8 x bfloat> [[TMP9]]
|
||||
//
|
||||
|
|
|
@ -197,7 +197,7 @@ svfloat64_t test_svdupq_lane_f64(svfloat64_t data, uint64_t index)
|
|||
// CHECK-NEXT: [[TMP13:%.*]] = insertelement <16 x i8> [[TMP12]], i8 [[X13:%.*]], i64 13
|
||||
// CHECK-NEXT: [[TMP14:%.*]] = insertelement <16 x i8> [[TMP13]], i8 [[X14:%.*]], i64 14
|
||||
// CHECK-NEXT: [[TMP15:%.*]] = insertelement <16 x i8> [[TMP14]], i8 [[X15:%.*]], i64 15
|
||||
// CHECK-NEXT: [[TMP16:%.*]] = call <vscale x 16 x i8> @llvm.experimental.vector.insert.nxv16i8.v16i8(<vscale x 16 x i8> undef, <16 x i8> [[TMP15]], i64 0)
|
||||
// CHECK-NEXT: [[TMP16:%.*]] = call <vscale x 16 x i8> @llvm.vector.insert.nxv16i8.v16i8(<vscale x 16 x i8> undef, <16 x i8> [[TMP15]], i64 0)
|
||||
// CHECK-NEXT: [[TMP17:%.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.dupq.lane.nxv16i8(<vscale x 16 x i8> [[TMP16]], i64 0)
|
||||
// CHECK-NEXT: ret <vscale x 16 x i8> [[TMP17]]
|
||||
//
|
||||
|
@ -219,7 +219,7 @@ svfloat64_t test_svdupq_lane_f64(svfloat64_t data, uint64_t index)
|
|||
// CPP-CHECK-NEXT: [[TMP13:%.*]] = insertelement <16 x i8> [[TMP12]], i8 [[X13:%.*]], i64 13
|
||||
// CPP-CHECK-NEXT: [[TMP14:%.*]] = insertelement <16 x i8> [[TMP13]], i8 [[X14:%.*]], i64 14
|
||||
// CPP-CHECK-NEXT: [[TMP15:%.*]] = insertelement <16 x i8> [[TMP14]], i8 [[X15:%.*]], i64 15
|
||||
// CPP-CHECK-NEXT: [[TMP16:%.*]] = call <vscale x 16 x i8> @llvm.experimental.vector.insert.nxv16i8.v16i8(<vscale x 16 x i8> undef, <16 x i8> [[TMP15]], i64 0)
|
||||
// CPP-CHECK-NEXT: [[TMP16:%.*]] = call <vscale x 16 x i8> @llvm.vector.insert.nxv16i8.v16i8(<vscale x 16 x i8> undef, <16 x i8> [[TMP15]], i64 0)
|
||||
// CPP-CHECK-NEXT: [[TMP17:%.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.dupq.lane.nxv16i8(<vscale x 16 x i8> [[TMP16]], i64 0)
|
||||
// CPP-CHECK-NEXT: ret <vscale x 16 x i8> [[TMP17]]
|
||||
//
|
||||
|
@ -242,7 +242,7 @@ svint8_t test_svdupq_n_s8(int8_t x0, int8_t x1, int8_t x2, int8_t x3,
|
|||
// CHECK-NEXT: [[TMP5:%.*]] = insertelement <8 x i16> [[TMP4]], i16 [[X5:%.*]], i64 5
|
||||
// CHECK-NEXT: [[TMP6:%.*]] = insertelement <8 x i16> [[TMP5]], i16 [[X6:%.*]], i64 6
|
||||
// CHECK-NEXT: [[TMP7:%.*]] = insertelement <8 x i16> [[TMP6]], i16 [[X7:%.*]], i64 7
|
||||
// CHECK-NEXT: [[TMP8:%.*]] = call <vscale x 8 x i16> @llvm.experimental.vector.insert.nxv8i16.v8i16(<vscale x 8 x i16> undef, <8 x i16> [[TMP7]], i64 0)
|
||||
// CHECK-NEXT: [[TMP8:%.*]] = call <vscale x 8 x i16> @llvm.vector.insert.nxv8i16.v8i16(<vscale x 8 x i16> undef, <8 x i16> [[TMP7]], i64 0)
|
||||
// CHECK-NEXT: [[TMP9:%.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.dupq.lane.nxv8i16(<vscale x 8 x i16> [[TMP8]], i64 0)
|
||||
// CHECK-NEXT: ret <vscale x 8 x i16> [[TMP9]]
|
||||
//
|
||||
|
@ -256,7 +256,7 @@ svint8_t test_svdupq_n_s8(int8_t x0, int8_t x1, int8_t x2, int8_t x3,
|
|||
// CPP-CHECK-NEXT: [[TMP5:%.*]] = insertelement <8 x i16> [[TMP4]], i16 [[X5:%.*]], i64 5
|
||||
// CPP-CHECK-NEXT: [[TMP6:%.*]] = insertelement <8 x i16> [[TMP5]], i16 [[X6:%.*]], i64 6
|
||||
// CPP-CHECK-NEXT: [[TMP7:%.*]] = insertelement <8 x i16> [[TMP6]], i16 [[X7:%.*]], i64 7
|
||||
// CPP-CHECK-NEXT: [[TMP8:%.*]] = call <vscale x 8 x i16> @llvm.experimental.vector.insert.nxv8i16.v8i16(<vscale x 8 x i16> undef, <8 x i16> [[TMP7]], i64 0)
|
||||
// CPP-CHECK-NEXT: [[TMP8:%.*]] = call <vscale x 8 x i16> @llvm.vector.insert.nxv8i16.v8i16(<vscale x 8 x i16> undef, <8 x i16> [[TMP7]], i64 0)
|
||||
// CPP-CHECK-NEXT: [[TMP9:%.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.dupq.lane.nxv8i16(<vscale x 8 x i16> [[TMP8]], i64 0)
|
||||
// CPP-CHECK-NEXT: ret <vscale x 8 x i16> [[TMP9]]
|
||||
//
|
||||
|
@ -273,7 +273,7 @@ svint16_t test_svdupq_n_s16(int16_t x0, int16_t x1, int16_t x2, int16_t x3,
|
|||
// CHECK-NEXT: [[TMP1:%.*]] = insertelement <4 x i32> [[TMP0]], i32 [[X1:%.*]], i64 1
|
||||
// CHECK-NEXT: [[TMP2:%.*]] = insertelement <4 x i32> [[TMP1]], i32 [[X2:%.*]], i64 2
|
||||
// CHECK-NEXT: [[TMP3:%.*]] = insertelement <4 x i32> [[TMP2]], i32 [[X3:%.*]], i64 3
|
||||
// CHECK-NEXT: [[TMP4:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vector.insert.nxv4i32.v4i32(<vscale x 4 x i32> undef, <4 x i32> [[TMP3]], i64 0)
|
||||
// CHECK-NEXT: [[TMP4:%.*]] = call <vscale x 4 x i32> @llvm.vector.insert.nxv4i32.v4i32(<vscale x 4 x i32> undef, <4 x i32> [[TMP3]], i64 0)
|
||||
// CHECK-NEXT: [[TMP5:%.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.dupq.lane.nxv4i32(<vscale x 4 x i32> [[TMP4]], i64 0)
|
||||
// CHECK-NEXT: ret <vscale x 4 x i32> [[TMP5]]
|
||||
//
|
||||
|
@ -283,7 +283,7 @@ svint16_t test_svdupq_n_s16(int16_t x0, int16_t x1, int16_t x2, int16_t x3,
|
|||
// CPP-CHECK-NEXT: [[TMP1:%.*]] = insertelement <4 x i32> [[TMP0]], i32 [[X1:%.*]], i64 1
|
||||
// CPP-CHECK-NEXT: [[TMP2:%.*]] = insertelement <4 x i32> [[TMP1]], i32 [[X2:%.*]], i64 2
|
||||
// CPP-CHECK-NEXT: [[TMP3:%.*]] = insertelement <4 x i32> [[TMP2]], i32 [[X3:%.*]], i64 3
|
||||
// CPP-CHECK-NEXT: [[TMP4:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vector.insert.nxv4i32.v4i32(<vscale x 4 x i32> undef, <4 x i32> [[TMP3]], i64 0)
|
||||
// CPP-CHECK-NEXT: [[TMP4:%.*]] = call <vscale x 4 x i32> @llvm.vector.insert.nxv4i32.v4i32(<vscale x 4 x i32> undef, <4 x i32> [[TMP3]], i64 0)
|
||||
// CPP-CHECK-NEXT: [[TMP5:%.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.dupq.lane.nxv4i32(<vscale x 4 x i32> [[TMP4]], i64 0)
|
||||
// CPP-CHECK-NEXT: ret <vscale x 4 x i32> [[TMP5]]
|
||||
//
|
||||
|
@ -297,7 +297,7 @@ svint32_t test_svdupq_n_s32(int32_t x0, int32_t x1, int32_t x2, int32_t x3)
|
|||
// CHECK-NEXT: entry:
|
||||
// CHECK-NEXT: [[TMP0:%.*]] = insertelement <2 x i64> undef, i64 [[X0:%.*]], i64 0
|
||||
// CHECK-NEXT: [[TMP1:%.*]] = insertelement <2 x i64> [[TMP0]], i64 [[X1:%.*]], i64 1
|
||||
// CHECK-NEXT: [[TMP2:%.*]] = call <vscale x 2 x i64> @llvm.experimental.vector.insert.nxv2i64.v2i64(<vscale x 2 x i64> undef, <2 x i64> [[TMP1]], i64 0)
|
||||
// CHECK-NEXT: [[TMP2:%.*]] = call <vscale x 2 x i64> @llvm.vector.insert.nxv2i64.v2i64(<vscale x 2 x i64> undef, <2 x i64> [[TMP1]], i64 0)
|
||||
// CHECK-NEXT: [[TMP3:%.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.dupq.lane.nxv2i64(<vscale x 2 x i64> [[TMP2]], i64 0)
|
||||
// CHECK-NEXT: ret <vscale x 2 x i64> [[TMP3]]
|
||||
//
|
||||
|
@ -305,7 +305,7 @@ svint32_t test_svdupq_n_s32(int32_t x0, int32_t x1, int32_t x2, int32_t x3)
|
|||
// CPP-CHECK-NEXT: entry:
|
||||
// CPP-CHECK-NEXT: [[TMP0:%.*]] = insertelement <2 x i64> undef, i64 [[X0:%.*]], i64 0
|
||||
// CPP-CHECK-NEXT: [[TMP1:%.*]] = insertelement <2 x i64> [[TMP0]], i64 [[X1:%.*]], i64 1
|
||||
// CPP-CHECK-NEXT: [[TMP2:%.*]] = call <vscale x 2 x i64> @llvm.experimental.vector.insert.nxv2i64.v2i64(<vscale x 2 x i64> undef, <2 x i64> [[TMP1]], i64 0)
|
||||
// CPP-CHECK-NEXT: [[TMP2:%.*]] = call <vscale x 2 x i64> @llvm.vector.insert.nxv2i64.v2i64(<vscale x 2 x i64> undef, <2 x i64> [[TMP1]], i64 0)
|
||||
// CPP-CHECK-NEXT: [[TMP3:%.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.dupq.lane.nxv2i64(<vscale x 2 x i64> [[TMP2]], i64 0)
|
||||
// CPP-CHECK-NEXT: ret <vscale x 2 x i64> [[TMP3]]
|
||||
//
|
||||
|
@ -332,7 +332,7 @@ svint64_t test_svdupq_n_s64(int64_t x0, int64_t x1)
|
|||
// CHECK-NEXT: [[TMP13:%.*]] = insertelement <16 x i8> [[TMP12]], i8 [[X13:%.*]], i64 13
|
||||
// CHECK-NEXT: [[TMP14:%.*]] = insertelement <16 x i8> [[TMP13]], i8 [[X14:%.*]], i64 14
|
||||
// CHECK-NEXT: [[TMP15:%.*]] = insertelement <16 x i8> [[TMP14]], i8 [[X15:%.*]], i64 15
|
||||
// CHECK-NEXT: [[TMP16:%.*]] = call <vscale x 16 x i8> @llvm.experimental.vector.insert.nxv16i8.v16i8(<vscale x 16 x i8> undef, <16 x i8> [[TMP15]], i64 0)
|
||||
// CHECK-NEXT: [[TMP16:%.*]] = call <vscale x 16 x i8> @llvm.vector.insert.nxv16i8.v16i8(<vscale x 16 x i8> undef, <16 x i8> [[TMP15]], i64 0)
|
||||
// CHECK-NEXT: [[TMP17:%.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.dupq.lane.nxv16i8(<vscale x 16 x i8> [[TMP16]], i64 0)
|
||||
// CHECK-NEXT: ret <vscale x 16 x i8> [[TMP17]]
|
||||
//
|
||||
|
@ -354,7 +354,7 @@ svint64_t test_svdupq_n_s64(int64_t x0, int64_t x1)
|
|||
// CPP-CHECK-NEXT: [[TMP13:%.*]] = insertelement <16 x i8> [[TMP12]], i8 [[X13:%.*]], i64 13
|
||||
// CPP-CHECK-NEXT: [[TMP14:%.*]] = insertelement <16 x i8> [[TMP13]], i8 [[X14:%.*]], i64 14
|
||||
// CPP-CHECK-NEXT: [[TMP15:%.*]] = insertelement <16 x i8> [[TMP14]], i8 [[X15:%.*]], i64 15
|
||||
// CPP-CHECK-NEXT: [[TMP16:%.*]] = call <vscale x 16 x i8> @llvm.experimental.vector.insert.nxv16i8.v16i8(<vscale x 16 x i8> undef, <16 x i8> [[TMP15]], i64 0)
|
||||
// CPP-CHECK-NEXT: [[TMP16:%.*]] = call <vscale x 16 x i8> @llvm.vector.insert.nxv16i8.v16i8(<vscale x 16 x i8> undef, <16 x i8> [[TMP15]], i64 0)
|
||||
// CPP-CHECK-NEXT: [[TMP17:%.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.dupq.lane.nxv16i8(<vscale x 16 x i8> [[TMP16]], i64 0)
|
||||
// CPP-CHECK-NEXT: ret <vscale x 16 x i8> [[TMP17]]
|
||||
//
|
||||
|
@ -377,7 +377,7 @@ svuint8_t test_svdupq_n_u8(uint8_t x0, uint8_t x1, uint8_t x2, uint8_t x3,
|
|||
// CHECK-NEXT: [[TMP5:%.*]] = insertelement <8 x i16> [[TMP4]], i16 [[X5:%.*]], i64 5
|
||||
// CHECK-NEXT: [[TMP6:%.*]] = insertelement <8 x i16> [[TMP5]], i16 [[X6:%.*]], i64 6
|
||||
// CHECK-NEXT: [[TMP7:%.*]] = insertelement <8 x i16> [[TMP6]], i16 [[X7:%.*]], i64 7
|
||||
// CHECK-NEXT: [[TMP8:%.*]] = call <vscale x 8 x i16> @llvm.experimental.vector.insert.nxv8i16.v8i16(<vscale x 8 x i16> undef, <8 x i16> [[TMP7]], i64 0)
|
||||
// CHECK-NEXT: [[TMP8:%.*]] = call <vscale x 8 x i16> @llvm.vector.insert.nxv8i16.v8i16(<vscale x 8 x i16> undef, <8 x i16> [[TMP7]], i64 0)
|
||||
// CHECK-NEXT: [[TMP9:%.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.dupq.lane.nxv8i16(<vscale x 8 x i16> [[TMP8]], i64 0)
|
||||
// CHECK-NEXT: ret <vscale x 8 x i16> [[TMP9]]
|
||||
//
|
||||
|
@ -391,7 +391,7 @@ svuint8_t test_svdupq_n_u8(uint8_t x0, uint8_t x1, uint8_t x2, uint8_t x3,
|
|||
// CPP-CHECK-NEXT: [[TMP5:%.*]] = insertelement <8 x i16> [[TMP4]], i16 [[X5:%.*]], i64 5
|
||||
// CPP-CHECK-NEXT: [[TMP6:%.*]] = insertelement <8 x i16> [[TMP5]], i16 [[X6:%.*]], i64 6
|
||||
// CPP-CHECK-NEXT: [[TMP7:%.*]] = insertelement <8 x i16> [[TMP6]], i16 [[X7:%.*]], i64 7
|
||||
// CPP-CHECK-NEXT: [[TMP8:%.*]] = call <vscale x 8 x i16> @llvm.experimental.vector.insert.nxv8i16.v8i16(<vscale x 8 x i16> undef, <8 x i16> [[TMP7]], i64 0)
|
||||
// CPP-CHECK-NEXT: [[TMP8:%.*]] = call <vscale x 8 x i16> @llvm.vector.insert.nxv8i16.v8i16(<vscale x 8 x i16> undef, <8 x i16> [[TMP7]], i64 0)
|
||||
// CPP-CHECK-NEXT: [[TMP9:%.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.dupq.lane.nxv8i16(<vscale x 8 x i16> [[TMP8]], i64 0)
|
||||
// CPP-CHECK-NEXT: ret <vscale x 8 x i16> [[TMP9]]
|
||||
//
|
||||
|
@ -408,7 +408,7 @@ svuint16_t test_svdupq_n_u16(uint16_t x0, uint16_t x1, uint16_t x2, uint16_t x3,
|
|||
// CHECK-NEXT: [[TMP1:%.*]] = insertelement <4 x i32> [[TMP0]], i32 [[X1:%.*]], i64 1
|
||||
// CHECK-NEXT: [[TMP2:%.*]] = insertelement <4 x i32> [[TMP1]], i32 [[X2:%.*]], i64 2
|
||||
// CHECK-NEXT: [[TMP3:%.*]] = insertelement <4 x i32> [[TMP2]], i32 [[X3:%.*]], i64 3
|
||||
// CHECK-NEXT: [[TMP4:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vector.insert.nxv4i32.v4i32(<vscale x 4 x i32> undef, <4 x i32> [[TMP3]], i64 0)
|
||||
// CHECK-NEXT: [[TMP4:%.*]] = call <vscale x 4 x i32> @llvm.vector.insert.nxv4i32.v4i32(<vscale x 4 x i32> undef, <4 x i32> [[TMP3]], i64 0)
|
||||
// CHECK-NEXT: [[TMP5:%.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.dupq.lane.nxv4i32(<vscale x 4 x i32> [[TMP4]], i64 0)
|
||||
// CHECK-NEXT: ret <vscale x 4 x i32> [[TMP5]]
|
||||
//
|
||||
|
@ -418,7 +418,7 @@ svuint16_t test_svdupq_n_u16(uint16_t x0, uint16_t x1, uint16_t x2, uint16_t x3,
|
|||
// CPP-CHECK-NEXT: [[TMP1:%.*]] = insertelement <4 x i32> [[TMP0]], i32 [[X1:%.*]], i64 1
|
||||
// CPP-CHECK-NEXT: [[TMP2:%.*]] = insertelement <4 x i32> [[TMP1]], i32 [[X2:%.*]], i64 2
|
||||
// CPP-CHECK-NEXT: [[TMP3:%.*]] = insertelement <4 x i32> [[TMP2]], i32 [[X3:%.*]], i64 3
|
||||
// CPP-CHECK-NEXT: [[TMP4:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vector.insert.nxv4i32.v4i32(<vscale x 4 x i32> undef, <4 x i32> [[TMP3]], i64 0)
|
||||
// CPP-CHECK-NEXT: [[TMP4:%.*]] = call <vscale x 4 x i32> @llvm.vector.insert.nxv4i32.v4i32(<vscale x 4 x i32> undef, <4 x i32> [[TMP3]], i64 0)
|
||||
// CPP-CHECK-NEXT: [[TMP5:%.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.dupq.lane.nxv4i32(<vscale x 4 x i32> [[TMP4]], i64 0)
|
||||
// CPP-CHECK-NEXT: ret <vscale x 4 x i32> [[TMP5]]
|
||||
//
|
||||
|
@ -432,7 +432,7 @@ svuint32_t test_svdupq_n_u32(uint32_t x0, uint32_t x1, uint32_t x2, uint32_t x3)
|
|||
// CHECK-NEXT: entry:
|
||||
// CHECK-NEXT: [[TMP0:%.*]] = insertelement <2 x i64> undef, i64 [[X0:%.*]], i64 0
|
||||
// CHECK-NEXT: [[TMP1:%.*]] = insertelement <2 x i64> [[TMP0]], i64 [[X1:%.*]], i64 1
|
||||
// CHECK-NEXT: [[TMP2:%.*]] = call <vscale x 2 x i64> @llvm.experimental.vector.insert.nxv2i64.v2i64(<vscale x 2 x i64> undef, <2 x i64> [[TMP1]], i64 0)
|
||||
// CHECK-NEXT: [[TMP2:%.*]] = call <vscale x 2 x i64> @llvm.vector.insert.nxv2i64.v2i64(<vscale x 2 x i64> undef, <2 x i64> [[TMP1]], i64 0)
|
||||
// CHECK-NEXT: [[TMP3:%.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.dupq.lane.nxv2i64(<vscale x 2 x i64> [[TMP2]], i64 0)
|
||||
// CHECK-NEXT: ret <vscale x 2 x i64> [[TMP3]]
|
||||
//
|
||||
|
@ -440,7 +440,7 @@ svuint32_t test_svdupq_n_u32(uint32_t x0, uint32_t x1, uint32_t x2, uint32_t x3)
|
|||
// CPP-CHECK-NEXT: entry:
|
||||
// CPP-CHECK-NEXT: [[TMP0:%.*]] = insertelement <2 x i64> undef, i64 [[X0:%.*]], i64 0
|
||||
// CPP-CHECK-NEXT: [[TMP1:%.*]] = insertelement <2 x i64> [[TMP0]], i64 [[X1:%.*]], i64 1
|
||||
// CPP-CHECK-NEXT: [[TMP2:%.*]] = call <vscale x 2 x i64> @llvm.experimental.vector.insert.nxv2i64.v2i64(<vscale x 2 x i64> undef, <2 x i64> [[TMP1]], i64 0)
|
||||
// CPP-CHECK-NEXT: [[TMP2:%.*]] = call <vscale x 2 x i64> @llvm.vector.insert.nxv2i64.v2i64(<vscale x 2 x i64> undef, <2 x i64> [[TMP1]], i64 0)
|
||||
// CPP-CHECK-NEXT: [[TMP3:%.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.dupq.lane.nxv2i64(<vscale x 2 x i64> [[TMP2]], i64 0)
|
||||
// CPP-CHECK-NEXT: ret <vscale x 2 x i64> [[TMP3]]
|
||||
//
|
||||
|
@ -459,7 +459,7 @@ svuint64_t test_svdupq_n_u64(uint64_t x0, uint64_t x1)
|
|||
// CHECK-NEXT: [[TMP5:%.*]] = insertelement <8 x half> [[TMP4]], half [[X5:%.*]], i64 5
|
||||
// CHECK-NEXT: [[TMP6:%.*]] = insertelement <8 x half> [[TMP5]], half [[X6:%.*]], i64 6
|
||||
// CHECK-NEXT: [[TMP7:%.*]] = insertelement <8 x half> [[TMP6]], half [[X7:%.*]], i64 7
|
||||
// CHECK-NEXT: [[TMP8:%.*]] = call <vscale x 8 x half> @llvm.experimental.vector.insert.nxv8f16.v8f16(<vscale x 8 x half> undef, <8 x half> [[TMP7]], i64 0)
|
||||
// CHECK-NEXT: [[TMP8:%.*]] = call <vscale x 8 x half> @llvm.vector.insert.nxv8f16.v8f16(<vscale x 8 x half> undef, <8 x half> [[TMP7]], i64 0)
|
||||
// CHECK-NEXT: [[TMP9:%.*]] = call <vscale x 8 x half> @llvm.aarch64.sve.dupq.lane.nxv8f16(<vscale x 8 x half> [[TMP8]], i64 0)
|
||||
// CHECK-NEXT: ret <vscale x 8 x half> [[TMP9]]
|
||||
//
|
||||
|
@ -473,7 +473,7 @@ svuint64_t test_svdupq_n_u64(uint64_t x0, uint64_t x1)
|
|||
// CPP-CHECK-NEXT: [[TMP5:%.*]] = insertelement <8 x half> [[TMP4]], half [[X5:%.*]], i64 5
|
||||
// CPP-CHECK-NEXT: [[TMP6:%.*]] = insertelement <8 x half> [[TMP5]], half [[X6:%.*]], i64 6
|
||||
// CPP-CHECK-NEXT: [[TMP7:%.*]] = insertelement <8 x half> [[TMP6]], half [[X7:%.*]], i64 7
|
||||
// CPP-CHECK-NEXT: [[TMP8:%.*]] = call <vscale x 8 x half> @llvm.experimental.vector.insert.nxv8f16.v8f16(<vscale x 8 x half> undef, <8 x half> [[TMP7]], i64 0)
|
||||
// CPP-CHECK-NEXT: [[TMP8:%.*]] = call <vscale x 8 x half> @llvm.vector.insert.nxv8f16.v8f16(<vscale x 8 x half> undef, <8 x half> [[TMP7]], i64 0)
|
||||
// CPP-CHECK-NEXT: [[TMP9:%.*]] = call <vscale x 8 x half> @llvm.aarch64.sve.dupq.lane.nxv8f16(<vscale x 8 x half> [[TMP8]], i64 0)
|
||||
// CPP-CHECK-NEXT: ret <vscale x 8 x half> [[TMP9]]
|
||||
//
|
||||
|
@ -490,7 +490,7 @@ svfloat16_t test_svdupq_n_f16(float16_t x0, float16_t x1, float16_t x2, float16_
|
|||
// CHECK-NEXT: [[TMP1:%.*]] = insertelement <4 x float> [[TMP0]], float [[X1:%.*]], i64 1
|
||||
// CHECK-NEXT: [[TMP2:%.*]] = insertelement <4 x float> [[TMP1]], float [[X2:%.*]], i64 2
|
||||
// CHECK-NEXT: [[TMP3:%.*]] = insertelement <4 x float> [[TMP2]], float [[X3:%.*]], i64 3
|
||||
// CHECK-NEXT: [[TMP4:%.*]] = call <vscale x 4 x float> @llvm.experimental.vector.insert.nxv4f32.v4f32(<vscale x 4 x float> undef, <4 x float> [[TMP3]], i64 0)
|
||||
// CHECK-NEXT: [[TMP4:%.*]] = call <vscale x 4 x float> @llvm.vector.insert.nxv4f32.v4f32(<vscale x 4 x float> undef, <4 x float> [[TMP3]], i64 0)
|
||||
// CHECK-NEXT: [[TMP5:%.*]] = call <vscale x 4 x float> @llvm.aarch64.sve.dupq.lane.nxv4f32(<vscale x 4 x float> [[TMP4]], i64 0)
|
||||
// CHECK-NEXT: ret <vscale x 4 x float> [[TMP5]]
|
||||
//
|
||||
|
@ -500,7 +500,7 @@ svfloat16_t test_svdupq_n_f16(float16_t x0, float16_t x1, float16_t x2, float16_
|
|||
// CPP-CHECK-NEXT: [[TMP1:%.*]] = insertelement <4 x float> [[TMP0]], float [[X1:%.*]], i64 1
|
||||
// CPP-CHECK-NEXT: [[TMP2:%.*]] = insertelement <4 x float> [[TMP1]], float [[X2:%.*]], i64 2
|
||||
// CPP-CHECK-NEXT: [[TMP3:%.*]] = insertelement <4 x float> [[TMP2]], float [[X3:%.*]], i64 3
|
||||
// CPP-CHECK-NEXT: [[TMP4:%.*]] = call <vscale x 4 x float> @llvm.experimental.vector.insert.nxv4f32.v4f32(<vscale x 4 x float> undef, <4 x float> [[TMP3]], i64 0)
|
||||
// CPP-CHECK-NEXT: [[TMP4:%.*]] = call <vscale x 4 x float> @llvm.vector.insert.nxv4f32.v4f32(<vscale x 4 x float> undef, <4 x float> [[TMP3]], i64 0)
|
||||
// CPP-CHECK-NEXT: [[TMP5:%.*]] = call <vscale x 4 x float> @llvm.aarch64.sve.dupq.lane.nxv4f32(<vscale x 4 x float> [[TMP4]], i64 0)
|
||||
// CPP-CHECK-NEXT: ret <vscale x 4 x float> [[TMP5]]
|
||||
//
|
||||
|
@ -514,7 +514,7 @@ svfloat32_t test_svdupq_n_f32(float32_t x0, float32_t x1, float32_t x2, float32_
|
|||
// CHECK-NEXT: entry:
|
||||
// CHECK-NEXT: [[TMP0:%.*]] = insertelement <2 x double> undef, double [[X0:%.*]], i64 0
|
||||
// CHECK-NEXT: [[TMP1:%.*]] = insertelement <2 x double> [[TMP0]], double [[X1:%.*]], i64 1
|
||||
// CHECK-NEXT: [[TMP2:%.*]] = call <vscale x 2 x double> @llvm.experimental.vector.insert.nxv2f64.v2f64(<vscale x 2 x double> undef, <2 x double> [[TMP1]], i64 0)
|
||||
// CHECK-NEXT: [[TMP2:%.*]] = call <vscale x 2 x double> @llvm.vector.insert.nxv2f64.v2f64(<vscale x 2 x double> undef, <2 x double> [[TMP1]], i64 0)
|
||||
// CHECK-NEXT: [[TMP3:%.*]] = call <vscale x 2 x double> @llvm.aarch64.sve.dupq.lane.nxv2f64(<vscale x 2 x double> [[TMP2]], i64 0)
|
||||
// CHECK-NEXT: ret <vscale x 2 x double> [[TMP3]]
|
||||
//
|
||||
|
@ -522,7 +522,7 @@ svfloat32_t test_svdupq_n_f32(float32_t x0, float32_t x1, float32_t x2, float32_
|
|||
// CPP-CHECK-NEXT: entry:
|
||||
// CPP-CHECK-NEXT: [[TMP0:%.*]] = insertelement <2 x double> undef, double [[X0:%.*]], i64 0
|
||||
// CPP-CHECK-NEXT: [[TMP1:%.*]] = insertelement <2 x double> [[TMP0]], double [[X1:%.*]], i64 1
|
||||
// CPP-CHECK-NEXT: [[TMP2:%.*]] = call <vscale x 2 x double> @llvm.experimental.vector.insert.nxv2f64.v2f64(<vscale x 2 x double> undef, <2 x double> [[TMP1]], i64 0)
|
||||
// CPP-CHECK-NEXT: [[TMP2:%.*]] = call <vscale x 2 x double> @llvm.vector.insert.nxv2f64.v2f64(<vscale x 2 x double> undef, <2 x double> [[TMP1]], i64 0)
|
||||
// CPP-CHECK-NEXT: [[TMP3:%.*]] = call <vscale x 2 x double> @llvm.aarch64.sve.dupq.lane.nxv2f64(<vscale x 2 x double> [[TMP2]], i64 0)
|
||||
// CPP-CHECK-NEXT: ret <vscale x 2 x double> [[TMP3]]
|
||||
//
|
||||
|
@ -566,7 +566,7 @@ svfloat64_t test_svdupq_n_f64(float64_t x0, float64_t x1)
|
|||
// CHECK-NEXT: [[TMP14:%.*]] = insertelement <16 x i8> [[TMP13]], i8 [[FROMBOOL14]], i64 14
|
||||
// CHECK-NEXT: [[TMP15:%.*]] = insertelement <16 x i8> [[TMP14]], i8 [[FROMBOOL15]], i64 15
|
||||
// CHECK-NEXT: [[TMP16:%.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.ptrue.nxv16i1(i32 31)
|
||||
// CHECK-NEXT: [[TMP17:%.*]] = call <vscale x 16 x i8> @llvm.experimental.vector.insert.nxv16i8.v16i8(<vscale x 16 x i8> undef, <16 x i8> [[TMP15]], i64 0)
|
||||
// CHECK-NEXT: [[TMP17:%.*]] = call <vscale x 16 x i8> @llvm.vector.insert.nxv16i8.v16i8(<vscale x 16 x i8> undef, <16 x i8> [[TMP15]], i64 0)
|
||||
// CHECK-NEXT: [[TMP18:%.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.dupq.lane.nxv16i8(<vscale x 16 x i8> [[TMP17]], i64 0)
|
||||
// CHECK-NEXT: [[TMP19:%.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.cmpne.wide.nxv16i8(<vscale x 16 x i1> [[TMP16]], <vscale x 16 x i8> [[TMP18]], <vscale x 2 x i64> zeroinitializer)
|
||||
// CHECK-NEXT: ret <vscale x 16 x i1> [[TMP19]]
|
||||
|
@ -606,7 +606,7 @@ svfloat64_t test_svdupq_n_f64(float64_t x0, float64_t x1)
|
|||
// CPP-CHECK-NEXT: [[TMP14:%.*]] = insertelement <16 x i8> [[TMP13]], i8 [[FROMBOOL14]], i64 14
|
||||
// CPP-CHECK-NEXT: [[TMP15:%.*]] = insertelement <16 x i8> [[TMP14]], i8 [[FROMBOOL15]], i64 15
|
||||
// CPP-CHECK-NEXT: [[TMP16:%.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.ptrue.nxv16i1(i32 31)
|
||||
// CPP-CHECK-NEXT: [[TMP17:%.*]] = call <vscale x 16 x i8> @llvm.experimental.vector.insert.nxv16i8.v16i8(<vscale x 16 x i8> undef, <16 x i8> [[TMP15]], i64 0)
|
||||
// CPP-CHECK-NEXT: [[TMP17:%.*]] = call <vscale x 16 x i8> @llvm.vector.insert.nxv16i8.v16i8(<vscale x 16 x i8> undef, <16 x i8> [[TMP15]], i64 0)
|
||||
// CPP-CHECK-NEXT: [[TMP18:%.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.dupq.lane.nxv16i8(<vscale x 16 x i8> [[TMP17]], i64 0)
|
||||
// CPP-CHECK-NEXT: [[TMP19:%.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.cmpne.wide.nxv16i8(<vscale x 16 x i1> [[TMP16]], <vscale x 16 x i8> [[TMP18]], <vscale x 2 x i64> zeroinitializer)
|
||||
// CPP-CHECK-NEXT: ret <vscale x 16 x i1> [[TMP19]]
|
||||
|
@ -639,7 +639,7 @@ svbool_t test_svdupq_n_b8(bool x0, bool x1, bool x2, bool x3,
|
|||
// CHECK-NEXT: [[TMP14:%.*]] = insertelement <8 x i16> [[TMP13]], i16 [[TMP6]], i64 6
|
||||
// CHECK-NEXT: [[TMP15:%.*]] = insertelement <8 x i16> [[TMP14]], i16 [[TMP7]], i64 7
|
||||
// CHECK-NEXT: [[TMP16:%.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.ptrue.nxv8i1(i32 31)
|
||||
// CHECK-NEXT: [[TMP17:%.*]] = call <vscale x 8 x i16> @llvm.experimental.vector.insert.nxv8i16.v8i16(<vscale x 8 x i16> undef, <8 x i16> [[TMP15]], i64 0)
|
||||
// CHECK-NEXT: [[TMP17:%.*]] = call <vscale x 8 x i16> @llvm.vector.insert.nxv8i16.v8i16(<vscale x 8 x i16> undef, <8 x i16> [[TMP15]], i64 0)
|
||||
// CHECK-NEXT: [[TMP18:%.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.dupq.lane.nxv8i16(<vscale x 8 x i16> [[TMP17]], i64 0)
|
||||
// CHECK-NEXT: [[TMP19:%.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.cmpne.wide.nxv8i16(<vscale x 8 x i1> [[TMP16]], <vscale x 8 x i16> [[TMP18]], <vscale x 2 x i64> zeroinitializer)
|
||||
// CHECK-NEXT: [[TMP20:%.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv8i1(<vscale x 8 x i1> [[TMP19]])
|
||||
|
@ -664,7 +664,7 @@ svbool_t test_svdupq_n_b8(bool x0, bool x1, bool x2, bool x3,
|
|||
// CPP-CHECK-NEXT: [[TMP14:%.*]] = insertelement <8 x i16> [[TMP13]], i16 [[TMP6]], i64 6
|
||||
// CPP-CHECK-NEXT: [[TMP15:%.*]] = insertelement <8 x i16> [[TMP14]], i16 [[TMP7]], i64 7
|
||||
// CPP-CHECK-NEXT: [[TMP16:%.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.ptrue.nxv8i1(i32 31)
|
||||
// CPP-CHECK-NEXT: [[TMP17:%.*]] = call <vscale x 8 x i16> @llvm.experimental.vector.insert.nxv8i16.v8i16(<vscale x 8 x i16> undef, <8 x i16> [[TMP15]], i64 0)
|
||||
// CPP-CHECK-NEXT: [[TMP17:%.*]] = call <vscale x 8 x i16> @llvm.vector.insert.nxv8i16.v8i16(<vscale x 8 x i16> undef, <8 x i16> [[TMP15]], i64 0)
|
||||
// CPP-CHECK-NEXT: [[TMP18:%.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.dupq.lane.nxv8i16(<vscale x 8 x i16> [[TMP17]], i64 0)
|
||||
// CPP-CHECK-NEXT: [[TMP19:%.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.cmpne.wide.nxv8i16(<vscale x 8 x i1> [[TMP16]], <vscale x 8 x i16> [[TMP18]], <vscale x 2 x i64> zeroinitializer)
|
||||
// CPP-CHECK-NEXT: [[TMP20:%.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv8i1(<vscale x 8 x i1> [[TMP19]])
|
||||
|
@ -688,7 +688,7 @@ svbool_t test_svdupq_n_b16(bool x0, bool x1, bool x2, bool x3,
|
|||
// CHECK-NEXT: [[TMP6:%.*]] = insertelement <4 x i32> [[TMP5]], i32 [[TMP2]], i64 2
|
||||
// CHECK-NEXT: [[TMP7:%.*]] = insertelement <4 x i32> [[TMP6]], i32 [[TMP3]], i64 3
|
||||
// CHECK-NEXT: [[TMP8:%.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.ptrue.nxv4i1(i32 31)
|
||||
// CHECK-NEXT: [[TMP9:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vector.insert.nxv4i32.v4i32(<vscale x 4 x i32> undef, <4 x i32> [[TMP7]], i64 0)
|
||||
// CHECK-NEXT: [[TMP9:%.*]] = call <vscale x 4 x i32> @llvm.vector.insert.nxv4i32.v4i32(<vscale x 4 x i32> undef, <4 x i32> [[TMP7]], i64 0)
|
||||
// CHECK-NEXT: [[TMP10:%.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.dupq.lane.nxv4i32(<vscale x 4 x i32> [[TMP9]], i64 0)
|
||||
// CHECK-NEXT: [[TMP11:%.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.cmpne.wide.nxv4i32(<vscale x 4 x i1> [[TMP8]], <vscale x 4 x i32> [[TMP10]], <vscale x 2 x i64> zeroinitializer)
|
||||
// CHECK-NEXT: [[TMP12:%.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv4i1(<vscale x 4 x i1> [[TMP11]])
|
||||
|
@ -705,7 +705,7 @@ svbool_t test_svdupq_n_b16(bool x0, bool x1, bool x2, bool x3,
|
|||
// CPP-CHECK-NEXT: [[TMP6:%.*]] = insertelement <4 x i32> [[TMP5]], i32 [[TMP2]], i64 2
|
||||
// CPP-CHECK-NEXT: [[TMP7:%.*]] = insertelement <4 x i32> [[TMP6]], i32 [[TMP3]], i64 3
|
||||
// CPP-CHECK-NEXT: [[TMP8:%.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.ptrue.nxv4i1(i32 31)
|
||||
// CPP-CHECK-NEXT: [[TMP9:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vector.insert.nxv4i32.v4i32(<vscale x 4 x i32> undef, <4 x i32> [[TMP7]], i64 0)
|
||||
// CPP-CHECK-NEXT: [[TMP9:%.*]] = call <vscale x 4 x i32> @llvm.vector.insert.nxv4i32.v4i32(<vscale x 4 x i32> undef, <4 x i32> [[TMP7]], i64 0)
|
||||
// CPP-CHECK-NEXT: [[TMP10:%.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.dupq.lane.nxv4i32(<vscale x 4 x i32> [[TMP9]], i64 0)
|
||||
// CPP-CHECK-NEXT: [[TMP11:%.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.cmpne.wide.nxv4i32(<vscale x 4 x i1> [[TMP8]], <vscale x 4 x i32> [[TMP10]], <vscale x 2 x i64> zeroinitializer)
|
||||
// CPP-CHECK-NEXT: [[TMP12:%.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv4i1(<vscale x 4 x i1> [[TMP11]])
|
||||
|
@ -724,7 +724,7 @@ svbool_t test_svdupq_n_b32(bool x0, bool x1, bool x2, bool x3)
|
|||
// CHECK-NEXT: [[TMP2:%.*]] = insertelement <2 x i64> undef, i64 [[TMP0]], i64 0
|
||||
// CHECK-NEXT: [[TMP3:%.*]] = insertelement <2 x i64> [[TMP2]], i64 [[TMP1]], i64 1
|
||||
// CHECK-NEXT: [[TMP4:%.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.ptrue.nxv2i1(i32 31)
|
||||
// CHECK-NEXT: [[TMP5:%.*]] = call <vscale x 2 x i64> @llvm.experimental.vector.insert.nxv2i64.v2i64(<vscale x 2 x i64> undef, <2 x i64> [[TMP3]], i64 0)
|
||||
// CHECK-NEXT: [[TMP5:%.*]] = call <vscale x 2 x i64> @llvm.vector.insert.nxv2i64.v2i64(<vscale x 2 x i64> undef, <2 x i64> [[TMP3]], i64 0)
|
||||
// CHECK-NEXT: [[TMP6:%.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.dupq.lane.nxv2i64(<vscale x 2 x i64> [[TMP5]], i64 0)
|
||||
// CHECK-NEXT: [[TMP7:%.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.cmpne.nxv2i64(<vscale x 2 x i1> [[TMP4]], <vscale x 2 x i64> [[TMP6]], <vscale x 2 x i64> zeroinitializer)
|
||||
// CHECK-NEXT: [[TMP8:%.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv2i1(<vscale x 2 x i1> [[TMP7]])
|
||||
|
@ -737,7 +737,7 @@ svbool_t test_svdupq_n_b32(bool x0, bool x1, bool x2, bool x3)
|
|||
// CPP-CHECK-NEXT: [[TMP2:%.*]] = insertelement <2 x i64> undef, i64 [[TMP0]], i64 0
|
||||
// CPP-CHECK-NEXT: [[TMP3:%.*]] = insertelement <2 x i64> [[TMP2]], i64 [[TMP1]], i64 1
|
||||
// CPP-CHECK-NEXT: [[TMP4:%.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.ptrue.nxv2i1(i32 31)
|
||||
// CPP-CHECK-NEXT: [[TMP5:%.*]] = call <vscale x 2 x i64> @llvm.experimental.vector.insert.nxv2i64.v2i64(<vscale x 2 x i64> undef, <2 x i64> [[TMP3]], i64 0)
|
||||
// CPP-CHECK-NEXT: [[TMP5:%.*]] = call <vscale x 2 x i64> @llvm.vector.insert.nxv2i64.v2i64(<vscale x 2 x i64> undef, <2 x i64> [[TMP3]], i64 0)
|
||||
// CPP-CHECK-NEXT: [[TMP6:%.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.dupq.lane.nxv2i64(<vscale x 2 x i64> [[TMP5]], i64 0)
|
||||
// CPP-CHECK-NEXT: [[TMP7:%.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.cmpne.nxv2i64(<vscale x 2 x i1> [[TMP4]], <vscale x 2 x i64> [[TMP6]], <vscale x 2 x i64> zeroinitializer)
|
||||
// CPP-CHECK-NEXT: [[TMP8:%.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv2i1(<vscale x 2 x i1> [[TMP7]])
|
||||
|
|
File diff suppressed because it is too large
Load Diff
|
@ -30,11 +30,11 @@ typedef svbool_t fixed_bool_t __attribute__((arm_sve_vector_bits(N)));
|
|||
// CHECK-LABEL: @and_bool(
|
||||
// CHECK-NEXT: entry:
|
||||
// CHECK-NEXT: [[A_COERCE:%.*]] = bitcast <vscale x 16 x i1> [[TMP0:%.*]] to <vscale x 2 x i8>
|
||||
// CHECK-NEXT: [[A:%.*]] = call <8 x i8> @llvm.experimental.vector.extract.v8i8.nxv2i8(<vscale x 2 x i8> [[A_COERCE]], i64 0)
|
||||
// CHECK-NEXT: [[A:%.*]] = call <8 x i8> @llvm.vector.extract.v8i8.nxv2i8(<vscale x 2 x i8> [[A_COERCE]], i64 0)
|
||||
// CHECK-NEXT: [[B_COERCE:%.*]] = bitcast <vscale x 16 x i1> [[TMP1:%.*]] to <vscale x 2 x i8>
|
||||
// CHECK-NEXT: [[B:%.*]] = call <8 x i8> @llvm.experimental.vector.extract.v8i8.nxv2i8(<vscale x 2 x i8> [[B_COERCE]], i64 0)
|
||||
// CHECK-NEXT: [[B:%.*]] = call <8 x i8> @llvm.vector.extract.v8i8.nxv2i8(<vscale x 2 x i8> [[B_COERCE]], i64 0)
|
||||
// CHECK-NEXT: [[AND:%.*]] = and <8 x i8> [[A]], [[B]]
|
||||
// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 2 x i8> @llvm.experimental.vector.insert.nxv2i8.v8i8(<vscale x 2 x i8> undef, <8 x i8> [[AND]], i64 0)
|
||||
// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 2 x i8> @llvm.vector.insert.nxv2i8.v8i8(<vscale x 2 x i8> undef, <8 x i8> [[AND]], i64 0)
|
||||
// CHECK-NEXT: [[TMP2:%.*]] = bitcast <vscale x 2 x i8> [[CASTSCALABLESVE]] to <vscale x 16 x i1>
|
||||
// CHECK-NEXT: ret <vscale x 16 x i1> [[TMP2]]
|
||||
//
|
||||
|
@ -44,10 +44,10 @@ fixed_bool_t and_bool(fixed_bool_t a, fixed_bool_t b) {
|
|||
|
||||
// CHECK-LABEL: @and_i8(
|
||||
// CHECK-NEXT: entry:
|
||||
// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8(<vscale x 16 x i8> [[A_COERCE:%.*]], i64 0)
|
||||
// CHECK-NEXT: [[B:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8(<vscale x 16 x i8> [[B_COERCE:%.*]], i64 0)
|
||||
// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.vector.extract.v64i8.nxv16i8(<vscale x 16 x i8> [[A_COERCE:%.*]], i64 0)
|
||||
// CHECK-NEXT: [[B:%.*]] = call <64 x i8> @llvm.vector.extract.v64i8.nxv16i8(<vscale x 16 x i8> [[B_COERCE:%.*]], i64 0)
|
||||
// CHECK-NEXT: [[AND:%.*]] = and <64 x i8> [[A]], [[B]]
|
||||
// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 16 x i8> @llvm.experimental.vector.insert.nxv16i8.v64i8(<vscale x 16 x i8> undef, <64 x i8> [[AND]], i64 0)
|
||||
// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 16 x i8> @llvm.vector.insert.nxv16i8.v64i8(<vscale x 16 x i8> undef, <64 x i8> [[AND]], i64 0)
|
||||
// CHECK-NEXT: ret <vscale x 16 x i8> [[CASTSCALABLESVE]]
|
||||
//
|
||||
fixed_int8_t and_i8(fixed_int8_t a, fixed_int8_t b) {
|
||||
|
@ -56,10 +56,10 @@ fixed_int8_t and_i8(fixed_int8_t a, fixed_int8_t b) {
|
|||
|
||||
// CHECK-LABEL: @and_i16(
|
||||
// CHECK-NEXT: entry:
|
||||
// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16(<vscale x 8 x i16> [[A_COERCE:%.*]], i64 0)
|
||||
// CHECK-NEXT: [[B:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16(<vscale x 8 x i16> [[B_COERCE:%.*]], i64 0)
|
||||
// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.vector.extract.v32i16.nxv8i16(<vscale x 8 x i16> [[A_COERCE:%.*]], i64 0)
|
||||
// CHECK-NEXT: [[B:%.*]] = call <32 x i16> @llvm.vector.extract.v32i16.nxv8i16(<vscale x 8 x i16> [[B_COERCE:%.*]], i64 0)
|
||||
// CHECK-NEXT: [[AND:%.*]] = and <32 x i16> [[A]], [[B]]
|
||||
// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 8 x i16> @llvm.experimental.vector.insert.nxv8i16.v32i16(<vscale x 8 x i16> undef, <32 x i16> [[AND]], i64 0)
|
||||
// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 8 x i16> @llvm.vector.insert.nxv8i16.v32i16(<vscale x 8 x i16> undef, <32 x i16> [[AND]], i64 0)
|
||||
// CHECK-NEXT: ret <vscale x 8 x i16> [[CASTSCALABLESVE]]
|
||||
//
|
||||
fixed_int16_t and_i16(fixed_int16_t a, fixed_int16_t b) {
|
||||
|
@ -68,10 +68,10 @@ fixed_int16_t and_i16(fixed_int16_t a, fixed_int16_t b) {
|
|||
|
||||
// CHECK-LABEL: @and_i32(
|
||||
// CHECK-NEXT: entry:
|
||||
// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32(<vscale x 4 x i32> [[A_COERCE:%.*]], i64 0)
|
||||
// CHECK-NEXT: [[B:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32(<vscale x 4 x i32> [[B_COERCE:%.*]], i64 0)
|
||||
// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.vector.extract.v16i32.nxv4i32(<vscale x 4 x i32> [[A_COERCE:%.*]], i64 0)
|
||||
// CHECK-NEXT: [[B:%.*]] = call <16 x i32> @llvm.vector.extract.v16i32.nxv4i32(<vscale x 4 x i32> [[B_COERCE:%.*]], i64 0)
|
||||
// CHECK-NEXT: [[AND:%.*]] = and <16 x i32> [[A]], [[B]]
|
||||
// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vector.insert.nxv4i32.v16i32(<vscale x 4 x i32> undef, <16 x i32> [[AND]], i64 0)
|
||||
// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 4 x i32> @llvm.vector.insert.nxv4i32.v16i32(<vscale x 4 x i32> undef, <16 x i32> [[AND]], i64 0)
|
||||
// CHECK-NEXT: ret <vscale x 4 x i32> [[CASTSCALABLESVE]]
|
||||
//
|
||||
fixed_int32_t and_i32(fixed_int32_t a, fixed_int32_t b) {
|
||||
|
@ -80,10 +80,10 @@ fixed_int32_t and_i32(fixed_int32_t a, fixed_int32_t b) {
|
|||
|
||||
// CHECK-LABEL: @and_i64(
|
||||
// CHECK-NEXT: entry:
|
||||
// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64(<vscale x 2 x i64> [[A_COERCE:%.*]], i64 0)
|
||||
// CHECK-NEXT: [[B:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64(<vscale x 2 x i64> [[B_COERCE:%.*]], i64 0)
|
||||
// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.vector.extract.v8i64.nxv2i64(<vscale x 2 x i64> [[A_COERCE:%.*]], i64 0)
|
||||
// CHECK-NEXT: [[B:%.*]] = call <8 x i64> @llvm.vector.extract.v8i64.nxv2i64(<vscale x 2 x i64> [[B_COERCE:%.*]], i64 0)
|
||||
// CHECK-NEXT: [[AND:%.*]] = and <8 x i64> [[A]], [[B]]
|
||||
// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 2 x i64> @llvm.experimental.vector.insert.nxv2i64.v8i64(<vscale x 2 x i64> undef, <8 x i64> [[AND]], i64 0)
|
||||
// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 2 x i64> @llvm.vector.insert.nxv2i64.v8i64(<vscale x 2 x i64> undef, <8 x i64> [[AND]], i64 0)
|
||||
// CHECK-NEXT: ret <vscale x 2 x i64> [[CASTSCALABLESVE]]
|
||||
//
|
||||
fixed_int64_t and_i64(fixed_int64_t a, fixed_int64_t b) {
|
||||
|
@ -92,10 +92,10 @@ fixed_int64_t and_i64(fixed_int64_t a, fixed_int64_t b) {
|
|||
|
||||
// CHECK-LABEL: @and_u8(
|
||||
// CHECK-NEXT: entry:
|
||||
// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8(<vscale x 16 x i8> [[A_COERCE:%.*]], i64 0)
|
||||
// CHECK-NEXT: [[B:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8(<vscale x 16 x i8> [[B_COERCE:%.*]], i64 0)
|
||||
// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.vector.extract.v64i8.nxv16i8(<vscale x 16 x i8> [[A_COERCE:%.*]], i64 0)
|
||||
// CHECK-NEXT: [[B:%.*]] = call <64 x i8> @llvm.vector.extract.v64i8.nxv16i8(<vscale x 16 x i8> [[B_COERCE:%.*]], i64 0)
|
||||
// CHECK-NEXT: [[AND:%.*]] = and <64 x i8> [[A]], [[B]]
|
||||
// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 16 x i8> @llvm.experimental.vector.insert.nxv16i8.v64i8(<vscale x 16 x i8> undef, <64 x i8> [[AND]], i64 0)
|
||||
// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 16 x i8> @llvm.vector.insert.nxv16i8.v64i8(<vscale x 16 x i8> undef, <64 x i8> [[AND]], i64 0)
|
||||
// CHECK-NEXT: ret <vscale x 16 x i8> [[CASTSCALABLESVE]]
|
||||
//
|
||||
fixed_uint8_t and_u8(fixed_uint8_t a, fixed_uint8_t b) {
|
||||
|
@ -104,10 +104,10 @@ fixed_uint8_t and_u8(fixed_uint8_t a, fixed_uint8_t b) {
|
|||
|
||||
// CHECK-LABEL: @and_u16(
|
||||
// CHECK-NEXT: entry:
|
||||
// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16(<vscale x 8 x i16> [[A_COERCE:%.*]], i64 0)
|
||||
// CHECK-NEXT: [[B:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16(<vscale x 8 x i16> [[B_COERCE:%.*]], i64 0)
|
||||
// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.vector.extract.v32i16.nxv8i16(<vscale x 8 x i16> [[A_COERCE:%.*]], i64 0)
|
||||
// CHECK-NEXT: [[B:%.*]] = call <32 x i16> @llvm.vector.extract.v32i16.nxv8i16(<vscale x 8 x i16> [[B_COERCE:%.*]], i64 0)
|
||||
// CHECK-NEXT: [[AND:%.*]] = and <32 x i16> [[A]], [[B]]
|
||||
// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 8 x i16> @llvm.experimental.vector.insert.nxv8i16.v32i16(<vscale x 8 x i16> undef, <32 x i16> [[AND]], i64 0)
|
||||
// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 8 x i16> @llvm.vector.insert.nxv8i16.v32i16(<vscale x 8 x i16> undef, <32 x i16> [[AND]], i64 0)
|
||||
// CHECK-NEXT: ret <vscale x 8 x i16> [[CASTSCALABLESVE]]
|
||||
//
|
||||
fixed_uint16_t and_u16(fixed_uint16_t a, fixed_uint16_t b) {
|
||||
|
@ -116,10 +116,10 @@ fixed_uint16_t and_u16(fixed_uint16_t a, fixed_uint16_t b) {
|
|||
|
||||
// CHECK-LABEL: @and_u32(
|
||||
// CHECK-NEXT: entry:
|
||||
// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32(<vscale x 4 x i32> [[A_COERCE:%.*]], i64 0)
|
||||
// CHECK-NEXT: [[B:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32(<vscale x 4 x i32> [[B_COERCE:%.*]], i64 0)
|
||||
// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.vector.extract.v16i32.nxv4i32(<vscale x 4 x i32> [[A_COERCE:%.*]], i64 0)
|
||||
// CHECK-NEXT: [[B:%.*]] = call <16 x i32> @llvm.vector.extract.v16i32.nxv4i32(<vscale x 4 x i32> [[B_COERCE:%.*]], i64 0)
|
||||
// CHECK-NEXT: [[AND:%.*]] = and <16 x i32> [[A]], [[B]]
|
||||
// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vector.insert.nxv4i32.v16i32(<vscale x 4 x i32> undef, <16 x i32> [[AND]], i64 0)
|
||||
// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 4 x i32> @llvm.vector.insert.nxv4i32.v16i32(<vscale x 4 x i32> undef, <16 x i32> [[AND]], i64 0)
|
||||
// CHECK-NEXT: ret <vscale x 4 x i32> [[CASTSCALABLESVE]]
|
||||
//
|
||||
fixed_uint32_t and_u32(fixed_uint32_t a, fixed_uint32_t b) {
|
||||
|
@ -128,10 +128,10 @@ fixed_uint32_t and_u32(fixed_uint32_t a, fixed_uint32_t b) {
|
|||
|
||||
// CHECK-LABEL: @and_u64(
|
||||
// CHECK-NEXT: entry:
|
||||
// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64(<vscale x 2 x i64> [[A_COERCE:%.*]], i64 0)
|
||||
// CHECK-NEXT: [[B:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64(<vscale x 2 x i64> [[B_COERCE:%.*]], i64 0)
|
||||
// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.vector.extract.v8i64.nxv2i64(<vscale x 2 x i64> [[A_COERCE:%.*]], i64 0)
|
||||
// CHECK-NEXT: [[B:%.*]] = call <8 x i64> @llvm.vector.extract.v8i64.nxv2i64(<vscale x 2 x i64> [[B_COERCE:%.*]], i64 0)
|
||||
// CHECK-NEXT: [[AND:%.*]] = and <8 x i64> [[A]], [[B]]
|
||||
// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 2 x i64> @llvm.experimental.vector.insert.nxv2i64.v8i64(<vscale x 2 x i64> undef, <8 x i64> [[AND]], i64 0)
|
||||
// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 2 x i64> @llvm.vector.insert.nxv2i64.v8i64(<vscale x 2 x i64> undef, <8 x i64> [[AND]], i64 0)
|
||||
// CHECK-NEXT: ret <vscale x 2 x i64> [[CASTSCALABLESVE]]
|
||||
//
|
||||
fixed_uint64_t and_u64(fixed_uint64_t a, fixed_uint64_t b) {
|
||||
|
@ -143,11 +143,11 @@ fixed_uint64_t and_u64(fixed_uint64_t a, fixed_uint64_t b) {
|
|||
// CHECK-LABEL: @or_bool(
|
||||
// CHECK-NEXT: entry:
|
||||
// CHECK-NEXT: [[A_COERCE:%.*]] = bitcast <vscale x 16 x i1> [[TMP0:%.*]] to <vscale x 2 x i8>
|
||||
// CHECK-NEXT: [[A:%.*]] = call <8 x i8> @llvm.experimental.vector.extract.v8i8.nxv2i8(<vscale x 2 x i8> [[A_COERCE]], i64 0)
|
||||
// CHECK-NEXT: [[A:%.*]] = call <8 x i8> @llvm.vector.extract.v8i8.nxv2i8(<vscale x 2 x i8> [[A_COERCE]], i64 0)
|
||||
// CHECK-NEXT: [[B_COERCE:%.*]] = bitcast <vscale x 16 x i1> [[TMP1:%.*]] to <vscale x 2 x i8>
|
||||
// CHECK-NEXT: [[B:%.*]] = call <8 x i8> @llvm.experimental.vector.extract.v8i8.nxv2i8(<vscale x 2 x i8> [[B_COERCE]], i64 0)
|
||||
// CHECK-NEXT: [[B:%.*]] = call <8 x i8> @llvm.vector.extract.v8i8.nxv2i8(<vscale x 2 x i8> [[B_COERCE]], i64 0)
|
||||
// CHECK-NEXT: [[OR:%.*]] = or <8 x i8> [[A]], [[B]]
|
||||
// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 2 x i8> @llvm.experimental.vector.insert.nxv2i8.v8i8(<vscale x 2 x i8> undef, <8 x i8> [[OR]], i64 0)
|
||||
// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 2 x i8> @llvm.vector.insert.nxv2i8.v8i8(<vscale x 2 x i8> undef, <8 x i8> [[OR]], i64 0)
|
||||
// CHECK-NEXT: [[TMP2:%.*]] = bitcast <vscale x 2 x i8> [[CASTSCALABLESVE]] to <vscale x 16 x i1>
|
||||
// CHECK-NEXT: ret <vscale x 16 x i1> [[TMP2]]
|
||||
//
|
||||
|
@ -157,10 +157,10 @@ fixed_bool_t or_bool(fixed_bool_t a, fixed_bool_t b) {
|
|||
|
||||
// CHECK-LABEL: @or_i8(
|
||||
// CHECK-NEXT: entry:
|
||||
// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8(<vscale x 16 x i8> [[A_COERCE:%.*]], i64 0)
|
||||
// CHECK-NEXT: [[B:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8(<vscale x 16 x i8> [[B_COERCE:%.*]], i64 0)
|
||||
// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.vector.extract.v64i8.nxv16i8(<vscale x 16 x i8> [[A_COERCE:%.*]], i64 0)
|
||||
// CHECK-NEXT: [[B:%.*]] = call <64 x i8> @llvm.vector.extract.v64i8.nxv16i8(<vscale x 16 x i8> [[B_COERCE:%.*]], i64 0)
|
||||
// CHECK-NEXT: [[OR:%.*]] = or <64 x i8> [[A]], [[B]]
|
||||
// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 16 x i8> @llvm.experimental.vector.insert.nxv16i8.v64i8(<vscale x 16 x i8> undef, <64 x i8> [[OR]], i64 0)
|
||||
// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 16 x i8> @llvm.vector.insert.nxv16i8.v64i8(<vscale x 16 x i8> undef, <64 x i8> [[OR]], i64 0)
|
||||
// CHECK-NEXT: ret <vscale x 16 x i8> [[CASTSCALABLESVE]]
|
||||
//
|
||||
fixed_int8_t or_i8(fixed_int8_t a, fixed_int8_t b) {
|
||||
|
@ -169,10 +169,10 @@ fixed_int8_t or_i8(fixed_int8_t a, fixed_int8_t b) {
|
|||
|
||||
// CHECK-LABEL: @or_i16(
|
||||
// CHECK-NEXT: entry:
|
||||
// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16(<vscale x 8 x i16> [[A_COERCE:%.*]], i64 0)
|
||||
// CHECK-NEXT: [[B:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16(<vscale x 8 x i16> [[B_COERCE:%.*]], i64 0)
|
||||
// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.vector.extract.v32i16.nxv8i16(<vscale x 8 x i16> [[A_COERCE:%.*]], i64 0)
|
||||
// CHECK-NEXT: [[B:%.*]] = call <32 x i16> @llvm.vector.extract.v32i16.nxv8i16(<vscale x 8 x i16> [[B_COERCE:%.*]], i64 0)
|
||||
// CHECK-NEXT: [[OR:%.*]] = or <32 x i16> [[A]], [[B]]
|
||||
// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 8 x i16> @llvm.experimental.vector.insert.nxv8i16.v32i16(<vscale x 8 x i16> undef, <32 x i16> [[OR]], i64 0)
|
||||
// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 8 x i16> @llvm.vector.insert.nxv8i16.v32i16(<vscale x 8 x i16> undef, <32 x i16> [[OR]], i64 0)
|
||||
// CHECK-NEXT: ret <vscale x 8 x i16> [[CASTSCALABLESVE]]
|
||||
//
|
||||
fixed_int16_t or_i16(fixed_int16_t a, fixed_int16_t b) {
|
||||
|
@ -181,10 +181,10 @@ fixed_int16_t or_i16(fixed_int16_t a, fixed_int16_t b) {
|
|||
|
||||
// CHECK-LABEL: @or_i32(
|
||||
// CHECK-NEXT: entry:
|
||||
// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32(<vscale x 4 x i32> [[A_COERCE:%.*]], i64 0)
|
||||
// CHECK-NEXT: [[B:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32(<vscale x 4 x i32> [[B_COERCE:%.*]], i64 0)
|
||||
// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.vector.extract.v16i32.nxv4i32(<vscale x 4 x i32> [[A_COERCE:%.*]], i64 0)
|
||||
// CHECK-NEXT: [[B:%.*]] = call <16 x i32> @llvm.vector.extract.v16i32.nxv4i32(<vscale x 4 x i32> [[B_COERCE:%.*]], i64 0)
|
||||
// CHECK-NEXT: [[OR:%.*]] = or <16 x i32> [[A]], [[B]]
|
||||
// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vector.insert.nxv4i32.v16i32(<vscale x 4 x i32> undef, <16 x i32> [[OR]], i64 0)
|
||||
// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 4 x i32> @llvm.vector.insert.nxv4i32.v16i32(<vscale x 4 x i32> undef, <16 x i32> [[OR]], i64 0)
|
||||
// CHECK-NEXT: ret <vscale x 4 x i32> [[CASTSCALABLESVE]]
|
||||
//
|
||||
fixed_int32_t or_i32(fixed_int32_t a, fixed_int32_t b) {
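For the non-predicate element types the pattern is the same minus the bitcasts. A hand-written sketch of the IR shape the updated or_i32 checks above expect, again assuming a 512-bit vector width (names are illustrative):

  ; Sketch only: mirrors the or_i32 CHECK lines above.
  define <vscale x 4 x i32> @or_i32_sketch(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b) {
  entry:
    %a.fixed = call <16 x i32> @llvm.vector.extract.v16i32.nxv4i32(<vscale x 4 x i32> %a, i64 0)
    %b.fixed = call <16 x i32> @llvm.vector.extract.v16i32.nxv4i32(<vscale x 4 x i32> %b, i64 0)
    %or = or <16 x i32> %a.fixed, %b.fixed
    ; The fixed-width result is inserted back into a scalable vector at element 0.
    %res = call <vscale x 4 x i32> @llvm.vector.insert.nxv4i32.v16i32(<vscale x 4 x i32> undef, <16 x i32> %or, i64 0)
    ret <vscale x 4 x i32> %res
  }

  declare <16 x i32> @llvm.vector.extract.v16i32.nxv4i32(<vscale x 4 x i32>, i64)
  declare <vscale x 4 x i32> @llvm.vector.insert.nxv4i32.v16i32(<vscale x 4 x i32>, <16 x i32>, i64)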
@@ -193,10 +193,10 @@ fixed_int32_t or_i32(fixed_int32_t a, fixed_int32_t b) {
// CHECK-LABEL: @or_i64(
|
||||
// CHECK-NEXT: entry:
|
||||
// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64(<vscale x 2 x i64> [[A_COERCE:%.*]], i64 0)
|
||||
// CHECK-NEXT: [[B:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64(<vscale x 2 x i64> [[B_COERCE:%.*]], i64 0)
|
||||
// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.vector.extract.v8i64.nxv2i64(<vscale x 2 x i64> [[A_COERCE:%.*]], i64 0)
|
||||
// CHECK-NEXT: [[B:%.*]] = call <8 x i64> @llvm.vector.extract.v8i64.nxv2i64(<vscale x 2 x i64> [[B_COERCE:%.*]], i64 0)
|
||||
// CHECK-NEXT: [[OR:%.*]] = or <8 x i64> [[A]], [[B]]
|
||||
// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 2 x i64> @llvm.experimental.vector.insert.nxv2i64.v8i64(<vscale x 2 x i64> undef, <8 x i64> [[OR]], i64 0)
|
||||
// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 2 x i64> @llvm.vector.insert.nxv2i64.v8i64(<vscale x 2 x i64> undef, <8 x i64> [[OR]], i64 0)
|
||||
// CHECK-NEXT: ret <vscale x 2 x i64> [[CASTSCALABLESVE]]
|
||||
//
|
||||
fixed_int64_t or_i64(fixed_int64_t a, fixed_int64_t b) {
@@ -205,10 +205,10 @@ fixed_int64_t or_i64(fixed_int64_t a, fixed_int64_t b) {
// CHECK-LABEL: @or_u8(
|
||||
// CHECK-NEXT: entry:
|
||||
// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8(<vscale x 16 x i8> [[A_COERCE:%.*]], i64 0)
|
||||
// CHECK-NEXT: [[B:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8(<vscale x 16 x i8> [[B_COERCE:%.*]], i64 0)
|
||||
// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.vector.extract.v64i8.nxv16i8(<vscale x 16 x i8> [[A_COERCE:%.*]], i64 0)
|
||||
// CHECK-NEXT: [[B:%.*]] = call <64 x i8> @llvm.vector.extract.v64i8.nxv16i8(<vscale x 16 x i8> [[B_COERCE:%.*]], i64 0)
|
||||
// CHECK-NEXT: [[OR:%.*]] = or <64 x i8> [[A]], [[B]]
|
||||
// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 16 x i8> @llvm.experimental.vector.insert.nxv16i8.v64i8(<vscale x 16 x i8> undef, <64 x i8> [[OR]], i64 0)
|
||||
// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 16 x i8> @llvm.vector.insert.nxv16i8.v64i8(<vscale x 16 x i8> undef, <64 x i8> [[OR]], i64 0)
|
||||
// CHECK-NEXT: ret <vscale x 16 x i8> [[CASTSCALABLESVE]]
|
||||
//
|
||||
fixed_uint8_t or_u8(fixed_uint8_t a, fixed_uint8_t b) {
@@ -217,10 +217,10 @@ fixed_uint8_t or_u8(fixed_uint8_t a, fixed_uint8_t b) {
// CHECK-LABEL: @or_u16(
|
||||
// CHECK-NEXT: entry:
|
||||
// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16(<vscale x 8 x i16> [[A_COERCE:%.*]], i64 0)
|
||||
// CHECK-NEXT: [[B:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16(<vscale x 8 x i16> [[B_COERCE:%.*]], i64 0)
|
||||
// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.vector.extract.v32i16.nxv8i16(<vscale x 8 x i16> [[A_COERCE:%.*]], i64 0)
|
||||
// CHECK-NEXT: [[B:%.*]] = call <32 x i16> @llvm.vector.extract.v32i16.nxv8i16(<vscale x 8 x i16> [[B_COERCE:%.*]], i64 0)
|
||||
// CHECK-NEXT: [[OR:%.*]] = or <32 x i16> [[A]], [[B]]
|
||||
// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 8 x i16> @llvm.experimental.vector.insert.nxv8i16.v32i16(<vscale x 8 x i16> undef, <32 x i16> [[OR]], i64 0)
|
||||
// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 8 x i16> @llvm.vector.insert.nxv8i16.v32i16(<vscale x 8 x i16> undef, <32 x i16> [[OR]], i64 0)
|
||||
// CHECK-NEXT: ret <vscale x 8 x i16> [[CASTSCALABLESVE]]
|
||||
//
|
||||
fixed_uint16_t or_u16(fixed_uint16_t a, fixed_uint16_t b) {
@@ -229,10 +229,10 @@ fixed_uint16_t or_u16(fixed_uint16_t a, fixed_uint16_t b) {
// CHECK-LABEL: @or_u32(
|
||||
// CHECK-NEXT: entry:
|
||||
// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32(<vscale x 4 x i32> [[A_COERCE:%.*]], i64 0)
|
||||
// CHECK-NEXT: [[B:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32(<vscale x 4 x i32> [[B_COERCE:%.*]], i64 0)
|
||||
// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.vector.extract.v16i32.nxv4i32(<vscale x 4 x i32> [[A_COERCE:%.*]], i64 0)
|
||||
// CHECK-NEXT: [[B:%.*]] = call <16 x i32> @llvm.vector.extract.v16i32.nxv4i32(<vscale x 4 x i32> [[B_COERCE:%.*]], i64 0)
|
||||
// CHECK-NEXT: [[OR:%.*]] = or <16 x i32> [[A]], [[B]]
|
||||
// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vector.insert.nxv4i32.v16i32(<vscale x 4 x i32> undef, <16 x i32> [[OR]], i64 0)
|
||||
// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 4 x i32> @llvm.vector.insert.nxv4i32.v16i32(<vscale x 4 x i32> undef, <16 x i32> [[OR]], i64 0)
|
||||
// CHECK-NEXT: ret <vscale x 4 x i32> [[CASTSCALABLESVE]]
|
||||
//
|
||||
fixed_uint32_t or_u32(fixed_uint32_t a, fixed_uint32_t b) {
@@ -241,10 +241,10 @@ fixed_uint32_t or_u32(fixed_uint32_t a, fixed_uint32_t b) {
// CHECK-LABEL: @or_u64(
|
||||
// CHECK-NEXT: entry:
|
||||
// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64(<vscale x 2 x i64> [[A_COERCE:%.*]], i64 0)
|
||||
// CHECK-NEXT: [[B:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64(<vscale x 2 x i64> [[B_COERCE:%.*]], i64 0)
|
||||
// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.vector.extract.v8i64.nxv2i64(<vscale x 2 x i64> [[A_COERCE:%.*]], i64 0)
|
||||
// CHECK-NEXT: [[B:%.*]] = call <8 x i64> @llvm.vector.extract.v8i64.nxv2i64(<vscale x 2 x i64> [[B_COERCE:%.*]], i64 0)
|
||||
// CHECK-NEXT: [[OR:%.*]] = or <8 x i64> [[A]], [[B]]
|
||||
// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 2 x i64> @llvm.experimental.vector.insert.nxv2i64.v8i64(<vscale x 2 x i64> undef, <8 x i64> [[OR]], i64 0)
|
||||
// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 2 x i64> @llvm.vector.insert.nxv2i64.v8i64(<vscale x 2 x i64> undef, <8 x i64> [[OR]], i64 0)
|
||||
// CHECK-NEXT: ret <vscale x 2 x i64> [[CASTSCALABLESVE]]
|
||||
//
|
||||
fixed_uint64_t or_u64(fixed_uint64_t a, fixed_uint64_t b) {
@@ -256,11 +256,11 @@ fixed_uint64_t or_u64(fixed_uint64_t a, fixed_uint64_t b) {
// CHECK-LABEL: @xor_bool(
|
||||
// CHECK-NEXT: entry:
|
||||
// CHECK-NEXT: [[A_COERCE:%.*]] = bitcast <vscale x 16 x i1> [[TMP0:%.*]] to <vscale x 2 x i8>
|
||||
// CHECK-NEXT: [[A:%.*]] = call <8 x i8> @llvm.experimental.vector.extract.v8i8.nxv2i8(<vscale x 2 x i8> [[A_COERCE]], i64 0)
|
||||
// CHECK-NEXT: [[A:%.*]] = call <8 x i8> @llvm.vector.extract.v8i8.nxv2i8(<vscale x 2 x i8> [[A_COERCE]], i64 0)
|
||||
// CHECK-NEXT: [[B_COERCE:%.*]] = bitcast <vscale x 16 x i1> [[TMP1:%.*]] to <vscale x 2 x i8>
|
||||
// CHECK-NEXT: [[B:%.*]] = call <8 x i8> @llvm.experimental.vector.extract.v8i8.nxv2i8(<vscale x 2 x i8> [[B_COERCE]], i64 0)
|
||||
// CHECK-NEXT: [[B:%.*]] = call <8 x i8> @llvm.vector.extract.v8i8.nxv2i8(<vscale x 2 x i8> [[B_COERCE]], i64 0)
|
||||
// CHECK-NEXT: [[XOR:%.*]] = xor <8 x i8> [[A]], [[B]]
|
||||
// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 2 x i8> @llvm.experimental.vector.insert.nxv2i8.v8i8(<vscale x 2 x i8> undef, <8 x i8> [[XOR]], i64 0)
|
||||
// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 2 x i8> @llvm.vector.insert.nxv2i8.v8i8(<vscale x 2 x i8> undef, <8 x i8> [[XOR]], i64 0)
|
||||
// CHECK-NEXT: [[TMP2:%.*]] = bitcast <vscale x 2 x i8> [[CASTSCALABLESVE]] to <vscale x 16 x i1>
|
||||
// CHECK-NEXT: ret <vscale x 16 x i1> [[TMP2]]
|
||||
//
@@ -270,10 +270,10 @@ fixed_bool_t xor_bool(fixed_bool_t a, fixed_bool_t b) {
// CHECK-LABEL: @xor_i8(
|
||||
// CHECK-NEXT: entry:
|
||||
// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8(<vscale x 16 x i8> [[A_COERCE:%.*]], i64 0)
|
||||
// CHECK-NEXT: [[B:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8(<vscale x 16 x i8> [[B_COERCE:%.*]], i64 0)
|
||||
// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.vector.extract.v64i8.nxv16i8(<vscale x 16 x i8> [[A_COERCE:%.*]], i64 0)
|
||||
// CHECK-NEXT: [[B:%.*]] = call <64 x i8> @llvm.vector.extract.v64i8.nxv16i8(<vscale x 16 x i8> [[B_COERCE:%.*]], i64 0)
|
||||
// CHECK-NEXT: [[XOR:%.*]] = xor <64 x i8> [[A]], [[B]]
|
||||
// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 16 x i8> @llvm.experimental.vector.insert.nxv16i8.v64i8(<vscale x 16 x i8> undef, <64 x i8> [[XOR]], i64 0)
|
||||
// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 16 x i8> @llvm.vector.insert.nxv16i8.v64i8(<vscale x 16 x i8> undef, <64 x i8> [[XOR]], i64 0)
|
||||
// CHECK-NEXT: ret <vscale x 16 x i8> [[CASTSCALABLESVE]]
|
||||
//
|
||||
fixed_int8_t xor_i8(fixed_int8_t a, fixed_int8_t b) {
@@ -282,10 +282,10 @@ fixed_int8_t xor_i8(fixed_int8_t a, fixed_int8_t b) {
// CHECK-LABEL: @xor_i16(
|
||||
// CHECK-NEXT: entry:
|
||||
// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16(<vscale x 8 x i16> [[A_COERCE:%.*]], i64 0)
|
||||
// CHECK-NEXT: [[B:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16(<vscale x 8 x i16> [[B_COERCE:%.*]], i64 0)
|
||||
// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.vector.extract.v32i16.nxv8i16(<vscale x 8 x i16> [[A_COERCE:%.*]], i64 0)
|
||||
// CHECK-NEXT: [[B:%.*]] = call <32 x i16> @llvm.vector.extract.v32i16.nxv8i16(<vscale x 8 x i16> [[B_COERCE:%.*]], i64 0)
|
||||
// CHECK-NEXT: [[XOR:%.*]] = xor <32 x i16> [[A]], [[B]]
|
||||
// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 8 x i16> @llvm.experimental.vector.insert.nxv8i16.v32i16(<vscale x 8 x i16> undef, <32 x i16> [[XOR]], i64 0)
|
||||
// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 8 x i16> @llvm.vector.insert.nxv8i16.v32i16(<vscale x 8 x i16> undef, <32 x i16> [[XOR]], i64 0)
|
||||
// CHECK-NEXT: ret <vscale x 8 x i16> [[CASTSCALABLESVE]]
|
||||
//
|
||||
fixed_int16_t xor_i16(fixed_int16_t a, fixed_int16_t b) {
@@ -294,10 +294,10 @@ fixed_int16_t xor_i16(fixed_int16_t a, fixed_int16_t b) {
// CHECK-LABEL: @xor_i32(
|
||||
// CHECK-NEXT: entry:
|
||||
// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32(<vscale x 4 x i32> [[A_COERCE:%.*]], i64 0)
|
||||
// CHECK-NEXT: [[B:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32(<vscale x 4 x i32> [[B_COERCE:%.*]], i64 0)
|
||||
// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.vector.extract.v16i32.nxv4i32(<vscale x 4 x i32> [[A_COERCE:%.*]], i64 0)
|
||||
// CHECK-NEXT: [[B:%.*]] = call <16 x i32> @llvm.vector.extract.v16i32.nxv4i32(<vscale x 4 x i32> [[B_COERCE:%.*]], i64 0)
|
||||
// CHECK-NEXT: [[XOR:%.*]] = xor <16 x i32> [[A]], [[B]]
|
||||
// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vector.insert.nxv4i32.v16i32(<vscale x 4 x i32> undef, <16 x i32> [[XOR]], i64 0)
|
||||
// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 4 x i32> @llvm.vector.insert.nxv4i32.v16i32(<vscale x 4 x i32> undef, <16 x i32> [[XOR]], i64 0)
|
||||
// CHECK-NEXT: ret <vscale x 4 x i32> [[CASTSCALABLESVE]]
|
||||
//
|
||||
fixed_int32_t xor_i32(fixed_int32_t a, fixed_int32_t b) {
@@ -306,10 +306,10 @@ fixed_int32_t xor_i32(fixed_int32_t a, fixed_int32_t b) {
// CHECK-LABEL: @xor_i64(
|
||||
// CHECK-NEXT: entry:
|
||||
// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64(<vscale x 2 x i64> [[A_COERCE:%.*]], i64 0)
|
||||
// CHECK-NEXT: [[B:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64(<vscale x 2 x i64> [[B_COERCE:%.*]], i64 0)
|
||||
// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.vector.extract.v8i64.nxv2i64(<vscale x 2 x i64> [[A_COERCE:%.*]], i64 0)
|
||||
// CHECK-NEXT: [[B:%.*]] = call <8 x i64> @llvm.vector.extract.v8i64.nxv2i64(<vscale x 2 x i64> [[B_COERCE:%.*]], i64 0)
|
||||
// CHECK-NEXT: [[XOR:%.*]] = xor <8 x i64> [[A]], [[B]]
|
||||
// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 2 x i64> @llvm.experimental.vector.insert.nxv2i64.v8i64(<vscale x 2 x i64> undef, <8 x i64> [[XOR]], i64 0)
|
||||
// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 2 x i64> @llvm.vector.insert.nxv2i64.v8i64(<vscale x 2 x i64> undef, <8 x i64> [[XOR]], i64 0)
|
||||
// CHECK-NEXT: ret <vscale x 2 x i64> [[CASTSCALABLESVE]]
|
||||
//
|
||||
fixed_int64_t xor_i64(fixed_int64_t a, fixed_int64_t b) {
@@ -318,10 +318,10 @@ fixed_int64_t xor_i64(fixed_int64_t a, fixed_int64_t b) {
// CHECK-LABEL: @xor_u8(
|
||||
// CHECK-NEXT: entry:
|
||||
// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8(<vscale x 16 x i8> [[A_COERCE:%.*]], i64 0)
|
||||
// CHECK-NEXT: [[B:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8(<vscale x 16 x i8> [[B_COERCE:%.*]], i64 0)
|
||||
// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.vector.extract.v64i8.nxv16i8(<vscale x 16 x i8> [[A_COERCE:%.*]], i64 0)
|
||||
// CHECK-NEXT: [[B:%.*]] = call <64 x i8> @llvm.vector.extract.v64i8.nxv16i8(<vscale x 16 x i8> [[B_COERCE:%.*]], i64 0)
|
||||
// CHECK-NEXT: [[XOR:%.*]] = xor <64 x i8> [[A]], [[B]]
|
||||
// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 16 x i8> @llvm.experimental.vector.insert.nxv16i8.v64i8(<vscale x 16 x i8> undef, <64 x i8> [[XOR]], i64 0)
|
||||
// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 16 x i8> @llvm.vector.insert.nxv16i8.v64i8(<vscale x 16 x i8> undef, <64 x i8> [[XOR]], i64 0)
|
||||
// CHECK-NEXT: ret <vscale x 16 x i8> [[CASTSCALABLESVE]]
|
||||
//
|
||||
fixed_uint8_t xor_u8(fixed_uint8_t a, fixed_uint8_t b) {
@@ -330,10 +330,10 @@ fixed_uint8_t xor_u8(fixed_uint8_t a, fixed_uint8_t b) {
// CHECK-LABEL: @xor_u16(
|
||||
// CHECK-NEXT: entry:
|
||||
// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16(<vscale x 8 x i16> [[A_COERCE:%.*]], i64 0)
|
||||
// CHECK-NEXT: [[B:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16(<vscale x 8 x i16> [[B_COERCE:%.*]], i64 0)
|
||||
// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.vector.extract.v32i16.nxv8i16(<vscale x 8 x i16> [[A_COERCE:%.*]], i64 0)
|
||||
// CHECK-NEXT: [[B:%.*]] = call <32 x i16> @llvm.vector.extract.v32i16.nxv8i16(<vscale x 8 x i16> [[B_COERCE:%.*]], i64 0)
|
||||
// CHECK-NEXT: [[XOR:%.*]] = xor <32 x i16> [[A]], [[B]]
|
||||
// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 8 x i16> @llvm.experimental.vector.insert.nxv8i16.v32i16(<vscale x 8 x i16> undef, <32 x i16> [[XOR]], i64 0)
|
||||
// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 8 x i16> @llvm.vector.insert.nxv8i16.v32i16(<vscale x 8 x i16> undef, <32 x i16> [[XOR]], i64 0)
|
||||
// CHECK-NEXT: ret <vscale x 8 x i16> [[CASTSCALABLESVE]]
|
||||
//
|
||||
fixed_uint16_t xor_u16(fixed_uint16_t a, fixed_uint16_t b) {
@@ -342,10 +342,10 @@ fixed_uint16_t xor_u16(fixed_uint16_t a, fixed_uint16_t b) {
// CHECK-LABEL: @xor_u32(
|
||||
// CHECK-NEXT: entry:
|
||||
// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32(<vscale x 4 x i32> [[A_COERCE:%.*]], i64 0)
|
||||
// CHECK-NEXT: [[B:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32(<vscale x 4 x i32> [[B_COERCE:%.*]], i64 0)
|
||||
// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.vector.extract.v16i32.nxv4i32(<vscale x 4 x i32> [[A_COERCE:%.*]], i64 0)
|
||||
// CHECK-NEXT: [[B:%.*]] = call <16 x i32> @llvm.vector.extract.v16i32.nxv4i32(<vscale x 4 x i32> [[B_COERCE:%.*]], i64 0)
|
||||
// CHECK-NEXT: [[XOR:%.*]] = xor <16 x i32> [[A]], [[B]]
|
||||
// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vector.insert.nxv4i32.v16i32(<vscale x 4 x i32> undef, <16 x i32> [[XOR]], i64 0)
|
||||
// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 4 x i32> @llvm.vector.insert.nxv4i32.v16i32(<vscale x 4 x i32> undef, <16 x i32> [[XOR]], i64 0)
|
||||
// CHECK-NEXT: ret <vscale x 4 x i32> [[CASTSCALABLESVE]]
|
||||
//
|
||||
fixed_uint32_t xor_u32(fixed_uint32_t a, fixed_uint32_t b) {
@@ -354,10 +354,10 @@ fixed_uint32_t xor_u32(fixed_uint32_t a, fixed_uint32_t b) {
// CHECK-LABEL: @xor_u64(
|
||||
// CHECK-NEXT: entry:
|
||||
// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64(<vscale x 2 x i64> [[A_COERCE:%.*]], i64 0)
|
||||
// CHECK-NEXT: [[B:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64(<vscale x 2 x i64> [[B_COERCE:%.*]], i64 0)
|
||||
// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.vector.extract.v8i64.nxv2i64(<vscale x 2 x i64> [[A_COERCE:%.*]], i64 0)
|
||||
// CHECK-NEXT: [[B:%.*]] = call <8 x i64> @llvm.vector.extract.v8i64.nxv2i64(<vscale x 2 x i64> [[B_COERCE:%.*]], i64 0)
|
||||
// CHECK-NEXT: [[XOR:%.*]] = xor <8 x i64> [[A]], [[B]]
|
||||
// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 2 x i64> @llvm.experimental.vector.insert.nxv2i64.v8i64(<vscale x 2 x i64> undef, <8 x i64> [[XOR]], i64 0)
|
||||
// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 2 x i64> @llvm.vector.insert.nxv2i64.v8i64(<vscale x 2 x i64> undef, <8 x i64> [[XOR]], i64 0)
|
||||
// CHECK-NEXT: ret <vscale x 2 x i64> [[CASTSCALABLESVE]]
|
||||
//
|
||||
fixed_uint64_t xor_u64(fixed_uint64_t a, fixed_uint64_t b) {
@@ -369,9 +369,9 @@ fixed_uint64_t xor_u64(fixed_uint64_t a, fixed_uint64_t b) {
// CHECK-LABEL: @neg_bool(
|
||||
// CHECK-NEXT: entry:
|
||||
// CHECK-NEXT: [[A_COERCE:%.*]] = bitcast <vscale x 16 x i1> [[TMP0:%.*]] to <vscale x 2 x i8>
|
||||
// CHECK-NEXT: [[A:%.*]] = call <8 x i8> @llvm.experimental.vector.extract.v8i8.nxv2i8(<vscale x 2 x i8> [[A_COERCE]], i64 0)
|
||||
// CHECK-NEXT: [[A:%.*]] = call <8 x i8> @llvm.vector.extract.v8i8.nxv2i8(<vscale x 2 x i8> [[A_COERCE]], i64 0)
|
||||
// CHECK-NEXT: [[NEG:%.*]] = xor <8 x i8> [[A]], <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>
|
||||
// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 2 x i8> @llvm.experimental.vector.insert.nxv2i8.v8i8(<vscale x 2 x i8> undef, <8 x i8> [[NEG]], i64 0)
|
||||
// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 2 x i8> @llvm.vector.insert.nxv2i8.v8i8(<vscale x 2 x i8> undef, <8 x i8> [[NEG]], i64 0)
|
||||
// CHECK-NEXT: [[TMP1:%.*]] = bitcast <vscale x 2 x i8> [[CASTSCALABLESVE]] to <vscale x 16 x i1>
|
||||
// CHECK-NEXT: ret <vscale x 16 x i1> [[TMP1]]
|
||||
//
@@ -381,9 +381,9 @@ fixed_bool_t neg_bool(fixed_bool_t a) {
// CHECK-LABEL: @neg_i8(
|
||||
// CHECK-NEXT: entry:
|
||||
// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8(<vscale x 16 x i8> [[A_COERCE:%.*]], i64 0)
|
||||
// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.vector.extract.v64i8.nxv16i8(<vscale x 16 x i8> [[A_COERCE:%.*]], i64 0)
|
||||
// CHECK-NEXT: [[NEG:%.*]] = xor <64 x i8> [[A]], <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>
|
||||
// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 16 x i8> @llvm.experimental.vector.insert.nxv16i8.v64i8(<vscale x 16 x i8> undef, <64 x i8> [[NEG]], i64 0)
|
||||
// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 16 x i8> @llvm.vector.insert.nxv16i8.v64i8(<vscale x 16 x i8> undef, <64 x i8> [[NEG]], i64 0)
|
||||
// CHECK-NEXT: ret <vscale x 16 x i8> [[CASTSCALABLESVE]]
|
||||
//
|
||||
fixed_int8_t neg_i8(fixed_int8_t a) {
@@ -392,9 +392,9 @@ fixed_int8_t neg_i8(fixed_int8_t a) {
// CHECK-LABEL: @neg_i16(
|
||||
// CHECK-NEXT: entry:
|
||||
// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16(<vscale x 8 x i16> [[A_COERCE:%.*]], i64 0)
|
||||
// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.vector.extract.v32i16.nxv8i16(<vscale x 8 x i16> [[A_COERCE:%.*]], i64 0)
|
||||
// CHECK-NEXT: [[NEG:%.*]] = xor <32 x i16> [[A]], <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>
|
||||
// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 8 x i16> @llvm.experimental.vector.insert.nxv8i16.v32i16(<vscale x 8 x i16> undef, <32 x i16> [[NEG]], i64 0)
|
||||
// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 8 x i16> @llvm.vector.insert.nxv8i16.v32i16(<vscale x 8 x i16> undef, <32 x i16> [[NEG]], i64 0)
|
||||
// CHECK-NEXT: ret <vscale x 8 x i16> [[CASTSCALABLESVE]]
|
||||
//
|
||||
fixed_int16_t neg_i16(fixed_int16_t a) {
@@ -403,9 +403,9 @@ fixed_int16_t neg_i16(fixed_int16_t a) {
// CHECK-LABEL: @neg_i32(
|
||||
// CHECK-NEXT: entry:
|
||||
// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32(<vscale x 4 x i32> [[A_COERCE:%.*]], i64 0)
|
||||
// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.vector.extract.v16i32.nxv4i32(<vscale x 4 x i32> [[A_COERCE:%.*]], i64 0)
|
||||
// CHECK-NEXT: [[NEG:%.*]] = xor <16 x i32> [[A]], <i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1>
|
||||
// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vector.insert.nxv4i32.v16i32(<vscale x 4 x i32> undef, <16 x i32> [[NEG]], i64 0)
|
||||
// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 4 x i32> @llvm.vector.insert.nxv4i32.v16i32(<vscale x 4 x i32> undef, <16 x i32> [[NEG]], i64 0)
|
||||
// CHECK-NEXT: ret <vscale x 4 x i32> [[CASTSCALABLESVE]]
|
||||
//
|
||||
fixed_int32_t neg_i32(fixed_int32_t a) {
@@ -414,9 +414,9 @@ fixed_int32_t neg_i32(fixed_int32_t a) {
// CHECK-LABEL: @neg_i64(
|
||||
// CHECK-NEXT: entry:
|
||||
// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64(<vscale x 2 x i64> [[A_COERCE:%.*]], i64 0)
|
||||
// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.vector.extract.v8i64.nxv2i64(<vscale x 2 x i64> [[A_COERCE:%.*]], i64 0)
|
||||
// CHECK-NEXT: [[NEG:%.*]] = xor <8 x i64> [[A]], <i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1>
|
||||
// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 2 x i64> @llvm.experimental.vector.insert.nxv2i64.v8i64(<vscale x 2 x i64> undef, <8 x i64> [[NEG]], i64 0)
|
||||
// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 2 x i64> @llvm.vector.insert.nxv2i64.v8i64(<vscale x 2 x i64> undef, <8 x i64> [[NEG]], i64 0)
|
||||
// CHECK-NEXT: ret <vscale x 2 x i64> [[CASTSCALABLESVE]]
|
||||
//
|
||||
fixed_int64_t neg_i64(fixed_int64_t a) {
@@ -425,9 +425,9 @@ fixed_int64_t neg_i64(fixed_int64_t a) {
// CHECK-LABEL: @neg_u8(
|
||||
// CHECK-NEXT: entry:
|
||||
// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8(<vscale x 16 x i8> [[A_COERCE:%.*]], i64 0)
|
||||
// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.vector.extract.v64i8.nxv16i8(<vscale x 16 x i8> [[A_COERCE:%.*]], i64 0)
|
||||
// CHECK-NEXT: [[NEG:%.*]] = xor <64 x i8> [[A]], <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>
|
||||
// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 16 x i8> @llvm.experimental.vector.insert.nxv16i8.v64i8(<vscale x 16 x i8> undef, <64 x i8> [[NEG]], i64 0)
|
||||
// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 16 x i8> @llvm.vector.insert.nxv16i8.v64i8(<vscale x 16 x i8> undef, <64 x i8> [[NEG]], i64 0)
|
||||
// CHECK-NEXT: ret <vscale x 16 x i8> [[CASTSCALABLESVE]]
|
||||
//
|
||||
fixed_uint8_t neg_u8(fixed_uint8_t a) {
@@ -436,9 +436,9 @@ fixed_uint8_t neg_u8(fixed_uint8_t a) {
// CHECK-LABEL: @neg_u16(
|
||||
// CHECK-NEXT: entry:
|
||||
// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16(<vscale x 8 x i16> [[A_COERCE:%.*]], i64 0)
|
||||
// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.vector.extract.v32i16.nxv8i16(<vscale x 8 x i16> [[A_COERCE:%.*]], i64 0)
|
||||
// CHECK-NEXT: [[NEG:%.*]] = xor <32 x i16> [[A]], <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>
|
||||
// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 8 x i16> @llvm.experimental.vector.insert.nxv8i16.v32i16(<vscale x 8 x i16> undef, <32 x i16> [[NEG]], i64 0)
|
||||
// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 8 x i16> @llvm.vector.insert.nxv8i16.v32i16(<vscale x 8 x i16> undef, <32 x i16> [[NEG]], i64 0)
|
||||
// CHECK-NEXT: ret <vscale x 8 x i16> [[CASTSCALABLESVE]]
|
||||
//
|
||||
fixed_uint16_t neg_u16(fixed_uint16_t a) {
@@ -447,9 +447,9 @@ fixed_uint16_t neg_u16(fixed_uint16_t a) {
// CHECK-LABEL: @neg_u32(
|
||||
// CHECK-NEXT: entry:
|
||||
// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32(<vscale x 4 x i32> [[A_COERCE:%.*]], i64 0)
|
||||
// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.vector.extract.v16i32.nxv4i32(<vscale x 4 x i32> [[A_COERCE:%.*]], i64 0)
|
||||
// CHECK-NEXT: [[NEG:%.*]] = xor <16 x i32> [[A]], <i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1>
|
||||
// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vector.insert.nxv4i32.v16i32(<vscale x 4 x i32> undef, <16 x i32> [[NEG]], i64 0)
|
||||
// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 4 x i32> @llvm.vector.insert.nxv4i32.v16i32(<vscale x 4 x i32> undef, <16 x i32> [[NEG]], i64 0)
|
||||
// CHECK-NEXT: ret <vscale x 4 x i32> [[CASTSCALABLESVE]]
|
||||
//
|
||||
fixed_uint32_t neg_u32(fixed_uint32_t a) {
@@ -458,9 +458,9 @@ fixed_uint32_t neg_u32(fixed_uint32_t a) {
// CHECK-LABEL: @neg_u64(
|
||||
// CHECK-NEXT: entry:
|
||||
// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64(<vscale x 2 x i64> [[A_COERCE:%.*]], i64 0)
|
||||
// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.vector.extract.v8i64.nxv2i64(<vscale x 2 x i64> [[A_COERCE:%.*]], i64 0)
|
||||
// CHECK-NEXT: [[NEG:%.*]] = xor <8 x i64> [[A]], <i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1>
|
||||
// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 2 x i64> @llvm.experimental.vector.insert.nxv2i64.v8i64(<vscale x 2 x i64> undef, <8 x i64> [[NEG]], i64 0)
|
||||
// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 2 x i64> @llvm.vector.insert.nxv2i64.v8i64(<vscale x 2 x i64> undef, <8 x i64> [[NEG]], i64 0)
|
||||
// CHECK-NEXT: ret <vscale x 2 x i64> [[CASTSCALABLESVE]]
|
||||
//
|
||||
fixed_uint64_t neg_u64(fixed_uint64_t a) {
File diff suppressed because it is too large
@@ -27,10 +27,10 @@ typedef svbool_t fixed_bool_t __attribute__((arm_sve_vector_bits(N)));
// CHECK-LABEL: @lshift_i8(
|
||||
// CHECK-NEXT: entry:
|
||||
// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8(<vscale x 16 x i8> [[A_COERCE:%.*]], i64 0)
|
||||
// CHECK-NEXT: [[B:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8(<vscale x 16 x i8> [[B_COERCE:%.*]], i64 0)
|
||||
// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.vector.extract.v64i8.nxv16i8(<vscale x 16 x i8> [[A_COERCE:%.*]], i64 0)
|
||||
// CHECK-NEXT: [[B:%.*]] = call <64 x i8> @llvm.vector.extract.v64i8.nxv16i8(<vscale x 16 x i8> [[B_COERCE:%.*]], i64 0)
|
||||
// CHECK-NEXT: [[SHL:%.*]] = shl <64 x i8> [[A]], [[B]]
|
||||
// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 16 x i8> @llvm.experimental.vector.insert.nxv16i8.v64i8(<vscale x 16 x i8> undef, <64 x i8> [[SHL]], i64 0)
|
||||
// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 16 x i8> @llvm.vector.insert.nxv16i8.v64i8(<vscale x 16 x i8> undef, <64 x i8> [[SHL]], i64 0)
|
||||
// CHECK-NEXT: ret <vscale x 16 x i8> [[CASTSCALABLESVE]]
|
||||
//
|
||||
fixed_int8_t lshift_i8(fixed_int8_t a, fixed_int8_t b) {
@@ -39,10 +39,10 @@ fixed_int8_t lshift_i8(fixed_int8_t a, fixed_int8_t b) {
// CHECK-LABEL: @rshift_i8(
|
||||
// CHECK-NEXT: entry:
|
||||
// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8(<vscale x 16 x i8> [[A_COERCE:%.*]], i64 0)
|
||||
// CHECK-NEXT: [[B:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8(<vscale x 16 x i8> [[B_COERCE:%.*]], i64 0)
|
||||
// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.vector.extract.v64i8.nxv16i8(<vscale x 16 x i8> [[A_COERCE:%.*]], i64 0)
|
||||
// CHECK-NEXT: [[B:%.*]] = call <64 x i8> @llvm.vector.extract.v64i8.nxv16i8(<vscale x 16 x i8> [[B_COERCE:%.*]], i64 0)
|
||||
// CHECK-NEXT: [[SHR:%.*]] = ashr <64 x i8> [[A]], [[B]]
|
||||
// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 16 x i8> @llvm.experimental.vector.insert.nxv16i8.v64i8(<vscale x 16 x i8> undef, <64 x i8> [[SHR]], i64 0)
|
||||
// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 16 x i8> @llvm.vector.insert.nxv16i8.v64i8(<vscale x 16 x i8> undef, <64 x i8> [[SHR]], i64 0)
|
||||
// CHECK-NEXT: ret <vscale x 16 x i8> [[CASTSCALABLESVE]]
|
||||
//
|
||||
fixed_int8_t rshift_i8(fixed_int8_t a, fixed_int8_t b) {
@@ -51,10 +51,10 @@ fixed_int8_t rshift_i8(fixed_int8_t a, fixed_int8_t b) {
// CHECK-LABEL: @lshift_u8(
|
||||
// CHECK-NEXT: entry:
|
||||
// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8(<vscale x 16 x i8> [[A_COERCE:%.*]], i64 0)
|
||||
// CHECK-NEXT: [[B:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8(<vscale x 16 x i8> [[B_COERCE:%.*]], i64 0)
|
||||
// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.vector.extract.v64i8.nxv16i8(<vscale x 16 x i8> [[A_COERCE:%.*]], i64 0)
|
||||
// CHECK-NEXT: [[B:%.*]] = call <64 x i8> @llvm.vector.extract.v64i8.nxv16i8(<vscale x 16 x i8> [[B_COERCE:%.*]], i64 0)
|
||||
// CHECK-NEXT: [[SHL:%.*]] = shl <64 x i8> [[A]], [[B]]
|
||||
// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 16 x i8> @llvm.experimental.vector.insert.nxv16i8.v64i8(<vscale x 16 x i8> undef, <64 x i8> [[SHL]], i64 0)
|
||||
// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 16 x i8> @llvm.vector.insert.nxv16i8.v64i8(<vscale x 16 x i8> undef, <64 x i8> [[SHL]], i64 0)
|
||||
// CHECK-NEXT: ret <vscale x 16 x i8> [[CASTSCALABLESVE]]
|
||||
//
|
||||
fixed_uint8_t lshift_u8(fixed_uint8_t a, fixed_uint8_t b) {
@@ -63,10 +63,10 @@ fixed_uint8_t lshift_u8(fixed_uint8_t a, fixed_uint8_t b) {
// CHECK-LABEL: @rshift_u8(
|
||||
// CHECK-NEXT: entry:
|
||||
// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8(<vscale x 16 x i8> [[A_COERCE:%.*]], i64 0)
|
||||
// CHECK-NEXT: [[B:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8(<vscale x 16 x i8> [[B_COERCE:%.*]], i64 0)
|
||||
// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.vector.extract.v64i8.nxv16i8(<vscale x 16 x i8> [[A_COERCE:%.*]], i64 0)
|
||||
// CHECK-NEXT: [[B:%.*]] = call <64 x i8> @llvm.vector.extract.v64i8.nxv16i8(<vscale x 16 x i8> [[B_COERCE:%.*]], i64 0)
|
||||
// CHECK-NEXT: [[SHR:%.*]] = lshr <64 x i8> [[A]], [[B]]
|
||||
// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 16 x i8> @llvm.experimental.vector.insert.nxv16i8.v64i8(<vscale x 16 x i8> undef, <64 x i8> [[SHR]], i64 0)
|
||||
// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 16 x i8> @llvm.vector.insert.nxv16i8.v64i8(<vscale x 16 x i8> undef, <64 x i8> [[SHR]], i64 0)
|
||||
// CHECK-NEXT: ret <vscale x 16 x i8> [[CASTSCALABLESVE]]
|
||||
//
|
||||
fixed_uint8_t rshift_u8(fixed_uint8_t a, fixed_uint8_t b) {
@@ -75,10 +75,10 @@ fixed_uint8_t rshift_u8(fixed_uint8_t a, fixed_uint8_t b) {
// CHECK-LABEL: @lshift_i16(
|
||||
// CHECK-NEXT: entry:
|
||||
// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16(<vscale x 8 x i16> [[A_COERCE:%.*]], i64 0)
|
||||
// CHECK-NEXT: [[B:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16(<vscale x 8 x i16> [[B_COERCE:%.*]], i64 0)
|
||||
// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.vector.extract.v32i16.nxv8i16(<vscale x 8 x i16> [[A_COERCE:%.*]], i64 0)
|
||||
// CHECK-NEXT: [[B:%.*]] = call <32 x i16> @llvm.vector.extract.v32i16.nxv8i16(<vscale x 8 x i16> [[B_COERCE:%.*]], i64 0)
|
||||
// CHECK-NEXT: [[SHL:%.*]] = shl <32 x i16> [[A]], [[B]]
|
||||
// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 8 x i16> @llvm.experimental.vector.insert.nxv8i16.v32i16(<vscale x 8 x i16> undef, <32 x i16> [[SHL]], i64 0)
|
||||
// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 8 x i16> @llvm.vector.insert.nxv8i16.v32i16(<vscale x 8 x i16> undef, <32 x i16> [[SHL]], i64 0)
|
||||
// CHECK-NEXT: ret <vscale x 8 x i16> [[CASTSCALABLESVE]]
|
||||
//
|
||||
fixed_int16_t lshift_i16(fixed_int16_t a, fixed_int16_t b) {
@@ -87,10 +87,10 @@ fixed_int16_t lshift_i16(fixed_int16_t a, fixed_int16_t b) {
// CHECK-LABEL: @rshift_i16(
|
||||
// CHECK-NEXT: entry:
|
||||
// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16(<vscale x 8 x i16> [[A_COERCE:%.*]], i64 0)
|
||||
// CHECK-NEXT: [[B:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16(<vscale x 8 x i16> [[B_COERCE:%.*]], i64 0)
|
||||
// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.vector.extract.v32i16.nxv8i16(<vscale x 8 x i16> [[A_COERCE:%.*]], i64 0)
|
||||
// CHECK-NEXT: [[B:%.*]] = call <32 x i16> @llvm.vector.extract.v32i16.nxv8i16(<vscale x 8 x i16> [[B_COERCE:%.*]], i64 0)
|
||||
// CHECK-NEXT: [[SHR:%.*]] = ashr <32 x i16> [[A]], [[B]]
|
||||
// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 8 x i16> @llvm.experimental.vector.insert.nxv8i16.v32i16(<vscale x 8 x i16> undef, <32 x i16> [[SHR]], i64 0)
|
||||
// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 8 x i16> @llvm.vector.insert.nxv8i16.v32i16(<vscale x 8 x i16> undef, <32 x i16> [[SHR]], i64 0)
|
||||
// CHECK-NEXT: ret <vscale x 8 x i16> [[CASTSCALABLESVE]]
|
||||
//
|
||||
fixed_int16_t rshift_i16(fixed_int16_t a, fixed_int16_t b) {
@@ -99,10 +99,10 @@ fixed_int16_t rshift_i16(fixed_int16_t a, fixed_int16_t b) {
// CHECK-LABEL: @lshift_u16(
|
||||
// CHECK-NEXT: entry:
|
||||
// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16(<vscale x 8 x i16> [[A_COERCE:%.*]], i64 0)
|
||||
// CHECK-NEXT: [[B:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16(<vscale x 8 x i16> [[B_COERCE:%.*]], i64 0)
|
||||
// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.vector.extract.v32i16.nxv8i16(<vscale x 8 x i16> [[A_COERCE:%.*]], i64 0)
|
||||
// CHECK-NEXT: [[B:%.*]] = call <32 x i16> @llvm.vector.extract.v32i16.nxv8i16(<vscale x 8 x i16> [[B_COERCE:%.*]], i64 0)
|
||||
// CHECK-NEXT: [[SHL:%.*]] = shl <32 x i16> [[A]], [[B]]
|
||||
// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 8 x i16> @llvm.experimental.vector.insert.nxv8i16.v32i16(<vscale x 8 x i16> undef, <32 x i16> [[SHL]], i64 0)
|
||||
// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 8 x i16> @llvm.vector.insert.nxv8i16.v32i16(<vscale x 8 x i16> undef, <32 x i16> [[SHL]], i64 0)
|
||||
// CHECK-NEXT: ret <vscale x 8 x i16> [[CASTSCALABLESVE]]
|
||||
//
|
||||
fixed_uint16_t lshift_u16(fixed_uint16_t a, fixed_uint16_t b) {
@@ -111,10 +111,10 @@ fixed_uint16_t lshift_u16(fixed_uint16_t a, fixed_uint16_t b) {
// CHECK-LABEL: @rshift_u16(
|
||||
// CHECK-NEXT: entry:
|
||||
// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16(<vscale x 8 x i16> [[A_COERCE:%.*]], i64 0)
|
||||
// CHECK-NEXT: [[B:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16(<vscale x 8 x i16> [[B_COERCE:%.*]], i64 0)
|
||||
// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.vector.extract.v32i16.nxv8i16(<vscale x 8 x i16> [[A_COERCE:%.*]], i64 0)
|
||||
// CHECK-NEXT: [[B:%.*]] = call <32 x i16> @llvm.vector.extract.v32i16.nxv8i16(<vscale x 8 x i16> [[B_COERCE:%.*]], i64 0)
|
||||
// CHECK-NEXT: [[SHR:%.*]] = lshr <32 x i16> [[A]], [[B]]
|
||||
// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 8 x i16> @llvm.experimental.vector.insert.nxv8i16.v32i16(<vscale x 8 x i16> undef, <32 x i16> [[SHR]], i64 0)
|
||||
// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 8 x i16> @llvm.vector.insert.nxv8i16.v32i16(<vscale x 8 x i16> undef, <32 x i16> [[SHR]], i64 0)
|
||||
// CHECK-NEXT: ret <vscale x 8 x i16> [[CASTSCALABLESVE]]
|
||||
//
|
||||
fixed_uint16_t rshift_u16(fixed_uint16_t a, fixed_uint16_t b) {
@@ -123,10 +123,10 @@ fixed_uint16_t rshift_u16(fixed_uint16_t a, fixed_uint16_t b) {
// CHECK-LABEL: @lshift_i32(
|
||||
// CHECK-NEXT: entry:
|
||||
// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32(<vscale x 4 x i32> [[A_COERCE:%.*]], i64 0)
|
||||
// CHECK-NEXT: [[B:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32(<vscale x 4 x i32> [[B_COERCE:%.*]], i64 0)
|
||||
// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.vector.extract.v16i32.nxv4i32(<vscale x 4 x i32> [[A_COERCE:%.*]], i64 0)
|
||||
// CHECK-NEXT: [[B:%.*]] = call <16 x i32> @llvm.vector.extract.v16i32.nxv4i32(<vscale x 4 x i32> [[B_COERCE:%.*]], i64 0)
|
||||
// CHECK-NEXT: [[SHL:%.*]] = shl <16 x i32> [[A]], [[B]]
|
||||
// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vector.insert.nxv4i32.v16i32(<vscale x 4 x i32> undef, <16 x i32> [[SHL]], i64 0)
|
||||
// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 4 x i32> @llvm.vector.insert.nxv4i32.v16i32(<vscale x 4 x i32> undef, <16 x i32> [[SHL]], i64 0)
|
||||
// CHECK-NEXT: ret <vscale x 4 x i32> [[CASTSCALABLESVE]]
|
||||
//
|
||||
fixed_int32_t lshift_i32(fixed_int32_t a, fixed_int32_t b) {
@@ -135,10 +135,10 @@ fixed_int32_t lshift_i32(fixed_int32_t a, fixed_int32_t b) {
// CHECK-LABEL: @rshift_i32(
|
||||
// CHECK-NEXT: entry:
|
||||
// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32(<vscale x 4 x i32> [[A_COERCE:%.*]], i64 0)
|
||||
// CHECK-NEXT: [[B:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32(<vscale x 4 x i32> [[B_COERCE:%.*]], i64 0)
|
||||
// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.vector.extract.v16i32.nxv4i32(<vscale x 4 x i32> [[A_COERCE:%.*]], i64 0)
|
||||
// CHECK-NEXT: [[B:%.*]] = call <16 x i32> @llvm.vector.extract.v16i32.nxv4i32(<vscale x 4 x i32> [[B_COERCE:%.*]], i64 0)
|
||||
// CHECK-NEXT: [[SHR:%.*]] = ashr <16 x i32> [[A]], [[B]]
|
||||
// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vector.insert.nxv4i32.v16i32(<vscale x 4 x i32> undef, <16 x i32> [[SHR]], i64 0)
|
||||
// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 4 x i32> @llvm.vector.insert.nxv4i32.v16i32(<vscale x 4 x i32> undef, <16 x i32> [[SHR]], i64 0)
|
||||
// CHECK-NEXT: ret <vscale x 4 x i32> [[CASTSCALABLESVE]]
|
||||
//
|
||||
fixed_int32_t rshift_i32(fixed_int32_t a, fixed_int32_t b) {
@@ -147,10 +147,10 @@ fixed_int32_t rshift_i32(fixed_int32_t a, fixed_int32_t b) {
// CHECK-LABEL: @lshift_u32(
|
||||
// CHECK-NEXT: entry:
|
||||
// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32(<vscale x 4 x i32> [[A_COERCE:%.*]], i64 0)
|
||||
// CHECK-NEXT: [[B:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32(<vscale x 4 x i32> [[B_COERCE:%.*]], i64 0)
|
||||
// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.vector.extract.v16i32.nxv4i32(<vscale x 4 x i32> [[A_COERCE:%.*]], i64 0)
|
||||
// CHECK-NEXT: [[B:%.*]] = call <16 x i32> @llvm.vector.extract.v16i32.nxv4i32(<vscale x 4 x i32> [[B_COERCE:%.*]], i64 0)
|
||||
// CHECK-NEXT: [[SHL:%.*]] = shl <16 x i32> [[A]], [[B]]
|
||||
// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vector.insert.nxv4i32.v16i32(<vscale x 4 x i32> undef, <16 x i32> [[SHL]], i64 0)
|
||||
// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 4 x i32> @llvm.vector.insert.nxv4i32.v16i32(<vscale x 4 x i32> undef, <16 x i32> [[SHL]], i64 0)
|
||||
// CHECK-NEXT: ret <vscale x 4 x i32> [[CASTSCALABLESVE]]
|
||||
//
|
||||
fixed_uint32_t lshift_u32(fixed_uint32_t a, fixed_uint32_t b) {
@@ -159,10 +159,10 @@ fixed_uint32_t lshift_u32(fixed_uint32_t a, fixed_uint32_t b) {
// CHECK-LABEL: @rshift_u32(
|
||||
// CHECK-NEXT: entry:
|
||||
// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32(<vscale x 4 x i32> [[A_COERCE:%.*]], i64 0)
|
||||
// CHECK-NEXT: [[B:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32(<vscale x 4 x i32> [[B_COERCE:%.*]], i64 0)
|
||||
// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.vector.extract.v16i32.nxv4i32(<vscale x 4 x i32> [[A_COERCE:%.*]], i64 0)
|
||||
// CHECK-NEXT: [[B:%.*]] = call <16 x i32> @llvm.vector.extract.v16i32.nxv4i32(<vscale x 4 x i32> [[B_COERCE:%.*]], i64 0)
|
||||
// CHECK-NEXT: [[SHR:%.*]] = lshr <16 x i32> [[A]], [[B]]
|
||||
// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vector.insert.nxv4i32.v16i32(<vscale x 4 x i32> undef, <16 x i32> [[SHR]], i64 0)
|
||||
// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 4 x i32> @llvm.vector.insert.nxv4i32.v16i32(<vscale x 4 x i32> undef, <16 x i32> [[SHR]], i64 0)
|
||||
// CHECK-NEXT: ret <vscale x 4 x i32> [[CASTSCALABLESVE]]
|
||||
//
|
||||
fixed_uint32_t rshift_u32(fixed_uint32_t a, fixed_uint32_t b) {
@@ -171,10 +171,10 @@ fixed_uint32_t rshift_u32(fixed_uint32_t a, fixed_uint32_t b) {
// CHECK-LABEL: @lshift_i64(
|
||||
// CHECK-NEXT: entry:
|
||||
// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64(<vscale x 2 x i64> [[A_COERCE:%.*]], i64 0)
|
||||
// CHECK-NEXT: [[B:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64(<vscale x 2 x i64> [[B_COERCE:%.*]], i64 0)
|
||||
// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.vector.extract.v8i64.nxv2i64(<vscale x 2 x i64> [[A_COERCE:%.*]], i64 0)
|
||||
// CHECK-NEXT: [[B:%.*]] = call <8 x i64> @llvm.vector.extract.v8i64.nxv2i64(<vscale x 2 x i64> [[B_COERCE:%.*]], i64 0)
|
||||
// CHECK-NEXT: [[SHL:%.*]] = shl <8 x i64> [[A]], [[B]]
|
||||
// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 2 x i64> @llvm.experimental.vector.insert.nxv2i64.v8i64(<vscale x 2 x i64> undef, <8 x i64> [[SHL]], i64 0)
|
||||
// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 2 x i64> @llvm.vector.insert.nxv2i64.v8i64(<vscale x 2 x i64> undef, <8 x i64> [[SHL]], i64 0)
|
||||
// CHECK-NEXT: ret <vscale x 2 x i64> [[CASTSCALABLESVE]]
|
||||
//
|
||||
fixed_int64_t lshift_i64(fixed_int64_t a, fixed_int64_t b) {
@@ -183,10 +183,10 @@ fixed_int64_t lshift_i64(fixed_int64_t a, fixed_int64_t b) {
// CHECK-LABEL: @rshift_i64(
|
||||
// CHECK-NEXT: entry:
|
||||
// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64(<vscale x 2 x i64> [[A_COERCE:%.*]], i64 0)
|
||||
// CHECK-NEXT: [[B:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64(<vscale x 2 x i64> [[B_COERCE:%.*]], i64 0)
|
||||
// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.vector.extract.v8i64.nxv2i64(<vscale x 2 x i64> [[A_COERCE:%.*]], i64 0)
|
||||
// CHECK-NEXT: [[B:%.*]] = call <8 x i64> @llvm.vector.extract.v8i64.nxv2i64(<vscale x 2 x i64> [[B_COERCE:%.*]], i64 0)
|
||||
// CHECK-NEXT: [[SHR:%.*]] = ashr <8 x i64> [[A]], [[B]]
|
||||
// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 2 x i64> @llvm.experimental.vector.insert.nxv2i64.v8i64(<vscale x 2 x i64> undef, <8 x i64> [[SHR]], i64 0)
|
||||
// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 2 x i64> @llvm.vector.insert.nxv2i64.v8i64(<vscale x 2 x i64> undef, <8 x i64> [[SHR]], i64 0)
|
||||
// CHECK-NEXT: ret <vscale x 2 x i64> [[CASTSCALABLESVE]]
|
||||
//
|
||||
fixed_int64_t rshift_i64(fixed_int64_t a, fixed_int64_t b) {
@@ -195,10 +195,10 @@ fixed_int64_t rshift_i64(fixed_int64_t a, fixed_int64_t b) {
// CHECK-LABEL: @lshift_u64(
|
||||
// CHECK-NEXT: entry:
|
||||
// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64(<vscale x 2 x i64> [[A_COERCE:%.*]], i64 0)
|
||||
// CHECK-NEXT: [[B:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64(<vscale x 2 x i64> [[B_COERCE:%.*]], i64 0)
|
||||
// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.vector.extract.v8i64.nxv2i64(<vscale x 2 x i64> [[A_COERCE:%.*]], i64 0)
|
||||
// CHECK-NEXT: [[B:%.*]] = call <8 x i64> @llvm.vector.extract.v8i64.nxv2i64(<vscale x 2 x i64> [[B_COERCE:%.*]], i64 0)
|
||||
// CHECK-NEXT: [[SHL:%.*]] = shl <8 x i64> [[A]], [[B]]
|
||||
// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 2 x i64> @llvm.experimental.vector.insert.nxv2i64.v8i64(<vscale x 2 x i64> undef, <8 x i64> [[SHL]], i64 0)
|
||||
// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 2 x i64> @llvm.vector.insert.nxv2i64.v8i64(<vscale x 2 x i64> undef, <8 x i64> [[SHL]], i64 0)
|
||||
// CHECK-NEXT: ret <vscale x 2 x i64> [[CASTSCALABLESVE]]
|
||||
//
|
||||
fixed_uint64_t lshift_u64(fixed_uint64_t a, fixed_uint64_t b) {
@@ -207,10 +207,10 @@ fixed_uint64_t lshift_u64(fixed_uint64_t a, fixed_uint64_t b) {
// CHECK-LABEL: @rshift_u64(
|
||||
// CHECK-NEXT: entry:
|
||||
// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64(<vscale x 2 x i64> [[A_COERCE:%.*]], i64 0)
|
||||
// CHECK-NEXT: [[B:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64(<vscale x 2 x i64> [[B_COERCE:%.*]], i64 0)
|
||||
// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.vector.extract.v8i64.nxv2i64(<vscale x 2 x i64> [[A_COERCE:%.*]], i64 0)
|
||||
// CHECK-NEXT: [[B:%.*]] = call <8 x i64> @llvm.vector.extract.v8i64.nxv2i64(<vscale x 2 x i64> [[B_COERCE:%.*]], i64 0)
|
||||
// CHECK-NEXT: [[SHR:%.*]] = lshr <8 x i64> [[A]], [[B]]
|
||||
// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 2 x i64> @llvm.experimental.vector.insert.nxv2i64.v8i64(<vscale x 2 x i64> undef, <8 x i64> [[SHR]], i64 0)
|
||||
// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 2 x i64> @llvm.vector.insert.nxv2i64.v8i64(<vscale x 2 x i64> undef, <8 x i64> [[SHR]], i64 0)
|
||||
// CHECK-NEXT: ret <vscale x 2 x i64> [[CASTSCALABLESVE]]
|
||||
//
|
||||
fixed_uint64_t rshift_u64(fixed_uint64_t a, fixed_uint64_t b) {
@@ -219,13 +219,13 @@ fixed_uint64_t rshift_u64(fixed_uint64_t a, fixed_uint64_t b) {
// CHECK-LABEL: @lshift_i8_rsplat(
|
||||
// CHECK-NEXT: entry:
|
||||
// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8(<vscale x 16 x i8> [[A_COERCE:%.*]], i64 0)
|
||||
// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.vector.extract.v64i8.nxv16i8(<vscale x 16 x i8> [[A_COERCE:%.*]], i64 0)
|
||||
// CHECK-NEXT: [[CONV:%.*]] = sext i8 [[B:%.*]] to i32
|
||||
// CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <64 x i32> poison, i32 [[CONV]], i32 0
|
||||
// CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <64 x i32> [[SPLAT_SPLATINSERT]], <64 x i32> poison, <64 x i32> zeroinitializer
|
||||
// CHECK-NEXT: [[SH_PROM:%.*]] = trunc <64 x i32> [[SPLAT_SPLAT]] to <64 x i8>
|
||||
// CHECK-NEXT: [[SHL:%.*]] = shl <64 x i8> [[A]], [[SH_PROM]]
|
||||
// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 16 x i8> @llvm.experimental.vector.insert.nxv16i8.v64i8(<vscale x 16 x i8> undef, <64 x i8> [[SHL]], i64 0)
|
||||
// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 16 x i8> @llvm.vector.insert.nxv16i8.v64i8(<vscale x 16 x i8> undef, <64 x i8> [[SHL]], i64 0)
|
||||
// CHECK-NEXT: ret <vscale x 16 x i8> [[CASTSCALABLESVE]]
|
||||
//
|
||||
fixed_int8_t lshift_i8_rsplat(fixed_int8_t a, int8_t b) {
@@ -234,11 +234,11 @@ fixed_int8_t lshift_i8_rsplat(fixed_int8_t a, int8_t b) {
// CHECK-LABEL: @lshift_i8_lsplat(
|
||||
// CHECK-NEXT: entry:
|
||||
// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8(<vscale x 16 x i8> [[A_COERCE:%.*]], i64 0)
|
||||
// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.vector.extract.v64i8.nxv16i8(<vscale x 16 x i8> [[A_COERCE:%.*]], i64 0)
|
||||
// CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <64 x i8> poison, i8 [[B:%.*]], i32 0
|
||||
// CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <64 x i8> [[SPLAT_SPLATINSERT]], <64 x i8> poison, <64 x i32> zeroinitializer
|
||||
// CHECK-NEXT: [[SHL:%.*]] = shl <64 x i8> [[SPLAT_SPLAT]], [[A]]
|
||||
// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 16 x i8> @llvm.experimental.vector.insert.nxv16i8.v64i8(<vscale x 16 x i8> undef, <64 x i8> [[SHL]], i64 0)
|
||||
// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 16 x i8> @llvm.vector.insert.nxv16i8.v64i8(<vscale x 16 x i8> undef, <64 x i8> [[SHL]], i64 0)
|
||||
// CHECK-NEXT: ret <vscale x 16 x i8> [[CASTSCALABLESVE]]
|
||||
//
|
||||
fixed_int8_t lshift_i8_lsplat(fixed_int8_t a, int8_t b) {
@@ -247,13 +247,13 @@ fixed_int8_t lshift_i8_lsplat(fixed_int8_t a, int8_t b) {
// CHECK-LABEL: @rshift_i8_rsplat(
|
||||
// CHECK-NEXT: entry:
|
||||
// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8(<vscale x 16 x i8> [[A_COERCE:%.*]], i64 0)
|
||||
// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.vector.extract.v64i8.nxv16i8(<vscale x 16 x i8> [[A_COERCE:%.*]], i64 0)
|
||||
// CHECK-NEXT: [[CONV:%.*]] = sext i8 [[B:%.*]] to i32
|
||||
// CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <64 x i32> poison, i32 [[CONV]], i32 0
|
||||
// CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <64 x i32> [[SPLAT_SPLATINSERT]], <64 x i32> poison, <64 x i32> zeroinitializer
|
||||
// CHECK-NEXT: [[SH_PROM:%.*]] = trunc <64 x i32> [[SPLAT_SPLAT]] to <64 x i8>
|
||||
// CHECK-NEXT: [[SHR:%.*]] = ashr <64 x i8> [[A]], [[SH_PROM]]
|
||||
// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 16 x i8> @llvm.experimental.vector.insert.nxv16i8.v64i8(<vscale x 16 x i8> undef, <64 x i8> [[SHR]], i64 0)
|
||||
// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 16 x i8> @llvm.vector.insert.nxv16i8.v64i8(<vscale x 16 x i8> undef, <64 x i8> [[SHR]], i64 0)
|
||||
// CHECK-NEXT: ret <vscale x 16 x i8> [[CASTSCALABLESVE]]
|
||||
//
|
||||
fixed_int8_t rshift_i8_rsplat(fixed_int8_t a, int8_t b) {
@@ -262,11 +262,11 @@ fixed_int8_t rshift_i8_rsplat(fixed_int8_t a, int8_t b) {
// CHECK-LABEL: @rshift_i8_lsplat(
|
||||
// CHECK-NEXT: entry:
|
||||
// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8(<vscale x 16 x i8> [[A_COERCE:%.*]], i64 0)
|
||||
// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.vector.extract.v64i8.nxv16i8(<vscale x 16 x i8> [[A_COERCE:%.*]], i64 0)
|
||||
// CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <64 x i8> poison, i8 [[B:%.*]], i32 0
|
||||
// CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <64 x i8> [[SPLAT_SPLATINSERT]], <64 x i8> poison, <64 x i32> zeroinitializer
|
||||
// CHECK-NEXT: [[SHR:%.*]] = ashr <64 x i8> [[SPLAT_SPLAT]], [[A]]
|
||||
// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 16 x i8> @llvm.experimental.vector.insert.nxv16i8.v64i8(<vscale x 16 x i8> undef, <64 x i8> [[SHR]], i64 0)
|
||||
// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 16 x i8> @llvm.vector.insert.nxv16i8.v64i8(<vscale x 16 x i8> undef, <64 x i8> [[SHR]], i64 0)
|
||||
// CHECK-NEXT: ret <vscale x 16 x i8> [[CASTSCALABLESVE]]
|
||||
//
|
||||
fixed_int8_t rshift_i8_lsplat(fixed_int8_t a, int8_t b) {
@@ -275,13 +275,13 @@ fixed_int8_t rshift_i8_lsplat(fixed_int8_t a, int8_t b) {
// CHECK-LABEL: @lshift_u8_rsplat(
|
||||
// CHECK-NEXT: entry:
|
||||
// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8(<vscale x 16 x i8> [[A_COERCE:%.*]], i64 0)
|
||||
// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.vector.extract.v64i8.nxv16i8(<vscale x 16 x i8> [[A_COERCE:%.*]], i64 0)
|
||||
// CHECK-NEXT: [[CONV:%.*]] = zext i8 [[B:%.*]] to i32
|
||||
// CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <64 x i32> poison, i32 [[CONV]], i32 0
|
||||
// CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <64 x i32> [[SPLAT_SPLATINSERT]], <64 x i32> poison, <64 x i32> zeroinitializer
|
||||
// CHECK-NEXT: [[SH_PROM:%.*]] = trunc <64 x i32> [[SPLAT_SPLAT]] to <64 x i8>
|
||||
// CHECK-NEXT: [[SHL:%.*]] = shl <64 x i8> [[A]], [[SH_PROM]]
|
||||
// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 16 x i8> @llvm.experimental.vector.insert.nxv16i8.v64i8(<vscale x 16 x i8> undef, <64 x i8> [[SHL]], i64 0)
|
||||
// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 16 x i8> @llvm.vector.insert.nxv16i8.v64i8(<vscale x 16 x i8> undef, <64 x i8> [[SHL]], i64 0)
|
||||
// CHECK-NEXT: ret <vscale x 16 x i8> [[CASTSCALABLESVE]]
|
||||
//
|
||||
fixed_uint8_t lshift_u8_rsplat(fixed_uint8_t a, uint8_t b) {
@@ -290,11 +290,11 @@ fixed_uint8_t lshift_u8_rsplat(fixed_uint8_t a, uint8_t b) {
// CHECK-LABEL: @lshift_u8_lsplat(
|
||||
// CHECK-NEXT: entry:
|
||||
// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8(<vscale x 16 x i8> [[A_COERCE:%.*]], i64 0)
|
||||
// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.vector.extract.v64i8.nxv16i8(<vscale x 16 x i8> [[A_COERCE:%.*]], i64 0)
|
||||
// CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <64 x i8> poison, i8 [[B:%.*]], i32 0
|
||||
// CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <64 x i8> [[SPLAT_SPLATINSERT]], <64 x i8> poison, <64 x i32> zeroinitializer
|
||||
// CHECK-NEXT: [[SHL:%.*]] = shl <64 x i8> [[SPLAT_SPLAT]], [[A]]
|
||||
// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 16 x i8> @llvm.experimental.vector.insert.nxv16i8.v64i8(<vscale x 16 x i8> undef, <64 x i8> [[SHL]], i64 0)
|
||||
// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 16 x i8> @llvm.vector.insert.nxv16i8.v64i8(<vscale x 16 x i8> undef, <64 x i8> [[SHL]], i64 0)
|
||||
// CHECK-NEXT: ret <vscale x 16 x i8> [[CASTSCALABLESVE]]
|
||||
//
|
||||
fixed_uint8_t lshift_u8_lsplat(fixed_uint8_t a, uint8_t b) {
@@ -303,13 +303,13 @@ fixed_uint8_t lshift_u8_lsplat(fixed_uint8_t a, uint8_t b) {
// CHECK-LABEL: @rshift_u8_rsplat(
|
||||
// CHECK-NEXT: entry:
|
||||
// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8(<vscale x 16 x i8> [[A_COERCE:%.*]], i64 0)
|
||||
// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.vector.extract.v64i8.nxv16i8(<vscale x 16 x i8> [[A_COERCE:%.*]], i64 0)
|
||||
// CHECK-NEXT: [[CONV:%.*]] = zext i8 [[B:%.*]] to i32
|
||||
// CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <64 x i32> poison, i32 [[CONV]], i32 0
|
||||
// CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <64 x i32> [[SPLAT_SPLATINSERT]], <64 x i32> poison, <64 x i32> zeroinitializer
|
||||
// CHECK-NEXT: [[SH_PROM:%.*]] = trunc <64 x i32> [[SPLAT_SPLAT]] to <64 x i8>
|
||||
// CHECK-NEXT: [[SHR:%.*]] = lshr <64 x i8> [[A]], [[SH_PROM]]
|
||||
// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 16 x i8> @llvm.experimental.vector.insert.nxv16i8.v64i8(<vscale x 16 x i8> undef, <64 x i8> [[SHR]], i64 0)
|
||||
// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 16 x i8> @llvm.vector.insert.nxv16i8.v64i8(<vscale x 16 x i8> undef, <64 x i8> [[SHR]], i64 0)
|
||||
// CHECK-NEXT: ret <vscale x 16 x i8> [[CASTSCALABLESVE]]
|
||||
//
|
||||
fixed_uint8_t rshift_u8_rsplat(fixed_uint8_t a, uint8_t b) {
|
||||
|
@ -318,11 +318,11 @@ fixed_uint8_t rshift_u8_rsplat(fixed_uint8_t a, uint8_t b) {
|
|||
|
||||
// CHECK-LABEL: @rshift_u8_lsplat(
|
||||
// CHECK-NEXT: entry:
|
||||
// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8(<vscale x 16 x i8> [[A_COERCE:%.*]], i64 0)
|
||||
// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.vector.extract.v64i8.nxv16i8(<vscale x 16 x i8> [[A_COERCE:%.*]], i64 0)
|
||||
// CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <64 x i8> poison, i8 [[B:%.*]], i32 0
|
||||
// CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <64 x i8> [[SPLAT_SPLATINSERT]], <64 x i8> poison, <64 x i32> zeroinitializer
|
||||
// CHECK-NEXT: [[SHR:%.*]] = lshr <64 x i8> [[SPLAT_SPLAT]], [[A]]
|
||||
// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 16 x i8> @llvm.experimental.vector.insert.nxv16i8.v64i8(<vscale x 16 x i8> undef, <64 x i8> [[SHR]], i64 0)
|
||||
// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 16 x i8> @llvm.vector.insert.nxv16i8.v64i8(<vscale x 16 x i8> undef, <64 x i8> [[SHR]], i64 0)
|
||||
// CHECK-NEXT: ret <vscale x 16 x i8> [[CASTSCALABLESVE]]
|
||||
//
|
||||
fixed_uint8_t rshift_u8_lsplat(fixed_uint8_t a, uint8_t b) {
|
||||
|
@ -331,13 +331,13 @@ fixed_uint8_t rshift_u8_lsplat(fixed_uint8_t a, uint8_t b) {
|
|||
|
||||
// CHECK-LABEL: @lshift_i16_rsplat(
|
||||
// CHECK-NEXT: entry:
|
||||
// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16(<vscale x 8 x i16> [[A_COERCE:%.*]], i64 0)
|
||||
// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.vector.extract.v32i16.nxv8i16(<vscale x 8 x i16> [[A_COERCE:%.*]], i64 0)
|
||||
// CHECK-NEXT: [[CONV:%.*]] = sext i16 [[B:%.*]] to i32
|
||||
// CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <32 x i32> poison, i32 [[CONV]], i32 0
|
||||
// CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <32 x i32> [[SPLAT_SPLATINSERT]], <32 x i32> poison, <32 x i32> zeroinitializer
|
||||
// CHECK-NEXT: [[SH_PROM:%.*]] = trunc <32 x i32> [[SPLAT_SPLAT]] to <32 x i16>
|
||||
// CHECK-NEXT: [[SHL:%.*]] = shl <32 x i16> [[A]], [[SH_PROM]]
|
||||
// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 8 x i16> @llvm.experimental.vector.insert.nxv8i16.v32i16(<vscale x 8 x i16> undef, <32 x i16> [[SHL]], i64 0)
|
||||
// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 8 x i16> @llvm.vector.insert.nxv8i16.v32i16(<vscale x 8 x i16> undef, <32 x i16> [[SHL]], i64 0)
|
||||
// CHECK-NEXT: ret <vscale x 8 x i16> [[CASTSCALABLESVE]]
|
||||
//
|
||||
fixed_int16_t lshift_i16_rsplat(fixed_int16_t a, int16_t b) {
|
||||
|
@ -346,11 +346,11 @@ fixed_int16_t lshift_i16_rsplat(fixed_int16_t a, int16_t b) {
|
|||
|
||||
// CHECK-LABEL: @lshift_i16_lsplat(
|
||||
// CHECK-NEXT: entry:
|
||||
// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16(<vscale x 8 x i16> [[A_COERCE:%.*]], i64 0)
|
||||
// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.vector.extract.v32i16.nxv8i16(<vscale x 8 x i16> [[A_COERCE:%.*]], i64 0)
|
||||
// CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <32 x i16> poison, i16 [[B:%.*]], i32 0
|
||||
// CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <32 x i16> [[SPLAT_SPLATINSERT]], <32 x i16> poison, <32 x i32> zeroinitializer
|
||||
// CHECK-NEXT: [[SHL:%.*]] = shl <32 x i16> [[SPLAT_SPLAT]], [[A]]
|
||||
// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 8 x i16> @llvm.experimental.vector.insert.nxv8i16.v32i16(<vscale x 8 x i16> undef, <32 x i16> [[SHL]], i64 0)
|
||||
// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 8 x i16> @llvm.vector.insert.nxv8i16.v32i16(<vscale x 8 x i16> undef, <32 x i16> [[SHL]], i64 0)
|
||||
// CHECK-NEXT: ret <vscale x 8 x i16> [[CASTSCALABLESVE]]
|
||||
//
|
||||
fixed_int16_t lshift_i16_lsplat(fixed_int16_t a, int16_t b) {
|
||||
|
@ -359,13 +359,13 @@ fixed_int16_t lshift_i16_lsplat(fixed_int16_t a, int16_t b) {
|
|||
|
||||
// CHECK-LABEL: @rshift_i16_rsplat(
|
||||
// CHECK-NEXT: entry:
|
||||
// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16(<vscale x 8 x i16> [[A_COERCE:%.*]], i64 0)
|
||||
// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.vector.extract.v32i16.nxv8i16(<vscale x 8 x i16> [[A_COERCE:%.*]], i64 0)
|
||||
// CHECK-NEXT: [[CONV:%.*]] = sext i16 [[B:%.*]] to i32
|
||||
// CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <32 x i32> poison, i32 [[CONV]], i32 0
|
||||
// CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <32 x i32> [[SPLAT_SPLATINSERT]], <32 x i32> poison, <32 x i32> zeroinitializer
|
||||
// CHECK-NEXT: [[SH_PROM:%.*]] = trunc <32 x i32> [[SPLAT_SPLAT]] to <32 x i16>
|
||||
// CHECK-NEXT: [[SHR:%.*]] = ashr <32 x i16> [[A]], [[SH_PROM]]
|
||||
// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 8 x i16> @llvm.experimental.vector.insert.nxv8i16.v32i16(<vscale x 8 x i16> undef, <32 x i16> [[SHR]], i64 0)
|
||||
// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 8 x i16> @llvm.vector.insert.nxv8i16.v32i16(<vscale x 8 x i16> undef, <32 x i16> [[SHR]], i64 0)
|
||||
// CHECK-NEXT: ret <vscale x 8 x i16> [[CASTSCALABLESVE]]
|
||||
//
|
||||
fixed_int16_t rshift_i16_rsplat(fixed_int16_t a, int16_t b) {
|
||||
|
@ -374,11 +374,11 @@ fixed_int16_t rshift_i16_rsplat(fixed_int16_t a, int16_t b) {
|
|||
|
||||
// CHECK-LABEL: @rshift_i16_lsplat(
|
||||
// CHECK-NEXT: entry:
|
||||
// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16(<vscale x 8 x i16> [[A_COERCE:%.*]], i64 0)
|
||||
// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.vector.extract.v32i16.nxv8i16(<vscale x 8 x i16> [[A_COERCE:%.*]], i64 0)
|
||||
// CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <32 x i16> poison, i16 [[B:%.*]], i32 0
|
||||
// CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <32 x i16> [[SPLAT_SPLATINSERT]], <32 x i16> poison, <32 x i32> zeroinitializer
|
||||
// CHECK-NEXT: [[SHR:%.*]] = ashr <32 x i16> [[SPLAT_SPLAT]], [[A]]
|
||||
// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 8 x i16> @llvm.experimental.vector.insert.nxv8i16.v32i16(<vscale x 8 x i16> undef, <32 x i16> [[SHR]], i64 0)
|
||||
// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 8 x i16> @llvm.vector.insert.nxv8i16.v32i16(<vscale x 8 x i16> undef, <32 x i16> [[SHR]], i64 0)
|
||||
// CHECK-NEXT: ret <vscale x 8 x i16> [[CASTSCALABLESVE]]
|
||||
//
|
||||
fixed_int16_t rshift_i16_lsplat(fixed_int16_t a, int16_t b) {
|
||||
|
@ -387,13 +387,13 @@ fixed_int16_t rshift_i16_lsplat(fixed_int16_t a, int16_t b) {
|
|||
|
||||
// CHECK-LABEL: @lshift_u16_rsplat(
|
||||
// CHECK-NEXT: entry:
|
||||
// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16(<vscale x 8 x i16> [[A_COERCE:%.*]], i64 0)
|
||||
// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.vector.extract.v32i16.nxv8i16(<vscale x 8 x i16> [[A_COERCE:%.*]], i64 0)
|
||||
// CHECK-NEXT: [[CONV:%.*]] = zext i16 [[B:%.*]] to i32
|
||||
// CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <32 x i32> poison, i32 [[CONV]], i32 0
|
||||
// CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <32 x i32> [[SPLAT_SPLATINSERT]], <32 x i32> poison, <32 x i32> zeroinitializer
|
||||
// CHECK-NEXT: [[SH_PROM:%.*]] = trunc <32 x i32> [[SPLAT_SPLAT]] to <32 x i16>
|
||||
// CHECK-NEXT: [[SHL:%.*]] = shl <32 x i16> [[A]], [[SH_PROM]]
|
||||
// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 8 x i16> @llvm.experimental.vector.insert.nxv8i16.v32i16(<vscale x 8 x i16> undef, <32 x i16> [[SHL]], i64 0)
|
||||
// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 8 x i16> @llvm.vector.insert.nxv8i16.v32i16(<vscale x 8 x i16> undef, <32 x i16> [[SHL]], i64 0)
|
||||
// CHECK-NEXT: ret <vscale x 8 x i16> [[CASTSCALABLESVE]]
|
||||
//
|
||||
fixed_uint16_t lshift_u16_rsplat(fixed_uint16_t a, uint16_t b) {
|
||||
|
@ -402,11 +402,11 @@ fixed_uint16_t lshift_u16_rsplat(fixed_uint16_t a, uint16_t b) {
|
|||
|
||||
// CHECK-LABEL: @lshift_u16_lsplat(
|
||||
// CHECK-NEXT: entry:
|
||||
// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16(<vscale x 8 x i16> [[A_COERCE:%.*]], i64 0)
|
||||
// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.vector.extract.v32i16.nxv8i16(<vscale x 8 x i16> [[A_COERCE:%.*]], i64 0)
|
||||
// CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <32 x i16> poison, i16 [[B:%.*]], i32 0
|
||||
// CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <32 x i16> [[SPLAT_SPLATINSERT]], <32 x i16> poison, <32 x i32> zeroinitializer
|
||||
// CHECK-NEXT: [[SHL:%.*]] = shl <32 x i16> [[SPLAT_SPLAT]], [[A]]
|
||||
// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 8 x i16> @llvm.experimental.vector.insert.nxv8i16.v32i16(<vscale x 8 x i16> undef, <32 x i16> [[SHL]], i64 0)
|
||||
// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 8 x i16> @llvm.vector.insert.nxv8i16.v32i16(<vscale x 8 x i16> undef, <32 x i16> [[SHL]], i64 0)
|
||||
// CHECK-NEXT: ret <vscale x 8 x i16> [[CASTSCALABLESVE]]
|
||||
//
|
||||
fixed_uint16_t lshift_u16_lsplat(fixed_uint16_t a, uint16_t b) {
|
||||
|
@ -415,13 +415,13 @@ fixed_uint16_t lshift_u16_lsplat(fixed_uint16_t a, uint16_t b) {
|
|||
|
||||
// CHECK-LABEL: @rshift_u16_rsplat(
|
||||
// CHECK-NEXT: entry:
|
||||
// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16(<vscale x 8 x i16> [[A_COERCE:%.*]], i64 0)
|
||||
// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.vector.extract.v32i16.nxv8i16(<vscale x 8 x i16> [[A_COERCE:%.*]], i64 0)
|
||||
// CHECK-NEXT: [[CONV:%.*]] = zext i16 [[B:%.*]] to i32
|
||||
// CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <32 x i32> poison, i32 [[CONV]], i32 0
|
||||
// CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <32 x i32> [[SPLAT_SPLATINSERT]], <32 x i32> poison, <32 x i32> zeroinitializer
|
||||
// CHECK-NEXT: [[SH_PROM:%.*]] = trunc <32 x i32> [[SPLAT_SPLAT]] to <32 x i16>
|
||||
// CHECK-NEXT: [[SHR:%.*]] = lshr <32 x i16> [[A]], [[SH_PROM]]
|
||||
// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 8 x i16> @llvm.experimental.vector.insert.nxv8i16.v32i16(<vscale x 8 x i16> undef, <32 x i16> [[SHR]], i64 0)
|
||||
// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 8 x i16> @llvm.vector.insert.nxv8i16.v32i16(<vscale x 8 x i16> undef, <32 x i16> [[SHR]], i64 0)
|
||||
// CHECK-NEXT: ret <vscale x 8 x i16> [[CASTSCALABLESVE]]
|
||||
//
|
||||
fixed_uint16_t rshift_u16_rsplat(fixed_uint16_t a, uint16_t b) {
|
||||
|
@ -430,11 +430,11 @@ fixed_uint16_t rshift_u16_rsplat(fixed_uint16_t a, uint16_t b) {
|
|||
|
||||
// CHECK-LABEL: @rshift_u16_lsplat(
|
||||
// CHECK-NEXT: entry:
|
||||
// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16(<vscale x 8 x i16> [[A_COERCE:%.*]], i64 0)
|
||||
// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.vector.extract.v32i16.nxv8i16(<vscale x 8 x i16> [[A_COERCE:%.*]], i64 0)
|
||||
// CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <32 x i16> poison, i16 [[B:%.*]], i32 0
|
||||
// CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <32 x i16> [[SPLAT_SPLATINSERT]], <32 x i16> poison, <32 x i32> zeroinitializer
|
||||
// CHECK-NEXT: [[SHR:%.*]] = lshr <32 x i16> [[SPLAT_SPLAT]], [[A]]
|
||||
// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 8 x i16> @llvm.experimental.vector.insert.nxv8i16.v32i16(<vscale x 8 x i16> undef, <32 x i16> [[SHR]], i64 0)
|
||||
// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 8 x i16> @llvm.vector.insert.nxv8i16.v32i16(<vscale x 8 x i16> undef, <32 x i16> [[SHR]], i64 0)
|
||||
// CHECK-NEXT: ret <vscale x 8 x i16> [[CASTSCALABLESVE]]
|
||||
//
|
||||
fixed_uint16_t rshift_u16_lsplat(fixed_uint16_t a, uint16_t b) {
|
||||
|
@ -443,11 +443,11 @@ fixed_uint16_t rshift_u16_lsplat(fixed_uint16_t a, uint16_t b) {
|
|||
|
||||
// CHECK-LABEL: @lshift_i32_rsplat(
|
||||
// CHECK-NEXT: entry:
|
||||
// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32(<vscale x 4 x i32> [[A_COERCE:%.*]], i64 0)
|
||||
// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.vector.extract.v16i32.nxv4i32(<vscale x 4 x i32> [[A_COERCE:%.*]], i64 0)
|
||||
// CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <16 x i32> poison, i32 [[B:%.*]], i32 0
|
||||
// CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <16 x i32> [[SPLAT_SPLATINSERT]], <16 x i32> poison, <16 x i32> zeroinitializer
|
||||
// CHECK-NEXT: [[SHL:%.*]] = shl <16 x i32> [[A]], [[SPLAT_SPLAT]]
|
||||
// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vector.insert.nxv4i32.v16i32(<vscale x 4 x i32> undef, <16 x i32> [[SHL]], i64 0)
|
||||
// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 4 x i32> @llvm.vector.insert.nxv4i32.v16i32(<vscale x 4 x i32> undef, <16 x i32> [[SHL]], i64 0)
|
||||
// CHECK-NEXT: ret <vscale x 4 x i32> [[CASTSCALABLESVE]]
|
||||
//
|
||||
fixed_int32_t lshift_i32_rsplat(fixed_int32_t a, int32_t b) {
|
||||
|
@ -456,11 +456,11 @@ fixed_int32_t lshift_i32_rsplat(fixed_int32_t a, int32_t b) {
|
|||
|
||||
// CHECK-LABEL: @lshift_i32_lsplat(
|
||||
// CHECK-NEXT: entry:
|
||||
// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32(<vscale x 4 x i32> [[A_COERCE:%.*]], i64 0)
|
||||
// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.vector.extract.v16i32.nxv4i32(<vscale x 4 x i32> [[A_COERCE:%.*]], i64 0)
|
||||
// CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <16 x i32> poison, i32 [[B:%.*]], i32 0
|
||||
// CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <16 x i32> [[SPLAT_SPLATINSERT]], <16 x i32> poison, <16 x i32> zeroinitializer
|
||||
// CHECK-NEXT: [[SHL:%.*]] = shl <16 x i32> [[SPLAT_SPLAT]], [[A]]
|
||||
// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vector.insert.nxv4i32.v16i32(<vscale x 4 x i32> undef, <16 x i32> [[SHL]], i64 0)
|
||||
// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 4 x i32> @llvm.vector.insert.nxv4i32.v16i32(<vscale x 4 x i32> undef, <16 x i32> [[SHL]], i64 0)
|
||||
// CHECK-NEXT: ret <vscale x 4 x i32> [[CASTSCALABLESVE]]
|
||||
//
|
||||
fixed_int32_t lshift_i32_lsplat(fixed_int32_t a, int32_t b) {
|
||||
|
@ -469,11 +469,11 @@ fixed_int32_t lshift_i32_lsplat(fixed_int32_t a, int32_t b) {
|
|||
|
||||
// CHECK-LABEL: @rshift_i32_rsplat(
|
||||
// CHECK-NEXT: entry:
|
||||
// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32(<vscale x 4 x i32> [[A_COERCE:%.*]], i64 0)
|
||||
// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.vector.extract.v16i32.nxv4i32(<vscale x 4 x i32> [[A_COERCE:%.*]], i64 0)
|
||||
// CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <16 x i32> poison, i32 [[B:%.*]], i32 0
|
||||
// CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <16 x i32> [[SPLAT_SPLATINSERT]], <16 x i32> poison, <16 x i32> zeroinitializer
|
||||
// CHECK-NEXT: [[SHR:%.*]] = ashr <16 x i32> [[A]], [[SPLAT_SPLAT]]
|
||||
// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vector.insert.nxv4i32.v16i32(<vscale x 4 x i32> undef, <16 x i32> [[SHR]], i64 0)
|
||||
// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 4 x i32> @llvm.vector.insert.nxv4i32.v16i32(<vscale x 4 x i32> undef, <16 x i32> [[SHR]], i64 0)
|
||||
// CHECK-NEXT: ret <vscale x 4 x i32> [[CASTSCALABLESVE]]
|
||||
//
|
||||
fixed_int32_t rshift_i32_rsplat(fixed_int32_t a, int32_t b) {
|
||||
|
@ -482,11 +482,11 @@ fixed_int32_t rshift_i32_rsplat(fixed_int32_t a, int32_t b) {
|
|||
|
||||
// CHECK-LABEL: @rshift_i32_lsplat(
|
||||
// CHECK-NEXT: entry:
|
||||
// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32(<vscale x 4 x i32> [[A_COERCE:%.*]], i64 0)
|
||||
// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.vector.extract.v16i32.nxv4i32(<vscale x 4 x i32> [[A_COERCE:%.*]], i64 0)
|
||||
// CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <16 x i32> poison, i32 [[B:%.*]], i32 0
|
||||
// CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <16 x i32> [[SPLAT_SPLATINSERT]], <16 x i32> poison, <16 x i32> zeroinitializer
|
||||
// CHECK-NEXT: [[SHR:%.*]] = ashr <16 x i32> [[SPLAT_SPLAT]], [[A]]
|
||||
// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vector.insert.nxv4i32.v16i32(<vscale x 4 x i32> undef, <16 x i32> [[SHR]], i64 0)
|
||||
// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 4 x i32> @llvm.vector.insert.nxv4i32.v16i32(<vscale x 4 x i32> undef, <16 x i32> [[SHR]], i64 0)
|
||||
// CHECK-NEXT: ret <vscale x 4 x i32> [[CASTSCALABLESVE]]
|
||||
//
|
||||
fixed_int32_t rshift_i32_lsplat(fixed_int32_t a, int32_t b) {
|
||||
|
@ -495,11 +495,11 @@ fixed_int32_t rshift_i32_lsplat(fixed_int32_t a, int32_t b) {
|
|||
|
||||
// CHECK-LABEL: @lshift_u32_rsplat(
|
||||
// CHECK-NEXT: entry:
|
||||
// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32(<vscale x 4 x i32> [[A_COERCE:%.*]], i64 0)
|
||||
// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.vector.extract.v16i32.nxv4i32(<vscale x 4 x i32> [[A_COERCE:%.*]], i64 0)
|
||||
// CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <16 x i32> poison, i32 [[B:%.*]], i32 0
|
||||
// CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <16 x i32> [[SPLAT_SPLATINSERT]], <16 x i32> poison, <16 x i32> zeroinitializer
|
||||
// CHECK-NEXT: [[SHL:%.*]] = shl <16 x i32> [[A]], [[SPLAT_SPLAT]]
|
||||
// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vector.insert.nxv4i32.v16i32(<vscale x 4 x i32> undef, <16 x i32> [[SHL]], i64 0)
|
||||
// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 4 x i32> @llvm.vector.insert.nxv4i32.v16i32(<vscale x 4 x i32> undef, <16 x i32> [[SHL]], i64 0)
|
||||
// CHECK-NEXT: ret <vscale x 4 x i32> [[CASTSCALABLESVE]]
|
||||
//
|
||||
fixed_uint32_t lshift_u32_rsplat(fixed_uint32_t a, uint32_t b) {
|
||||
|
@ -508,11 +508,11 @@ fixed_uint32_t lshift_u32_rsplat(fixed_uint32_t a, uint32_t b) {
|
|||
|
||||
// CHECK-LABEL: @lshift_u32_lsplat(
|
||||
// CHECK-NEXT: entry:
|
||||
// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32(<vscale x 4 x i32> [[A_COERCE:%.*]], i64 0)
|
||||
// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.vector.extract.v16i32.nxv4i32(<vscale x 4 x i32> [[A_COERCE:%.*]], i64 0)
|
||||
// CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <16 x i32> poison, i32 [[B:%.*]], i32 0
|
||||
// CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <16 x i32> [[SPLAT_SPLATINSERT]], <16 x i32> poison, <16 x i32> zeroinitializer
|
||||
// CHECK-NEXT: [[SHL:%.*]] = shl <16 x i32> [[SPLAT_SPLAT]], [[A]]
|
||||
// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vector.insert.nxv4i32.v16i32(<vscale x 4 x i32> undef, <16 x i32> [[SHL]], i64 0)
|
||||
// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 4 x i32> @llvm.vector.insert.nxv4i32.v16i32(<vscale x 4 x i32> undef, <16 x i32> [[SHL]], i64 0)
|
||||
// CHECK-NEXT: ret <vscale x 4 x i32> [[CASTSCALABLESVE]]
|
||||
//
|
||||
fixed_uint32_t lshift_u32_lsplat(fixed_uint32_t a, uint32_t b) {
|
||||
|
@ -521,11 +521,11 @@ fixed_uint32_t lshift_u32_lsplat(fixed_uint32_t a, uint32_t b) {
|
|||
|
||||
// CHECK-LABEL: @rshift_u32_rsplat(
|
||||
// CHECK-NEXT: entry:
|
||||
// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32(<vscale x 4 x i32> [[A_COERCE:%.*]], i64 0)
|
||||
// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.vector.extract.v16i32.nxv4i32(<vscale x 4 x i32> [[A_COERCE:%.*]], i64 0)
|
||||
// CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <16 x i32> poison, i32 [[B:%.*]], i32 0
|
||||
// CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <16 x i32> [[SPLAT_SPLATINSERT]], <16 x i32> poison, <16 x i32> zeroinitializer
|
||||
// CHECK-NEXT: [[SHR:%.*]] = lshr <16 x i32> [[A]], [[SPLAT_SPLAT]]
|
||||
// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vector.insert.nxv4i32.v16i32(<vscale x 4 x i32> undef, <16 x i32> [[SHR]], i64 0)
|
||||
// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 4 x i32> @llvm.vector.insert.nxv4i32.v16i32(<vscale x 4 x i32> undef, <16 x i32> [[SHR]], i64 0)
|
||||
// CHECK-NEXT: ret <vscale x 4 x i32> [[CASTSCALABLESVE]]
|
||||
//
|
||||
fixed_uint32_t rshift_u32_rsplat(fixed_uint32_t a, uint32_t b) {
|
||||
|
@ -534,11 +534,11 @@ fixed_uint32_t rshift_u32_rsplat(fixed_uint32_t a, uint32_t b) {
|
|||
|
||||
// CHECK-LABEL: @rshift_u32_lsplat(
|
||||
// CHECK-NEXT: entry:
|
||||
// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32(<vscale x 4 x i32> [[A_COERCE:%.*]], i64 0)
|
||||
// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.vector.extract.v16i32.nxv4i32(<vscale x 4 x i32> [[A_COERCE:%.*]], i64 0)
|
||||
// CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <16 x i32> poison, i32 [[B:%.*]], i32 0
|
||||
// CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <16 x i32> [[SPLAT_SPLATINSERT]], <16 x i32> poison, <16 x i32> zeroinitializer
|
||||
// CHECK-NEXT: [[SHR:%.*]] = lshr <16 x i32> [[SPLAT_SPLAT]], [[A]]
|
||||
// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vector.insert.nxv4i32.v16i32(<vscale x 4 x i32> undef, <16 x i32> [[SHR]], i64 0)
|
||||
// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 4 x i32> @llvm.vector.insert.nxv4i32.v16i32(<vscale x 4 x i32> undef, <16 x i32> [[SHR]], i64 0)
|
||||
// CHECK-NEXT: ret <vscale x 4 x i32> [[CASTSCALABLESVE]]
|
||||
//
|
||||
fixed_uint32_t rshift_u32_lsplat(fixed_uint32_t a, uint32_t b) {
|
||||
|
@ -547,11 +547,11 @@ fixed_uint32_t rshift_u32_lsplat(fixed_uint32_t a, uint32_t b) {
|
|||
|
||||
// CHECK-LABEL: @lshift_i64_rsplat(
|
||||
// CHECK-NEXT: entry:
|
||||
// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64(<vscale x 2 x i64> [[A_COERCE:%.*]], i64 0)
|
||||
// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.vector.extract.v8i64.nxv2i64(<vscale x 2 x i64> [[A_COERCE:%.*]], i64 0)
|
||||
// CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <8 x i64> poison, i64 [[B:%.*]], i32 0
|
||||
// CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <8 x i64> [[SPLAT_SPLATINSERT]], <8 x i64> poison, <8 x i32> zeroinitializer
|
||||
// CHECK-NEXT: [[SHL:%.*]] = shl <8 x i64> [[A]], [[SPLAT_SPLAT]]
|
||||
// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 2 x i64> @llvm.experimental.vector.insert.nxv2i64.v8i64(<vscale x 2 x i64> undef, <8 x i64> [[SHL]], i64 0)
|
||||
// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 2 x i64> @llvm.vector.insert.nxv2i64.v8i64(<vscale x 2 x i64> undef, <8 x i64> [[SHL]], i64 0)
|
||||
// CHECK-NEXT: ret <vscale x 2 x i64> [[CASTSCALABLESVE]]
|
||||
//
|
||||
fixed_int64_t lshift_i64_rsplat(fixed_int64_t a, int64_t b) {
|
||||
|
@ -560,11 +560,11 @@ fixed_int64_t lshift_i64_rsplat(fixed_int64_t a, int64_t b) {
|
|||
|
||||
// CHECK-LABEL: @lshift_i64_lsplat(
|
||||
// CHECK-NEXT: entry:
|
||||
// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64(<vscale x 2 x i64> [[A_COERCE:%.*]], i64 0)
|
||||
// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.vector.extract.v8i64.nxv2i64(<vscale x 2 x i64> [[A_COERCE:%.*]], i64 0)
|
||||
// CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <8 x i64> poison, i64 [[B:%.*]], i32 0
|
||||
// CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <8 x i64> [[SPLAT_SPLATINSERT]], <8 x i64> poison, <8 x i32> zeroinitializer
|
||||
// CHECK-NEXT: [[SHL:%.*]] = shl <8 x i64> [[SPLAT_SPLAT]], [[A]]
|
||||
// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 2 x i64> @llvm.experimental.vector.insert.nxv2i64.v8i64(<vscale x 2 x i64> undef, <8 x i64> [[SHL]], i64 0)
|
||||
// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 2 x i64> @llvm.vector.insert.nxv2i64.v8i64(<vscale x 2 x i64> undef, <8 x i64> [[SHL]], i64 0)
|
||||
// CHECK-NEXT: ret <vscale x 2 x i64> [[CASTSCALABLESVE]]
|
||||
//
|
||||
fixed_int64_t lshift_i64_lsplat(fixed_int64_t a, int64_t b) {
|
||||
|
@ -573,11 +573,11 @@ fixed_int64_t lshift_i64_lsplat(fixed_int64_t a, int64_t b) {
|
|||
|
||||
// CHECK-LABEL: @rshift_i64_rsplat(
|
||||
// CHECK-NEXT: entry:
|
||||
// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64(<vscale x 2 x i64> [[A_COERCE:%.*]], i64 0)
|
||||
// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.vector.extract.v8i64.nxv2i64(<vscale x 2 x i64> [[A_COERCE:%.*]], i64 0)
|
||||
// CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <8 x i64> poison, i64 [[B:%.*]], i32 0
|
||||
// CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <8 x i64> [[SPLAT_SPLATINSERT]], <8 x i64> poison, <8 x i32> zeroinitializer
|
||||
// CHECK-NEXT: [[SHR:%.*]] = ashr <8 x i64> [[A]], [[SPLAT_SPLAT]]
|
||||
// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 2 x i64> @llvm.experimental.vector.insert.nxv2i64.v8i64(<vscale x 2 x i64> undef, <8 x i64> [[SHR]], i64 0)
|
||||
// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 2 x i64> @llvm.vector.insert.nxv2i64.v8i64(<vscale x 2 x i64> undef, <8 x i64> [[SHR]], i64 0)
|
||||
// CHECK-NEXT: ret <vscale x 2 x i64> [[CASTSCALABLESVE]]
|
||||
//
|
||||
fixed_int64_t rshift_i64_rsplat(fixed_int64_t a, int64_t b) {
|
||||
|
@ -586,11 +586,11 @@ fixed_int64_t rshift_i64_rsplat(fixed_int64_t a, int64_t b) {
|
|||
|
||||
// CHECK-LABEL: @rshift_i64_lsplat(
|
||||
// CHECK-NEXT: entry:
|
||||
// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64(<vscale x 2 x i64> [[A_COERCE:%.*]], i64 0)
|
||||
// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.vector.extract.v8i64.nxv2i64(<vscale x 2 x i64> [[A_COERCE:%.*]], i64 0)
|
||||
// CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <8 x i64> poison, i64 [[B:%.*]], i32 0
|
||||
// CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <8 x i64> [[SPLAT_SPLATINSERT]], <8 x i64> poison, <8 x i32> zeroinitializer
|
||||
// CHECK-NEXT: [[SHR:%.*]] = ashr <8 x i64> [[SPLAT_SPLAT]], [[A]]
|
||||
// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 2 x i64> @llvm.experimental.vector.insert.nxv2i64.v8i64(<vscale x 2 x i64> undef, <8 x i64> [[SHR]], i64 0)
|
||||
// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 2 x i64> @llvm.vector.insert.nxv2i64.v8i64(<vscale x 2 x i64> undef, <8 x i64> [[SHR]], i64 0)
|
||||
// CHECK-NEXT: ret <vscale x 2 x i64> [[CASTSCALABLESVE]]
|
||||
//
|
||||
fixed_int64_t rshift_i64_lsplat(fixed_int64_t a, int64_t b) {
|
||||
|
@ -599,11 +599,11 @@ fixed_int64_t rshift_i64_lsplat(fixed_int64_t a, int64_t b) {
|
|||
|
||||
// CHECK-LABEL: @lshift_u64_rsplat(
|
||||
// CHECK-NEXT: entry:
|
||||
// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64(<vscale x 2 x i64> [[A_COERCE:%.*]], i64 0)
|
||||
// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.vector.extract.v8i64.nxv2i64(<vscale x 2 x i64> [[A_COERCE:%.*]], i64 0)
|
||||
// CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <8 x i64> poison, i64 [[B:%.*]], i32 0
|
||||
// CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <8 x i64> [[SPLAT_SPLATINSERT]], <8 x i64> poison, <8 x i32> zeroinitializer
|
||||
// CHECK-NEXT: [[SHL:%.*]] = shl <8 x i64> [[A]], [[SPLAT_SPLAT]]
|
||||
// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 2 x i64> @llvm.experimental.vector.insert.nxv2i64.v8i64(<vscale x 2 x i64> undef, <8 x i64> [[SHL]], i64 0)
|
||||
// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 2 x i64> @llvm.vector.insert.nxv2i64.v8i64(<vscale x 2 x i64> undef, <8 x i64> [[SHL]], i64 0)
|
||||
// CHECK-NEXT: ret <vscale x 2 x i64> [[CASTSCALABLESVE]]
|
||||
//
|
||||
fixed_uint64_t lshift_u64_rsplat(fixed_uint64_t a, uint64_t b) {
|
||||
|
@ -612,11 +612,11 @@ fixed_uint64_t lshift_u64_rsplat(fixed_uint64_t a, uint64_t b) {
|
|||
|
||||
// CHECK-LABEL: @lshift_u64_lsplat(
|
||||
// CHECK-NEXT: entry:
|
||||
// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64(<vscale x 2 x i64> [[A_COERCE:%.*]], i64 0)
|
||||
// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.vector.extract.v8i64.nxv2i64(<vscale x 2 x i64> [[A_COERCE:%.*]], i64 0)
|
||||
// CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <8 x i64> poison, i64 [[B:%.*]], i32 0
|
||||
// CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <8 x i64> [[SPLAT_SPLATINSERT]], <8 x i64> poison, <8 x i32> zeroinitializer
|
||||
// CHECK-NEXT: [[SHL:%.*]] = shl <8 x i64> [[SPLAT_SPLAT]], [[A]]
|
||||
// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 2 x i64> @llvm.experimental.vector.insert.nxv2i64.v8i64(<vscale x 2 x i64> undef, <8 x i64> [[SHL]], i64 0)
|
||||
// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 2 x i64> @llvm.vector.insert.nxv2i64.v8i64(<vscale x 2 x i64> undef, <8 x i64> [[SHL]], i64 0)
|
||||
// CHECK-NEXT: ret <vscale x 2 x i64> [[CASTSCALABLESVE]]
|
||||
//
|
||||
fixed_uint64_t lshift_u64_lsplat(fixed_uint64_t a, uint64_t b) {
|
||||
|
@ -625,11 +625,11 @@ fixed_uint64_t lshift_u64_lsplat(fixed_uint64_t a, uint64_t b) {
|
|||
|
||||
// CHECK-LABEL: @rshift_u64_rsplat(
|
||||
// CHECK-NEXT: entry:
|
||||
// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64(<vscale x 2 x i64> [[A_COERCE:%.*]], i64 0)
|
||||
// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.vector.extract.v8i64.nxv2i64(<vscale x 2 x i64> [[A_COERCE:%.*]], i64 0)
|
||||
// CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <8 x i64> poison, i64 [[B:%.*]], i32 0
|
||||
// CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <8 x i64> [[SPLAT_SPLATINSERT]], <8 x i64> poison, <8 x i32> zeroinitializer
|
||||
// CHECK-NEXT: [[SHR:%.*]] = lshr <8 x i64> [[A]], [[SPLAT_SPLAT]]
|
||||
// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 2 x i64> @llvm.experimental.vector.insert.nxv2i64.v8i64(<vscale x 2 x i64> undef, <8 x i64> [[SHR]], i64 0)
|
||||
// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 2 x i64> @llvm.vector.insert.nxv2i64.v8i64(<vscale x 2 x i64> undef, <8 x i64> [[SHR]], i64 0)
|
||||
// CHECK-NEXT: ret <vscale x 2 x i64> [[CASTSCALABLESVE]]
|
||||
//
|
||||
fixed_uint64_t rshift_u64_rsplat(fixed_uint64_t a, uint64_t b) {
|
||||
|
@ -638,11 +638,11 @@ fixed_uint64_t rshift_u64_rsplat(fixed_uint64_t a, uint64_t b) {
|
|||
|
||||
// CHECK-LABEL: @rshift_u64_lsplat(
|
||||
// CHECK-NEXT: entry:
|
||||
// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64(<vscale x 2 x i64> [[A_COERCE:%.*]], i64 0)
|
||||
// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.vector.extract.v8i64.nxv2i64(<vscale x 2 x i64> [[A_COERCE:%.*]], i64 0)
|
||||
// CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <8 x i64> poison, i64 [[B:%.*]], i32 0
|
||||
// CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <8 x i64> [[SPLAT_SPLATINSERT]], <8 x i64> poison, <8 x i32> zeroinitializer
|
||||
// CHECK-NEXT: [[SHR:%.*]] = lshr <8 x i64> [[SPLAT_SPLAT]], [[A]]
|
||||
// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 2 x i64> @llvm.experimental.vector.insert.nxv2i64.v8i64(<vscale x 2 x i64> undef, <8 x i64> [[SHR]], i64 0)
|
||||
// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 2 x i64> @llvm.vector.insert.nxv2i64.v8i64(<vscale x 2 x i64> undef, <8 x i64> [[SHR]], i64 0)
|
||||
// CHECK-NEXT: ret <vscale x 2 x i64> [[CASTSCALABLESVE]]
|
||||
//
|
||||
fixed_uint64_t rshift_u64_lsplat(fixed_uint64_t a, uint64_t b) {
|
||||
|
|
|
@ -28,7 +28,7 @@ typedef svbool_t fixed_bool_t __attribute__((arm_sve_vector_bits(N)));
|
|||
|
||||
// CHECK-LABEL: @subscript_int16(
|
||||
// CHECK-NEXT: entry:
|
||||
// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16(<vscale x 8 x i16> [[A_COERCE:%.*]], i64 0)
|
||||
// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.vector.extract.v32i16.nxv8i16(<vscale x 8 x i16> [[A_COERCE:%.*]], i64 0)
|
||||
// CHECK-NEXT: [[VECEXT:%.*]] = extractelement <32 x i16> [[A]], i64 [[B:%.*]]
|
||||
// CHECK-NEXT: ret i16 [[VECEXT]]
|
||||
//
|
||||
|
@ -38,7 +38,7 @@ int16_t subscript_int16(fixed_int16_t a, size_t b) {
|
|||
|
||||
// CHECK-LABEL: @subscript_uint16(
|
||||
// CHECK-NEXT: entry:
|
||||
// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16(<vscale x 8 x i16> [[A_COERCE:%.*]], i64 0)
|
||||
// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.vector.extract.v32i16.nxv8i16(<vscale x 8 x i16> [[A_COERCE:%.*]], i64 0)
|
||||
// CHECK-NEXT: [[VECEXT:%.*]] = extractelement <32 x i16> [[A]], i64 [[B:%.*]]
|
||||
// CHECK-NEXT: ret i16 [[VECEXT]]
|
||||
//
|
||||
|
@ -48,7 +48,7 @@ uint16_t subscript_uint16(fixed_uint16_t a, size_t b) {
|
|||
|
||||
// CHECK-LABEL: @subscript_int32(
|
||||
// CHECK-NEXT: entry:
|
||||
// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32(<vscale x 4 x i32> [[A_COERCE:%.*]], i64 0)
|
||||
// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.vector.extract.v16i32.nxv4i32(<vscale x 4 x i32> [[A_COERCE:%.*]], i64 0)
|
||||
// CHECK-NEXT: [[VECEXT:%.*]] = extractelement <16 x i32> [[A]], i64 [[B:%.*]]
|
||||
// CHECK-NEXT: ret i32 [[VECEXT]]
|
||||
//
|
||||
|
@ -58,7 +58,7 @@ int32_t subscript_int32(fixed_int32_t a, size_t b) {
|
|||
|
||||
// CHECK-LABEL: @subscript_uint32(
|
||||
// CHECK-NEXT: entry:
|
||||
// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32(<vscale x 4 x i32> [[A_COERCE:%.*]], i64 0)
|
||||
// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.vector.extract.v16i32.nxv4i32(<vscale x 4 x i32> [[A_COERCE:%.*]], i64 0)
|
||||
// CHECK-NEXT: [[VECEXT:%.*]] = extractelement <16 x i32> [[A]], i64 [[B:%.*]]
|
||||
// CHECK-NEXT: ret i32 [[VECEXT]]
|
||||
//
|
||||
|
@ -68,7 +68,7 @@ uint32_t subscript_uint32(fixed_uint32_t a, size_t b) {
|
|||
|
||||
// CHECK-LABEL: @subscript_int64(
|
||||
// CHECK-NEXT: entry:
|
||||
// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64(<vscale x 2 x i64> [[A_COERCE:%.*]], i64 0)
|
||||
// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.vector.extract.v8i64.nxv2i64(<vscale x 2 x i64> [[A_COERCE:%.*]], i64 0)
|
||||
// CHECK-NEXT: [[VECEXT:%.*]] = extractelement <8 x i64> [[A]], i64 [[B:%.*]]
|
||||
// CHECK-NEXT: ret i64 [[VECEXT]]
|
||||
//
|
||||
|
@ -78,7 +78,7 @@ int64_t subscript_int64(fixed_int64_t a, size_t b) {
|
|||
|
||||
// CHECK-LABEL: @subscript_uint64(
|
||||
// CHECK-NEXT: entry:
|
||||
// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64(<vscale x 2 x i64> [[A_COERCE:%.*]], i64 0)
|
||||
// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.vector.extract.v8i64.nxv2i64(<vscale x 2 x i64> [[A_COERCE:%.*]], i64 0)
|
||||
// CHECK-NEXT: [[VECEXT:%.*]] = extractelement <8 x i64> [[A]], i64 [[B:%.*]]
|
||||
// CHECK-NEXT: ret i64 [[VECEXT]]
|
||||
//
|
||||
|
@ -88,7 +88,7 @@ uint64_t subscript_uint64(fixed_uint64_t a, size_t b) {
|
|||
|
||||
// CHECK-LABEL: @subscript_float16(
|
||||
// CHECK-NEXT: entry:
|
||||
// CHECK-NEXT: [[A:%.*]] = call <32 x half> @llvm.experimental.vector.extract.v32f16.nxv8f16(<vscale x 8 x half> [[A_COERCE:%.*]], i64 0)
|
||||
// CHECK-NEXT: [[A:%.*]] = call <32 x half> @llvm.vector.extract.v32f16.nxv8f16(<vscale x 8 x half> [[A_COERCE:%.*]], i64 0)
|
||||
// CHECK-NEXT: [[VECEXT:%.*]] = extractelement <32 x half> [[A]], i64 [[B:%.*]]
|
||||
// CHECK-NEXT: ret half [[VECEXT]]
|
||||
//
|
||||
|
@ -98,7 +98,7 @@ __fp16 subscript_float16(fixed_float16_t a, size_t b) {
|
|||
|
||||
// CHECK-LABEL: @subscript_float32(
|
||||
// CHECK-NEXT: entry:
|
||||
// CHECK-NEXT: [[A:%.*]] = call <16 x float> @llvm.experimental.vector.extract.v16f32.nxv4f32(<vscale x 4 x float> [[A_COERCE:%.*]], i64 0)
|
||||
// CHECK-NEXT: [[A:%.*]] = call <16 x float> @llvm.vector.extract.v16f32.nxv4f32(<vscale x 4 x float> [[A_COERCE:%.*]], i64 0)
|
||||
// CHECK-NEXT: [[VECEXT:%.*]] = extractelement <16 x float> [[A]], i64 [[B:%.*]]
|
||||
// CHECK-NEXT: ret float [[VECEXT]]
|
||||
//
|
||||
|
@ -108,7 +108,7 @@ float subscript_float32(fixed_float32_t a, size_t b) {
|
|||
|
||||
// CHECK-LABEL: @subscript_float64(
|
||||
// CHECK-NEXT: entry:
|
||||
// CHECK-NEXT: [[A:%.*]] = call <8 x double> @llvm.experimental.vector.extract.v8f64.nxv2f64(<vscale x 2 x double> [[A_COERCE:%.*]], i64 0)
|
||||
// CHECK-NEXT: [[A:%.*]] = call <8 x double> @llvm.vector.extract.v8f64.nxv2f64(<vscale x 2 x double> [[A_COERCE:%.*]], i64 0)
|
||||
// CHECK-NEXT: [[VECEXT:%.*]] = extractelement <8 x double> [[A]], i64 [[B:%.*]]
|
||||
// CHECK-NEXT: ret double [[VECEXT]]
|
||||
//
|
||||
|
|
|
@ -16,13 +16,13 @@
|
|||
|
||||
// CHECK-LABEL: @test_svdup_neonq_s8(
|
||||
// CHECK-NEXT: entry:
|
||||
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.experimental.vector.insert.nxv16i8.v16i8(<vscale x 16 x i8> undef, <16 x i8> [[N:%.*]], i64 0)
|
||||
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.vector.insert.nxv16i8.v16i8(<vscale x 16 x i8> undef, <16 x i8> [[N:%.*]], i64 0)
|
||||
// CHECK-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.dupq.lane.nxv16i8(<vscale x 16 x i8> [[TMP0]], i64 0)
|
||||
// CHECK-NEXT: ret <vscale x 16 x i8> [[TMP1]]
|
||||
//
|
||||
// CPP-CHECK-LABEL: @_Z19test_svdup_neonq_s811__Int8x16_t(
|
||||
// CPP-CHECK-NEXT: entry:
|
||||
// CPP-CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.experimental.vector.insert.nxv16i8.v16i8(<vscale x 16 x i8> undef, <16 x i8> [[N:%.*]], i64 0)
|
||||
// CPP-CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.vector.insert.nxv16i8.v16i8(<vscale x 16 x i8> undef, <16 x i8> [[N:%.*]], i64 0)
|
||||
// CPP-CHECK-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.dupq.lane.nxv16i8(<vscale x 16 x i8> [[TMP0]], i64 0)
|
||||
// CPP-CHECK-NEXT: ret <vscale x 16 x i8> [[TMP1]]
|
||||
//
|
||||
|
@ -32,13 +32,13 @@ svint8_t test_svdup_neonq_s8(int8x16_t n) {
|
|||
|
||||
// CHECK-LABEL: @test_svdup_neonq_s16(
|
||||
// CHECK-NEXT: entry:
|
||||
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.experimental.vector.insert.nxv8i16.v8i16(<vscale x 8 x i16> undef, <8 x i16> [[N:%.*]], i64 0)
|
||||
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.vector.insert.nxv8i16.v8i16(<vscale x 8 x i16> undef, <8 x i16> [[N:%.*]], i64 0)
|
||||
// CHECK-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.dupq.lane.nxv8i16(<vscale x 8 x i16> [[TMP0]], i64 0)
|
||||
// CHECK-NEXT: ret <vscale x 8 x i16> [[TMP1]]
|
||||
//
|
||||
// CPP-CHECK-LABEL: @_Z20test_svdup_neonq_s1611__Int16x8_t(
|
||||
// CPP-CHECK-NEXT: entry:
|
||||
// CPP-CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.experimental.vector.insert.nxv8i16.v8i16(<vscale x 8 x i16> undef, <8 x i16> [[N:%.*]], i64 0)
|
||||
// CPP-CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.vector.insert.nxv8i16.v8i16(<vscale x 8 x i16> undef, <8 x i16> [[N:%.*]], i64 0)
|
||||
// CPP-CHECK-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.dupq.lane.nxv8i16(<vscale x 8 x i16> [[TMP0]], i64 0)
|
||||
// CPP-CHECK-NEXT: ret <vscale x 8 x i16> [[TMP1]]
|
||||
//
|
||||
|
@ -48,13 +48,13 @@ svint16_t test_svdup_neonq_s16(int16x8_t n) {
|
|||
|
||||
// CHECK-LABEL: @test_svdup_neonq_s32(
|
||||
// CHECK-NEXT: entry:
|
||||
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vector.insert.nxv4i32.v4i32(<vscale x 4 x i32> undef, <4 x i32> [[N:%.*]], i64 0)
|
||||
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.vector.insert.nxv4i32.v4i32(<vscale x 4 x i32> undef, <4 x i32> [[N:%.*]], i64 0)
|
||||
// CHECK-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.dupq.lane.nxv4i32(<vscale x 4 x i32> [[TMP0]], i64 0)
|
||||
// CHECK-NEXT: ret <vscale x 4 x i32> [[TMP1]]
|
||||
//
|
||||
// CPP-CHECK-LABEL: @_Z20test_svdup_neonq_s3211__Int32x4_t(
|
||||
// CPP-CHECK-NEXT: entry:
|
||||
// CPP-CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vector.insert.nxv4i32.v4i32(<vscale x 4 x i32> undef, <4 x i32> [[N:%.*]], i64 0)
|
||||
// CPP-CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.vector.insert.nxv4i32.v4i32(<vscale x 4 x i32> undef, <4 x i32> [[N:%.*]], i64 0)
|
||||
// CPP-CHECK-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.dupq.lane.nxv4i32(<vscale x 4 x i32> [[TMP0]], i64 0)
|
||||
// CPP-CHECK-NEXT: ret <vscale x 4 x i32> [[TMP1]]
|
||||
//
|
||||
|
@ -64,13 +64,13 @@ svint32_t test_svdup_neonq_s32(int32x4_t n) {
|
|||
|
||||
// CHECK-LABEL: @test_svdup_neonq_s64(
|
||||
// CHECK-NEXT: entry:
|
||||
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.experimental.vector.insert.nxv2i64.v2i64(<vscale x 2 x i64> undef, <2 x i64> [[N:%.*]], i64 0)
|
||||
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.vector.insert.nxv2i64.v2i64(<vscale x 2 x i64> undef, <2 x i64> [[N:%.*]], i64 0)
|
||||
// CHECK-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.dupq.lane.nxv2i64(<vscale x 2 x i64> [[TMP0]], i64 0)
|
||||
// CHECK-NEXT: ret <vscale x 2 x i64> [[TMP1]]
|
||||
//
|
||||
// CPP-CHECK-LABEL: @_Z20test_svdup_neonq_s6411__Int64x2_t(
|
||||
// CPP-CHECK-NEXT: entry:
|
||||
// CPP-CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.experimental.vector.insert.nxv2i64.v2i64(<vscale x 2 x i64> undef, <2 x i64> [[N:%.*]], i64 0)
|
||||
// CPP-CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.vector.insert.nxv2i64.v2i64(<vscale x 2 x i64> undef, <2 x i64> [[N:%.*]], i64 0)
|
||||
// CPP-CHECK-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.dupq.lane.nxv2i64(<vscale x 2 x i64> [[TMP0]], i64 0)
|
||||
// CPP-CHECK-NEXT: ret <vscale x 2 x i64> [[TMP1]]
|
||||
//
|
||||
|
@ -80,13 +80,13 @@ svint64_t test_svdup_neonq_s64(int64x2_t n) {
|
|||
|
||||
// CHECK-LABEL: @test_svdup_neonq_u8(
|
||||
// CHECK-NEXT: entry:
|
||||
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.experimental.vector.insert.nxv16i8.v16i8(<vscale x 16 x i8> undef, <16 x i8> [[N:%.*]], i64 0)
|
||||
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.vector.insert.nxv16i8.v16i8(<vscale x 16 x i8> undef, <16 x i8> [[N:%.*]], i64 0)
|
||||
// CHECK-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.dupq.lane.nxv16i8(<vscale x 16 x i8> [[TMP0]], i64 0)
|
||||
// CHECK-NEXT: ret <vscale x 16 x i8> [[TMP1]]
|
||||
//
|
||||
// CPP-CHECK-LABEL: @_Z19test_svdup_neonq_u812__Uint8x16_t(
|
||||
// CPP-CHECK-NEXT: entry:
|
||||
// CPP-CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.experimental.vector.insert.nxv16i8.v16i8(<vscale x 16 x i8> undef, <16 x i8> [[N:%.*]], i64 0)
|
||||
// CPP-CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.vector.insert.nxv16i8.v16i8(<vscale x 16 x i8> undef, <16 x i8> [[N:%.*]], i64 0)
|
||||
// CPP-CHECK-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.dupq.lane.nxv16i8(<vscale x 16 x i8> [[TMP0]], i64 0)
|
||||
// CPP-CHECK-NEXT: ret <vscale x 16 x i8> [[TMP1]]
|
||||
//
|
||||
|
@ -96,13 +96,13 @@ svuint8_t test_svdup_neonq_u8(uint8x16_t n) {
|
|||
|
||||
// CHECK-LABEL: @test_svdup_neonq_u16(
|
||||
// CHECK-NEXT: entry:
|
||||
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.experimental.vector.insert.nxv8i16.v8i16(<vscale x 8 x i16> undef, <8 x i16> [[N:%.*]], i64 0)
|
||||
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.vector.insert.nxv8i16.v8i16(<vscale x 8 x i16> undef, <8 x i16> [[N:%.*]], i64 0)
|
||||
// CHECK-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.dupq.lane.nxv8i16(<vscale x 8 x i16> [[TMP0]], i64 0)
|
||||
// CHECK-NEXT: ret <vscale x 8 x i16> [[TMP1]]
|
||||
//
|
||||
// CPP-CHECK-LABEL: @_Z20test_svdup_neonq_u1612__Uint16x8_t(
|
||||
// CPP-CHECK-NEXT: entry:
|
||||
// CPP-CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.experimental.vector.insert.nxv8i16.v8i16(<vscale x 8 x i16> undef, <8 x i16> [[N:%.*]], i64 0)
|
||||
// CPP-CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.vector.insert.nxv8i16.v8i16(<vscale x 8 x i16> undef, <8 x i16> [[N:%.*]], i64 0)
|
||||
// CPP-CHECK-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.dupq.lane.nxv8i16(<vscale x 8 x i16> [[TMP0]], i64 0)
|
||||
// CPP-CHECK-NEXT: ret <vscale x 8 x i16> [[TMP1]]
|
||||
//
|
||||
|
@ -112,13 +112,13 @@ svuint16_t test_svdup_neonq_u16(uint16x8_t n) {
|
|||
|
||||
// CHECK-LABEL: @test_svdup_neonq_u32(
|
||||
// CHECK-NEXT: entry:
|
||||
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vector.insert.nxv4i32.v4i32(<vscale x 4 x i32> undef, <4 x i32> [[N:%.*]], i64 0)
|
||||
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.vector.insert.nxv4i32.v4i32(<vscale x 4 x i32> undef, <4 x i32> [[N:%.*]], i64 0)
|
||||
// CHECK-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.dupq.lane.nxv4i32(<vscale x 4 x i32> [[TMP0]], i64 0)
|
||||
// CHECK-NEXT: ret <vscale x 4 x i32> [[TMP1]]
|
||||
//
|
||||
// CPP-CHECK-LABEL: @_Z20test_svdup_neonq_u3212__Uint32x4_t(
|
||||
// CPP-CHECK-NEXT: entry:
|
||||
// CPP-CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vector.insert.nxv4i32.v4i32(<vscale x 4 x i32> undef, <4 x i32> [[N:%.*]], i64 0)
|
||||
// CPP-CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.vector.insert.nxv4i32.v4i32(<vscale x 4 x i32> undef, <4 x i32> [[N:%.*]], i64 0)
|
||||
// CPP-CHECK-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.dupq.lane.nxv4i32(<vscale x 4 x i32> [[TMP0]], i64 0)
|
||||
// CPP-CHECK-NEXT: ret <vscale x 4 x i32> [[TMP1]]
|
||||
//
|
||||
|
@ -128,13 +128,13 @@ svuint32_t test_svdup_neonq_u32(uint32x4_t n) {
|
|||
|
||||
// CHECK-LABEL: @test_svdup_neonq_u64(
|
||||
// CHECK-NEXT: entry:
|
||||
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.experimental.vector.insert.nxv2i64.v2i64(<vscale x 2 x i64> undef, <2 x i64> [[N:%.*]], i64 0)
|
||||
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.vector.insert.nxv2i64.v2i64(<vscale x 2 x i64> undef, <2 x i64> [[N:%.*]], i64 0)
|
||||
// CHECK-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.dupq.lane.nxv2i64(<vscale x 2 x i64> [[TMP0]], i64 0)
|
||||
// CHECK-NEXT: ret <vscale x 2 x i64> [[TMP1]]
|
||||
//
|
||||
// CPP-CHECK-LABEL: @_Z20test_svdup_neonq_u6412__Uint64x2_t(
|
||||
// CPP-CHECK-NEXT: entry:
|
||||
// CPP-CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.experimental.vector.insert.nxv2i64.v2i64(<vscale x 2 x i64> undef, <2 x i64> [[N:%.*]], i64 0)
|
||||
// CPP-CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.vector.insert.nxv2i64.v2i64(<vscale x 2 x i64> undef, <2 x i64> [[N:%.*]], i64 0)
|
||||
// CPP-CHECK-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.dupq.lane.nxv2i64(<vscale x 2 x i64> [[TMP0]], i64 0)
|
||||
// CPP-CHECK-NEXT: ret <vscale x 2 x i64> [[TMP1]]
|
||||
//
|
||||
|
@ -144,13 +144,13 @@ svuint64_t test_svdup_neonq_u64(uint64x2_t n) {
|
|||
|
||||
// CHECK-LABEL: @test_svdup_neonq_f16(
|
||||
// CHECK-NEXT: entry:
|
||||
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.experimental.vector.insert.nxv8f16.v8f16(<vscale x 8 x half> undef, <8 x half> [[N:%.*]], i64 0)
|
||||
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.vector.insert.nxv8f16.v8f16(<vscale x 8 x half> undef, <8 x half> [[N:%.*]], i64 0)
|
||||
// CHECK-NEXT: [[TMP1:%.*]] = call <vscale x 8 x half> @llvm.aarch64.sve.dupq.lane.nxv8f16(<vscale x 8 x half> [[TMP0]], i64 0)
|
||||
// CHECK-NEXT: ret <vscale x 8 x half> [[TMP1]]
|
||||
//
|
||||
// CPP-CHECK-LABEL: @_Z20test_svdup_neonq_f1613__Float16x8_t(
|
||||
// CPP-CHECK-NEXT: entry:
|
||||
// CPP-CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.experimental.vector.insert.nxv8f16.v8f16(<vscale x 8 x half> undef, <8 x half> [[N:%.*]], i64 0)
|
||||
// CPP-CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.vector.insert.nxv8f16.v8f16(<vscale x 8 x half> undef, <8 x half> [[N:%.*]], i64 0)
|
||||
// CPP-CHECK-NEXT: [[TMP1:%.*]] = call <vscale x 8 x half> @llvm.aarch64.sve.dupq.lane.nxv8f16(<vscale x 8 x half> [[TMP0]], i64 0)
|
||||
// CPP-CHECK-NEXT: ret <vscale x 8 x half> [[TMP1]]
|
||||
//
|
||||
|
@ -158,18 +158,18 @@ svfloat16_t test_svdup_neonq_f16(float16x8_t n) {
|
|||
return SVE_ACLE_FUNC(svdup_neonq, _f16, , )(n);
|
||||
}
|
||||
|
||||
// CHECK-NEXT %0 = call <vscale x 4 x float> @llvm.experimental.vector.insert.nxv4f32.v4f32(<vscale x 4 x float> undef, <4 x float> %n, i64 0)
|
||||
// CHECK-NEXT %0 = call <vscale x 4 x float> @llvm.vector.insert.nxv4f32.v4f32(<vscale x 4 x float> undef, <4 x float> %n, i64 0)
|
||||
// CHECK-NEXT %1 = call <vscale x 4 x float> @llvm.aarch64.sve.dupq.lane.nxv4f32(<vscale x 4 x float> %0, i64 0)
|
||||
// CHECK-NEXT ret <vscale x 4 x float> %1
|
||||
// CHECK-LABEL: @test_svdup_neonq_f32(
|
||||
// CHECK-NEXT: entry:
|
||||
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.experimental.vector.insert.nxv4f32.v4f32(<vscale x 4 x float> undef, <4 x float> [[N:%.*]], i64 0)
|
||||
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.vector.insert.nxv4f32.v4f32(<vscale x 4 x float> undef, <4 x float> [[N:%.*]], i64 0)
|
||||
// CHECK-NEXT: [[TMP1:%.*]] = call <vscale x 4 x float> @llvm.aarch64.sve.dupq.lane.nxv4f32(<vscale x 4 x float> [[TMP0]], i64 0)
|
||||
// CHECK-NEXT: ret <vscale x 4 x float> [[TMP1]]
|
||||
//
|
||||
// CPP-CHECK-LABEL: @_Z20test_svdup_neonq_f3213__Float32x4_t(
|
||||
// CPP-CHECK-NEXT: entry:
|
||||
// CPP-CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.experimental.vector.insert.nxv4f32.v4f32(<vscale x 4 x float> undef, <4 x float> [[N:%.*]], i64 0)
|
||||
// CPP-CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.vector.insert.nxv4f32.v4f32(<vscale x 4 x float> undef, <4 x float> [[N:%.*]], i64 0)
|
||||
// CPP-CHECK-NEXT: [[TMP1:%.*]] = call <vscale x 4 x float> @llvm.aarch64.sve.dupq.lane.nxv4f32(<vscale x 4 x float> [[TMP0]], i64 0)
|
||||
// CPP-CHECK-NEXT: ret <vscale x 4 x float> [[TMP1]]
|
||||
//
|
||||
|
@ -179,13 +179,13 @@ svfloat32_t test_svdup_neonq_f32(float32x4_t n) {
|
|||
|
||||
// CHECK-LABEL: @test_svdup_neonq_f64(
|
||||
// CHECK-NEXT: entry:
|
||||
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.experimental.vector.insert.nxv2f64.v2f64(<vscale x 2 x double> undef, <2 x double> [[N:%.*]], i64 0)
|
||||
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.vector.insert.nxv2f64.v2f64(<vscale x 2 x double> undef, <2 x double> [[N:%.*]], i64 0)
|
||||
// CHECK-NEXT: [[TMP1:%.*]] = call <vscale x 2 x double> @llvm.aarch64.sve.dupq.lane.nxv2f64(<vscale x 2 x double> [[TMP0]], i64 0)
|
||||
// CHECK-NEXT: ret <vscale x 2 x double> [[TMP1]]
|
||||
//
|
||||
// CPP-CHECK-LABEL: @_Z20test_svdup_neonq_f6413__Float64x2_t(
|
||||
// CPP-CHECK-NEXT: entry:
|
||||
// CPP-CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.experimental.vector.insert.nxv2f64.v2f64(<vscale x 2 x double> undef, <2 x double> [[N:%.*]], i64 0)
|
||||
// CPP-CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.vector.insert.nxv2f64.v2f64(<vscale x 2 x double> undef, <2 x double> [[N:%.*]], i64 0)
|
||||
// CPP-CHECK-NEXT: [[TMP1:%.*]] = call <vscale x 2 x double> @llvm.aarch64.sve.dupq.lane.nxv2f64(<vscale x 2 x double> [[TMP0]], i64 0)
|
||||
// CPP-CHECK-NEXT: ret <vscale x 2 x double> [[TMP1]]
|
||||
//
|
||||
|
@ -195,13 +195,13 @@ svfloat64_t test_svdup_neonq_f64(float64x2_t n) {
|
|||
|
||||
// CHECK-LABEL: @test_svdup_neonq_bf16(
|
||||
// CHECK-NEXT: entry:
|
||||
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.experimental.vector.insert.nxv8bf16.v8bf16(<vscale x 8 x bfloat> undef, <8 x bfloat> [[N:%.*]], i64 0)
|
||||
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.vector.insert.nxv8bf16.v8bf16(<vscale x 8 x bfloat> undef, <8 x bfloat> [[N:%.*]], i64 0)
|
||||
// CHECK-NEXT: [[TMP1:%.*]] = call <vscale x 8 x bfloat> @llvm.aarch64.sve.dupq.lane.nxv8bf16(<vscale x 8 x bfloat> [[TMP0]], i64 0)
|
||||
// CHECK-NEXT: ret <vscale x 8 x bfloat> [[TMP1]]
|
||||
//
|
||||
// CPP-CHECK-LABEL: @_Z21test_svdup_neonq_bf1614__Bfloat16x8_t(
|
||||
// CPP-CHECK-NEXT: entry:
|
||||
// CPP-CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.experimental.vector.insert.nxv8bf16.v8bf16(<vscale x 8 x bfloat> undef, <8 x bfloat> [[N:%.*]], i64 0)
|
||||
// CPP-CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.vector.insert.nxv8bf16.v8bf16(<vscale x 8 x bfloat> undef, <8 x bfloat> [[N:%.*]], i64 0)
|
||||
// CPP-CHECK-NEXT: [[TMP1:%.*]] = call <vscale x 8 x bfloat> @llvm.aarch64.sve.dupq.lane.nxv8bf16(<vscale x 8 x bfloat> [[TMP0]], i64 0)
|
||||
// CPP-CHECK-NEXT: ret <vscale x 8 x bfloat> [[TMP1]]
|
||||
//
|
||||
|
|
|
@ -16,12 +16,12 @@
|
|||
|
||||
// CHECK-LABEL: @test_svget_neonq_s8(
|
||||
// CHECK-NEXT: entry:
|
||||
// CHECK-NEXT: [[TMP0:%.*]] = call <16 x i8> @llvm.experimental.vector.extract.v16i8.nxv16i8(<vscale x 16 x i8> [[N:%.*]], i64 0)
|
||||
// CHECK-NEXT: [[TMP0:%.*]] = call <16 x i8> @llvm.vector.extract.v16i8.nxv16i8(<vscale x 16 x i8> [[N:%.*]], i64 0)
|
||||
// CHECK-NEXT: ret <16 x i8> [[TMP0]]
|
||||
//
|
||||
// CPP-CHECK-LABEL: @_Z19test_svget_neonq_s8u10__SVInt8_t(
|
||||
// CPP-CHECK-NEXT: entry:
|
||||
// CPP-CHECK-NEXT: [[TMP0:%.*]] = call <16 x i8> @llvm.experimental.vector.extract.v16i8.nxv16i8(<vscale x 16 x i8> [[N:%.*]], i64 0)
|
||||
// CPP-CHECK-NEXT: [[TMP0:%.*]] = call <16 x i8> @llvm.vector.extract.v16i8.nxv16i8(<vscale x 16 x i8> [[N:%.*]], i64 0)
|
||||
// CPP-CHECK-NEXT: ret <16 x i8> [[TMP0]]
|
||||
//
|
||||
int8x16_t test_svget_neonq_s8(svint8_t n) {
|
||||
|
@ -31,12 +31,12 @@ int8x16_t test_svget_neonq_s8(svint8_t n) {
|
|||
//
|
||||
// CHECK-LABEL: @test_svget_neonq_s16(
|
||||
// CHECK-NEXT: entry:
|
||||
// CHECK-NEXT: [[TMP0:%.*]] = call <8 x i16> @llvm.experimental.vector.extract.v8i16.nxv8i16(<vscale x 8 x i16> [[N:%.*]], i64 0)
|
||||
// CHECK-NEXT: [[TMP0:%.*]] = call <8 x i16> @llvm.vector.extract.v8i16.nxv8i16(<vscale x 8 x i16> [[N:%.*]], i64 0)
|
||||
// CHECK-NEXT: ret <8 x i16> [[TMP0]]
|
||||
//
|
||||
// CPP-CHECK-LABEL: @_Z20test_svget_neonq_s16u11__SVInt16_t(
|
||||
// CPP-CHECK-NEXT: entry:
|
||||
// CPP-CHECK-NEXT: [[TMP0:%.*]] = call <8 x i16> @llvm.experimental.vector.extract.v8i16.nxv8i16(<vscale x 8 x i16> [[N:%.*]], i64 0)
|
||||
// CPP-CHECK-NEXT: [[TMP0:%.*]] = call <8 x i16> @llvm.vector.extract.v8i16.nxv8i16(<vscale x 8 x i16> [[N:%.*]], i64 0)
|
||||
// CPP-CHECK-NEXT: ret <8 x i16> [[TMP0]]
|
||||
//
|
||||
int16x8_t test_svget_neonq_s16(svint16_t n) {
|
||||
|
@ -45,12 +45,12 @@ int16x8_t test_svget_neonq_s16(svint16_t n) {
|
|||
|
||||
// CHECK-LABEL: @test_svget_neonq_s32(
|
||||
// CHECK-NEXT: entry:
|
||||
// CHECK-NEXT: [[TMP0:%.*]] = call <4 x i32> @llvm.experimental.vector.extract.v4i32.nxv4i32(<vscale x 4 x i32> [[N:%.*]], i64 0)
|
||||
// CHECK-NEXT: [[TMP0:%.*]] = call <4 x i32> @llvm.vector.extract.v4i32.nxv4i32(<vscale x 4 x i32> [[N:%.*]], i64 0)
|
||||
// CHECK-NEXT: ret <4 x i32> [[TMP0]]
|
||||
//
|
||||
// CPP-CHECK-LABEL: @_Z20test_svget_neonq_s32u11__SVInt32_t(
|
||||
// CPP-CHECK-NEXT: entry:
|
||||
// CPP-CHECK-NEXT: [[TMP0:%.*]] = call <4 x i32> @llvm.experimental.vector.extract.v4i32.nxv4i32(<vscale x 4 x i32> [[N:%.*]], i64 0)
|
||||
// CPP-CHECK-NEXT: [[TMP0:%.*]] = call <4 x i32> @llvm.vector.extract.v4i32.nxv4i32(<vscale x 4 x i32> [[N:%.*]], i64 0)
|
||||
// CPP-CHECK-NEXT: ret <4 x i32> [[TMP0]]
|
||||
//
|
||||
int32x4_t test_svget_neonq_s32(svint32_t n) {
|
||||
|
@ -59,12 +59,12 @@ int32x4_t test_svget_neonq_s32(svint32_t n) {
|
|||
|
||||
// CHECK-LABEL: @test_svget_neonq_s64(
|
||||
// CHECK-NEXT: entry:
|
||||
// CHECK-NEXT: [[TMP0:%.*]] = call <2 x i64> @llvm.experimental.vector.extract.v2i64.nxv2i64(<vscale x 2 x i64> [[N:%.*]], i64 0)
|
||||
// CHECK-NEXT: [[TMP0:%.*]] = call <2 x i64> @llvm.vector.extract.v2i64.nxv2i64(<vscale x 2 x i64> [[N:%.*]], i64 0)
|
||||
// CHECK-NEXT: ret <2 x i64> [[TMP0]]
|
||||
//
|
||||
// CPP-CHECK-LABEL: @_Z20test_svget_neonq_s64u11__SVInt64_t(
|
||||
// CPP-CHECK-NEXT: entry:
|
||||
// CPP-CHECK-NEXT: [[TMP0:%.*]] = call <2 x i64> @llvm.experimental.vector.extract.v2i64.nxv2i64(<vscale x 2 x i64> [[N:%.*]], i64 0)
|
||||
// CPP-CHECK-NEXT: [[TMP0:%.*]] = call <2 x i64> @llvm.vector.extract.v2i64.nxv2i64(<vscale x 2 x i64> [[N:%.*]], i64 0)
|
||||
// CPP-CHECK-NEXT: ret <2 x i64> [[TMP0]]
|
||||
//
|
||||
int64x2_t test_svget_neonq_s64(svint64_t n) {
|
||||
|
@ -73,12 +73,12 @@ int64x2_t test_svget_neonq_s64(svint64_t n) {
|
|||
|
||||
// CHECK-LABEL: @test_svget_neonq_u8(
|
||||
// CHECK-NEXT: entry:
|
||||
// CHECK-NEXT: [[TMP0:%.*]] = call <16 x i8> @llvm.experimental.vector.extract.v16i8.nxv16i8(<vscale x 16 x i8> [[N:%.*]], i64 0)
|
||||
// CHECK-NEXT: [[TMP0:%.*]] = call <16 x i8> @llvm.vector.extract.v16i8.nxv16i8(<vscale x 16 x i8> [[N:%.*]], i64 0)
|
||||
// CHECK-NEXT: ret <16 x i8> [[TMP0]]
|
||||
//
|
||||
// CPP-CHECK-LABEL: @_Z19test_svget_neonq_u8u11__SVUint8_t(
|
||||
// CPP-CHECK-NEXT: entry:
|
||||
// CPP-CHECK-NEXT: [[TMP0:%.*]] = call <16 x i8> @llvm.experimental.vector.extract.v16i8.nxv16i8(<vscale x 16 x i8> [[N:%.*]], i64 0)
|
||||
// CPP-CHECK-NEXT: [[TMP0:%.*]] = call <16 x i8> @llvm.vector.extract.v16i8.nxv16i8(<vscale x 16 x i8> [[N:%.*]], i64 0)
|
||||
// CPP-CHECK-NEXT: ret <16 x i8> [[TMP0]]
|
||||
//
|
||||
uint8x16_t test_svget_neonq_u8(svuint8_t n) {
|
||||
|
@ -87,12 +87,12 @@ uint8x16_t test_svget_neonq_u8(svuint8_t n) {
|
|||
|
||||
// CHECK-LABEL: @test_svget_neonq_u16(
|
||||
// CHECK-NEXT: entry:
|
||||
// CHECK-NEXT: [[TMP0:%.*]] = call <8 x i16> @llvm.experimental.vector.extract.v8i16.nxv8i16(<vscale x 8 x i16> [[N:%.*]], i64 0)
|
||||
// CHECK-NEXT: [[TMP0:%.*]] = call <8 x i16> @llvm.vector.extract.v8i16.nxv8i16(<vscale x 8 x i16> [[N:%.*]], i64 0)
|
||||
// CHECK-NEXT: ret <8 x i16> [[TMP0]]
|
||||
//
|
||||
// CPP-CHECK-LABEL: @_Z20test_svget_neonq_u16u12__SVUint16_t(
|
||||
// CPP-CHECK-NEXT: entry:
|
||||
// CPP-CHECK-NEXT: [[TMP0:%.*]] = call <8 x i16> @llvm.experimental.vector.extract.v8i16.nxv8i16(<vscale x 8 x i16> [[N:%.*]], i64 0)
|
||||
// CPP-CHECK-NEXT: [[TMP0:%.*]] = call <8 x i16> @llvm.vector.extract.v8i16.nxv8i16(<vscale x 8 x i16> [[N:%.*]], i64 0)
|
||||
// CPP-CHECK-NEXT: ret <8 x i16> [[TMP0]]
|
||||
//
|
||||
uint16x8_t test_svget_neonq_u16(svuint16_t n) {
|
||||
|
@ -101,12 +101,12 @@ uint16x8_t test_svget_neonq_u16(svuint16_t n) {
|
|||
|
||||
// CHECK-LABEL: @test_svget_neonq_u32(
|
||||
// CHECK-NEXT: entry:
|
||||
// CHECK-NEXT: [[TMP0:%.*]] = call <4 x i32> @llvm.experimental.vector.extract.v4i32.nxv4i32(<vscale x 4 x i32> [[N:%.*]], i64 0)
|
||||
// CHECK-NEXT: [[TMP0:%.*]] = call <4 x i32> @llvm.vector.extract.v4i32.nxv4i32(<vscale x 4 x i32> [[N:%.*]], i64 0)
|
||||
// CHECK-NEXT: ret <4 x i32> [[TMP0]]
|
||||
//
|
||||
// CPP-CHECK-LABEL: @_Z20test_svget_neonq_u32u12__SVUint32_t(
|
||||
// CPP-CHECK-NEXT: entry:
|
||||
// CPP-CHECK-NEXT: [[TMP0:%.*]] = call <4 x i32> @llvm.experimental.vector.extract.v4i32.nxv4i32(<vscale x 4 x i32> [[N:%.*]], i64 0)
|
||||
// CPP-CHECK-NEXT: [[TMP0:%.*]] = call <4 x i32> @llvm.vector.extract.v4i32.nxv4i32(<vscale x 4 x i32> [[N:%.*]], i64 0)
|
||||
// CPP-CHECK-NEXT: ret <4 x i32> [[TMP0]]
|
||||
//
|
||||
uint32x4_t test_svget_neonq_u32(svuint32_t n) {
|
||||
|
@ -115,12 +115,12 @@ uint32x4_t test_svget_neonq_u32(svuint32_t n) {
|
|||
|
||||
// CHECK-LABEL: @test_svget_neonq_u64(
|
||||
// CHECK-NEXT: entry:
|
||||
// CHECK-NEXT: [[TMP0:%.*]] = call <2 x i64> @llvm.experimental.vector.extract.v2i64.nxv2i64(<vscale x 2 x i64> [[N:%.*]], i64 0)
|
||||
// CHECK-NEXT: [[TMP0:%.*]] = call <2 x i64> @llvm.vector.extract.v2i64.nxv2i64(<vscale x 2 x i64> [[N:%.*]], i64 0)
|
||||
// CHECK-NEXT: ret <2 x i64> [[TMP0]]
|
||||
//
|
||||
// CPP-CHECK-LABEL: @_Z20test_svget_neonq_u64u12__SVUint64_t(
|
||||
// CPP-CHECK-NEXT: entry:
|
||||
// CPP-CHECK-NEXT: [[TMP0:%.*]] = call <2 x i64> @llvm.experimental.vector.extract.v2i64.nxv2i64(<vscale x 2 x i64> [[N:%.*]], i64 0)
|
||||
// CPP-CHECK-NEXT: [[TMP0:%.*]] = call <2 x i64> @llvm.vector.extract.v2i64.nxv2i64(<vscale x 2 x i64> [[N:%.*]], i64 0)
|
||||
// CPP-CHECK-NEXT: ret <2 x i64> [[TMP0]]
|
||||
//
|
||||
uint64x2_t test_svget_neonq_u64(svuint64_t n) {
|
||||
|
@ -129,12 +129,12 @@ uint64x2_t test_svget_neonq_u64(svuint64_t n) {
|
|||
|
||||
// CHECK-LABEL: @test_svget_neonq_f16(
|
||||
// CHECK-NEXT: entry:
|
||||
// CHECK-NEXT: [[TMP0:%.*]] = call <8 x half> @llvm.experimental.vector.extract.v8f16.nxv8f16(<vscale x 8 x half> [[N:%.*]], i64 0)
|
||||
// CHECK-NEXT: [[TMP0:%.*]] = call <8 x half> @llvm.vector.extract.v8f16.nxv8f16(<vscale x 8 x half> [[N:%.*]], i64 0)
|
||||
// CHECK-NEXT: ret <8 x half> [[TMP0]]
|
||||
//
|
||||
// CPP-CHECK-LABEL: @_Z20test_svget_neonq_f16u13__SVFloat16_t(
|
||||
// CPP-CHECK-NEXT: entry:
|
||||
// CPP-CHECK-NEXT: [[TMP0:%.*]] = call <8 x half> @llvm.experimental.vector.extract.v8f16.nxv8f16(<vscale x 8 x half> [[N:%.*]], i64 0)
|
||||
// CPP-CHECK-NEXT: [[TMP0:%.*]] = call <8 x half> @llvm.vector.extract.v8f16.nxv8f16(<vscale x 8 x half> [[N:%.*]], i64 0)
|
||||
// CPP-CHECK-NEXT: ret <8 x half> [[TMP0]]
|
||||
//
|
||||
float16x8_t test_svget_neonq_f16(svfloat16_t n) {
|
||||
|
@ -143,12 +143,12 @@ float16x8_t test_svget_neonq_f16(svfloat16_t n) {
|
|||
|
||||
// CHECK-LABEL: @test_svget_neonq_f32(
|
||||
// CHECK-NEXT: entry:
|
||||
// CHECK-NEXT: [[TMP0:%.*]] = call <4 x float> @llvm.experimental.vector.extract.v4f32.nxv4f32(<vscale x 4 x float> [[N:%.*]], i64 0)
|
||||
// CHECK-NEXT: [[TMP0:%.*]] = call <4 x float> @llvm.vector.extract.v4f32.nxv4f32(<vscale x 4 x float> [[N:%.*]], i64 0)
|
||||
// CHECK-NEXT: ret <4 x float> [[TMP0]]
|
||||
//
|
||||
// CPP-CHECK-LABEL: @_Z20test_svget_neonq_f32u13__SVFloat32_t(
|
||||
// CPP-CHECK-NEXT: entry:
|
||||
// CPP-CHECK-NEXT: [[TMP0:%.*]] = call <4 x float> @llvm.experimental.vector.extract.v4f32.nxv4f32(<vscale x 4 x float> [[N:%.*]], i64 0)
|
||||
// CPP-CHECK-NEXT: [[TMP0:%.*]] = call <4 x float> @llvm.vector.extract.v4f32.nxv4f32(<vscale x 4 x float> [[N:%.*]], i64 0)
|
||||
// CPP-CHECK-NEXT: ret <4 x float> [[TMP0]]
|
||||
//
|
||||
float32x4_t test_svget_neonq_f32(svfloat32_t n) {
|
||||
|
@ -157,12 +157,12 @@ float32x4_t test_svget_neonq_f32(svfloat32_t n) {
|
|||
|
||||
// CHECK-LABEL: @test_svget_neonq_f64(
|
||||
// CHECK-NEXT: entry:
|
||||
// CHECK-NEXT: [[TMP0:%.*]] = call <2 x double> @llvm.experimental.vector.extract.v2f64.nxv2f64(<vscale x 2 x double> [[N:%.*]], i64 0)
|
||||
// CHECK-NEXT: [[TMP0:%.*]] = call <2 x double> @llvm.vector.extract.v2f64.nxv2f64(<vscale x 2 x double> [[N:%.*]], i64 0)
|
||||
// CHECK-NEXT: ret <2 x double> [[TMP0]]
|
||||
//
|
||||
// CPP-CHECK-LABEL: @_Z20test_svget_neonq_f64u13__SVFloat64_t(
|
||||
// CPP-CHECK-NEXT: entry:
|
||||
// CPP-CHECK-NEXT: [[TMP0:%.*]] = call <2 x double> @llvm.experimental.vector.extract.v2f64.nxv2f64(<vscale x 2 x double> [[N:%.*]], i64 0)
|
||||
// CPP-CHECK-NEXT: [[TMP0:%.*]] = call <2 x double> @llvm.vector.extract.v2f64.nxv2f64(<vscale x 2 x double> [[N:%.*]], i64 0)
|
||||
// CPP-CHECK-NEXT: ret <2 x double> [[TMP0]]
|
||||
//
|
||||
float64x2_t test_svget_neonq_f64(svfloat64_t n) {
|
||||
|
@ -171,12 +171,12 @@ float64x2_t test_svget_neonq_f64(svfloat64_t n) {
|
|||
|
||||
// CHECK-LABEL: @test_svget_neonq_bf16(
|
||||
// CHECK-NEXT: entry:
|
||||
// CHECK-NEXT: [[TMP0:%.*]] = call <8 x bfloat> @llvm.experimental.vector.extract.v8bf16.nxv8bf16(<vscale x 8 x bfloat> [[N:%.*]], i64 0)
|
||||
// CHECK-NEXT: [[TMP0:%.*]] = call <8 x bfloat> @llvm.vector.extract.v8bf16.nxv8bf16(<vscale x 8 x bfloat> [[N:%.*]], i64 0)
|
||||
// CHECK-NEXT: ret <8 x bfloat> [[TMP0]]
|
||||
//
|
||||
// CPP-CHECK-LABEL: @_Z21test_svget_neonq_bf16u14__SVBFloat16_t(
|
||||
// CPP-CHECK-NEXT: entry:
|
||||
// CPP-CHECK-NEXT: [[TMP0:%.*]] = call <8 x bfloat> @llvm.experimental.vector.extract.v8bf16.nxv8bf16(<vscale x 8 x bfloat> [[N:%.*]], i64 0)
|
||||
// CPP-CHECK-NEXT: [[TMP0:%.*]] = call <8 x bfloat> @llvm.vector.extract.v8bf16.nxv8bf16(<vscale x 8 x bfloat> [[N:%.*]], i64 0)
|
||||
// CPP-CHECK-NEXT: ret <8 x bfloat> [[TMP0]]
|
||||
//
|
||||
bfloat16x8_t test_svget_neonq_bf16(svbfloat16_t n) {
@ -16,12 +16,12 @@
// CHECK-LABEL: @test_svset_neonq_s8(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.experimental.vector.insert.nxv16i8.v16i8(<vscale x 16 x i8> [[S:%.*]], <16 x i8> [[N:%.*]], i64 0)
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.vector.insert.nxv16i8.v16i8(<vscale x 16 x i8> [[S:%.*]], <16 x i8> [[N:%.*]], i64 0)
// CHECK-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
// CPP-CHECK-LABEL: @_Z19test_svset_neonq_s8u10__SVInt8_t11__Int8x16_t(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.experimental.vector.insert.nxv16i8.v16i8(<vscale x 16 x i8> [[S:%.*]], <16 x i8> [[N:%.*]], i64 0)
// CPP-CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.vector.insert.nxv16i8.v16i8(<vscale x 16 x i8> [[S:%.*]], <16 x i8> [[N:%.*]], i64 0)
// CPP-CHECK-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
svint8_t test_svset_neonq_s8(svint8_t s, int8x16_t n) {
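Conversely, a minimal standalone sketch (illustrative only; the function name is invented) of the renamed insert intrinsic exercised by the svset_neonq checks above, which writes a fixed 128-bit NEON value into the low lanes of a scalable SVE vector:

; Insert the fixed value into the low lanes of the scalable vector and return the result.
declare <vscale x 16 x i8> @llvm.vector.insert.nxv16i8.v16i8(<vscale x 16 x i8>, <16 x i8>, i64)

define <vscale x 16 x i8> @set_low_neon_sketch(<vscale x 16 x i8> %s, <16 x i8> %n) {
entry:
  %r = call <vscale x 16 x i8> @llvm.vector.insert.nxv16i8.v16i8(<vscale x 16 x i8> %s, <16 x i8> %n, i64 0)
  ret <vscale x 16 x i8> %r
}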
|
@ -30,12 +30,12 @@ svint8_t test_svset_neonq_s8(svint8_t s, int8x16_t n) {
|
|||
|
||||
// CHECK-LABEL: @test_svset_neonq_s16(
|
||||
// CHECK-NEXT: entry:
|
||||
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.experimental.vector.insert.nxv8i16.v8i16(<vscale x 8 x i16> [[S:%.*]], <8 x i16> [[N:%.*]], i64 0)
|
||||
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.vector.insert.nxv8i16.v8i16(<vscale x 8 x i16> [[S:%.*]], <8 x i16> [[N:%.*]], i64 0)
|
||||
// CHECK-NEXT: ret <vscale x 8 x i16> [[TMP0]]
|
||||
//
|
||||
// CPP-CHECK-LABEL: @_Z20test_svset_neonq_s16u11__SVInt16_t11__Int16x8_t(
|
||||
// CPP-CHECK-NEXT: entry:
|
||||
// CPP-CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.experimental.vector.insert.nxv8i16.v8i16(<vscale x 8 x i16> [[S:%.*]], <8 x i16> [[N:%.*]], i64 0)
|
||||
// CPP-CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.vector.insert.nxv8i16.v8i16(<vscale x 8 x i16> [[S:%.*]], <8 x i16> [[N:%.*]], i64 0)
|
||||
// CPP-CHECK-NEXT: ret <vscale x 8 x i16> [[TMP0]]
|
||||
//
|
||||
svint16_t test_svset_neonq_s16(svint16_t s, int16x8_t n) {
|
||||
|
@ -44,12 +44,12 @@ svint16_t test_svset_neonq_s16(svint16_t s, int16x8_t n) {
|
|||
|
||||
// CHECK-LABEL: @test_svset_neonq_s32(
|
||||
// CHECK-NEXT: entry:
|
||||
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vector.insert.nxv4i32.v4i32(<vscale x 4 x i32> [[S:%.*]], <4 x i32> [[N:%.*]], i64 0)
|
||||
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.vector.insert.nxv4i32.v4i32(<vscale x 4 x i32> [[S:%.*]], <4 x i32> [[N:%.*]], i64 0)
|
||||
// CHECK-NEXT: ret <vscale x 4 x i32> [[TMP0]]
|
||||
//
|
||||
// CPP-CHECK-LABEL: @_Z20test_svset_neonq_s32u11__SVInt32_t11__Int32x4_t(
|
||||
// CPP-CHECK-NEXT: entry:
|
||||
// CPP-CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vector.insert.nxv4i32.v4i32(<vscale x 4 x i32> [[S:%.*]], <4 x i32> [[N:%.*]], i64 0)
|
||||
// CPP-CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.vector.insert.nxv4i32.v4i32(<vscale x 4 x i32> [[S:%.*]], <4 x i32> [[N:%.*]], i64 0)
|
||||
// CPP-CHECK-NEXT: ret <vscale x 4 x i32> [[TMP0]]
|
||||
//
|
||||
svint32_t test_svset_neonq_s32(svint32_t s, int32x4_t n) {
|
||||
|
@ -58,12 +58,12 @@ svint32_t test_svset_neonq_s32(svint32_t s, int32x4_t n) {
|
|||
|
||||
// CHECK-LABEL: @test_svset_neonq_s64(
|
||||
// CHECK-NEXT: entry:
|
||||
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.experimental.vector.insert.nxv2i64.v2i64(<vscale x 2 x i64> [[S:%.*]], <2 x i64> [[N:%.*]], i64 0)
|
||||
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.vector.insert.nxv2i64.v2i64(<vscale x 2 x i64> [[S:%.*]], <2 x i64> [[N:%.*]], i64 0)
|
||||
// CHECK-NEXT: ret <vscale x 2 x i64> [[TMP0]]
|
||||
//
|
||||
// CPP-CHECK-LABEL: @_Z20test_svset_neonq_s64u11__SVInt64_t11__Int64x2_t(
|
||||
// CPP-CHECK-NEXT: entry:
|
||||
// CPP-CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.experimental.vector.insert.nxv2i64.v2i64(<vscale x 2 x i64> [[S:%.*]], <2 x i64> [[N:%.*]], i64 0)
|
||||
// CPP-CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.vector.insert.nxv2i64.v2i64(<vscale x 2 x i64> [[S:%.*]], <2 x i64> [[N:%.*]], i64 0)
|
||||
// CPP-CHECK-NEXT: ret <vscale x 2 x i64> [[TMP0]]
|
||||
//
|
||||
svint64_t test_svset_neonq_s64(svint64_t s, int64x2_t n) {
|
||||
|
@ -72,12 +72,12 @@ svint64_t test_svset_neonq_s64(svint64_t s, int64x2_t n) {
|
|||
|
||||
// CHECK-LABEL: @test_svset_neonq_u8(
|
||||
// CHECK-NEXT: entry:
|
||||
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.experimental.vector.insert.nxv16i8.v16i8(<vscale x 16 x i8> [[S:%.*]], <16 x i8> [[N:%.*]], i64 0)
|
||||
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.vector.insert.nxv16i8.v16i8(<vscale x 16 x i8> [[S:%.*]], <16 x i8> [[N:%.*]], i64 0)
|
||||
// CHECK-NEXT: ret <vscale x 16 x i8> [[TMP0]]
|
||||
//
|
||||
// CPP-CHECK-LABEL: @_Z19test_svset_neonq_u8u11__SVUint8_t12__Uint8x16_t(
|
||||
// CPP-CHECK-NEXT: entry:
|
||||
// CPP-CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.experimental.vector.insert.nxv16i8.v16i8(<vscale x 16 x i8> [[S:%.*]], <16 x i8> [[N:%.*]], i64 0)
|
||||
// CPP-CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.vector.insert.nxv16i8.v16i8(<vscale x 16 x i8> [[S:%.*]], <16 x i8> [[N:%.*]], i64 0)
|
||||
// CPP-CHECK-NEXT: ret <vscale x 16 x i8> [[TMP0]]
|
||||
//
|
||||
svuint8_t test_svset_neonq_u8(svuint8_t s, uint8x16_t n) {
|
||||
|
@ -86,12 +86,12 @@ svuint8_t test_svset_neonq_u8(svuint8_t s, uint8x16_t n) {
|
|||
|
||||
// CHECK-LABEL: @test_svset_neonq_u16(
|
||||
// CHECK-NEXT: entry:
|
||||
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.experimental.vector.insert.nxv8i16.v8i16(<vscale x 8 x i16> [[S:%.*]], <8 x i16> [[N:%.*]], i64 0)
|
||||
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.vector.insert.nxv8i16.v8i16(<vscale x 8 x i16> [[S:%.*]], <8 x i16> [[N:%.*]], i64 0)
|
||||
// CHECK-NEXT: ret <vscale x 8 x i16> [[TMP0]]
|
||||
//
|
||||
// CPP-CHECK-LABEL: @_Z20test_svset_neonq_u16u12__SVUint16_t12__Uint16x8_t(
|
||||
// CPP-CHECK-NEXT: entry:
|
||||
// CPP-CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.experimental.vector.insert.nxv8i16.v8i16(<vscale x 8 x i16> [[S:%.*]], <8 x i16> [[N:%.*]], i64 0)
|
||||
// CPP-CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.vector.insert.nxv8i16.v8i16(<vscale x 8 x i16> [[S:%.*]], <8 x i16> [[N:%.*]], i64 0)
|
||||
// CPP-CHECK-NEXT: ret <vscale x 8 x i16> [[TMP0]]
|
||||
//
|
||||
svuint16_t test_svset_neonq_u16(svuint16_t s, uint16x8_t n) {
|
||||
|
@ -100,12 +100,12 @@ svuint16_t test_svset_neonq_u16(svuint16_t s, uint16x8_t n) {
|
|||
|
||||
// CHECK-LABEL: @test_svset_neonq_u32(
|
||||
// CHECK-NEXT: entry:
|
||||
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vector.insert.nxv4i32.v4i32(<vscale x 4 x i32> [[S:%.*]], <4 x i32> [[N:%.*]], i64 0)
|
||||
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.vector.insert.nxv4i32.v4i32(<vscale x 4 x i32> [[S:%.*]], <4 x i32> [[N:%.*]], i64 0)
|
||||
// CHECK-NEXT: ret <vscale x 4 x i32> [[TMP0]]
|
||||
//
|
||||
// CPP-CHECK-LABEL: @_Z20test_svset_neonq_u32u12__SVUint32_t12__Uint32x4_t(
|
||||
// CPP-CHECK-NEXT: entry:
|
||||
// CPP-CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vector.insert.nxv4i32.v4i32(<vscale x 4 x i32> [[S:%.*]], <4 x i32> [[N:%.*]], i64 0)
|
||||
// CPP-CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.vector.insert.nxv4i32.v4i32(<vscale x 4 x i32> [[S:%.*]], <4 x i32> [[N:%.*]], i64 0)
|
||||
// CPP-CHECK-NEXT: ret <vscale x 4 x i32> [[TMP0]]
|
||||
//
|
||||
svuint32_t test_svset_neonq_u32(svuint32_t s, uint32x4_t n) {
|
||||
|
@ -114,12 +114,12 @@ svuint32_t test_svset_neonq_u32(svuint32_t s, uint32x4_t n) {
|
|||
|
||||
// CHECK-LABEL: @test_svset_neonq_u64(
|
||||
// CHECK-NEXT: entry:
|
||||
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.experimental.vector.insert.nxv2i64.v2i64(<vscale x 2 x i64> [[S:%.*]], <2 x i64> [[N:%.*]], i64 0)
|
||||
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.vector.insert.nxv2i64.v2i64(<vscale x 2 x i64> [[S:%.*]], <2 x i64> [[N:%.*]], i64 0)
|
||||
// CHECK-NEXT: ret <vscale x 2 x i64> [[TMP0]]
|
||||
//
|
||||
// CPP-CHECK-LABEL: @_Z20test_svset_neonq_u64u12__SVUint64_t12__Uint64x2_t(
|
||||
// CPP-CHECK-NEXT: entry:
|
||||
// CPP-CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.experimental.vector.insert.nxv2i64.v2i64(<vscale x 2 x i64> [[S:%.*]], <2 x i64> [[N:%.*]], i64 0)
|
||||
// CPP-CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.vector.insert.nxv2i64.v2i64(<vscale x 2 x i64> [[S:%.*]], <2 x i64> [[N:%.*]], i64 0)
|
||||
// CPP-CHECK-NEXT: ret <vscale x 2 x i64> [[TMP0]]
|
||||
//
|
||||
svuint64_t test_svset_neonq_u64(svuint64_t s, uint64x2_t n) {
|
||||
|
@ -128,12 +128,12 @@ svuint64_t test_svset_neonq_u64(svuint64_t s, uint64x2_t n) {
|
|||
|
||||
// CHECK-LABEL: @test_svset_neonq_f16(
|
||||
// CHECK-NEXT: entry:
|
||||
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.experimental.vector.insert.nxv8f16.v8f16(<vscale x 8 x half> [[S:%.*]], <8 x half> [[N:%.*]], i64 0)
|
||||
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.vector.insert.nxv8f16.v8f16(<vscale x 8 x half> [[S:%.*]], <8 x half> [[N:%.*]], i64 0)
|
||||
// CHECK-NEXT: ret <vscale x 8 x half> [[TMP0]]
|
||||
//
|
||||
// CPP-CHECK-LABEL: @_Z20test_svset_neonq_f16u13__SVFloat16_t13__Float16x8_t(
|
||||
// CPP-CHECK-NEXT: entry:
|
||||
// CPP-CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.experimental.vector.insert.nxv8f16.v8f16(<vscale x 8 x half> [[S:%.*]], <8 x half> [[N:%.*]], i64 0)
|
||||
// CPP-CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.vector.insert.nxv8f16.v8f16(<vscale x 8 x half> [[S:%.*]], <8 x half> [[N:%.*]], i64 0)
|
||||
// CPP-CHECK-NEXT: ret <vscale x 8 x half> [[TMP0]]
|
||||
//
|
||||
svfloat16_t test_svset_neonq_f16(svfloat16_t s, float16x8_t n) {
|
||||
|
@ -142,12 +142,12 @@ svfloat16_t test_svset_neonq_f16(svfloat16_t s, float16x8_t n) {
|
|||
|
||||
// CHECK-LABEL: @test_svset_neonq_f32(
|
||||
// CHECK-NEXT: entry:
|
||||
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.experimental.vector.insert.nxv4f32.v4f32(<vscale x 4 x float> [[S:%.*]], <4 x float> [[N:%.*]], i64 0)
|
||||
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.vector.insert.nxv4f32.v4f32(<vscale x 4 x float> [[S:%.*]], <4 x float> [[N:%.*]], i64 0)
|
||||
// CHECK-NEXT: ret <vscale x 4 x float> [[TMP0]]
|
||||
//
|
||||
// CPP-CHECK-LABEL: @_Z20test_svset_neonq_f32u13__SVFloat32_t13__Float32x4_t(
|
||||
// CPP-CHECK-NEXT: entry:
|
||||
// CPP-CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.experimental.vector.insert.nxv4f32.v4f32(<vscale x 4 x float> [[S:%.*]], <4 x float> [[N:%.*]], i64 0)
|
||||
// CPP-CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.vector.insert.nxv4f32.v4f32(<vscale x 4 x float> [[S:%.*]], <4 x float> [[N:%.*]], i64 0)
|
||||
// CPP-CHECK-NEXT: ret <vscale x 4 x float> [[TMP0]]
|
||||
//
|
||||
svfloat32_t test_svset_neonq_f32(svfloat32_t s, float32x4_t n) {
|
||||
|
@ -156,12 +156,12 @@ svfloat32_t test_svset_neonq_f32(svfloat32_t s, float32x4_t n) {
|
|||
|
||||
// CHECK-LABEL: @test_svset_neonq_f64(
|
||||
// CHECK-NEXT: entry:
|
||||
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.experimental.vector.insert.nxv2f64.v2f64(<vscale x 2 x double> [[S:%.*]], <2 x double> [[N:%.*]], i64 0)
|
||||
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.vector.insert.nxv2f64.v2f64(<vscale x 2 x double> [[S:%.*]], <2 x double> [[N:%.*]], i64 0)
|
||||
// CHECK-NEXT: ret <vscale x 2 x double> [[TMP0]]
|
||||
//
|
||||
// CPP-CHECK-LABEL: @_Z20test_svset_neonq_f64u13__SVFloat64_t13__Float64x2_t(
|
||||
// CPP-CHECK-NEXT: entry:
|
||||
// CPP-CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.experimental.vector.insert.nxv2f64.v2f64(<vscale x 2 x double> [[S:%.*]], <2 x double> [[N:%.*]], i64 0)
|
||||
// CPP-CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.vector.insert.nxv2f64.v2f64(<vscale x 2 x double> [[S:%.*]], <2 x double> [[N:%.*]], i64 0)
|
||||
// CPP-CHECK-NEXT: ret <vscale x 2 x double> [[TMP0]]
|
||||
//
|
||||
svfloat64_t test_svset_neonq_f64(svfloat64_t s, float64x2_t n) {
|
||||
|
@ -170,12 +170,12 @@ svfloat64_t test_svset_neonq_f64(svfloat64_t s, float64x2_t n) {
|
|||
|
||||
// CHECK-LABEL: @test_svset_neonq_bf16(
|
||||
// CHECK-NEXT: entry:
|
||||
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.experimental.vector.insert.nxv8bf16.v8bf16(<vscale x 8 x bfloat> [[S:%.*]], <8 x bfloat> [[N:%.*]], i64 0)
|
||||
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.vector.insert.nxv8bf16.v8bf16(<vscale x 8 x bfloat> [[S:%.*]], <8 x bfloat> [[N:%.*]], i64 0)
|
||||
// CHECK-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
|
||||
//
|
||||
// CPP-CHECK-LABEL: @_Z21test_svset_neonq_bf16u14__SVBFloat16_t14__Bfloat16x8_t(
|
||||
// CPP-CHECK-NEXT: entry:
|
||||
// CPP-CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.experimental.vector.insert.nxv8bf16.v8bf16(<vscale x 8 x bfloat> [[S:%.*]], <8 x bfloat> [[N:%.*]], i64 0)
|
||||
// CPP-CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.vector.insert.nxv8bf16.v8bf16(<vscale x 8 x bfloat> [[S:%.*]], <8 x bfloat> [[N:%.*]], i64 0)
|
||||
// CPP-CHECK-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
|
||||
//
|
||||
svbfloat16_t test_svset_neonq_bf16(svbfloat16_t s, bfloat16x8_t n) {
@ -32,21 +32,21 @@ DEFINE_STRUCT(bool)
// CHECK-128-NEXT: entry:
// CHECK-128-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [[STRUCT_STRUCT_INT64:%.*]], %struct.struct_int64* [[S:%.*]], i64 0, i32 1, i64 0
// CHECK-128-NEXT: [[TMP0:%.*]] = load <2 x i64>, <2 x i64>* [[ARRAYIDX]], align 16, !tbaa [[TBAA6:![0-9]+]]
// CHECK-128-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 2 x i64> @llvm.experimental.vector.insert.nxv2i64.v2i64(<vscale x 2 x i64> undef, <2 x i64> [[TMP0]], i64 0)
// CHECK-128-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 2 x i64> @llvm.vector.insert.nxv2i64.v2i64(<vscale x 2 x i64> undef, <2 x i64> [[TMP0]], i64 0)
// CHECK-128-NEXT: ret <vscale x 2 x i64> [[CASTSCALABLESVE]]
//
// CHECK-256-LABEL: @read_int64(
// CHECK-256-NEXT: entry:
// CHECK-256-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [[STRUCT_STRUCT_INT64:%.*]], %struct.struct_int64* [[S:%.*]], i64 0, i32 1, i64 0
// CHECK-256-NEXT: [[TMP0:%.*]] = load <4 x i64>, <4 x i64>* [[ARRAYIDX]], align 16, !tbaa [[TBAA6:![0-9]+]]
// CHECK-256-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 2 x i64> @llvm.experimental.vector.insert.nxv2i64.v4i64(<vscale x 2 x i64> undef, <4 x i64> [[TMP0]], i64 0)
// CHECK-256-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 2 x i64> @llvm.vector.insert.nxv2i64.v4i64(<vscale x 2 x i64> undef, <4 x i64> [[TMP0]], i64 0)
// CHECK-256-NEXT: ret <vscale x 2 x i64> [[CASTSCALABLESVE]]
//
// CHECK-512-LABEL: @read_int64(
// CHECK-512-NEXT: entry:
// CHECK-512-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [[STRUCT_STRUCT_INT64:%.*]], %struct.struct_int64* [[S:%.*]], i64 0, i32 1, i64 0
// CHECK-512-NEXT: [[TMP0:%.*]] = load <8 x i64>, <8 x i64>* [[ARRAYIDX]], align 16, !tbaa [[TBAA6:![0-9]+]]
// CHECK-512-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 2 x i64> @llvm.experimental.vector.insert.nxv2i64.v8i64(<vscale x 2 x i64> undef, <8 x i64> [[TMP0]], i64 0)
// CHECK-512-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 2 x i64> @llvm.vector.insert.nxv2i64.v8i64(<vscale x 2 x i64> undef, <8 x i64> [[TMP0]], i64 0)
// CHECK-512-NEXT: ret <vscale x 2 x i64> [[CASTSCALABLESVE]]
//
svint64_t read_int64(struct struct_int64 *s) {
@ -55,21 +55,21 @@ svint64_t read_int64(struct struct_int64 *s) {
// CHECK-128-LABEL: @write_int64(
// CHECK-128-NEXT: entry:
// CHECK-128-NEXT: [[CASTFIXEDSVE:%.*]] = call <2 x i64> @llvm.experimental.vector.extract.v2i64.nxv2i64(<vscale x 2 x i64> [[X:%.*]], i64 0)
// CHECK-128-NEXT: [[CASTFIXEDSVE:%.*]] = call <2 x i64> @llvm.vector.extract.v2i64.nxv2i64(<vscale x 2 x i64> [[X:%.*]], i64 0)
// CHECK-128-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [[STRUCT_STRUCT_INT64:%.*]], %struct.struct_int64* [[S:%.*]], i64 0, i32 1, i64 0
// CHECK-128-NEXT: store <2 x i64> [[CASTFIXEDSVE]], <2 x i64>* [[ARRAYIDX]], align 16, !tbaa [[TBAA6]]
// CHECK-128-NEXT: ret void
//
// CHECK-256-LABEL: @write_int64(
// CHECK-256-NEXT: entry:
// CHECK-256-NEXT: [[CASTFIXEDSVE:%.*]] = call <4 x i64> @llvm.experimental.vector.extract.v4i64.nxv2i64(<vscale x 2 x i64> [[X:%.*]], i64 0)
// CHECK-256-NEXT: [[CASTFIXEDSVE:%.*]] = call <4 x i64> @llvm.vector.extract.v4i64.nxv2i64(<vscale x 2 x i64> [[X:%.*]], i64 0)
// CHECK-256-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [[STRUCT_STRUCT_INT64:%.*]], %struct.struct_int64* [[S:%.*]], i64 0, i32 1, i64 0
// CHECK-256-NEXT: store <4 x i64> [[CASTFIXEDSVE]], <4 x i64>* [[ARRAYIDX]], align 16, !tbaa [[TBAA6]]
// CHECK-256-NEXT: ret void
//
// CHECK-512-LABEL: @write_int64(
// CHECK-512-NEXT: entry:
// CHECK-512-NEXT: [[CASTFIXEDSVE:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64(<vscale x 2 x i64> [[X:%.*]], i64 0)
// CHECK-512-NEXT: [[CASTFIXEDSVE:%.*]] = call <8 x i64> @llvm.vector.extract.v8i64.nxv2i64(<vscale x 2 x i64> [[X:%.*]], i64 0)
// CHECK-512-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [[STRUCT_STRUCT_INT64:%.*]], %struct.struct_int64* [[S:%.*]], i64 0, i32 1, i64 0
// CHECK-512-NEXT: store <8 x i64> [[CASTFIXEDSVE]], <8 x i64>* [[ARRAYIDX]], align 16, !tbaa [[TBAA6]]
// CHECK-512-NEXT: ret void
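Taken together, the two renamed intrinsics form the fixed/scalable round trip that these struct read/write checks go through; a compact standalone sketch (illustrative only, 128-bit case, invented function name):

; Fixed -> scalable: insert the fixed value into an undef scalable vector at index 0.
; Scalable -> fixed: extract the low fixed-width part back out at index 0.
define <2 x i64> @fixed_scalable_roundtrip_sketch(<2 x i64> %v) {
entry:
  %vla = call <vscale x 2 x i64> @llvm.vector.insert.nxv2i64.v2i64(<vscale x 2 x i64> undef, <2 x i64> %v, i64 0)
  %vls = call <2 x i64> @llvm.vector.extract.v2i64.nxv2i64(<vscale x 2 x i64> %vla, i64 0)
  ret <2 x i64> %vls
}

declare <vscale x 2 x i64> @llvm.vector.insert.nxv2i64.v2i64(<vscale x 2 x i64>, <2 x i64>, i64)
declare <2 x i64> @llvm.vector.extract.v2i64.nxv2i64(<vscale x 2 x i64>, i64)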
|
@ -86,21 +86,21 @@ void write_int64(struct struct_int64 *s, svint64_t x) {
|
|||
// CHECK-128-NEXT: entry:
|
||||
// CHECK-128-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [[STRUCT_STRUCT_FLOAT64:%.*]], %struct.struct_float64* [[S:%.*]], i64 0, i32 1, i64 0
|
||||
// CHECK-128-NEXT: [[TMP0:%.*]] = load <2 x double>, <2 x double>* [[ARRAYIDX]], align 16, !tbaa [[TBAA6]]
|
||||
// CHECK-128-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 2 x double> @llvm.experimental.vector.insert.nxv2f64.v2f64(<vscale x 2 x double> undef, <2 x double> [[TMP0]], i64 0)
|
||||
// CHECK-128-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 2 x double> @llvm.vector.insert.nxv2f64.v2f64(<vscale x 2 x double> undef, <2 x double> [[TMP0]], i64 0)
|
||||
// CHECK-128-NEXT: ret <vscale x 2 x double> [[CASTSCALABLESVE]]
|
||||
//
|
||||
// CHECK-256-LABEL: @read_float64(
|
||||
// CHECK-256-NEXT: entry:
|
||||
// CHECK-256-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [[STRUCT_STRUCT_FLOAT64:%.*]], %struct.struct_float64* [[S:%.*]], i64 0, i32 1, i64 0
|
||||
// CHECK-256-NEXT: [[TMP0:%.*]] = load <4 x double>, <4 x double>* [[ARRAYIDX]], align 16, !tbaa [[TBAA6]]
|
||||
// CHECK-256-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 2 x double> @llvm.experimental.vector.insert.nxv2f64.v4f64(<vscale x 2 x double> undef, <4 x double> [[TMP0]], i64 0)
|
||||
// CHECK-256-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 2 x double> @llvm.vector.insert.nxv2f64.v4f64(<vscale x 2 x double> undef, <4 x double> [[TMP0]], i64 0)
|
||||
// CHECK-256-NEXT: ret <vscale x 2 x double> [[CASTSCALABLESVE]]
|
||||
//
|
||||
// CHECK-512-LABEL: @read_float64(
|
||||
// CHECK-512-NEXT: entry:
|
||||
// CHECK-512-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [[STRUCT_STRUCT_FLOAT64:%.*]], %struct.struct_float64* [[S:%.*]], i64 0, i32 1, i64 0
|
||||
// CHECK-512-NEXT: [[TMP0:%.*]] = load <8 x double>, <8 x double>* [[ARRAYIDX]], align 16, !tbaa [[TBAA6]]
|
||||
// CHECK-512-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 2 x double> @llvm.experimental.vector.insert.nxv2f64.v8f64(<vscale x 2 x double> undef, <8 x double> [[TMP0]], i64 0)
|
||||
// CHECK-512-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 2 x double> @llvm.vector.insert.nxv2f64.v8f64(<vscale x 2 x double> undef, <8 x double> [[TMP0]], i64 0)
|
||||
// CHECK-512-NEXT: ret <vscale x 2 x double> [[CASTSCALABLESVE]]
|
||||
//
|
||||
svfloat64_t read_float64(struct struct_float64 *s) {
|
||||
|
@ -109,21 +109,21 @@ svfloat64_t read_float64(struct struct_float64 *s) {
|
|||
|
||||
// CHECK-128-LABEL: @write_float64(
|
||||
// CHECK-128-NEXT: entry:
|
||||
// CHECK-128-NEXT: [[CASTFIXEDSVE:%.*]] = call <2 x double> @llvm.experimental.vector.extract.v2f64.nxv2f64(<vscale x 2 x double> [[X:%.*]], i64 0)
|
||||
// CHECK-128-NEXT: [[CASTFIXEDSVE:%.*]] = call <2 x double> @llvm.vector.extract.v2f64.nxv2f64(<vscale x 2 x double> [[X:%.*]], i64 0)
|
||||
// CHECK-128-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [[STRUCT_STRUCT_FLOAT64:%.*]], %struct.struct_float64* [[S:%.*]], i64 0, i32 1, i64 0
|
||||
// CHECK-128-NEXT: store <2 x double> [[CASTFIXEDSVE]], <2 x double>* [[ARRAYIDX]], align 16, !tbaa [[TBAA6]]
|
||||
// CHECK-128-NEXT: ret void
|
||||
//
|
||||
// CHECK-256-LABEL: @write_float64(
|
||||
// CHECK-256-NEXT: entry:
|
||||
// CHECK-256-NEXT: [[CASTFIXEDSVE:%.*]] = call <4 x double> @llvm.experimental.vector.extract.v4f64.nxv2f64(<vscale x 2 x double> [[X:%.*]], i64 0)
|
||||
// CHECK-256-NEXT: [[CASTFIXEDSVE:%.*]] = call <4 x double> @llvm.vector.extract.v4f64.nxv2f64(<vscale x 2 x double> [[X:%.*]], i64 0)
|
||||
// CHECK-256-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [[STRUCT_STRUCT_FLOAT64:%.*]], %struct.struct_float64* [[S:%.*]], i64 0, i32 1, i64 0
|
||||
// CHECK-256-NEXT: store <4 x double> [[CASTFIXEDSVE]], <4 x double>* [[ARRAYIDX]], align 16, !tbaa [[TBAA6]]
|
||||
// CHECK-256-NEXT: ret void
|
||||
//
|
||||
// CHECK-512-LABEL: @write_float64(
|
||||
// CHECK-512-NEXT: entry:
|
||||
// CHECK-512-NEXT: [[CASTFIXEDSVE:%.*]] = call <8 x double> @llvm.experimental.vector.extract.v8f64.nxv2f64(<vscale x 2 x double> [[X:%.*]], i64 0)
|
||||
// CHECK-512-NEXT: [[CASTFIXEDSVE:%.*]] = call <8 x double> @llvm.vector.extract.v8f64.nxv2f64(<vscale x 2 x double> [[X:%.*]], i64 0)
|
||||
// CHECK-512-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [[STRUCT_STRUCT_FLOAT64:%.*]], %struct.struct_float64* [[S:%.*]], i64 0, i32 1, i64 0
|
||||
// CHECK-512-NEXT: store <8 x double> [[CASTFIXEDSVE]], <8 x double>* [[ARRAYIDX]], align 16, !tbaa [[TBAA6]]
|
||||
// CHECK-512-NEXT: ret void
|
||||
|
@ -140,21 +140,21 @@ void write_float64(struct struct_float64 *s, svfloat64_t x) {
|
|||
// CHECK-128-NEXT: entry:
|
||||
// CHECK-128-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [[STRUCT_STRUCT_BFLOAT16:%.*]], %struct.struct_bfloat16* [[S:%.*]], i64 0, i32 1, i64 0
|
||||
// CHECK-128-NEXT: [[TMP0:%.*]] = load <8 x bfloat>, <8 x bfloat>* [[ARRAYIDX]], align 16, !tbaa [[TBAA6]]
|
||||
// CHECK-128-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 8 x bfloat> @llvm.experimental.vector.insert.nxv8bf16.v8bf16(<vscale x 8 x bfloat> undef, <8 x bfloat> [[TMP0]], i64 0)
|
||||
// CHECK-128-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 8 x bfloat> @llvm.vector.insert.nxv8bf16.v8bf16(<vscale x 8 x bfloat> undef, <8 x bfloat> [[TMP0]], i64 0)
|
||||
// CHECK-128-NEXT: ret <vscale x 8 x bfloat> [[CASTSCALABLESVE]]
|
||||
//
|
||||
// CHECK-256-LABEL: @read_bfloat16(
|
||||
// CHECK-256-NEXT: entry:
|
||||
// CHECK-256-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [[STRUCT_STRUCT_BFLOAT16:%.*]], %struct.struct_bfloat16* [[S:%.*]], i64 0, i32 1, i64 0
|
||||
// CHECK-256-NEXT: [[TMP0:%.*]] = load <16 x bfloat>, <16 x bfloat>* [[ARRAYIDX]], align 16, !tbaa [[TBAA6]]
|
||||
// CHECK-256-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 8 x bfloat> @llvm.experimental.vector.insert.nxv8bf16.v16bf16(<vscale x 8 x bfloat> undef, <16 x bfloat> [[TMP0]], i64 0)
|
||||
// CHECK-256-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 8 x bfloat> @llvm.vector.insert.nxv8bf16.v16bf16(<vscale x 8 x bfloat> undef, <16 x bfloat> [[TMP0]], i64 0)
|
||||
// CHECK-256-NEXT: ret <vscale x 8 x bfloat> [[CASTSCALABLESVE]]
|
||||
//
|
||||
// CHECK-512-LABEL: @read_bfloat16(
|
||||
// CHECK-512-NEXT: entry:
|
||||
// CHECK-512-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [[STRUCT_STRUCT_BFLOAT16:%.*]], %struct.struct_bfloat16* [[S:%.*]], i64 0, i32 1, i64 0
|
||||
// CHECK-512-NEXT: [[TMP0:%.*]] = load <32 x bfloat>, <32 x bfloat>* [[ARRAYIDX]], align 16, !tbaa [[TBAA6]]
|
||||
// CHECK-512-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 8 x bfloat> @llvm.experimental.vector.insert.nxv8bf16.v32bf16(<vscale x 8 x bfloat> undef, <32 x bfloat> [[TMP0]], i64 0)
|
||||
// CHECK-512-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 8 x bfloat> @llvm.vector.insert.nxv8bf16.v32bf16(<vscale x 8 x bfloat> undef, <32 x bfloat> [[TMP0]], i64 0)
|
||||
// CHECK-512-NEXT: ret <vscale x 8 x bfloat> [[CASTSCALABLESVE]]
|
||||
//
|
||||
svbfloat16_t read_bfloat16(struct struct_bfloat16 *s) {
|
||||
|
@ -163,21 +163,21 @@ svbfloat16_t read_bfloat16(struct struct_bfloat16 *s) {
|
|||
|
||||
// CHECK-128-LABEL: @write_bfloat16(
|
||||
// CHECK-128-NEXT: entry:
|
||||
// CHECK-128-NEXT: [[CASTFIXEDSVE:%.*]] = call <8 x bfloat> @llvm.experimental.vector.extract.v8bf16.nxv8bf16(<vscale x 8 x bfloat> [[X:%.*]], i64 0)
|
||||
// CHECK-128-NEXT: [[CASTFIXEDSVE:%.*]] = call <8 x bfloat> @llvm.vector.extract.v8bf16.nxv8bf16(<vscale x 8 x bfloat> [[X:%.*]], i64 0)
|
||||
// CHECK-128-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [[STRUCT_STRUCT_BFLOAT16:%.*]], %struct.struct_bfloat16* [[S:%.*]], i64 0, i32 1, i64 0
|
||||
// CHECK-128-NEXT: store <8 x bfloat> [[CASTFIXEDSVE]], <8 x bfloat>* [[ARRAYIDX]], align 16, !tbaa [[TBAA6]]
|
||||
// CHECK-128-NEXT: ret void
|
||||
//
|
||||
// CHECK-256-LABEL: @write_bfloat16(
|
||||
// CHECK-256-NEXT: entry:
|
||||
// CHECK-256-NEXT: [[CASTFIXEDSVE:%.*]] = call <16 x bfloat> @llvm.experimental.vector.extract.v16bf16.nxv8bf16(<vscale x 8 x bfloat> [[X:%.*]], i64 0)
|
||||
// CHECK-256-NEXT: [[CASTFIXEDSVE:%.*]] = call <16 x bfloat> @llvm.vector.extract.v16bf16.nxv8bf16(<vscale x 8 x bfloat> [[X:%.*]], i64 0)
|
||||
// CHECK-256-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [[STRUCT_STRUCT_BFLOAT16:%.*]], %struct.struct_bfloat16* [[S:%.*]], i64 0, i32 1, i64 0
|
||||
// CHECK-256-NEXT: store <16 x bfloat> [[CASTFIXEDSVE]], <16 x bfloat>* [[ARRAYIDX]], align 16, !tbaa [[TBAA6]]
|
||||
// CHECK-256-NEXT: ret void
|
||||
//
|
||||
// CHECK-512-LABEL: @write_bfloat16(
|
||||
// CHECK-512-NEXT: entry:
|
||||
// CHECK-512-NEXT: [[CASTFIXEDSVE:%.*]] = call <32 x bfloat> @llvm.experimental.vector.extract.v32bf16.nxv8bf16(<vscale x 8 x bfloat> [[X:%.*]], i64 0)
|
||||
// CHECK-512-NEXT: [[CASTFIXEDSVE:%.*]] = call <32 x bfloat> @llvm.vector.extract.v32bf16.nxv8bf16(<vscale x 8 x bfloat> [[X:%.*]], i64 0)
|
||||
// CHECK-512-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [[STRUCT_STRUCT_BFLOAT16:%.*]], %struct.struct_bfloat16* [[S:%.*]], i64 0, i32 1, i64 0
|
||||
// CHECK-512-NEXT: store <32 x bfloat> [[CASTFIXEDSVE]], <32 x bfloat>* [[ARRAYIDX]], align 16, !tbaa [[TBAA6]]
|
||||
// CHECK-512-NEXT: ret void
|
||||
|
@ -194,7 +194,7 @@ void write_bfloat16(struct struct_bfloat16 *s, svbfloat16_t x) {
|
|||
// CHECK-128-NEXT: entry:
|
||||
// CHECK-128-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [[STRUCT_STRUCT_BOOL:%.*]], %struct.struct_bool* [[S:%.*]], i64 0, i32 1, i64 0
|
||||
// CHECK-128-NEXT: [[TMP0:%.*]] = load <2 x i8>, <2 x i8>* [[ARRAYIDX]], align 2, !tbaa [[TBAA6]]
|
||||
// CHECK-128-NEXT: [[CASTFIXEDSVE:%.*]] = call <vscale x 2 x i8> @llvm.experimental.vector.insert.nxv2i8.v2i8(<vscale x 2 x i8> undef, <2 x i8> [[TMP0]], i64 0)
|
||||
// CHECK-128-NEXT: [[CASTFIXEDSVE:%.*]] = call <vscale x 2 x i8> @llvm.vector.insert.nxv2i8.v2i8(<vscale x 2 x i8> undef, <2 x i8> [[TMP0]], i64 0)
|
||||
// CHECK-128-NEXT: [[TMP1:%.*]] = bitcast <vscale x 2 x i8> [[CASTFIXEDSVE]] to <vscale x 16 x i1>
|
||||
// CHECK-128-NEXT: ret <vscale x 16 x i1> [[TMP1]]
|
||||
//
|
||||
|
@ -202,7 +202,7 @@ void write_bfloat16(struct struct_bfloat16 *s, svbfloat16_t x) {
|
|||
// CHECK-256-NEXT: entry:
|
||||
// CHECK-256-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [[STRUCT_STRUCT_BOOL:%.*]], %struct.struct_bool* [[S:%.*]], i64 0, i32 1, i64 0
|
||||
// CHECK-256-NEXT: [[TMP0:%.*]] = load <4 x i8>, <4 x i8>* [[ARRAYIDX]], align 2, !tbaa [[TBAA6]]
|
||||
// CHECK-256-NEXT: [[CASTFIXEDSVE:%.*]] = call <vscale x 2 x i8> @llvm.experimental.vector.insert.nxv2i8.v4i8(<vscale x 2 x i8> undef, <4 x i8> [[TMP0]], i64 0)
|
||||
// CHECK-256-NEXT: [[CASTFIXEDSVE:%.*]] = call <vscale x 2 x i8> @llvm.vector.insert.nxv2i8.v4i8(<vscale x 2 x i8> undef, <4 x i8> [[TMP0]], i64 0)
|
||||
// CHECK-256-NEXT: [[TMP1:%.*]] = bitcast <vscale x 2 x i8> [[CASTFIXEDSVE]] to <vscale x 16 x i1>
|
||||
// CHECK-256-NEXT: ret <vscale x 16 x i1> [[TMP1]]
|
||||
//
|
||||
|
@ -210,7 +210,7 @@ void write_bfloat16(struct struct_bfloat16 *s, svbfloat16_t x) {
|
|||
// CHECK-512-NEXT: entry:
|
||||
// CHECK-512-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [[STRUCT_STRUCT_BOOL:%.*]], %struct.struct_bool* [[S:%.*]], i64 0, i32 1, i64 0
|
||||
// CHECK-512-NEXT: [[TMP0:%.*]] = load <8 x i8>, <8 x i8>* [[ARRAYIDX]], align 2, !tbaa [[TBAA6]]
|
||||
// CHECK-512-NEXT: [[CASTFIXEDSVE:%.*]] = call <vscale x 2 x i8> @llvm.experimental.vector.insert.nxv2i8.v8i8(<vscale x 2 x i8> undef, <8 x i8> [[TMP0]], i64 0)
|
||||
// CHECK-512-NEXT: [[CASTFIXEDSVE:%.*]] = call <vscale x 2 x i8> @llvm.vector.insert.nxv2i8.v8i8(<vscale x 2 x i8> undef, <8 x i8> [[TMP0]], i64 0)
|
||||
// CHECK-512-NEXT: [[TMP1:%.*]] = bitcast <vscale x 2 x i8> [[CASTFIXEDSVE]] to <vscale x 16 x i1>
|
||||
// CHECK-512-NEXT: ret <vscale x 16 x i1> [[TMP1]]
|
||||
//
|
||||
|
@ -221,7 +221,7 @@ svbool_t read_bool(struct struct_bool *s) {
|
|||
// CHECK-128-LABEL: @write_bool(
|
||||
// CHECK-128-NEXT: entry:
|
||||
// CHECK-128-NEXT: [[TMP0:%.*]] = bitcast <vscale x 16 x i1> %x to <vscale x 2 x i8>
|
||||
// CHECK-128-NEXT: [[CASTFIXEDSVE:%.*]] = call <2 x i8> @llvm.experimental.vector.extract.v2i8.nxv2i8(<vscale x 2 x i8> [[TMP0]], i64 0)
|
||||
// CHECK-128-NEXT: [[CASTFIXEDSVE:%.*]] = call <2 x i8> @llvm.vector.extract.v2i8.nxv2i8(<vscale x 2 x i8> [[TMP0]], i64 0)
|
||||
// CHECK-128-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [[STRUCT_STRUCT_BOOL:%.*]], %struct.struct_bool* [[S:%.*]], i64 0, i32 1, i64 0
|
||||
// CHECK-128-NEXT: store <2 x i8> [[CASTFIXEDSVE]], <2 x i8>* [[ARRAYIDX]], align 2, !tbaa [[TBAA6]]
|
||||
// CHECK-128-NEXT: ret void
|
||||
|
@ -229,7 +229,7 @@ svbool_t read_bool(struct struct_bool *s) {
|
|||
// CHECK-256-LABEL: @write_bool(
|
||||
// CHECK-256-NEXT: entry:
|
||||
// CHECK-256-NEXT: [[TMP0:%.*]] = bitcast <vscale x 16 x i1> %x to <vscale x 2 x i8>
|
||||
// CHECK-256-NEXT: [[CASTFIXEDSVE:%.*]] = call <4 x i8> @llvm.experimental.vector.extract.v4i8.nxv2i8(<vscale x 2 x i8> [[TMP0]], i64 0)
|
||||
// CHECK-256-NEXT: [[CASTFIXEDSVE:%.*]] = call <4 x i8> @llvm.vector.extract.v4i8.nxv2i8(<vscale x 2 x i8> [[TMP0]], i64 0)
|
||||
// CHECK-256-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [[STRUCT_STRUCT_BOOL:%.*]], %struct.struct_bool* [[S:%.*]], i64 0, i32 1, i64 0
|
||||
// CHECK-256-NEXT: store <4 x i8> [[CASTFIXEDSVE]], <4 x i8>* [[ARRAYIDX]], align 2, !tbaa [[TBAA6]]
|
||||
// CHECK-256-NEXT: ret void
|
||||
|
@ -237,7 +237,7 @@ svbool_t read_bool(struct struct_bool *s) {
|
|||
// CHECK-512-LABEL: @write_bool(
|
||||
// CHECK-512-NEXT: entry:
|
||||
// CHECK-512-NEXT: [[TMP0:%.*]] = bitcast <vscale x 16 x i1> %x to <vscale x 2 x i8>
|
||||
// CHECK-512-NEXT: [[CASTFIXEDSVE:%.*]] = call <8 x i8> @llvm.experimental.vector.extract.v8i8.nxv2i8(<vscale x 2 x i8> [[TMP0]], i64 0)
|
||||
// CHECK-512-NEXT: [[CASTFIXEDSVE:%.*]] = call <8 x i8> @llvm.vector.extract.v8i8.nxv2i8(<vscale x 2 x i8> [[TMP0]], i64 0)
|
||||
// CHECK-512-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [[STRUCT_STRUCT_BOOL:%.*]], %struct.struct_bool* [[S:%.*]], i64 0, i32 1, i64 0
|
||||
// CHECK-512-NEXT: store <8 x i8> [[CASTFIXEDSVE]], <8 x i8>* [[ARRAYIDX]], align 2, !tbaa [[TBAA6]]
|
||||
// CHECK-512-NEXT: ret void
@ -45,7 +45,7 @@ fixed_int32_t fixed_callee(fixed_int32_t x) {
// CHECK-NEXT: [[TMP0:%.*]] = bitcast <16 x i32>* [[COERCE1]] to <vscale x 4 x i32>*
// CHECK-NEXT: store <vscale x 4 x i32> [[X:%.*]], <vscale x 4 x i32>* [[TMP0]], align 16
// CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, <16 x i32>* [[COERCE1]], align 16, !tbaa [[TBAA6:![0-9]+]]
// CHECK-NEXT: [[CASTSCALABLESVE2:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vector.insert.nxv4i32.v16i32(<vscale x 4 x i32> undef, <16 x i32> [[TMP1]], i64 0)
// CHECK-NEXT: [[CASTSCALABLESVE2:%.*]] = call <vscale x 4 x i32> @llvm.vector.insert.nxv4i32.v16i32(<vscale x 4 x i32> undef, <16 x i32> [[TMP1]], i64 0)
// CHECK-NEXT: ret <vscale x 4 x i32> [[CASTSCALABLESVE2]]
//
svint32_t sizeless_caller(svint32_t x) {
@ -63,7 +63,7 @@ fixed_bool_t from_svbool_t(svbool_t type) {
// CHECK-LABEL: @lax_cast(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = alloca <16 x i32>, align 64
// CHECK-NEXT: [[CASTFIXEDSVE:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32(<vscale x 4 x i32> [[TYPE_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[CASTFIXEDSVE:%.*]] = call <16 x i32> @llvm.vector.extract.v16i32.nxv4i32(<vscale x 4 x i32> [[TYPE_COERCE:%.*]], i64 0)
// CHECK-NEXT: store <16 x i32> [[CASTFIXEDSVE]], <16 x i32>* [[TMP0:%.*]], align 64, !tbaa [[TBAA6:![0-9]+]]
// CHECK-NEXT: [[TMP1:%.*]] = bitcast <16 x i32>* [[TMP0]] to <vscale x 2 x i64>*
// CHECK-NEXT: [[TMP2:%.*]] = load <vscale x 2 x i64>, <vscale x 2 x i64>* [[TMP1]], align 64, !tbaa [[TBAA6]]
@ -76,7 +76,7 @@ svint64_t lax_cast(fixed_int32_t type) {
// CHECK-LABEL: @to_svint32_t__from_gnu_int32_t(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TYPE:%.*]] = load <16 x i32>, <16 x i32>* [[TMP0:%.*]], align 16, !tbaa [[TBAA6:![0-9]+]]
// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vector.insert.nxv4i32.v16i32(<vscale x 4 x i32> undef, <16 x i32> [[TYPE]], i64 0)
// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 4 x i32> @llvm.vector.insert.nxv4i32.v16i32(<vscale x 4 x i32> undef, <16 x i32> [[TYPE]], i64 0)
// CHECK-NEXT: ret <vscale x 4 x i32> [[CASTSCALABLESVE]]
//
svint32_t to_svint32_t__from_gnu_int32_t(gnu_int32_t type) {
@ -85,7 +85,7 @@ svint32_t to_svint32_t__from_gnu_int32_t(gnu_int32_t type) {
// CHECK-LABEL: @from_svint32_t__to_gnu_int32_t(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[CASTFIXEDSVE:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32(<vscale x 4 x i32> [[TYPE:%.*]], i64 0)
// CHECK-NEXT: [[CASTFIXEDSVE:%.*]] = call <16 x i32> @llvm.vector.extract.v16i32.nxv4i32(<vscale x 4 x i32> [[TYPE:%.*]], i64 0)
// CHECK-NEXT: store <16 x i32> [[CASTFIXEDSVE]], <16 x i32>* [[AGG_RESULT:%.*]], align 16, !tbaa [[TBAA6]]
// CHECK-NEXT: ret void
//
@ -96,7 +96,7 @@ gnu_int32_t from_svint32_t__to_gnu_int32_t(svint32_t type) {
// CHECK-LABEL: @to_fixed_int32_t__from_gnu_int32_t(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TYPE:%.*]] = load <16 x i32>, <16 x i32>* [[TMP0:%.*]], align 16, !tbaa [[TBAA6]]
// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vector.insert.nxv4i32.v16i32(<vscale x 4 x i32> undef, <16 x i32> [[TYPE]], i64 0)
// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 4 x i32> @llvm.vector.insert.nxv4i32.v16i32(<vscale x 4 x i32> undef, <16 x i32> [[TYPE]], i64 0)
// CHECK-NEXT: ret <vscale x 4 x i32> [[CASTSCALABLESVE]]
//
fixed_int32_t to_fixed_int32_t__from_gnu_int32_t(gnu_int32_t type) {
@ -105,7 +105,7 @@ fixed_int32_t to_fixed_int32_t__from_gnu_int32_t(gnu_int32_t type) {
// CHECK-LABEL: @from_fixed_int32_t__to_gnu_int32_t(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TYPE:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32(<vscale x 4 x i32> [[TYPE_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[TYPE:%.*]] = call <16 x i32> @llvm.vector.extract.v16i32.nxv4i32(<vscale x 4 x i32> [[TYPE_COERCE:%.*]], i64 0)
// CHECK-NEXT: store <16 x i32> [[TYPE]], <16 x i32>* [[AGG_RESULT:%.*]], align 16, !tbaa [[TBAA6]]
// CHECK-NEXT: ret void
//
@ -24,23 +24,23 @@ fixed_int32_t global_vec;
|
|||
// CHECK-NEXT: store <vscale x 4 x i32> [[VEC:%.*]], <vscale x 4 x i32>* [[VEC_ADDR]], align 16
|
||||
// CHECK-NEXT: [[TMP0:%.*]] = load <vscale x 16 x i1>, <vscale x 16 x i1>* [[PRED_ADDR]], align 2
|
||||
// CHECK-NEXT: [[TMP1:%.*]] = load <8 x i8>, <8 x i8>* @global_pred, align 2
|
||||
// CHECK-NEXT: [[CASTFIXEDSVE:%.*]] = call <vscale x 2 x i8> @llvm.experimental.vector.insert.nxv2i8.v8i8(<vscale x 2 x i8> undef, <8 x i8> [[TMP1]], i64 0)
|
||||
// CHECK-NEXT: [[CASTFIXEDSVE:%.*]] = call <vscale x 2 x i8> @llvm.vector.insert.nxv2i8.v8i8(<vscale x 2 x i8> undef, <8 x i8> [[TMP1]], i64 0)
|
||||
// CHECK-NEXT: [[TMP2:%.*]] = bitcast <vscale x 2 x i8> [[CASTFIXEDSVE]] to <vscale x 16 x i1>
|
||||
// CHECK-NEXT: [[TMP3:%.*]] = load <8 x i8>, <8 x i8>* @global_pred, align 2
|
||||
// CHECK-NEXT: [[CASTFIXEDSVE2:%.*]] = call <vscale x 2 x i8> @llvm.experimental.vector.insert.nxv2i8.v8i8(<vscale x 2 x i8> undef, <8 x i8> [[TMP3]], i64 0)
|
||||
// CHECK-NEXT: [[CASTFIXEDSVE2:%.*]] = call <vscale x 2 x i8> @llvm.vector.insert.nxv2i8.v8i8(<vscale x 2 x i8> undef, <8 x i8> [[TMP3]], i64 0)
|
||||
// CHECK-NEXT: [[TMP4:%.*]] = bitcast <vscale x 2 x i8> [[CASTFIXEDSVE2]] to <vscale x 16 x i1>
|
||||
// CHECK-NEXT: [[TMP5:%.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.and.z.nxv16i1(<vscale x 16 x i1> [[TMP0]], <vscale x 16 x i1> [[TMP2]], <vscale x 16 x i1> [[TMP4]])
|
||||
// CHECK-NEXT: store <vscale x 16 x i1> [[TMP5]], <vscale x 16 x i1>* [[PG]], align 2
|
||||
// CHECK-NEXT: [[TMP6:%.*]] = load <vscale x 16 x i1>, <vscale x 16 x i1>* [[PG]], align 2
|
||||
// CHECK-NEXT: [[TMP7:%.*]] = load <16 x i32>, <16 x i32>* @global_vec, align 16
|
||||
// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vector.insert.nxv4i32.v16i32(<vscale x 4 x i32> undef, <16 x i32> [[TMP7]], i64 0)
|
||||
// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 4 x i32> @llvm.vector.insert.nxv4i32.v16i32(<vscale x 4 x i32> undef, <16 x i32> [[TMP7]], i64 0)
|
||||
// CHECK-NEXT: [[TMP8:%.*]] = load <vscale x 4 x i32>, <vscale x 4 x i32>* [[VEC_ADDR]], align 16
|
||||
// CHECK-NEXT: [[TMP9:%.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[TMP6]])
|
||||
// CHECK-NEXT: [[TMP10:%.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.add.nxv4i32(<vscale x 4 x i1> [[TMP9]], <vscale x 4 x i32> [[CASTSCALABLESVE]], <vscale x 4 x i32> [[TMP8]])
|
||||
// CHECK-NEXT: [[CASTFIXEDSVE3:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32(<vscale x 4 x i32> [[TMP10]], i64 0)
|
||||
// CHECK-NEXT: [[CASTFIXEDSVE3:%.*]] = call <16 x i32> @llvm.vector.extract.v16i32.nxv4i32(<vscale x 4 x i32> [[TMP10]], i64 0)
|
||||
// CHECK-NEXT: store <16 x i32> [[CASTFIXEDSVE3]], <16 x i32>* [[RETVAL]], align 16
|
||||
// CHECK-NEXT: [[TMP11:%.*]] = load <16 x i32>, <16 x i32>* [[RETVAL]], align 16
|
||||
// CHECK-NEXT: [[CASTSCALABLESVE4:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vector.insert.nxv4i32.v16i32(<vscale x 4 x i32> undef, <16 x i32> [[TMP11]], i64 0)
|
||||
// CHECK-NEXT: [[CASTSCALABLESVE4:%.*]] = call <vscale x 4 x i32> @llvm.vector.insert.nxv4i32.v16i32(<vscale x 4 x i32> undef, <16 x i32> [[TMP11]], i64 0)
|
||||
// CHECK-NEXT: ret <vscale x 4 x i32> [[CASTSCALABLESVE4]]
|
||||
//
|
||||
fixed_int32_t foo(svbool_t pred, svint32_t vec) {
|
||||
|
@ -57,7 +57,7 @@ fixed_int32_t foo(svbool_t pred, svint32_t vec) {
|
|||
// CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, <16 x i32>* [[TMP0]], align 16
|
||||
// CHECK-NEXT: store <16 x i32> [[TMP1]], <16 x i32>* [[RETVAL]], align 16
|
||||
// CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, <16 x i32>* [[RETVAL]], align 16
|
||||
// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vector.insert.nxv4i32.v16i32(<vscale x 4 x i32> undef, <16 x i32> [[TMP2]], i64 0)
|
||||
// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 4 x i32> @llvm.vector.insert.nxv4i32.v16i32(<vscale x 4 x i32> undef, <16 x i32> [[TMP2]], i64 0)
|
||||
// CHECK-NEXT: ret <vscale x 4 x i32> [[CASTSCALABLESVE]]
|
||||
//
|
||||
fixed_int32_t test_ptr_to_global() {
|
||||
|
@ -78,7 +78,7 @@ fixed_int32_t test_ptr_to_global() {
|
|||
// CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, <16 x i32>* [[ARRAYIDX]], align 16
|
||||
// CHECK-NEXT: store <16 x i32> [[TMP1]], <16 x i32>* [[RETVAL]], align 16
|
||||
// CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, <16 x i32>* [[RETVAL]], align 16
|
||||
// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vector.insert.nxv4i32.v16i32(<vscale x 4 x i32> undef, <16 x i32> [[TMP2]], i64 0)
|
||||
// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 4 x i32> @llvm.vector.insert.nxv4i32.v16i32(<vscale x 4 x i32> undef, <16 x i32> [[TMP2]], i64 0)
|
||||
// CHECK-NEXT: ret <vscale x 4 x i32> [[CASTSCALABLESVE]]
|
||||
//
|
||||
fixed_int32_t array_arg(fixed_int32_t arr[]) {
|
||||
|
@ -96,7 +96,7 @@ fixed_int32_t array_arg(fixed_int32_t arr[]) {
|
|||
// CHECK-NEXT: [[TMP1:%.*]] = load <8 x i8>, <8 x i8>* [[TMP0]], align 2
|
||||
// CHECK-NEXT: store <8 x i8> [[TMP1]], <8 x i8>* [[RETVAL]], align 2
|
||||
// CHECK-NEXT: [[TMP2:%.*]] = load <8 x i8>, <8 x i8>* [[RETVAL]], align 2
|
||||
// CHECK-NEXT: [[CASTFIXEDSVE:%.*]] = call <vscale x 2 x i8> @llvm.experimental.vector.insert.nxv2i8.v8i8(<vscale x 2 x i8> undef, <8 x i8> [[TMP2]], i64 0)
|
||||
// CHECK-NEXT: [[CASTFIXEDSVE:%.*]] = call <vscale x 2 x i8> @llvm.vector.insert.nxv2i8.v8i8(<vscale x 2 x i8> undef, <8 x i8> [[TMP2]], i64 0)
|
||||
// CHECK-NEXT: [[TMP3:%.*]] = bitcast <vscale x 2 x i8> [[CASTFIXEDSVE]] to <vscale x 16 x i1>
|
||||
// CHECK-NEXT: ret <vscale x 16 x i1> [[TMP3]]
|
||||
//
|
||||
|
@ -121,25 +121,25 @@ fixed_bool_t address_of_array_idx() {
|
|||
// CHECK-NEXT: store <8 x i8> <i8 2, i8 5, i8 4, i8 6, i8 0, i8 0, i8 0, i8 0>, <8 x i8>* [[YY]], align 8
|
||||
// CHECK-NEXT: [[TMP0:%.*]] = load <vscale x 16 x i1>, <vscale x 16 x i1>* [[PRED_ADDR]], align 2
|
||||
// CHECK-NEXT: [[TMP1:%.*]] = load <8 x i8>, <8 x i8>* @global_pred, align 2
|
||||
// CHECK-NEXT: [[CASTFIXEDSVE:%.*]] = call <vscale x 2 x i8> @llvm.experimental.vector.insert.nxv2i8.v8i8(<vscale x 2 x i8> undef, <8 x i8> [[TMP1]], i64 0)
|
||||
// CHECK-NEXT: [[CASTFIXEDSVE:%.*]] = call <vscale x 2 x i8> @llvm.vector.insert.nxv2i8.v8i8(<vscale x 2 x i8> undef, <8 x i8> [[TMP1]], i64 0)
|
||||
// CHECK-NEXT: [[TMP2:%.*]] = bitcast <vscale x 2 x i8> [[CASTFIXEDSVE]] to <vscale x 16 x i1>
|
||||
// CHECK-NEXT: [[TMP3:%.*]] = load <8 x i8>, <8 x i8>* [[XX]], align 8
|
||||
// CHECK-NEXT: [[TMP4:%.*]] = load <8 x i8>, <8 x i8>* [[YY]], align 8
|
||||
// CHECK-NEXT: [[ADD:%.*]] = add <8 x i8> [[TMP3]], [[TMP4]]
|
||||
// CHECK-NEXT: [[CASTFIXEDSVE2:%.*]] = call <vscale x 2 x i8> @llvm.experimental.vector.insert.nxv2i8.v8i8(<vscale x 2 x i8> undef, <8 x i8> [[ADD]], i64 0)
|
||||
// CHECK-NEXT: [[CASTFIXEDSVE2:%.*]] = call <vscale x 2 x i8> @llvm.vector.insert.nxv2i8.v8i8(<vscale x 2 x i8> undef, <8 x i8> [[ADD]], i64 0)
|
||||
// CHECK-NEXT: [[TMP5:%.*]] = bitcast <vscale x 2 x i8> [[CASTFIXEDSVE2]] to <vscale x 16 x i1>
|
||||
// CHECK-NEXT: [[TMP6:%.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.and.z.nxv16i1(<vscale x 16 x i1> [[TMP0]], <vscale x 16 x i1> [[TMP2]], <vscale x 16 x i1> [[TMP5]])
|
||||
// CHECK-NEXT: store <vscale x 16 x i1> [[TMP6]], <vscale x 16 x i1>* [[PG]], align 2
|
||||
// CHECK-NEXT: [[TMP7:%.*]] = load <vscale x 16 x i1>, <vscale x 16 x i1>* [[PG]], align 2
|
||||
// CHECK-NEXT: [[TMP8:%.*]] = load <16 x i32>, <16 x i32>* @global_vec, align 16
|
||||
// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vector.insert.nxv4i32.v16i32(<vscale x 4 x i32> undef, <16 x i32> [[TMP8]], i64 0)
|
||||
// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 4 x i32> @llvm.vector.insert.nxv4i32.v16i32(<vscale x 4 x i32> undef, <16 x i32> [[TMP8]], i64 0)
|
||||
// CHECK-NEXT: [[TMP9:%.*]] = load <vscale x 4 x i32>, <vscale x 4 x i32>* [[VEC_ADDR]], align 16
|
||||
// CHECK-NEXT: [[TMP10:%.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[TMP7]])
|
||||
// CHECK-NEXT: [[TMP11:%.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.add.nxv4i32(<vscale x 4 x i1> [[TMP10]], <vscale x 4 x i32> [[CASTSCALABLESVE]], <vscale x 4 x i32> [[TMP9]])
|
||||
// CHECK-NEXT: [[CASTFIXEDSVE3:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32(<vscale x 4 x i32> [[TMP11]], i64 0)
|
||||
// CHECK-NEXT: [[CASTFIXEDSVE3:%.*]] = call <16 x i32> @llvm.vector.extract.v16i32.nxv4i32(<vscale x 4 x i32> [[TMP11]], i64 0)
|
||||
// CHECK-NEXT: store <16 x i32> [[CASTFIXEDSVE3]], <16 x i32>* [[RETVAL]], align 16
|
||||
// CHECK-NEXT: [[TMP12:%.*]] = load <16 x i32>, <16 x i32>* [[RETVAL]], align 16
|
||||
// CHECK-NEXT: [[CASTSCALABLESVE4:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vector.insert.nxv4i32.v16i32(<vscale x 4 x i32> undef, <16 x i32> [[TMP12]], i64 0)
|
||||
// CHECK-NEXT: [[CASTSCALABLESVE4:%.*]] = call <vscale x 4 x i32> @llvm.vector.insert.nxv4i32.v16i32(<vscale x 4 x i32> undef, <16 x i32> [[TMP12]], i64 0)
|
||||
// CHECK-NEXT: ret <vscale x 4 x i32> [[CASTSCALABLESVE4]]
|
||||
//
|
||||
fixed_int32_t test_cast(svbool_t pred, svint32_t vec) {
|
||||
@ -22,13 +22,13 @@ fixed_bool_t global_bool;
// CHECK-128-LABEL: @write_global_i64(
// CHECK-128-NEXT: entry:
// CHECK-128-NEXT: [[CASTFIXEDSVE:%.*]] = call <2 x i64> @llvm.experimental.vector.extract.v2i64.nxv2i64(<vscale x 2 x i64> [[V:%.*]], i64 0)
// CHECK-128-NEXT: [[CASTFIXEDSVE:%.*]] = call <2 x i64> @llvm.vector.extract.v2i64.nxv2i64(<vscale x 2 x i64> [[V:%.*]], i64 0)
// CHECK-128-NEXT: store <2 x i64> [[CASTFIXEDSVE]], <2 x i64>* @global_i64, align 16, !tbaa [[TBAA6:![0-9]+]]
// CHECK-128-NEXT: ret void
//
// CHECK-512-LABEL: @write_global_i64(
// CHECK-512-NEXT: entry:
// CHECK-512-NEXT: [[CASTFIXEDSVE:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64(<vscale x 2 x i64> [[V:%.*]], i64 0)
// CHECK-512-NEXT: [[CASTFIXEDSVE:%.*]] = call <8 x i64> @llvm.vector.extract.v8i64.nxv2i64(<vscale x 2 x i64> [[V:%.*]], i64 0)
// CHECK-512-NEXT: store <8 x i64> [[CASTFIXEDSVE]], <8 x i64>* @global_i64, align 16, !tbaa [[TBAA6:![0-9]+]]
// CHECK-512-NEXT: ret void
//
@ -36,13 +36,13 @@ void write_global_i64(svint64_t v) { global_i64 = v; }
|
|||
|
||||
// CHECK-128-LABEL: @write_global_bf16(
|
||||
// CHECK-128-NEXT: entry:
|
||||
// CHECK-128-NEXT: [[CASTFIXEDSVE:%.*]] = call <8 x bfloat> @llvm.experimental.vector.extract.v8bf16.nxv8bf16(<vscale x 8 x bfloat> [[V:%.*]], i64 0)
|
||||
// CHECK-128-NEXT: [[CASTFIXEDSVE:%.*]] = call <8 x bfloat> @llvm.vector.extract.v8bf16.nxv8bf16(<vscale x 8 x bfloat> [[V:%.*]], i64 0)
|
||||
// CHECK-128-NEXT: store <8 x bfloat> [[CASTFIXEDSVE]], <8 x bfloat>* @global_bf16, align 16, !tbaa [[TBAA6]]
|
||||
// CHECK-128-NEXT: ret void
|
||||
//
|
||||
// CHECK-512-LABEL: @write_global_bf16(
|
||||
// CHECK-512-NEXT: entry:
|
||||
// CHECK-512-NEXT: [[CASTFIXEDSVE:%.*]] = call <32 x bfloat> @llvm.experimental.vector.extract.v32bf16.nxv8bf16(<vscale x 8 x bfloat> [[V:%.*]], i64 0)
|
||||
// CHECK-512-NEXT: [[CASTFIXEDSVE:%.*]] = call <32 x bfloat> @llvm.vector.extract.v32bf16.nxv8bf16(<vscale x 8 x bfloat> [[V:%.*]], i64 0)
|
||||
// CHECK-512-NEXT: store <32 x bfloat> [[CASTFIXEDSVE]], <32 x bfloat>* @global_bf16, align 16, !tbaa [[TBAA6]]
|
||||
// CHECK-512-NEXT: ret void
|
||||
//
|
||||
|
@ -51,14 +51,14 @@ void write_global_bf16(svbfloat16_t v) { global_bf16 = v; }
|
|||
// CHECK-128-LABEL: @write_global_bool(
|
||||
// CHECK-128-NEXT: entry:
|
||||
// CHECK-128-NEXT: [[TMP0:%.*]] = bitcast <vscale x 16 x i1> [[V:%.*]] to <vscale x 2 x i8>
|
||||
// CHECK-128-NEXT: [[CASTFIXEDSVE:%.*]] = call <2 x i8> @llvm.experimental.vector.extract.v2i8.nxv2i8(<vscale x 2 x i8> [[TMP0]], i64 0)
|
||||
// CHECK-128-NEXT: [[CASTFIXEDSVE:%.*]] = call <2 x i8> @llvm.vector.extract.v2i8.nxv2i8(<vscale x 2 x i8> [[TMP0]], i64 0)
|
||||
// CHECK-128-NEXT: store <2 x i8> [[CASTFIXEDSVE]], <2 x i8>* @global_bool, align 2, !tbaa [[TBAA6:![0-9]+]]
|
||||
// CHECK-128-NEXT: ret void
|
||||
//
|
||||
// CHECK-512-LABEL: @write_global_bool(
|
||||
// CHECK-512-NEXT: entry:
|
||||
// CHECK-512-NEXT: [[TMP0:%.*]] = bitcast <vscale x 16 x i1> [[V:%.*]] to <vscale x 2 x i8>
|
||||
// CHECK-512-NEXT: [[CASTFIXEDSVE:%.*]] = call <8 x i8> @llvm.experimental.vector.extract.v8i8.nxv2i8(<vscale x 2 x i8> [[TMP0]], i64 0)
|
||||
// CHECK-512-NEXT: [[CASTFIXEDSVE:%.*]] = call <8 x i8> @llvm.vector.extract.v8i8.nxv2i8(<vscale x 2 x i8> [[TMP0]], i64 0)
|
||||
// CHECK-512-NEXT: store <8 x i8> [[CASTFIXEDSVE]], <8 x i8>* @global_bool, align 2, !tbaa [[TBAA6]]
|
||||
// CHECK-512-NEXT: ret void
|
||||
//
|
||||
|
@ -71,13 +71,13 @@ void write_global_bool(svbool_t v) { global_bool = v; }
|
|||
// CHECK-128-LABEL: @read_global_i64(
|
||||
// CHECK-128-NEXT: entry:
|
||||
// CHECK-128-NEXT: [[TMP0:%.*]] = load <2 x i64>, <2 x i64>* @global_i64, align 16, !tbaa [[TBAA6]]
|
||||
// CHECK-128-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 2 x i64> @llvm.experimental.vector.insert.nxv2i64.v2i64(<vscale x 2 x i64> undef, <2 x i64> [[TMP0]], i64 0)
|
||||
// CHECK-128-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 2 x i64> @llvm.vector.insert.nxv2i64.v2i64(<vscale x 2 x i64> undef, <2 x i64> [[TMP0]], i64 0)
|
||||
// CHECK-128-NEXT: ret <vscale x 2 x i64> [[CASTSCALABLESVE]]
|
||||
//
|
||||
// CHECK-512-LABEL: @read_global_i64(
|
||||
// CHECK-512-NEXT: entry:
|
||||
// CHECK-512-NEXT: [[TMP0:%.*]] = load <8 x i64>, <8 x i64>* @global_i64, align 16, !tbaa [[TBAA6]]
|
||||
// CHECK-512-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 2 x i64> @llvm.experimental.vector.insert.nxv2i64.v8i64(<vscale x 2 x i64> undef, <8 x i64> [[TMP0]], i64 0)
|
||||
// CHECK-512-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 2 x i64> @llvm.vector.insert.nxv2i64.v8i64(<vscale x 2 x i64> undef, <8 x i64> [[TMP0]], i64 0)
|
||||
// CHECK-512-NEXT: ret <vscale x 2 x i64> [[CASTSCALABLESVE]]
|
||||
//
|
||||
svint64_t read_global_i64() { return global_i64; }
|
||||
|
@ -85,13 +85,13 @@ svint64_t read_global_i64() { return global_i64; }
|
|||
// CHECK-128-LABEL: @read_global_bf16(
|
||||
// CHECK-128-NEXT: entry:
|
||||
// CHECK-128-NEXT: [[TMP0:%.*]] = load <8 x bfloat>, <8 x bfloat>* @global_bf16, align 16, !tbaa [[TBAA6]]
|
||||
// CHECK-128-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 8 x bfloat> @llvm.experimental.vector.insert.nxv8bf16.v8bf16(<vscale x 8 x bfloat> undef, <8 x bfloat> [[TMP0]], i64 0)
|
||||
// CHECK-128-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 8 x bfloat> @llvm.vector.insert.nxv8bf16.v8bf16(<vscale x 8 x bfloat> undef, <8 x bfloat> [[TMP0]], i64 0)
|
||||
// CHECK-128-NEXT: ret <vscale x 8 x bfloat> [[CASTSCALABLESVE]]
|
||||
//
|
||||
// CHECK-512-LABEL: @read_global_bf16(
|
||||
// CHECK-512-NEXT: entry:
|
||||
// CHECK-512-NEXT: [[TMP0:%.*]] = load <32 x bfloat>, <32 x bfloat>* @global_bf16, align 16, !tbaa [[TBAA6]]
|
||||
// CHECK-512-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 8 x bfloat> @llvm.experimental.vector.insert.nxv8bf16.v32bf16(<vscale x 8 x bfloat> undef, <32 x bfloat> [[TMP0]], i64 0)
|
||||
// CHECK-512-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 8 x bfloat> @llvm.vector.insert.nxv8bf16.v32bf16(<vscale x 8 x bfloat> undef, <32 x bfloat> [[TMP0]], i64 0)
|
||||
// CHECK-512-NEXT: ret <vscale x 8 x bfloat> [[CASTSCALABLESVE]]
|
||||
//
|
||||
svbfloat16_t read_global_bf16() { return global_bf16; }
|
||||
|
@ -99,14 +99,14 @@ svbfloat16_t read_global_bf16() { return global_bf16; }
|
|||
// CHECK-128-LABEL: @read_global_bool(
|
||||
// CHECK-128-NEXT: entry:
|
||||
// CHECK-128-NEXT: [[TMP0:%.*]] = load <2 x i8>, <2 x i8>* @global_bool, align 2, !tbaa [[TBAA6]]
|
||||
// CHECK-128-NEXT: [[CASTFIXEDSVE:%.*]] = call <vscale x 2 x i8> @llvm.experimental.vector.insert.nxv2i8.v2i8(<vscale x 2 x i8> undef, <2 x i8> [[TMP0]], i64 0)
|
||||
// CHECK-128-NEXT: [[CASTFIXEDSVE:%.*]] = call <vscale x 2 x i8> @llvm.vector.insert.nxv2i8.v2i8(<vscale x 2 x i8> undef, <2 x i8> [[TMP0]], i64 0)
|
||||
// CHECK-128-NEXT: [[TMP1:%.*]] = bitcast <vscale x 2 x i8> [[CASTFIXEDSVE]] to <vscale x 16 x i1>
|
||||
// CHECK-128-NEXT: ret <vscale x 16 x i1> [[TMP1]]
|
||||
//
|
||||
// CHECK-512-LABEL: @read_global_bool(
|
||||
// CHECK-512-NEXT: entry:
|
||||
// CHECK-512-NEXT: [[TMP0:%.*]] = load <8 x i8>, <8 x i8>* @global_bool, align 2, !tbaa [[TBAA6]]
|
||||
// CHECK-512-NEXT: [[CASTFIXEDSVE:%.*]] = call <vscale x 2 x i8> @llvm.experimental.vector.insert.nxv2i8.v8i8(<vscale x 2 x i8> undef, <8 x i8> [[TMP0]], i64 0)
|
||||
// CHECK-512-NEXT: [[CASTFIXEDSVE:%.*]] = call <vscale x 2 x i8> @llvm.vector.insert.nxv2i8.v8i8(<vscale x 2 x i8> undef, <8 x i8> [[TMP0]], i64 0)
|
||||
// CHECK-512-NEXT: [[TMP1:%.*]] = bitcast <vscale x 2 x i8> [[CASTFIXEDSVE]] to <vscale x 16 x i1>
|
||||
// CHECK-512-NEXT: ret <vscale x 16 x i1> [[TMP1]]
|
||||
//
|
||||
|
|
|
@@ -17268,27 +17268,35 @@ Arguments:
""""""""""
The argument to this intrinsic must be a vector of floating-point values.

'``llvm.experimental.vector.insert``' Intrinsic
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
'``llvm.vector.insert``' Intrinsic
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

Syntax:
"""""""
This is an overloaded intrinsic. You can use ``llvm.experimental.vector.insert``
to insert a fixed-width vector into a scalable vector, but not the other way
around.
This is an overloaded intrinsic.

::

      declare <vscale x 4 x float> @llvm.experimental.vector.insert.nxv4f32.v4f32(<vscale x 4 x float> %vec, <4 x float> %subvec, i64 %idx)
      declare <vscale x 2 x double> @llvm.experimental.vector.insert.nxv2f64.v2f64(<vscale x 2 x double> %vec, <2 x double> %subvec, i64 %idx)
      ; Insert fixed type into scalable type
      declare <vscale x 4 x float> @llvm.vector.insert.nxv4f32.v4f32(<vscale x 4 x float> %vec, <4 x float> %subvec, i64 <idx>)
      declare <vscale x 2 x double> @llvm.vector.insert.nxv2f64.v2f64(<vscale x 2 x double> %vec, <2 x double> %subvec, i64 <idx>)

      ; Insert scalable type into scalable type
      declare <vscale x 4 x float> @llvm.vector.insert.nxv4f32.nxv2f32(<vscale x 4 x float> %vec, <vscale x 2 x float> %subvec, i64 <idx>)

      ; Insert fixed type into fixed type
      declare <4 x double> @llvm.vector.insert.v4f64.v2f64(<4 x double> %vec, <2 x double> %subvec, i64 <idx>)

Overview:
"""""""""

The '``llvm.experimental.vector.insert.*``' intrinsics insert a vector into another vector
The '``llvm.vector.insert.*``' intrinsics insert a vector into another vector
starting from a given index. The return type matches the type of the vector we
insert into. Conceptually, this can be used to build a scalable vector out of
non-scalable vectors.
non-scalable vectors; however, this intrinsic can also be used on purely fixed
types.

Scalable vectors can only be inserted into other scalable vectors.

Arguments:
""""""""""
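For illustration only (this example is not part of the diff and the function
and value names are invented), the renamed intrinsic widens a fixed-width
vector into a scalable one like so:

::

      define <vscale x 4 x i32> @promote(<4 x i32> %fixed) {
        ; Insert the 4-element fixed vector into an undef scalable vector at
        ; element offset 0; any remaining lanes keep the undef value.
        %scalable = call <vscale x 4 x i32> @llvm.vector.insert.nxv4i32.v4i32(<vscale x 4 x i32> undef, <4 x i32> %fixed, i64 0)
        ret <vscale x 4 x i32> %scalable
      }

      declare <vscale x 4 x i32> @llvm.vector.insert.nxv4i32.v4i32(<vscale x 4 x i32>, <4 x i32>, i64)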
@@ -17306,27 +17314,35 @@ cannot be determined statically but is false at runtime, then the result vector
is undefined.


'``llvm.experimental.vector.extract``' Intrinsic
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
'``llvm.vector.extract``' Intrinsic
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

Syntax:
"""""""
This is an overloaded intrinsic. You can use
``llvm.experimental.vector.extract`` to extract a fixed-width vector from a
scalable vector, but not the other way around.
This is an overloaded intrinsic.

::

      declare <4 x float> @llvm.experimental.vector.extract.v4f32.nxv4f32(<vscale x 4 x float> %vec, i64 %idx)
      declare <2 x double> @llvm.experimental.vector.extract.v2f64.nxv2f64(<vscale x 2 x double> %vec, i64 %idx)
      ; Extract fixed type from scalable type
      declare <4 x float> @llvm.vector.extract.v4f32.nxv4f32(<vscale x 4 x float> %vec, i64 <idx>)
      declare <2 x double> @llvm.vector.extract.v2f64.nxv2f64(<vscale x 2 x double> %vec, i64 <idx>)

      ; Extract scalable type from scalable type
      declare <vscale x 2 x float> @llvm.vector.extract.nxv2f32.nxv4f32(<vscale x 4 x float> %vec, i64 <idx>)

      ; Extract fixed type from fixed type
      declare <2 x double> @llvm.vector.extract.v2f64.v4f64(<4 x double> %vec, i64 <idx>)

Overview:
"""""""""

The '``llvm.experimental.vector.extract.*``' intrinsics extract a vector from
within another vector starting from a given index. The return type must be
explicitly specified. Conceptually, this can be used to decompose a scalable
vector into non-scalable parts.
The '``llvm.vector.extract.*``' intrinsics extract a vector from within another
vector starting from a given index. The return type must be explicitly
specified. Conceptually, this can be used to decompose a scalable vector into
non-scalable parts; however, this intrinsic can also be used on purely fixed
types.

Scalable vectors can only be extracted from other scalable vectors.

Arguments:
""""""""""
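The matching illustrative sketch for the extract direction (again, invented
names, not part of the diff) pulls the low four elements out of a scalable
vector:

::

      define <4 x i32> @demote(<vscale x 4 x i32> %scalable) {
        ; Extract the first 4 elements; the index must be a constant multiple
        ; of the result type's minimum element count (here 4), so 0 is valid.
        %fixed = call <4 x i32> @llvm.vector.extract.v4i32.nxv4i32(<vscale x 4 x i32> %scalable, i64 0)
        ret <4 x i32> %fixed
      }

      declare <4 x i32> @llvm.vector.extract.v4i32.nxv4i32(<vscale x 4 x i32>, i64)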
@@ -66,6 +66,9 @@ versions of these toolchains.
Changes to the LLVM IR
----------------------

* Renamed ``llvm.experimental.vector.extract`` intrinsic to ``llvm.vector.extract``.
* Renamed ``llvm.experimental.vector.insert`` intrinsic to ``llvm.vector.insert``.

Changes to building LLVM
------------------------
@@ -1474,7 +1474,7 @@ public:
      // The cost of materialising a constant integer vector.
      return TargetTransformInfo::TCC_Basic;
    }
    case Intrinsic::experimental_vector_extract: {
    case Intrinsic::vector_extract: {
      // FIXME: Handle case where a scalable vector is extracted from a scalable
      // vector
      if (isa<ScalableVectorType>(RetTy))
@@ -1484,7 +1484,7 @@ public:
                                     cast<VectorType>(Args[0]->getType()), None,
                                     Index, cast<VectorType>(RetTy));
    }
    case Intrinsic::experimental_vector_insert: {
    case Intrinsic::vector_insert: {
      // FIXME: Handle case where a scalable vector is inserted into a scalable
      // vector
      if (isa<ScalableVectorType>(Args[1]->getType()))
@@ -914,18 +914,18 @@ public:
                           Name);
  }

  /// Create a call to the experimental.vector.extract intrinsic.
  /// Create a call to the vector.extract intrinsic.
  CallInst *CreateExtractVector(Type *DstType, Value *SrcVec, Value *Idx,
                                const Twine &Name = "") {
    return CreateIntrinsic(Intrinsic::experimental_vector_extract,
    return CreateIntrinsic(Intrinsic::vector_extract,
                           {DstType, SrcVec->getType()}, {SrcVec, Idx}, nullptr,
                           Name);
  }

  /// Create a call to the experimental.vector.insert intrinsic.
  /// Create a call to the vector.insert intrinsic.
  CallInst *CreateInsertVector(Type *DstType, Value *SrcVec, Value *SubVec,
                               Value *Idx, const Twine &Name = "") {
    return CreateIntrinsic(Intrinsic::experimental_vector_insert,
    return CreateIntrinsic(Intrinsic::vector_insert,
                           {DstType, SubVec->getType()}, {SrcVec, SubVec, Idx},
                           nullptr, Name);
  }
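Callers of these two helpers now emit the renamed intrinsics. A minimal sketch
of the IR they produce, assuming invented value names:

      define <2 x i64> @roundtrip(<vscale x 2 x i64> %acc, <2 x i64> %chunk) {
        ; Roughly what CreateInsertVector(<vscale x 2 x i64>, %acc, %chunk, 0) emits:
        %widened = call <vscale x 2 x i64> @llvm.vector.insert.nxv2i64.v2i64(<vscale x 2 x i64> %acc, <2 x i64> %chunk, i64 0)
        ; Roughly what CreateExtractVector(<2 x i64>, %widened, 0) emits:
        %narrowed = call <2 x i64> @llvm.vector.extract.v2i64.nxv2i64(<vscale x 2 x i64> %widened, i64 0)
        ret <2 x i64> %narrowed
      }

      declare <vscale x 2 x i64> @llvm.vector.insert.nxv2i64.v2i64(<vscale x 2 x i64>, <2 x i64>, i64)
      declare <2 x i64> @llvm.vector.extract.v2i64.nxv2i64(<vscale x 2 x i64>, i64)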
@@ -1961,13 +1961,13 @@ def int_experimental_vector_splice : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
def int_vscale : DefaultAttrsIntrinsic<[llvm_anyint_ty], [], [IntrNoMem]>;

//===---------- Intrinsics to perform subvector insertion/extraction ------===//
def int_experimental_vector_insert : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                                                           [LLVMMatchType<0>, llvm_anyvector_ty, llvm_i64_ty],
                                                           [IntrNoMem, ImmArg<ArgIndex<2>>]>;
def int_vector_insert : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                                              [LLVMMatchType<0>, llvm_anyvector_ty, llvm_i64_ty],
                                              [IntrNoMem, ImmArg<ArgIndex<2>>]>;

def int_experimental_vector_extract : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                                                            [llvm_anyvector_ty, llvm_i64_ty],
                                                            [IntrNoMem, ImmArg<ArgIndex<1>>]>;
def int_vector_extract : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                                               [llvm_anyvector_ty, llvm_i64_ty],
                                               [IntrNoMem, ImmArg<ArgIndex<1>>]>;

//===----------------- Pointer Authentication Intrinsics ------------------===//
//
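The llvm_anyvector_ty overloads above resolve to concretely mangled
declarations at each call site; for example, the cost-model test further down
uses these instantiations:

      declare <vscale x 16 x i32> @llvm.vector.insert.nxv16i32.nxv4i32(<vscale x 16 x i32>, <vscale x 4 x i32>, i64)
      declare <vscale x 4 x i32> @llvm.vector.extract.nxv4i32.nxv16i32(<vscale x 16 x i32>, i64)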
@@ -6021,14 +6021,14 @@ static Value *simplifyBinaryIntrinsic(Function *F, Value *Op0, Value *Op1,

    break;
  }
  case Intrinsic::experimental_vector_extract: {
  case Intrinsic::vector_extract: {
    Type *ReturnType = F->getReturnType();

    // (extract_vector (insert_vector _, X, 0), 0) -> X
    unsigned IdxN = cast<ConstantInt>(Op1)->getZExtValue();
    Value *X = nullptr;
    if (match(Op0, m_Intrinsic<Intrinsic::experimental_vector_insert>(
                       m_Value(), m_Value(X), m_Zero())) &&
    if (match(Op0, m_Intrinsic<Intrinsic::vector_insert>(m_Value(), m_Value(X),
                                                         m_Zero())) &&
        IdxN == 0 && X->getType() == ReturnType)
      return X;

@@ -6169,7 +6169,7 @@ static Value *simplifyIntrinsic(CallBase *Call, const SimplifyQuery &Q) {

    return nullptr;
  }
  case Intrinsic::experimental_vector_insert: {
  case Intrinsic::vector_insert: {
    Value *Vec = Call->getArgOperand(0);
    Value *SubVec = Call->getArgOperand(1);
    Value *Idx = Call->getArgOperand(2);

@@ -6179,8 +6179,8 @@ static Value *simplifyIntrinsic(CallBase *Call, const SimplifyQuery &Q) {
    // where: Y is X, or Y is undef
    unsigned IdxN = cast<ConstantInt>(Idx)->getZExtValue();
    Value *X = nullptr;
    if (match(SubVec, m_Intrinsic<Intrinsic::experimental_vector_extract>(
                          m_Value(X), m_Zero())) &&
    if (match(SubVec,
              m_Intrinsic<Intrinsic::vector_extract>(m_Value(X), m_Zero())) &&
        (Q.isUndefValue(Vec) || Vec == X) && IdxN == 0 &&
        X->getType() == ReturnType)
      return X;
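Both simplifications cancel an insert/extract round trip at index 0. A hedged
IR sketch of the first fold (the function name is invented):

      define <4 x i32> @fold_extract_of_insert(<vscale x 4 x i32> %vec, <4 x i32> %sub) {
        ; %sub is inserted at index 0 and immediately re-extracted at index 0
        ; with the same type, so %out can be simplified to %sub.
        %ins = call <vscale x 4 x i32> @llvm.vector.insert.nxv4i32.v4i32(<vscale x 4 x i32> %vec, <4 x i32> %sub, i64 0)
        %out = call <4 x i32> @llvm.vector.extract.v4i32.nxv4i32(<vscale x 4 x i32> %ins, i64 0)
        ret <4 x i32> %out
      }

      declare <vscale x 4 x i32> @llvm.vector.insert.nxv4i32.v4i32(<vscale x 4 x i32>, <4 x i32>, i64)
      declare <4 x i32> @llvm.vector.extract.v4i32.nxv4i32(<vscale x 4 x i32>, i64)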
@@ -7206,7 +7206,7 @@ void SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I,
    setValue(&I, SetCC);
    return;
  }
  case Intrinsic::experimental_vector_insert: {
  case Intrinsic::vector_insert: {
    SDValue Vec = getValue(I.getOperand(0));
    SDValue SubVec = getValue(I.getOperand(1));
    SDValue Index = getValue(I.getOperand(2));
@@ -7223,7 +7223,7 @@ void SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I,
                             Index));
    return;
  }
  case Intrinsic::experimental_vector_extract: {
  case Intrinsic::vector_extract: {
    SDValue Vec = getValue(I.getOperand(0));
    SDValue Index = getValue(I.getOperand(1));
    EVT ResultVT = TLI.getValueType(DAG.getDataLayout(), I.getType());
@@ -748,6 +748,23 @@ static bool UpgradeIntrinsicFunction1(Function *F, Function *&NewFn) {
    break;
  }
  case 'e': {
    if (Name.startswith("experimental.vector.extract.")) {
      rename(F);
      Type *Tys[] = {F->getReturnType(), F->arg_begin()->getType()};
      NewFn = Intrinsic::getDeclaration(F->getParent(),
                                        Intrinsic::vector_extract, Tys);
      return true;
    }

    if (Name.startswith("experimental.vector.insert.")) {
      rename(F);
      auto Args = F->getFunctionType()->params();
      Type *Tys[] = {Args[0], Args[1]};
      NewFn = Intrinsic::getDeclaration(F->getParent(),
                                        Intrinsic::vector_insert, Tys);
      return true;
    }

    SmallVector<StringRef, 2> Groups;
    static const Regex R("^experimental.vector.reduce.([a-z]+)\\.[a-z][0-9]+");
    if (R.match(Name, &Groups)) {
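The upgrade path keeps old bitcode working: declarations and calls that still
use the experimental names are renamed on load. A sketch of the effect (the
function name is invented; the mangled intrinsic matches the bitcode upgrade
test below):

      ; As read from old bitcode:
      ;   %r = call <4 x i8> @llvm.experimental.vector.extract.v4i8.nxv16i8(<vscale x 16 x i8> %a, i64 0)
      ; After the upgrader renames the declaration:
      define <4 x i8> @upgraded(<vscale x 16 x i8> %a) {
        %r = call <4 x i8> @llvm.vector.extract.v4i8.nxv16i8(<vscale x 16 x i8> %a, i64 0)
        ret <4 x i8> %r
      }

      declare <4 x i8> @llvm.vector.extract.v4i8.nxv16i8(<vscale x 16 x i8>, i64)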
@@ -5512,7 +5512,7 @@ void Verifier::visitIntrinsicCall(Intrinsic::ID ID, CallBase &Call) {
          &Call);
    break;
  }
  case Intrinsic::experimental_vector_insert: {
  case Intrinsic::vector_insert: {
    Value *Vec = Call.getArgOperand(0);
    Value *SubVec = Call.getArgOperand(1);
    Value *Idx = Call.getArgOperand(2);
@@ -5524,11 +5524,11 @@ void Verifier::visitIntrinsicCall(Intrinsic::ID ID, CallBase &Call) {
    ElementCount VecEC = VecTy->getElementCount();
    ElementCount SubVecEC = SubVecTy->getElementCount();
    Check(VecTy->getElementType() == SubVecTy->getElementType(),
          "experimental_vector_insert parameters must have the same element "
          "vector_insert parameters must have the same element "
          "type.",
          &Call);
    Check(IdxN % SubVecEC.getKnownMinValue() == 0,
          "experimental_vector_insert index must be a constant multiple of "
          "vector_insert index must be a constant multiple of "
          "the subvector's known minimum vector length.");

    // If this insertion is not the 'mixed' case where a fixed vector is
@@ -5537,12 +5537,12 @@ void Verifier::visitIntrinsicCall(Intrinsic::ID ID, CallBase &Call) {
    if (VecEC.isScalable() == SubVecEC.isScalable()) {
      Check(IdxN < VecEC.getKnownMinValue() &&
                IdxN + SubVecEC.getKnownMinValue() <= VecEC.getKnownMinValue(),
            "subvector operand of experimental_vector_insert would overrun the "
            "subvector operand of vector_insert would overrun the "
            "vector being inserted into.");
    }
    break;
  }
  case Intrinsic::experimental_vector_extract: {
  case Intrinsic::vector_extract: {
    Value *Vec = Call.getArgOperand(0);
    Value *Idx = Call.getArgOperand(1);
    unsigned IdxN = cast<ConstantInt>(Idx)->getZExtValue();
@@ -5554,11 +5554,11 @@ void Verifier::visitIntrinsicCall(Intrinsic::ID ID, CallBase &Call) {
    ElementCount ResultEC = ResultTy->getElementCount();

    Check(ResultTy->getElementType() == VecTy->getElementType(),
          "experimental_vector_extract result must have the same element "
          "vector_extract result must have the same element "
          "type as the input vector.",
          &Call);
    Check(IdxN % ResultEC.getKnownMinValue() == 0,
          "experimental_vector_extract index must be a constant multiple of "
          "vector_extract index must be a constant multiple of "
          "the result type's known minimum vector length.");

    // If this extraction is not the 'mixed' case where a fixed vector is
@@ -5567,7 +5567,7 @@ void Verifier::visitIntrinsicCall(Intrinsic::ID ID, CallBase &Call) {
    if (VecEC.isScalable() == ResultEC.isScalable()) {
      Check(IdxN < VecEC.getKnownMinValue() &&
                IdxN + ResultEC.getKnownMinValue() <= VecEC.getKnownMinValue(),
            "experimental_vector_extract would overrun.");
            "vector_extract would overrun.");
    }
    break;
  }
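A sketch of a call that satisfies all of these checks (invented function name;
the element types match, index 2 is a multiple of the subvector's minimum
length 2, and 2 + 2 does not overrun the destination's minimum length 4):

      define <vscale x 4 x i16> @verifier_ok(<vscale x 4 x i16> %vec, <vscale x 2 x i16> %sub) {
        %ins = call <vscale x 4 x i16> @llvm.vector.insert.nxv4i16.nxv2i16(<vscale x 4 x i16> %vec, <vscale x 2 x i16> %sub, i64 2)
        ret <vscale x 4 x i16> %ins
      }

      declare <vscale x 4 x i16> @llvm.vector.insert.nxv4i16.nxv2i16(<vscale x 4 x i16>, <vscale x 2 x i16>, i64)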
@@ -652,8 +652,7 @@ static Optional<Instruction *> instCombineSVECmpNE(InstCombiner &IC,
    return None;

  auto *VecIns = dyn_cast<IntrinsicInst>(DupQLane->getArgOperand(0));
  if (!VecIns ||
      VecIns->getIntrinsicID() != Intrinsic::experimental_vector_insert)
  if (!VecIns || VecIns->getIntrinsicID() != Intrinsic::vector_insert)
    return None;

  // Where the vector insert is a fixed constant vector insert into undef at

@@ -305,8 +305,7 @@ bool SVEIntrinsicOpts::optimizePredicateStore(Instruction *I) {

  // ..where the value stored comes from a vector extract..
  auto *IntrI = dyn_cast<IntrinsicInst>(Store->getOperand(0));
  if (!IntrI ||
      IntrI->getIntrinsicID() != Intrinsic::experimental_vector_extract)
  if (!IntrI || IntrI->getIntrinsicID() != Intrinsic::vector_extract)
    return false;

  // ..that is extracting from index 0..
@@ -365,8 +364,7 @@ bool SVEIntrinsicOpts::optimizePredicateLoad(Instruction *I) {

  // ..whose operand is a vector_insert..
  auto *IntrI = dyn_cast<IntrinsicInst>(BitCast->getOperand(0));
  if (!IntrI ||
      IntrI->getIntrinsicID() != Intrinsic::experimental_vector_insert)
  if (!IntrI || IntrI->getIntrinsicID() != Intrinsic::vector_insert)
    return false;

  // ..that is inserting into index zero of an undef vector..
@@ -451,8 +449,8 @@ bool SVEIntrinsicOpts::runOnModule(Module &M) {
      continue;

    switch (F.getIntrinsicID()) {
    case Intrinsic::experimental_vector_extract:
    case Intrinsic::experimental_vector_insert:
    case Intrinsic::vector_extract:
    case Intrinsic::vector_insert:
    case Intrinsic::aarch64_sve_ptrue:
      for (User *U : F.users())
        Functions.insert(cast<Instruction>(U)->getFunction());
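A hedged sketch (invented function, simplified preconditions) of the kind of
predicate store that optimizePredicateStore looks for, now written with the
renamed intrinsic:

      define void @store_pred(<vscale x 16 x i1> %p, <2 x i8>* %addr) #0 {
        ; The stored value is a vector.extract at index 0 of the predicate
        ; reinterpreted as <vscale x 2 x i8>.
        %cast = bitcast <vscale x 16 x i1> %p to <vscale x 2 x i8>
        %fixed = call <2 x i8> @llvm.vector.extract.v2i8.nxv2i8(<vscale x 2 x i8> %cast, i64 0)
        store <2 x i8> %fixed, <2 x i8>* %addr
        ret void
      }

      declare <2 x i8> @llvm.vector.extract.v2i8.nxv2i8(<vscale x 2 x i8>, i64)

      attributes #0 = { "target-features"="+sve" vscale_range(1,1) }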
@@ -2357,7 +2357,7 @@ Instruction *InstCombinerImpl::visitCallInst(CallInst &CI) {
    }
    break;
  }
  case Intrinsic::experimental_vector_insert: {
  case Intrinsic::vector_insert: {
    Value *Vec = II->getArgOperand(0);
    Value *SubVec = II->getArgOperand(1);
    Value *Idx = II->getArgOperand(2);
@@ -2403,7 +2403,7 @@ Instruction *InstCombinerImpl::visitCallInst(CallInst &CI) {
    }
    break;
  }
  case Intrinsic::experimental_vector_extract: {
  case Intrinsic::vector_extract: {
    Value *Vec = II->getArgOperand(0);
    Value *Idx = II->getArgOperand(1);
@ -3,22 +3,22 @@
|
|||
|
||||
define void @vector_insert_extract(<vscale x 4 x i32> %v0, <vscale x 16 x i32> %v1, <16 x i32> %v2) {
|
||||
; CHECK-LABEL: 'vector_insert_extract'
|
||||
; CHECK-NEXT: Cost Model: Found an estimated cost of 81 for instruction: %extract_fixed_from_scalable = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32(<vscale x 4 x i32> %v0, i64 0)
|
||||
; CHECK-NEXT: Cost Model: Found an estimated cost of 81 for instruction: %insert_fixed_into_scalable = call <vscale x 4 x i32> @llvm.experimental.vector.insert.nxv4i32.v16i32(<vscale x 4 x i32> %v0, <16 x i32> %v2, i64 0)
|
||||
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %extract_scalable_from_scalable = call <vscale x 4 x i32> @llvm.experimental.vector.extract.nxv4i32.nxv16i32(<vscale x 16 x i32> %v1, i64 0)
|
||||
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %insert_scalable_into_scalable = call <vscale x 16 x i32> @llvm.experimental.vector.insert.nxv16i32.nxv4i32(<vscale x 16 x i32> %v1, <vscale x 4 x i32> %v0, i64 0)
|
||||
; CHECK-NEXT: Cost Model: Found an estimated cost of 81 for instruction: %extract_fixed_from_scalable = call <16 x i32> @llvm.vector.extract.v16i32.nxv4i32(<vscale x 4 x i32> %v0, i64 0)
|
||||
; CHECK-NEXT: Cost Model: Found an estimated cost of 81 for instruction: %insert_fixed_into_scalable = call <vscale x 4 x i32> @llvm.vector.insert.nxv4i32.v16i32(<vscale x 4 x i32> %v0, <16 x i32> %v2, i64 0)
|
||||
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %extract_scalable_from_scalable = call <vscale x 4 x i32> @llvm.vector.extract.nxv4i32.nxv16i32(<vscale x 16 x i32> %v1, i64 0)
|
||||
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %insert_scalable_into_scalable = call <vscale x 16 x i32> @llvm.vector.insert.nxv16i32.nxv4i32(<vscale x 16 x i32> %v1, <vscale x 4 x i32> %v0, i64 0)
|
||||
; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void
|
||||
;
|
||||
%extract_fixed_from_scalable = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32(<vscale x 4 x i32> %v0, i64 0)
|
||||
%insert_fixed_into_scalable = call <vscale x 4 x i32> @llvm.experimental.vector.insert.nxv4i32.v16i32(<vscale x 4 x i32> %v0, <16 x i32> %v2, i64 0)
|
||||
%extract_scalable_from_scalable = call <vscale x 4 x i32> @llvm.experimental.vector.extract.nxv4i32.nxv16i32(<vscale x 16 x i32> %v1, i64 0)
|
||||
%insert_scalable_into_scalable = call <vscale x 16 x i32> @llvm.experimental.vector.insert.nxv16i32.nxv4i32(<vscale x 16 x i32> %v1, <vscale x 4 x i32> %v0, i64 0)
|
||||
%extract_fixed_from_scalable = call <16 x i32> @llvm.vector.extract.v16i32.nxv4i32(<vscale x 4 x i32> %v0, i64 0)
|
||||
%insert_fixed_into_scalable = call <vscale x 4 x i32> @llvm.vector.insert.nxv4i32.v16i32(<vscale x 4 x i32> %v0, <16 x i32> %v2, i64 0)
|
||||
%extract_scalable_from_scalable = call <vscale x 4 x i32> @llvm.vector.extract.nxv4i32.nxv16i32(<vscale x 16 x i32> %v1, i64 0)
|
||||
%insert_scalable_into_scalable = call <vscale x 16 x i32> @llvm.vector.insert.nxv16i32.nxv4i32(<vscale x 16 x i32> %v1, <vscale x 4 x i32> %v0, i64 0)
|
||||
ret void
|
||||
}
|
||||
declare <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32(<vscale x 4 x i32>, i64)
|
||||
declare <vscale x 4 x i32> @llvm.experimental.vector.insert.nxv4i32.v16i32(<vscale x 4 x i32>, <16 x i32>, i64)
|
||||
declare <vscale x 4 x i32> @llvm.experimental.vector.extract.nxv4i32.nxv16i32(<vscale x 16 x i32>, i64)
|
||||
declare <vscale x 16 x i32> @llvm.experimental.vector.insert.nxv16i32.nxv4i32(<vscale x 16 x i32>, <vscale x 4 x i32>, i64)
|
||||
declare <16 x i32> @llvm.vector.extract.v16i32.nxv4i32(<vscale x 4 x i32>, i64)
|
||||
declare <vscale x 4 x i32> @llvm.vector.insert.nxv4i32.v16i32(<vscale x 4 x i32>, <16 x i32>, i64)
|
||||
declare <vscale x 4 x i32> @llvm.vector.extract.nxv4i32.nxv16i32(<vscale x 16 x i32>, i64)
|
||||
declare <vscale x 16 x i32> @llvm.vector.insert.nxv16i32.nxv4i32(<vscale x 16 x i32>, <vscale x 4 x i32>, i64)
|
||||
|
||||
|
||||
define void @reductions(<vscale x 4 x i32> %v0, <vscale x 4 x i64> %v1, <vscale x 4 x float> %v2, <vscale x 4 x double> %v3) {
|
||||
|
|
|
@ -36,22 +36,22 @@ define void @vector_broadcast() {
|
|||
|
||||
define void @vector_insert_extract(<vscale x 4 x i32> %v0, <vscale x 16 x i32> %v1, <16 x i32> %v2) {
|
||||
; CHECK-LABEL: 'vector_insert_extract'
|
||||
; CHECK-NEXT: Cost Model: Found an estimated cost of 32 for instruction: %extract_fixed_from_scalable = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32(<vscale x 4 x i32> %v0, i64 0)
|
||||
; CHECK-NEXT: Cost Model: Found an estimated cost of 32 for instruction: %insert_fixed_into_scalable = call <vscale x 4 x i32> @llvm.experimental.vector.insert.nxv4i32.v16i32(<vscale x 4 x i32> %v0, <16 x i32> %v2, i64 0)
|
||||
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %extract_scalable_from_scalable = call <vscale x 4 x i32> @llvm.experimental.vector.extract.nxv4i32.nxv16i32(<vscale x 16 x i32> %v1, i64 0)
|
||||
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %insert_scalable_into_scalable = call <vscale x 16 x i32> @llvm.experimental.vector.insert.nxv16i32.nxv4i32(<vscale x 16 x i32> %v1, <vscale x 4 x i32> %v0, i64 0)
|
||||
; CHECK-NEXT: Cost Model: Found an estimated cost of 32 for instruction: %extract_fixed_from_scalable = call <16 x i32> @llvm.vector.extract.v16i32.nxv4i32(<vscale x 4 x i32> %v0, i64 0)
|
||||
; CHECK-NEXT: Cost Model: Found an estimated cost of 32 for instruction: %insert_fixed_into_scalable = call <vscale x 4 x i32> @llvm.vector.insert.nxv4i32.v16i32(<vscale x 4 x i32> %v0, <16 x i32> %v2, i64 0)
|
||||
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %extract_scalable_from_scalable = call <vscale x 4 x i32> @llvm.vector.extract.nxv4i32.nxv16i32(<vscale x 16 x i32> %v1, i64 0)
|
||||
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %insert_scalable_into_scalable = call <vscale x 16 x i32> @llvm.vector.insert.nxv16i32.nxv4i32(<vscale x 16 x i32> %v1, <vscale x 4 x i32> %v0, i64 0)
|
||||
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: ret void
|
||||
;
|
||||
%extract_fixed_from_scalable = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32(<vscale x 4 x i32> %v0, i64 0)
|
||||
%insert_fixed_into_scalable = call <vscale x 4 x i32> @llvm.experimental.vector.insert.nxv4i32.v16i32(<vscale x 4 x i32> %v0, <16 x i32> %v2, i64 0)
|
||||
%extract_scalable_from_scalable = call <vscale x 4 x i32> @llvm.experimental.vector.extract.nxv4i32.nxv16i32(<vscale x 16 x i32> %v1, i64 0)
|
||||
%insert_scalable_into_scalable = call <vscale x 16 x i32> @llvm.experimental.vector.insert.nxv16i32.nxv4i32(<vscale x 16 x i32> %v1, <vscale x 4 x i32> %v0, i64 0)
|
||||
%extract_fixed_from_scalable = call <16 x i32> @llvm.vector.extract.v16i32.nxv4i32(<vscale x 4 x i32> %v0, i64 0)
|
||||
%insert_fixed_into_scalable = call <vscale x 4 x i32> @llvm.vector.insert.nxv4i32.v16i32(<vscale x 4 x i32> %v0, <16 x i32> %v2, i64 0)
|
||||
%extract_scalable_from_scalable = call <vscale x 4 x i32> @llvm.vector.extract.nxv4i32.nxv16i32(<vscale x 16 x i32> %v1, i64 0)
|
||||
%insert_scalable_into_scalable = call <vscale x 16 x i32> @llvm.vector.insert.nxv16i32.nxv4i32(<vscale x 16 x i32> %v1, <vscale x 4 x i32> %v0, i64 0)
|
||||
ret void
|
||||
}
|
||||
declare <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32(<vscale x 4 x i32>, i64)
|
||||
declare <vscale x 4 x i32> @llvm.experimental.vector.insert.nxv4i32.v16i32(<vscale x 4 x i32>, <16 x i32>, i64)
|
||||
declare <vscale x 4 x i32> @llvm.experimental.vector.extract.nxv4i32.nxv16i32(<vscale x 16 x i32>, i64)
|
||||
declare <vscale x 16 x i32> @llvm.experimental.vector.insert.nxv16i32.nxv4i32(<vscale x 16 x i32>, <vscale x 4 x i32>, i64)
|
||||
declare <16 x i32> @llvm.vector.extract.v16i32.nxv4i32(<vscale x 4 x i32>, i64)
|
||||
declare <vscale x 4 x i32> @llvm.vector.insert.nxv4i32.v16i32(<vscale x 4 x i32>, <16 x i32>, i64)
|
||||
declare <vscale x 4 x i32> @llvm.vector.extract.nxv4i32.nxv16i32(<vscale x 16 x i32>, i64)
|
||||
declare <vscale x 16 x i32> @llvm.vector.insert.nxv16i32.nxv4i32(<vscale x 16 x i32>, <vscale x 4 x i32>, i64)
|
||||
|
||||
define void @vector_reverse() {
|
||||
; CHECK-LABEL: 'vector_reverse'
|
||||
|
|
|
@ -0,0 +1,22 @@
|
|||
; RUN: opt -S < %s | FileCheck %s
|
||||
; RUN: llvm-dis < %s.bc | FileCheck %s
|
||||
|
||||
define <vscale x 16 x i8> @insert(<vscale x 16 x i8> %a, <4 x i8> %b) {
|
||||
; CHECK-LABEL: @insert
|
||||
; CHECK: %res = call <vscale x 16 x i8> @llvm.vector.insert.nxv16i8.v4i8(<vscale x 16 x i8> %a, <4 x i8> %b, i64 0)
|
||||
%res = call <vscale x 16 x i8> @llvm.experimental.vector.insert.nxv16i8.v4i8(<vscale x 16 x i8> %a, <4 x i8> %b, i64 0)
|
||||
ret <vscale x 16 x i8> %res
|
||||
}
|
||||
|
||||
define <4 x i8> @extract(<vscale x 16 x i8> %a) {
|
||||
; CHECK-LABEL: @extract
|
||||
; CHECK: %res = call <4 x i8> @llvm.vector.extract.v4i8.nxv16i8(<vscale x 16 x i8> %a, i64 0)
|
||||
%res = call <4 x i8> @llvm.experimental.vector.extract.v4i8.nxv16i8(<vscale x 16 x i8> %a, i64 0)
|
||||
ret <4 x i8> %res
|
||||
}
|
||||
|
||||
declare <vscale x 16 x i8> @llvm.experimental.vector.insert.nxv16i8.v4i8(<vscale x 16 x i8>, <4 x i8>, i64 immarg)
|
||||
; CHECK: declare <vscale x 16 x i8> @llvm.vector.insert.nxv16i8.v4i8(<vscale x 16 x i8>, <4 x i8>, i64 immarg)
|
||||
|
||||
declare <4 x i8> @llvm.experimental.vector.extract.v4i8.nxv16i8(<vscale x 16 x i8>, i64 immarg)
|
||||
; CHECK: declare <4 x i8> @llvm.vector.extract.v4i8.nxv16i8(<vscale x 16 x i8>, i64 immarg)
|
Binary file not shown.
|
@ -8,18 +8,18 @@
|
|||
target triple = "aarch64-unknown-linux-gnu"
|
||||
attributes #0 = {"target-features"="+sve"}
|
||||
|
||||
declare <16 x float> @llvm.experimental.vector.extract.v16f32.nxv4f32(<vscale x 4 x float>, i64)
|
||||
declare <vscale x 2 x double> @llvm.experimental.vector.insert.nxv2f64.v8f64(<vscale x 2 x double>, <8 x double>, i64)
|
||||
declare <16 x float> @llvm.vector.extract.v16f32.nxv4f32(<vscale x 4 x float>, i64)
|
||||
declare <vscale x 2 x double> @llvm.vector.insert.nxv2f64.v8f64(<vscale x 2 x double>, <8 x double>, i64)
|
||||
|
||||
define <vscale x 2 x double> @reproducer_one(<vscale x 4 x float> %vec_a) #0 {
|
||||
%a = call <16 x float> @llvm.experimental.vector.extract.v16f32.nxv4f32(<vscale x 4 x float> %vec_a, i64 0)
|
||||
%a = call <16 x float> @llvm.vector.extract.v16f32.nxv4f32(<vscale x 4 x float> %vec_a, i64 0)
|
||||
%b = bitcast <16 x float> %a to <8 x double>
|
||||
%retval = call <vscale x 2 x double> @llvm.experimental.vector.insert.nxv2f64.v8f64(<vscale x 2 x double> undef, <8 x double> %b, i64 0)
|
||||
%retval = call <vscale x 2 x double> @llvm.vector.insert.nxv2f64.v8f64(<vscale x 2 x double> undef, <8 x double> %b, i64 0)
|
||||
ret <vscale x 2 x double> %retval
|
||||
}
|
||||
|
||||
define <vscale x 2 x double> @reproducer_two(<4 x double> %a, <4 x double> %b) #0 {
|
||||
%concat = shufflevector <4 x double> %a, <4 x double> %b, <8 x i32> <i32 4, i32 5, i32 6, i32 7, i32 0, i32 1, i32 2, i32 3>
|
||||
%retval = call <vscale x 2 x double> @llvm.experimental.vector.insert.nxv2f64.v8f64(<vscale x 2 x double> undef, <8 x double> %concat, i64 0)
|
||||
%retval = call <vscale x 2 x double> @llvm.vector.insert.nxv2f64.v8f64(<vscale x 2 x double> undef, <8 x double> %concat, i64 0)
|
||||
ret <vscale x 2 x double> %retval
|
||||
}
|
||||
|
|
|
@ -17,7 +17,7 @@ define <vscale x 8 x i8> @vec_scalable_subvec_scalable_idx_zero_i8(<vscale x 8 x
|
|||
; CHECK-NEXT: ret
|
||||
%vec = load <vscale x 8 x i8>, <vscale x 8 x i8>* %a
|
||||
%subvec = load <vscale x 4 x i8>, <vscale x 4 x i8>* %b
|
||||
%ins = call <vscale x 8 x i8> @llvm.experimental.vector.insert.nxv8i8.nxv4i8(<vscale x 8 x i8> %vec, <vscale x 4 x i8> %subvec, i64 0)
|
||||
%ins = call <vscale x 8 x i8> @llvm.vector.insert.nxv8i8.nxv4i8(<vscale x 8 x i8> %vec, <vscale x 4 x i8> %subvec, i64 0)
|
||||
ret <vscale x 8 x i8> %ins
|
||||
}
|
||||
|
||||
|
@ -33,7 +33,7 @@ define <vscale x 8 x i8> @vec_scalable_subvec_scalable_idx_nonzero_i8(<vscale x
|
|||
; CHECK-NEXT: ret
|
||||
%vec = load <vscale x 8 x i8>, <vscale x 8 x i8>* %a
|
||||
%subvec = load <vscale x 4 x i8>, <vscale x 4 x i8>* %b
|
||||
%ins = call <vscale x 8 x i8> @llvm.experimental.vector.insert.nxv8i8.nxv4i8(<vscale x 8 x i8> %vec, <vscale x 4 x i8> %subvec, i64 4)
|
||||
%ins = call <vscale x 8 x i8> @llvm.vector.insert.nxv8i8.nxv4i8(<vscale x 8 x i8> %vec, <vscale x 4 x i8> %subvec, i64 4)
|
||||
ret <vscale x 8 x i8> %ins
|
||||
}
|
||||
|
||||
|
@ -49,7 +49,7 @@ define <vscale x 4 x i16> @vec_scalable_subvec_scalable_idx_zero_i16(<vscale x 4
|
|||
; CHECK-NEXT: ret
|
||||
%vec = load <vscale x 4 x i16>, <vscale x 4 x i16>* %a
|
||||
%subvec = load <vscale x 2 x i16>, <vscale x 2 x i16>* %b
|
||||
%ins = call <vscale x 4 x i16> @llvm.experimental.vector.insert.nxv4i16.nxv2i16(<vscale x 4 x i16> %vec, <vscale x 2 x i16> %subvec, i64 0)
|
||||
%ins = call <vscale x 4 x i16> @llvm.vector.insert.nxv4i16.nxv2i16(<vscale x 4 x i16> %vec, <vscale x 2 x i16> %subvec, i64 0)
|
||||
ret <vscale x 4 x i16> %ins
|
||||
}
|
||||
|
||||
|
@ -65,7 +65,7 @@ define <vscale x 4 x i16> @vec_scalable_subvec_scalable_idx_nonzero_i16(<vscale
|
|||
; CHECK-NEXT: ret
|
||||
%vec = load <vscale x 4 x i16>, <vscale x 4 x i16>* %a
|
||||
%subvec = load <vscale x 2 x i16>, <vscale x 2 x i16>* %b
|
||||
%ins = call <vscale x 4 x i16> @llvm.experimental.vector.insert.nxv4i16.nxv2i16(<vscale x 4 x i16> %vec, <vscale x 2 x i16> %subvec, i64 2)
|
||||
%ins = call <vscale x 4 x i16> @llvm.vector.insert.nxv4i16.nxv2i16(<vscale x 4 x i16> %vec, <vscale x 2 x i16> %subvec, i64 2)
|
||||
ret <vscale x 4 x i16> %ins
|
||||
}
|
||||
|
||||
|
@ -83,7 +83,7 @@ define <vscale x 8 x i8> @vec_scalable_subvec_fixed_idx_zero_i8(<vscale x 8 x i8
|
|||
; CHECK-NEXT: ret
|
||||
%vec = load <vscale x 8 x i8>, <vscale x 8 x i8>* %a
|
||||
%subvec = load <8 x i8>, <8 x i8>* %b
|
||||
%ins = call <vscale x 8 x i8> @llvm.experimental.vector.insert.nxv8i8.v8i8(<vscale x 8 x i8> %vec, <8 x i8> %subvec, i64 0)
|
||||
%ins = call <vscale x 8 x i8> @llvm.vector.insert.nxv8i8.v8i8(<vscale x 8 x i8> %vec, <8 x i8> %subvec, i64 0)
|
||||
ret <vscale x 8 x i8> %ins
|
||||
}
|
||||
|
||||
|
@ -111,7 +111,7 @@ define <vscale x 8 x i8> @vec_scalable_subvec_fixed_idx_nonzero_i8(<vscale x 8 x
|
|||
; CHECK-NEXT: ret
|
||||
%vec = load <vscale x 8 x i8>, <vscale x 8 x i8>* %a
|
||||
%subvec = load <8 x i8>, <8 x i8>* %b
|
||||
%ins = call <vscale x 8 x i8> @llvm.experimental.vector.insert.nxv8i8.v8i8(<vscale x 8 x i8> %vec, <8 x i8> %subvec, i64 8)
|
||||
%ins = call <vscale x 8 x i8> @llvm.vector.insert.nxv8i8.v8i8(<vscale x 8 x i8> %vec, <8 x i8> %subvec, i64 8)
|
||||
ret <vscale x 8 x i8> %ins
|
||||
}
|
||||
|
||||
|
@ -127,7 +127,7 @@ define <vscale x 4 x i16> @vec_scalable_subvec_fixed_idx_zero_i16(<vscale x 4 x
|
|||
; CHECK-NEXT: ret
|
||||
%vec = load <vscale x 4 x i16>, <vscale x 4 x i16>* %a
|
||||
%subvec = load <4 x i16>, <4 x i16>* %b
|
||||
%ins = call <vscale x 4 x i16> @llvm.experimental.vector.insert.nxv4i16.v4i16(<vscale x 4 x i16> %vec, <4 x i16> %subvec, i64 0)
|
||||
%ins = call <vscale x 4 x i16> @llvm.vector.insert.nxv4i16.v4i16(<vscale x 4 x i16> %vec, <4 x i16> %subvec, i64 0)
|
||||
ret <vscale x 4 x i16> %ins
|
||||
}
|
||||
|
||||
|
@ -155,7 +155,7 @@ define <vscale x 4 x i16> @vec_scalable_subvec_fixed_idx_nonzero_i16(<vscale x 4
|
|||
; CHECK-NEXT: ret
|
||||
%vec = load <vscale x 4 x i16>, <vscale x 4 x i16>* %a
|
||||
%subvec = load <4 x i16>, <4 x i16>* %b
|
||||
%ins = call <vscale x 4 x i16> @llvm.experimental.vector.insert.nxv4i16.v4i16(<vscale x 4 x i16> %vec, <4 x i16> %subvec, i64 4)
|
||||
%ins = call <vscale x 4 x i16> @llvm.vector.insert.nxv4i16.v4i16(<vscale x 4 x i16> %vec, <4 x i16> %subvec, i64 4)
|
||||
ret <vscale x 4 x i16> %ins
|
||||
}
|
||||
|
||||
|
@ -171,7 +171,7 @@ define <vscale x 2 x i32> @vec_scalable_subvec_fixed_idx_zero_i32(<vscale x 2 x
|
|||
; CHECK-NEXT: ret
|
||||
%vec = load <vscale x 2 x i32>, <vscale x 2 x i32>* %a
|
||||
%subvec = load <2 x i32>, <2 x i32>* %b
|
||||
%ins = call <vscale x 2 x i32> @llvm.experimental.vector.insert.nxv2i32.v2i32(<vscale x 2 x i32> %vec, <2 x i32> %subvec, i64 0)
|
||||
%ins = call <vscale x 2 x i32> @llvm.vector.insert.nxv2i32.v2i32(<vscale x 2 x i32> %vec, <2 x i32> %subvec, i64 0)
|
||||
ret <vscale x 2 x i32> %ins
|
||||
}
|
||||
|
||||
|
@ -199,7 +199,7 @@ define <vscale x 2 x i32> @vec_scalable_subvec_fixed_idx_nonzero_i32(<vscale x 2
|
|||
; CHECK-NEXT: ret
|
||||
%vec = load <vscale x 2 x i32>, <vscale x 2 x i32>* %a
|
||||
%subvec = load <2 x i32>, <2 x i32>* %b
|
||||
%ins = call <vscale x 2 x i32> @llvm.experimental.vector.insert.nxv2i32.v2i32(<vscale x 2 x i32> %vec, <2 x i32> %subvec, i64 2)
|
||||
%ins = call <vscale x 2 x i32> @llvm.vector.insert.nxv2i32.v2i32(<vscale x 2 x i32> %vec, <2 x i32> %subvec, i64 2)
|
||||
ret <vscale x 2 x i32> %ins
|
||||
}
|
||||
|
||||
|
@ -228,18 +228,18 @@ define <vscale x 2 x i32> @vec_scalable_subvec_fixed_idx_nonzero_large_i32(<vsca
|
|||
; CHECK-NEXT: ret
|
||||
%vec = load <vscale x 2 x i32>, <vscale x 2 x i32>* %a
|
||||
%subvec = load <8 x i32>, <8 x i32>* %b
|
||||
%ins = call <vscale x 2 x i32> @llvm.experimental.vector.insert.nxv2i32.v8i32(<vscale x 2 x i32> %vec, <8 x i32> %subvec, i64 8)
|
||||
%ins = call <vscale x 2 x i32> @llvm.vector.insert.nxv2i32.v8i32(<vscale x 2 x i32> %vec, <8 x i32> %subvec, i64 8)
|
||||
ret <vscale x 2 x i32> %ins
|
||||
}
|
||||
|
||||
declare <vscale x 8 x i8> @llvm.experimental.vector.insert.nxv8i8.nxv4i8(<vscale x 8 x i8>, <vscale x 4 x i8>, i64)
|
||||
declare <vscale x 4 x i16> @llvm.experimental.vector.insert.nxv4i16.nxv2i16(<vscale x 4 x i16>, <vscale x 2 x i16>, i64)
|
||||
declare <vscale x 8 x i8> @llvm.vector.insert.nxv8i8.nxv4i8(<vscale x 8 x i8>, <vscale x 4 x i8>, i64)
|
||||
declare <vscale x 4 x i16> @llvm.vector.insert.nxv4i16.nxv2i16(<vscale x 4 x i16>, <vscale x 2 x i16>, i64)
|
||||
|
||||
declare <vscale x 8 x i8> @llvm.experimental.vector.insert.nxv8i8.v8i8(<vscale x 8 x i8>, <8 x i8>, i64)
|
||||
declare <vscale x 4 x i16> @llvm.experimental.vector.insert.nxv4i16.v4i16(<vscale x 4 x i16>, <4 x i16>, i64)
|
||||
declare <vscale x 2 x i32> @llvm.experimental.vector.insert.nxv2i32.v2i32(<vscale x 2 x i32>, <2 x i32>, i64)
|
||||
declare <vscale x 8 x i8> @llvm.vector.insert.nxv8i8.v8i8(<vscale x 8 x i8>, <8 x i8>, i64)
|
||||
declare <vscale x 4 x i16> @llvm.vector.insert.nxv4i16.v4i16(<vscale x 4 x i16>, <4 x i16>, i64)
|
||||
declare <vscale x 2 x i32> @llvm.vector.insert.nxv2i32.v2i32(<vscale x 2 x i32>, <2 x i32>, i64)
|
||||
|
||||
declare <vscale x 2 x i32> @llvm.experimental.vector.insert.nxv2i32.v8i32(<vscale x 2 x i32>, <8 x i32>, i64)
|
||||
declare <vscale x 2 x i32> @llvm.vector.insert.nxv2i32.v8i32(<vscale x 2 x i32>, <8 x i32>, i64)
|
||||
|
||||
attributes #0 = { nounwind "target-features"="+sve" }
|
||||
attributes #1 = { nounwind "target-features"="+sve" vscale_range(4,4) }
|
||||
|
|
|
@ -5,8 +5,8 @@
|
|||
target triple = "aarch64-unknown-linux-gnu"
|
||||
attributes #0 = {"target-features"="+sve" uwtable}
|
||||
|
||||
declare <vscale x 2 x i64> @llvm.experimental.vector.insert.nxv2i64.v8i64(<vscale x 2 x i64>, <8 x i64>, i64)
|
||||
declare <vscale x 2 x double> @llvm.experimental.vector.insert.nxv2f64.v8f64(<vscale x 2 x double>, <8 x double>, i64)
|
||||
declare <vscale x 2 x i64> @llvm.vector.insert.nxv2i64.v8i64(<vscale x 2 x i64>, <8 x i64>, i64)
|
||||
declare <vscale x 2 x double> @llvm.vector.insert.nxv2f64.v8f64(<vscale x 2 x double>, <8 x double>, i64)
|
||||
|
||||
define <vscale x 2 x i64> @test_nxv2i64_v8i64(<vscale x 2 x i64> %a, <8 x i64> %b) #0 {
|
||||
; CHECK-LEGALIZATION: Legally typed node: [[T1:t[0-9]+]]: nxv2i64 = insert_subvector {{t[0-9]+}}, {{t[0-9]+}}, Constant:i64<0>
|
||||
|
@ -61,7 +61,7 @@ define <vscale x 2 x i64> @test_nxv2i64_v8i64(<vscale x 2 x i64> %a, <8 x i64> %
|
|||
|
||||
|
||||
|
||||
%r = call <vscale x 2 x i64> @llvm.experimental.vector.insert.nxv2i64.v8i64(<vscale x 2 x i64> %a, <8 x i64> %b, i64 0)
|
||||
%r = call <vscale x 2 x i64> @llvm.vector.insert.nxv2i64.v8i64(<vscale x 2 x i64> %a, <8 x i64> %b, i64 0)
|
||||
ret <vscale x 2 x i64> %r
|
||||
}
|
||||
|
||||
|
@ -118,6 +118,6 @@ define <vscale x 2 x double> @test_nxv2f64_v8f64(<vscale x 2 x double> %a, <8 x
|
|||
|
||||
|
||||
|
||||
%r = call <vscale x 2 x double> @llvm.experimental.vector.insert.nxv2f64.v8f64(<vscale x 2 x double> %a, <8 x double> %b, i64 0)
|
||||
%r = call <vscale x 2 x double> @llvm.vector.insert.nxv2f64.v8f64(<vscale x 2 x double> %a, <8 x double> %b, i64 0)
|
||||
ret <vscale x 2 x double> %r
|
||||
}
|
||||
|
|
|
@ -4,8 +4,8 @@
|
|||
|
||||
; CHECK-ERROR: ERROR: Extracting a fixed-length vector from an illegal scalable vector is not yet supported
|
||||
define <4 x i32> @extract_v4i32_nxv16i32_12(<vscale x 16 x i32> %arg) {
|
||||
%ext = call <4 x i32> @llvm.experimental.vector.extract.v4i32.nxv16i32(<vscale x 16 x i32> %arg, i64 12)
|
||||
%ext = call <4 x i32> @llvm.vector.extract.v4i32.nxv16i32(<vscale x 16 x i32> %arg, i64 12)
|
||||
ret <4 x i32> %ext
|
||||
}
|
||||
|
||||
declare <4 x i32> @llvm.experimental.vector.extract.v4i32.nxv16i32(<vscale x 16 x i32>, i64)
|
||||
declare <4 x i32> @llvm.vector.extract.v4i32.nxv16i32(<vscale x 16 x i32>, i64)
|
||||
|
|
|
@ -7,7 +7,7 @@ define <2 x i64> @extract_v2i64_nxv2i64(<vscale x 2 x i64> %vec) nounwind {
|
|||
; CHECK: // %bb.0:
|
||||
; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0
|
||||
; CHECK-NEXT: ret
|
||||
%retval = call <2 x i64> @llvm.experimental.vector.extract.v2i64.nxv2i64(<vscale x 2 x i64> %vec, i64 0)
|
||||
%retval = call <2 x i64> @llvm.vector.extract.v2i64.nxv2i64(<vscale x 2 x i64> %vec, i64 0)
|
||||
ret <2 x i64> %retval
|
||||
}
|
||||
|
||||
|
@ -30,7 +30,7 @@ define <2 x i64> @extract_v2i64_nxv2i64_idx2(<vscale x 2 x i64> %vec) nounwind {
|
|||
; CHECK-NEXT: addvl sp, sp, #1
|
||||
; CHECK-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload
|
||||
; CHECK-NEXT: ret
|
||||
%retval = call <2 x i64> @llvm.experimental.vector.extract.v2i64.nxv2i64(<vscale x 2 x i64> %vec, i64 2)
|
||||
%retval = call <2 x i64> @llvm.vector.extract.v2i64.nxv2i64(<vscale x 2 x i64> %vec, i64 2)
|
||||
ret <2 x i64> %retval
|
||||
}
|
||||
|
||||
|
@ -40,7 +40,7 @@ define <4 x i32> @extract_v4i32_nxv4i32(<vscale x 4 x i32> %vec) nounwind {
|
|||
; CHECK: // %bb.0:
|
||||
; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0
|
||||
; CHECK-NEXT: ret
|
||||
%retval = call <4 x i32> @llvm.experimental.vector.extract.v4i32.nxv4i32(<vscale x 4 x i32> %vec, i64 0)
|
||||
%retval = call <4 x i32> @llvm.vector.extract.v4i32.nxv4i32(<vscale x 4 x i32> %vec, i64 0)
|
||||
ret <4 x i32> %retval
|
||||
}
|
||||
|
||||
|
@ -63,7 +63,7 @@ define <4 x i32> @extract_v4i32_nxv4i32_idx4(<vscale x 4 x i32> %vec) nounwind {
|
|||
; CHECK-NEXT: addvl sp, sp, #1
|
||||
; CHECK-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload
|
||||
; CHECK-NEXT: ret
|
||||
%retval = call <4 x i32> @llvm.experimental.vector.extract.v4i32.nxv4i32(<vscale x 4 x i32> %vec, i64 4)
|
||||
%retval = call <4 x i32> @llvm.vector.extract.v4i32.nxv4i32(<vscale x 4 x i32> %vec, i64 4)
|
||||
ret <4 x i32> %retval
|
||||
}
|
||||
|
||||
|
@ -74,7 +74,7 @@ define <4 x i32> @extract_v4i32_nxv2i32(<vscale x 2 x i32> %vec) nounwind #1 {
|
|||
; CHECK-NEXT: uzp1 z0.s, z0.s, z0.s
|
||||
; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0
|
||||
; CHECK-NEXT: ret
|
||||
%retval = call <4 x i32> @llvm.experimental.vector.extract.v4i32.nxv2i32(<vscale x 2 x i32> %vec, i64 0)
|
||||
%retval = call <4 x i32> @llvm.vector.extract.v4i32.nxv2i32(<vscale x 2 x i32> %vec, i64 0)
|
||||
ret <4 x i32> %retval
|
||||
}
|
||||
|
||||
|
@ -100,7 +100,7 @@ define <4 x i32> @extract_v4i32_nxv2i32_idx4(<vscale x 2 x i32> %vec) nounwind #
|
|||
; CHECK-NEXT: addvl sp, sp, #1
|
||||
; CHECK-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload
|
||||
; CHECK-NEXT: ret
|
||||
%retval = call <4 x i32> @llvm.experimental.vector.extract.v4i32.nxv2i32(<vscale x 2 x i32> %vec, i64 4)
|
||||
%retval = call <4 x i32> @llvm.vector.extract.v4i32.nxv2i32(<vscale x 2 x i32> %vec, i64 4)
|
||||
ret <4 x i32> %retval
|
||||
}
|
||||
|
||||
|
@ -110,7 +110,7 @@ define <8 x i16> @extract_v8i16_nxv8i16(<vscale x 8 x i16> %vec) nounwind {
|
|||
; CHECK: // %bb.0:
|
||||
; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0
|
||||
; CHECK-NEXT: ret
|
||||
%retval = call <8 x i16> @llvm.experimental.vector.extract.v8i16.nxv8i16(<vscale x 8 x i16> %vec, i64 0)
|
||||
%retval = call <8 x i16> @llvm.vector.extract.v8i16.nxv8i16(<vscale x 8 x i16> %vec, i64 0)
|
||||
ret <8 x i16> %retval
|
||||
}
|
||||
|
||||
|
@ -133,7 +133,7 @@ define <8 x i16> @extract_v8i16_nxv8i16_idx8(<vscale x 8 x i16> %vec) nounwind {
|
|||
; CHECK-NEXT: addvl sp, sp, #1
|
||||
; CHECK-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload
|
||||
; CHECK-NEXT: ret
|
||||
%retval = call <8 x i16> @llvm.experimental.vector.extract.v8i16.nxv8i16(<vscale x 8 x i16> %vec, i64 8)
|
||||
%retval = call <8 x i16> @llvm.vector.extract.v8i16.nxv8i16(<vscale x 8 x i16> %vec, i64 8)
|
||||
ret <8 x i16> %retval
|
||||
}
|
||||
|
||||
|
@ -144,7 +144,7 @@ define <8 x i16> @extract_v8i16_nxv4i16(<vscale x 4 x i16> %vec) nounwind #1 {
|
|||
; CHECK-NEXT: uzp1 z0.h, z0.h, z0.h
|
||||
; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0
|
||||
; CHECK-NEXT: ret
|
||||
%retval = call <8 x i16> @llvm.experimental.vector.extract.v8i16.nxv4i16(<vscale x 4 x i16> %vec, i64 0)
|
||||
%retval = call <8 x i16> @llvm.vector.extract.v8i16.nxv4i16(<vscale x 4 x i16> %vec, i64 0)
|
||||
ret <8 x i16> %retval
|
||||
}
|
||||
|
||||
|
@ -170,7 +170,7 @@ define <8 x i16> @extract_v8i16_nxv4i16_idx8(<vscale x 4 x i16> %vec) nounwind #
|
|||
; CHECK-NEXT: addvl sp, sp, #1
|
||||
; CHECK-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload
|
||||
; CHECK-NEXT: ret
|
||||
%retval = call <8 x i16> @llvm.experimental.vector.extract.v8i16.nxv4i16(<vscale x 4 x i16> %vec, i64 8)
|
||||
%retval = call <8 x i16> @llvm.vector.extract.v8i16.nxv4i16(<vscale x 4 x i16> %vec, i64 8)
|
||||
ret <8 x i16> %retval
|
||||
}
|
||||
|
||||
|
@ -182,7 +182,7 @@ define <8 x i16> @extract_v8i16_nxv2i16(<vscale x 2 x i16> %vec) nounwind #1 {
|
|||
; CHECK-NEXT: uzp1 z0.h, z0.h, z0.h
|
||||
; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0
|
||||
; CHECK-NEXT: ret
|
||||
%retval = call <8 x i16> @llvm.experimental.vector.extract.v8i16.nxv2i16(<vscale x 2 x i16> %vec, i64 0)
|
||||
%retval = call <8 x i16> @llvm.vector.extract.v8i16.nxv2i16(<vscale x 2 x i16> %vec, i64 0)
|
||||
ret <8 x i16> %retval
|
||||
}
|
||||
|
||||
|
@ -209,7 +209,7 @@ define <8 x i16> @extract_v8i16_nxv2i16_idx8(<vscale x 2 x i16> %vec) nounwind #
|
|||
; CHECK-NEXT: addvl sp, sp, #1
|
||||
; CHECK-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload
|
||||
; CHECK-NEXT: ret
|
||||
%retval = call <8 x i16> @llvm.experimental.vector.extract.v8i16.nxv2i16(<vscale x 2 x i16> %vec, i64 8)
|
||||
%retval = call <8 x i16> @llvm.vector.extract.v8i16.nxv2i16(<vscale x 2 x i16> %vec, i64 8)
|
||||
ret <8 x i16> %retval
|
||||
}
|
||||
|
||||
|
@ -219,7 +219,7 @@ define <16 x i8> @extract_v16i8_nxv16i8(<vscale x 16 x i8> %vec) nounwind {
|
|||
; CHECK: // %bb.0:
|
||||
; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0
|
||||
; CHECK-NEXT: ret
|
||||
%retval = call <16 x i8> @llvm.experimental.vector.extract.v16i8.nxv16i8(<vscale x 16 x i8> %vec, i64 0)
|
||||
%retval = call <16 x i8> @llvm.vector.extract.v16i8.nxv16i8(<vscale x 16 x i8> %vec, i64 0)
|
||||
ret <16 x i8> %retval
|
||||
}
|
||||
|
||||
|
@ -241,7 +241,7 @@ define <16 x i8> @extract_v16i8_nxv16i8_idx16(<vscale x 16 x i8> %vec) nounwind
|
|||
; CHECK-NEXT: addvl sp, sp, #1
|
||||
; CHECK-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload
|
||||
; CHECK-NEXT: ret
|
||||
%retval = call <16 x i8> @llvm.experimental.vector.extract.v16i8.nxv16i8(<vscale x 16 x i8> %vec, i64 16)
|
||||
%retval = call <16 x i8> @llvm.vector.extract.v16i8.nxv16i8(<vscale x 16 x i8> %vec, i64 16)
|
||||
ret <16 x i8> %retval
|
||||
}
|
||||
|
||||
|
@ -252,7 +252,7 @@ define <16 x i8> @extract_v16i8_nxv8i8(<vscale x 8 x i8> %vec) nounwind #1 {
|
|||
; CHECK-NEXT: uzp1 z0.b, z0.b, z0.b
|
||||
; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0
|
||||
; CHECK-NEXT: ret
|
||||
%retval = call <16 x i8> @llvm.experimental.vector.extract.v16i8.nxv8i8(<vscale x 8 x i8> %vec, i64 0)
|
||||
%retval = call <16 x i8> @llvm.vector.extract.v16i8.nxv8i8(<vscale x 8 x i8> %vec, i64 0)
|
||||
ret <16 x i8> %retval
|
||||
}
|
||||
|
||||
|
@ -278,7 +278,7 @@ define <16 x i8> @extract_v16i8_nxv8i8_idx16(<vscale x 8 x i8> %vec) nounwind #1
|
|||
; CHECK-NEXT: addvl sp, sp, #1
|
||||
; CHECK-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload
|
||||
; CHECK-NEXT: ret
|
||||
%retval = call <16 x i8> @llvm.experimental.vector.extract.v16i8.nxv8i8(<vscale x 8 x i8> %vec, i64 16)
|
||||
%retval = call <16 x i8> @llvm.vector.extract.v16i8.nxv8i8(<vscale x 8 x i8> %vec, i64 16)
|
||||
ret <16 x i8> %retval
|
||||
}
|
||||
|
||||
|
@ -290,7 +290,7 @@ define <16 x i8> @extract_v16i8_nxv4i8(<vscale x 4 x i8> %vec) nounwind #1 {
|
|||
; CHECK-NEXT: uzp1 z0.b, z0.b, z0.b
|
||||
; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0
|
||||
; CHECK-NEXT: ret
|
||||
%retval = call <16 x i8> @llvm.experimental.vector.extract.v16i8.nxv4i8(<vscale x 4 x i8> %vec, i64 0)
|
||||
%retval = call <16 x i8> @llvm.vector.extract.v16i8.nxv4i8(<vscale x 4 x i8> %vec, i64 0)
|
||||
ret <16 x i8> %retval
|
||||
}
|
||||
|
||||
|
@ -317,7 +317,7 @@ define <16 x i8> @extract_v16i8_nxv4i8_idx16(<vscale x 4 x i8> %vec) nounwind #1
|
|||
; CHECK-NEXT: addvl sp, sp, #1
|
||||
; CHECK-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload
|
||||
; CHECK-NEXT: ret
|
||||
%retval = call <16 x i8> @llvm.experimental.vector.extract.v16i8.nxv4i8(<vscale x 4 x i8> %vec, i64 16)
|
||||
%retval = call <16 x i8> @llvm.vector.extract.v16i8.nxv4i8(<vscale x 4 x i8> %vec, i64 16)
|
||||
ret <16 x i8> %retval
|
||||
}
|
||||
|
||||
|
@ -330,7 +330,7 @@ define <16 x i8> @extract_v16i8_nxv2i8(<vscale x 2 x i8> %vec) nounwind #1 {
|
|||
; CHECK-NEXT: uzp1 z0.b, z0.b, z0.b
|
||||
; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0
|
||||
; CHECK-NEXT: ret
|
||||
%retval = call <16 x i8> @llvm.experimental.vector.extract.v16i8.nxv2i8(<vscale x 2 x i8> %vec, i64 0)
|
||||
%retval = call <16 x i8> @llvm.vector.extract.v16i8.nxv2i8(<vscale x 2 x i8> %vec, i64 0)
|
||||
ret <16 x i8> %retval
|
||||
}
|
||||
|
||||
|
@ -357,7 +357,7 @@ define <16 x i8> @extract_v16i8_nxv2i8_idx16(<vscale x 2 x i8> %vec) nounwind #1
|
|||
; CHECK-NEXT: addvl sp, sp, #1
|
||||
; CHECK-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload
|
||||
; CHECK-NEXT: ret
|
||||
%retval = call <16 x i8> @llvm.experimental.vector.extract.v16i8.nxv2i8(<vscale x 2 x i8> %vec, i64 16)
|
||||
%retval = call <16 x i8> @llvm.vector.extract.v16i8.nxv2i8(<vscale x 2 x i8> %vec, i64 16)
|
||||
ret <16 x i8> %retval
|
||||
}
|
||||
|
||||
|
@ -374,7 +374,7 @@ define <2 x i1> @extract_v2i1_nxv2i1(<vscale x 2 x i1> %inmask) {
|
|||
; CHECK-NEXT: mov v0.s[1], w8
|
||||
; CHECK-NEXT: // kill: def $d0 killed $d0 killed $q0
|
||||
; CHECK-NEXT: ret
|
||||
%mask = call <2 x i1> @llvm.experimental.vector.extract.v2i1.nxv2i1(<vscale x 2 x i1> %inmask, i64 0)
%mask = call <2 x i1> @llvm.vector.extract.v2i1.nxv2i1(<vscale x 2 x i1> %inmask, i64 0)
ret <2 x i1> %mask
}

@ -391,7 +391,7 @@ define <4 x i1> @extract_v4i1_nxv4i1(<vscale x 4 x i1> %inmask) {
; CHECK-NEXT: mov v0.h[3], w8
; CHECK-NEXT: // kill: def $d0 killed $d0 killed $q0
; CHECK-NEXT: ret
%mask = call <4 x i1> @llvm.experimental.vector.extract.v4i1.nxv4i1(<vscale x 4 x i1> %inmask, i64 0)
%mask = call <4 x i1> @llvm.vector.extract.v4i1.nxv4i1(<vscale x 4 x i1> %inmask, i64 0)
ret <4 x i1> %mask
}

@ -416,7 +416,7 @@ define <8 x i1> @extract_v8i1_nxv8i1(<vscale x 8 x i1> %inmask) {
; CHECK-NEXT: mov v0.b[7], w8
; CHECK-NEXT: // kill: def $d0 killed $d0 killed $q0
; CHECK-NEXT: ret
%mask = call <8 x i1> @llvm.experimental.vector.extract.v8i1.nxv8i1(<vscale x 8 x i1> %inmask, i64 0)
%mask = call <8 x i1> @llvm.vector.extract.v8i1.nxv8i1(<vscale x 8 x i1> %inmask, i64 0)
ret <8 x i1> %mask
}

@ -456,7 +456,7 @@ define <16 x i1> @extract_v16i1_nxv16i1(<vscale x 16 x i1> %inmask) {
; CHECK-NEXT: mov v0.b[14], w9
; CHECK-NEXT: mov v0.b[15], w8
; CHECK-NEXT: ret
%mask = call <16 x i1> @llvm.experimental.vector.extract.v16i1.nxv16i1(<vscale x 16 x i1> %inmask, i64 0)
%mask = call <16 x i1> @llvm.vector.extract.v16i1.nxv16i1(<vscale x 16 x i1> %inmask, i64 0)
ret <16 x i1> %mask
}

@ -481,7 +481,7 @@ define <2 x i64> @extract_fixed_v2i64_nxv2i64(<vscale x 2 x i64> %vec) nounwind
; CHECK-NEXT: addvl sp, sp, #1
; CHECK-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload
; CHECK-NEXT: ret
%retval = call <2 x i64> @llvm.experimental.vector.extract.v2i64.nxv2i64(<vscale x 2 x i64> %vec, i64 2)
%retval = call <2 x i64> @llvm.vector.extract.v2i64.nxv2i64(<vscale x 2 x i64> %vec, i64 2)
ret <2 x i64> %retval
}

@ -504,7 +504,7 @@ define <4 x i64> @extract_fixed_v4i64_nxv2i64(<vscale x 2 x i64> %vec) nounwind
; CHECK-NEXT: addvl sp, sp, #1
; CHECK-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload
; CHECK-NEXT: ret
%retval = call <4 x i64> @llvm.experimental.vector.extract.v4i64.nxv2i64(<vscale x 2 x i64> %vec, i64 4)
%retval = call <4 x i64> @llvm.vector.extract.v4i64.nxv2i64(<vscale x 2 x i64> %vec, i64 4)
ret <4 x i64> %retval
}

@ -520,7 +520,7 @@ entry:
%ptr = getelementptr inbounds i32, i32* %addr, i64 %idx
%bc = bitcast i32* %ptr to <vscale x 4 x i32>*
%ld = load <vscale x 4 x i32>, <vscale x 4 x i32>* %bc, align 16
%out = call <4 x i32> @llvm.experimental.vector.extract.v4i32.nxv4i32(<vscale x 4 x i32> %ld, i64 0)
%out = call <4 x i32> @llvm.vector.extract.v4i32.nxv4i32(<vscale x 4 x i32> %ld, i64 0)
ret <4 x i32> %out
}

@ -536,7 +536,7 @@ define <2 x float> @extract_v2f32_nxv4f32_splat(float %f) {
; CHECK-NEXT: ret
%ins = insertelement <vscale x 4 x float> poison, float %f, i32 0
%splat = shufflevector <vscale x 4 x float> %ins, <vscale x 4 x float> poison, <vscale x 4 x i32> zeroinitializer
%ext = call <2 x float> @llvm.experimental.vector.extract.v2f32.nxv4f32(<vscale x 4 x float> %splat, i64 0)
%ext = call <2 x float> @llvm.vector.extract.v2f32.nxv4f32(<vscale x 4 x float> %splat, i64 0)
ret <2 x float> %ext
}

@ -547,7 +547,7 @@ define <2 x float> @extract_v2f32_nxv4f32_splat_const() {
; CHECK-NEXT: ret
%ins = insertelement <vscale x 4 x float> poison, float 1.0, i32 0
%splat = shufflevector <vscale x 4 x float> %ins, <vscale x 4 x float> poison, <vscale x 4 x i32> zeroinitializer
%ext = call <2 x float> @llvm.experimental.vector.extract.v2f32.nxv4f32(<vscale x 4 x float> %splat, i64 0)
%ext = call <2 x float> @llvm.vector.extract.v2f32.nxv4f32(<vscale x 4 x float> %splat, i64 0)
ret <2 x float> %ext
}

@ -558,32 +558,32 @@ define <4 x i32> @extract_v4i32_nxv8i32_splat_const() {
; CHECK-NEXT: ret
%ins = insertelement <vscale x 8 x i32> poison, i32 1, i32 0
%splat = shufflevector <vscale x 8 x i32> %ins, <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer
%ext = call <4 x i32> @llvm.experimental.vector.extract.v4i32.nxv8i32(<vscale x 8 x i32> %splat, i64 0)
%ext = call <4 x i32> @llvm.vector.extract.v4i32.nxv8i32(<vscale x 8 x i32> %splat, i64 0)
ret <4 x i32> %ext
}

attributes #0 = { vscale_range(2,2) }
attributes #1 = { vscale_range(8,8) }

declare <2 x i64> @llvm.experimental.vector.extract.v2i64.nxv2i64(<vscale x 2 x i64>, i64)
declare <2 x i64> @llvm.vector.extract.v2i64.nxv2i64(<vscale x 2 x i64>, i64)

declare <4 x i32> @llvm.experimental.vector.extract.v4i32.nxv4i32(<vscale x 4 x i32>, i64)
declare <4 x i32> @llvm.experimental.vector.extract.v4i32.nxv2i32(<vscale x 2 x i32>, i64)
declare <4 x i32> @llvm.vector.extract.v4i32.nxv4i32(<vscale x 4 x i32>, i64)
declare <4 x i32> @llvm.vector.extract.v4i32.nxv2i32(<vscale x 2 x i32>, i64)

declare <8 x i16> @llvm.experimental.vector.extract.v8i16.nxv8i16(<vscale x 8 x i16>, i64)
declare <8 x i16> @llvm.experimental.vector.extract.v8i16.nxv4i16(<vscale x 4 x i16>, i64)
declare <8 x i16> @llvm.experimental.vector.extract.v8i16.nxv2i16(<vscale x 2 x i16>, i64)
declare <8 x i16> @llvm.vector.extract.v8i16.nxv8i16(<vscale x 8 x i16>, i64)
declare <8 x i16> @llvm.vector.extract.v8i16.nxv4i16(<vscale x 4 x i16>, i64)
declare <8 x i16> @llvm.vector.extract.v8i16.nxv2i16(<vscale x 2 x i16>, i64)

declare <16 x i8> @llvm.experimental.vector.extract.v16i8.nxv16i8(<vscale x 16 x i8>, i64)
declare <16 x i8> @llvm.experimental.vector.extract.v16i8.nxv8i8(<vscale x 8 x i8>, i64)
declare <16 x i8> @llvm.experimental.vector.extract.v16i8.nxv4i8(<vscale x 4 x i8>, i64)
declare <16 x i8> @llvm.experimental.vector.extract.v16i8.nxv2i8(<vscale x 2 x i8>, i64)
declare <16 x i8> @llvm.vector.extract.v16i8.nxv16i8(<vscale x 16 x i8>, i64)
declare <16 x i8> @llvm.vector.extract.v16i8.nxv8i8(<vscale x 8 x i8>, i64)
declare <16 x i8> @llvm.vector.extract.v16i8.nxv4i8(<vscale x 4 x i8>, i64)
declare <16 x i8> @llvm.vector.extract.v16i8.nxv2i8(<vscale x 2 x i8>, i64)

declare <2 x i1> @llvm.experimental.vector.extract.v2i1.nxv2i1(<vscale x 2 x i1>, i64)
declare <4 x i1> @llvm.experimental.vector.extract.v4i1.nxv4i1(<vscale x 4 x i1>, i64)
declare <8 x i1> @llvm.experimental.vector.extract.v8i1.nxv8i1(<vscale x 8 x i1>, i64)
declare <16 x i1> @llvm.experimental.vector.extract.v16i1.nxv16i1(<vscale x 16 x i1>, i64)
declare <2 x i1> @llvm.vector.extract.v2i1.nxv2i1(<vscale x 2 x i1>, i64)
declare <4 x i1> @llvm.vector.extract.v4i1.nxv4i1(<vscale x 4 x i1>, i64)
declare <8 x i1> @llvm.vector.extract.v8i1.nxv8i1(<vscale x 8 x i1>, i64)
declare <16 x i1> @llvm.vector.extract.v16i1.nxv16i1(<vscale x 16 x i1>, i64)

declare <4 x i64> @llvm.experimental.vector.extract.v4i64.nxv2i64(<vscale x 2 x i64>, i64)
declare <2 x float> @llvm.experimental.vector.extract.v2f32.nxv4f32(<vscale x 4 x float>, i64)
declare <4 x i32> @llvm.experimental.vector.extract.v4i32.nxv8i32(<vscale x 8 x i32>, i64)
declare <4 x i64> @llvm.vector.extract.v4i64.nxv2i64(<vscale x 2 x i64>, i64)
declare <2 x float> @llvm.vector.extract.v2f32.nxv4f32(<vscale x 4 x float>, i64)
declare <4 x i32> @llvm.vector.extract.v4i32.nxv8i32(<vscale x 8 x i32>, i64)

@ -7,7 +7,7 @@ define <vscale x 1 x i32> @extract_nxv1i32_nxv4i32(<vscale x 4 x i32> %vec) noun
; CHECK-LABEL: extract_nxv1i32_nxv4i32:
; CHECK: // %bb.0:
; CHECK-NEXT: ret
%retval = call <vscale x 1 x i32> @llvm.experimental.vector.extract.nxv1i32.nxv4i32(<vscale x 4 x i32> %vec, i64 0)
%retval = call <vscale x 1 x i32> @llvm.vector.extract.nxv1i32.nxv4i32(<vscale x 4 x i32> %vec, i64 0)
ret <vscale x 1 x i32> %retval
}

@ -15,12 +15,12 @@ define <vscale x 1 x i16> @extract_nxv1i16_nxv6i16(<vscale x 6 x i16> %vec) noun
; CHECK-LABEL: extract_nxv1i16_nxv6i16:
; CHECK: // %bb.0:
; CHECK-NEXT: ret
%retval = call <vscale x 1 x i16> @llvm.experimental.vector.extract.nxv1i16.nxv6i16(<vscale x 6 x i16> %vec, i64 0)
%retval = call <vscale x 1 x i16> @llvm.vector.extract.nxv1i16.nxv6i16(<vscale x 6 x i16> %vec, i64 0)
ret <vscale x 1 x i16> %retval
}

declare <vscale x 1 x i32> @llvm.experimental.vector.extract.nxv1i32.nxv4i32(<vscale x 4 x i32>, i64)
declare <vscale x 1 x i16> @llvm.experimental.vector.extract.nxv1i16.nxv6i16(<vscale x 6 x i16>, i64)
declare <vscale x 1 x i32> @llvm.vector.extract.nxv1i32.nxv4i32(<vscale x 4 x i32>, i64)
declare <vscale x 1 x i16> @llvm.vector.extract.nxv1i16.nxv6i16(<vscale x 6 x i16>, i64)

;
; Extract half i1 vector that needs promotion from legal type.

@ -30,7 +30,7 @@ define <vscale x 8 x i1> @extract_nxv8i1_nxv16i1_0(<vscale x 16 x i1> %in) {
; CHECK: // %bb.0:
; CHECK-NEXT: punpklo p0.h, p0.b
; CHECK-NEXT: ret
%res = call <vscale x 8 x i1> @llvm.experimental.vector.extract.nxv8i1.nxv16i1(<vscale x 16 x i1> %in, i64 0)
%res = call <vscale x 8 x i1> @llvm.vector.extract.nxv8i1.nxv16i1(<vscale x 16 x i1> %in, i64 0)
ret <vscale x 8 x i1> %res
}

@ -39,11 +39,11 @@ define <vscale x 8 x i1> @extract_nxv8i1_nxv16i1_8(<vscale x 16 x i1> %in) {
; CHECK: // %bb.0:
; CHECK-NEXT: punpkhi p0.h, p0.b
; CHECK-NEXT: ret
%res = call <vscale x 8 x i1> @llvm.experimental.vector.extract.nxv8i1.nxv16i1(<vscale x 16 x i1> %in, i64 8)
%res = call <vscale x 8 x i1> @llvm.vector.extract.nxv8i1.nxv16i1(<vscale x 16 x i1> %in, i64 8)
ret <vscale x 8 x i1> %res
}

declare <vscale x 8 x i1> @llvm.experimental.vector.extract.nxv8i1.nxv16i1(<vscale x 16 x i1>, i64)
declare <vscale x 8 x i1> @llvm.vector.extract.nxv8i1.nxv16i1(<vscale x 16 x i1>, i64)

;
; Extract i1 vector that needs widening from one that needs widening.

@ -52,7 +52,7 @@ define <vscale x 14 x i1> @extract_nxv14i1_nxv28i1_0(<vscale x 28 x i1> %in) {
; CHECK-LABEL: extract_nxv14i1_nxv28i1_0:
; CHECK: // %bb.0:
; CHECK-NEXT: ret
%res = call <vscale x 14 x i1> @llvm.experimental.vector.extract.nxv14i1.nxv28i1(<vscale x 28 x i1> %in, i64 0)
%res = call <vscale x 14 x i1> @llvm.vector.extract.nxv14i1.nxv28i1(<vscale x 28 x i1> %in, i64 0)
ret <vscale x 14 x i1> %res
}

@ -95,11 +95,11 @@ define <vscale x 14 x i1> @extract_nxv14i1_nxv28i1_14(<vscale x 28 x i1> %in) uw
; CHECK-NEXT: .cfi_def_cfa_offset 0
; CHECK-NEXT: .cfi_restore w29
; CHECK-NEXT: ret
%res = call <vscale x 14 x i1> @llvm.experimental.vector.extract.nxv14i1.nxv28i1(<vscale x 28 x i1> %in, i64 14)
%res = call <vscale x 14 x i1> @llvm.vector.extract.nxv14i1.nxv28i1(<vscale x 28 x i1> %in, i64 14)
ret <vscale x 14 x i1> %res
}

declare <vscale x 14 x i1> @llvm.experimental.vector.extract.nxv14i1.nxv28i1(<vscale x 28 x i1>, i64)
declare <vscale x 14 x i1> @llvm.vector.extract.nxv14i1.nxv28i1(<vscale x 28 x i1>, i64)

;
; Extract half i1 vector that needs promotion from one that needs splitting.

@ -109,7 +109,7 @@ define <vscale x 8 x i1> @extract_nxv8i1_nxv32i1_0(<vscale x 32 x i1> %in) {
; CHECK: // %bb.0:
; CHECK-NEXT: punpklo p0.h, p0.b
; CHECK-NEXT: ret
%res = call <vscale x 8 x i1> @llvm.experimental.vector.extract.nxv8i1.nxv32i1(<vscale x 32 x i1> %in, i64 0)
%res = call <vscale x 8 x i1> @llvm.vector.extract.nxv8i1.nxv32i1(<vscale x 32 x i1> %in, i64 0)
ret <vscale x 8 x i1> %res
}

@ -118,7 +118,7 @@ define <vscale x 8 x i1> @extract_nxv8i1_nxv32i1_8(<vscale x 32 x i1> %in) {
; CHECK: // %bb.0:
; CHECK-NEXT: punpkhi p0.h, p0.b
; CHECK-NEXT: ret
%res = call <vscale x 8 x i1> @llvm.experimental.vector.extract.nxv8i1.nxv32i1(<vscale x 32 x i1> %in, i64 8)
%res = call <vscale x 8 x i1> @llvm.vector.extract.nxv8i1.nxv32i1(<vscale x 32 x i1> %in, i64 8)
ret <vscale x 8 x i1> %res
}

@ -127,7 +127,7 @@ define <vscale x 8 x i1> @extract_nxv8i1_nxv32i1_16(<vscale x 32 x i1> %in) {
; CHECK: // %bb.0:
; CHECK-NEXT: punpklo p0.h, p1.b
; CHECK-NEXT: ret
%res = call <vscale x 8 x i1> @llvm.experimental.vector.extract.nxv8i1.nxv32i1(<vscale x 32 x i1> %in, i64 16)
%res = call <vscale x 8 x i1> @llvm.vector.extract.nxv8i1.nxv32i1(<vscale x 32 x i1> %in, i64 16)
ret <vscale x 8 x i1> %res
}

@ -136,11 +136,11 @@ define <vscale x 8 x i1> @extract_nxv8i1_nxv32i1_24(<vscale x 32 x i1> %in) {
; CHECK: // %bb.0:
; CHECK-NEXT: punpkhi p0.h, p1.b
; CHECK-NEXT: ret
%res = call <vscale x 8 x i1> @llvm.experimental.vector.extract.nxv8i1.nxv32i1(<vscale x 32 x i1> %in, i64 24)
%res = call <vscale x 8 x i1> @llvm.vector.extract.nxv8i1.nxv32i1(<vscale x 32 x i1> %in, i64 24)
ret <vscale x 8 x i1> %res
}

declare <vscale x 8 x i1> @llvm.experimental.vector.extract.nxv8i1.nxv32i1(<vscale x 32 x i1>, i64)
declare <vscale x 8 x i1> @llvm.vector.extract.nxv8i1.nxv32i1(<vscale x 32 x i1>, i64)

;
; Extract 1/4th i1 vector that needs promotion from legal type.

@ -151,7 +151,7 @@ define <vscale x 4 x i1> @extract_nxv4i1_nxv16i1_0(<vscale x 16 x i1> %in) {
|
|||
; CHECK-NEXT: punpklo p0.h, p0.b
|
||||
; CHECK-NEXT: punpklo p0.h, p0.b
|
||||
; CHECK-NEXT: ret
|
||||
%res = call <vscale x 4 x i1> @llvm.experimental.vector.extract.nxv4i1.nxv16i1(<vscale x 16 x i1> %in, i64 0)
|
||||
%res = call <vscale x 4 x i1> @llvm.vector.extract.nxv4i1.nxv16i1(<vscale x 16 x i1> %in, i64 0)
|
||||
ret <vscale x 4 x i1> %res
|
||||
}
|
||||
|
||||
|
@ -161,7 +161,7 @@ define <vscale x 4 x i1> @extract_nxv4i1_nxv16i1_4(<vscale x 16 x i1> %in) {
|
|||
; CHECK-NEXT: punpklo p0.h, p0.b
|
||||
; CHECK-NEXT: punpkhi p0.h, p0.b
|
||||
; CHECK-NEXT: ret
|
||||
%res = call <vscale x 4 x i1> @llvm.experimental.vector.extract.nxv4i1.nxv16i1(<vscale x 16 x i1> %in, i64 4)
|
||||
%res = call <vscale x 4 x i1> @llvm.vector.extract.nxv4i1.nxv16i1(<vscale x 16 x i1> %in, i64 4)
|
||||
ret <vscale x 4 x i1> %res
|
||||
}
|
||||
|
||||
|
@ -171,7 +171,7 @@ define <vscale x 4 x i1> @extract_nxv4i1_nxv16i1_8(<vscale x 16 x i1> %in) {
|
|||
; CHECK-NEXT: punpkhi p0.h, p0.b
|
||||
; CHECK-NEXT: punpklo p0.h, p0.b
|
||||
; CHECK-NEXT: ret
|
||||
%res = call <vscale x 4 x i1> @llvm.experimental.vector.extract.nxv4i1.nxv16i1(<vscale x 16 x i1> %in, i64 8)
|
||||
%res = call <vscale x 4 x i1> @llvm.vector.extract.nxv4i1.nxv16i1(<vscale x 16 x i1> %in, i64 8)
|
||||
ret <vscale x 4 x i1> %res
|
||||
}
|
||||
|
||||
|
@ -181,11 +181,11 @@ define <vscale x 4 x i1> @extract_nxv4i1_nxv16i1_12(<vscale x 16 x i1> %in) {
|
|||
; CHECK-NEXT: punpkhi p0.h, p0.b
|
||||
; CHECK-NEXT: punpkhi p0.h, p0.b
|
||||
; CHECK-NEXT: ret
|
||||
%res = call <vscale x 4 x i1> @llvm.experimental.vector.extract.nxv4i1.nxv16i1(<vscale x 16 x i1> %in, i64 12)
|
||||
%res = call <vscale x 4 x i1> @llvm.vector.extract.nxv4i1.nxv16i1(<vscale x 16 x i1> %in, i64 12)
|
||||
ret <vscale x 4 x i1> %res
|
||||
}
|
||||
|
||||
declare <vscale x 4 x i1> @llvm.experimental.vector.extract.nxv4i1.nxv16i1(<vscale x 16 x i1>, i64)
|
||||
declare <vscale x 4 x i1> @llvm.vector.extract.nxv4i1.nxv16i1(<vscale x 16 x i1>, i64)
|
||||
|
||||
;
|
||||
; Extract 1/8th i1 vector that needs promotion from legal type.
|
||||
|
@ -197,7 +197,7 @@ define <vscale x 2 x i1> @extract_nxv2i1_nxv16i1_0(<vscale x 16 x i1> %in) {
|
|||
; CHECK-NEXT: punpklo p0.h, p0.b
|
||||
; CHECK-NEXT: punpklo p0.h, p0.b
|
||||
; CHECK-NEXT: ret
|
||||
%res = call <vscale x 2 x i1> @llvm.experimental.vector.extract.nxv2i1.nxv16i1(<vscale x 16 x i1> %in, i64 0)
|
||||
%res = call <vscale x 2 x i1> @llvm.vector.extract.nxv2i1.nxv16i1(<vscale x 16 x i1> %in, i64 0)
|
||||
ret <vscale x 2 x i1> %res
|
||||
}
|
||||
|
||||
|
@ -208,7 +208,7 @@ define <vscale x 2 x i1> @extract_nxv2i1_nxv16i1_2(<vscale x 16 x i1> %in) {
|
|||
; CHECK-NEXT: punpklo p0.h, p0.b
|
||||
; CHECK-NEXT: punpkhi p0.h, p0.b
|
||||
; CHECK-NEXT: ret
|
||||
%res = call <vscale x 2 x i1> @llvm.experimental.vector.extract.nxv2i1.nxv16i1(<vscale x 16 x i1> %in, i64 2)
|
||||
%res = call <vscale x 2 x i1> @llvm.vector.extract.nxv2i1.nxv16i1(<vscale x 16 x i1> %in, i64 2)
|
||||
ret <vscale x 2 x i1> %res
|
||||
}
|
||||
|
||||
|
@ -219,7 +219,7 @@ define <vscale x 2 x i1> @extract_nxv2i1_nxv16i1_4(<vscale x 16 x i1> %in) {
|
|||
; CHECK-NEXT: punpkhi p0.h, p0.b
|
||||
; CHECK-NEXT: punpklo p0.h, p0.b
|
||||
; CHECK-NEXT: ret
|
||||
%res = call <vscale x 2 x i1> @llvm.experimental.vector.extract.nxv2i1.nxv16i1(<vscale x 16 x i1> %in, i64 4)
|
||||
%res = call <vscale x 2 x i1> @llvm.vector.extract.nxv2i1.nxv16i1(<vscale x 16 x i1> %in, i64 4)
|
||||
ret <vscale x 2 x i1> %res
|
||||
}
|
||||
|
||||
|
@ -230,7 +230,7 @@ define <vscale x 2 x i1> @extract_nxv2i1_nxv16i1_6(<vscale x 16 x i1> %in) {
|
|||
; CHECK-NEXT: punpkhi p0.h, p0.b
|
||||
; CHECK-NEXT: punpkhi p0.h, p0.b
|
||||
; CHECK-NEXT: ret
|
||||
%res = call <vscale x 2 x i1> @llvm.experimental.vector.extract.nxv2i1.nxv16i1(<vscale x 16 x i1> %in, i64 6)
|
||||
%res = call <vscale x 2 x i1> @llvm.vector.extract.nxv2i1.nxv16i1(<vscale x 16 x i1> %in, i64 6)
|
||||
ret <vscale x 2 x i1> %res
|
||||
}
|
||||
|
||||
|
@ -241,7 +241,7 @@ define <vscale x 2 x i1> @extract_nxv2i1_nxv16i1_8(<vscale x 16 x i1> %in) {
|
|||
; CHECK-NEXT: punpklo p0.h, p0.b
|
||||
; CHECK-NEXT: punpklo p0.h, p0.b
|
||||
; CHECK-NEXT: ret
|
||||
%res = call <vscale x 2 x i1> @llvm.experimental.vector.extract.nxv2i1.nxv16i1(<vscale x 16 x i1> %in, i64 8)
|
||||
%res = call <vscale x 2 x i1> @llvm.vector.extract.nxv2i1.nxv16i1(<vscale x 16 x i1> %in, i64 8)
|
||||
ret <vscale x 2 x i1> %res
|
||||
}
|
||||
|
||||
|
@ -252,7 +252,7 @@ define <vscale x 2 x i1> @extract_nxv2i1_nxv16i1_10(<vscale x 16 x i1> %in) {
|
|||
; CHECK-NEXT: punpklo p0.h, p0.b
|
||||
; CHECK-NEXT: punpkhi p0.h, p0.b
|
||||
; CHECK-NEXT: ret
|
||||
%res = call <vscale x 2 x i1> @llvm.experimental.vector.extract.nxv2i1.nxv16i1(<vscale x 16 x i1> %in, i64 10)
|
||||
%res = call <vscale x 2 x i1> @llvm.vector.extract.nxv2i1.nxv16i1(<vscale x 16 x i1> %in, i64 10)
|
||||
ret <vscale x 2 x i1> %res
|
||||
}
|
||||
|
||||
|
@ -263,7 +263,7 @@ define <vscale x 2 x i1> @extract_nxv2i1_nxv16i1_12(<vscale x 16 x i1> %in) {
|
|||
; CHECK-NEXT: punpkhi p0.h, p0.b
|
||||
; CHECK-NEXT: punpklo p0.h, p0.b
|
||||
; CHECK-NEXT: ret
|
||||
%res = call <vscale x 2 x i1> @llvm.experimental.vector.extract.nxv2i1.nxv16i1(<vscale x 16 x i1> %in, i64 12)
|
||||
%res = call <vscale x 2 x i1> @llvm.vector.extract.nxv2i1.nxv16i1(<vscale x 16 x i1> %in, i64 12)
|
||||
ret <vscale x 2 x i1> %res
|
||||
}
|
||||
|
||||
|
@ -274,11 +274,11 @@ define <vscale x 2 x i1> @extract_nxv2i1_nxv16i1_14(<vscale x 16 x i1> %in) {
|
|||
; CHECK-NEXT: punpkhi p0.h, p0.b
|
||||
; CHECK-NEXT: punpkhi p0.h, p0.b
|
||||
; CHECK-NEXT: ret
|
||||
%res = call <vscale x 2 x i1> @llvm.experimental.vector.extract.nxv2i1.nxv16i1(<vscale x 16 x i1> %in, i64 14)
|
||||
%res = call <vscale x 2 x i1> @llvm.vector.extract.nxv2i1.nxv16i1(<vscale x 16 x i1> %in, i64 14)
|
||||
ret <vscale x 2 x i1> %res
|
||||
}
|
||||
|
||||
declare <vscale x 2 x i1> @llvm.experimental.vector.extract.nxv2i1.nxv16i1(<vscale x 16 x i1>, i64)
|
||||
declare <vscale x 2 x i1> @llvm.vector.extract.nxv2i1.nxv16i1(<vscale x 16 x i1>, i64)
|
||||
|
||||
;
|
||||
; Extract i1 vector that needs promotion from one that needs widening.
|
||||
|
@ -289,7 +289,7 @@ define <vscale x 4 x i1> @extract_nxv4i1_nxv12i1_0(<vscale x 12 x i1> %in) {
|
|||
; CHECK-NEXT: punpklo p0.h, p0.b
|
||||
; CHECK-NEXT: punpklo p0.h, p0.b
|
||||
; CHECK-NEXT: ret
|
||||
%res = call <vscale x 4 x i1> @llvm.experimental.vector.extract.nxv4i1.nxv12i1(<vscale x 12 x i1> %in, i64 0)
|
||||
%res = call <vscale x 4 x i1> @llvm.vector.extract.nxv4i1.nxv12i1(<vscale x 12 x i1> %in, i64 0)
|
||||
ret <vscale x 4 x i1> %res
|
||||
}
|
||||
|
||||
|
@ -299,7 +299,7 @@ define <vscale x 4 x i1> @extract_nxv4i1_nxv12i1_4(<vscale x 12 x i1> %in) {
|
|||
; CHECK-NEXT: punpklo p0.h, p0.b
|
||||
; CHECK-NEXT: punpkhi p0.h, p0.b
|
||||
; CHECK-NEXT: ret
|
||||
%res = call <vscale x 4 x i1> @llvm.experimental.vector.extract.nxv4i1.nxv12i1(<vscale x 12 x i1> %in, i64 4)
|
||||
%res = call <vscale x 4 x i1> @llvm.vector.extract.nxv4i1.nxv12i1(<vscale x 12 x i1> %in, i64 4)
|
||||
ret <vscale x 4 x i1> %res
|
||||
}
|
||||
|
||||
|
@ -309,11 +309,11 @@ define <vscale x 4 x i1> @extract_nxv4i1_nxv12i1_8(<vscale x 12 x i1> %in) {
|
|||
; CHECK-NEXT: punpkhi p0.h, p0.b
|
||||
; CHECK-NEXT: punpklo p0.h, p0.b
|
||||
; CHECK-NEXT: ret
|
||||
%res = call <vscale x 4 x i1> @llvm.experimental.vector.extract.nxv4i1.nxv12i1(<vscale x 12 x i1> %in, i64 8)
|
||||
%res = call <vscale x 4 x i1> @llvm.vector.extract.nxv4i1.nxv12i1(<vscale x 12 x i1> %in, i64 8)
|
||||
ret <vscale x 4 x i1> %res
|
||||
}
|
||||
|
||||
declare <vscale x 4 x i1> @llvm.experimental.vector.extract.nxv4i1.nxv12i1(<vscale x 12 x i1>, i64)
|
||||
declare <vscale x 4 x i1> @llvm.vector.extract.nxv4i1.nxv12i1(<vscale x 12 x i1>, i64)
|
||||
|
||||
;
|
||||
; Extract 1/8th i8 vector that needs promotion from legal type.
|
||||
|
@ -325,7 +325,7 @@ define <vscale x 2 x i8> @extract_nxv2i8_nxv16i8_0(<vscale x 16 x i8> %in) {
|
|||
; CHECK-NEXT: uunpklo z0.s, z0.h
|
||||
; CHECK-NEXT: uunpklo z0.d, z0.s
|
||||
; CHECK-NEXT: ret
|
||||
%res = call <vscale x 2 x i8> @llvm.experimental.vector.extract.nxv2i8.nxv16i8(<vscale x 16 x i8> %in, i64 0)
|
||||
%res = call <vscale x 2 x i8> @llvm.vector.extract.nxv2i8.nxv16i8(<vscale x 16 x i8> %in, i64 0)
|
||||
ret <vscale x 2 x i8> %res
|
||||
}
|
||||
|
||||
|
@ -336,7 +336,7 @@ define <vscale x 2 x i8> @extract_nxv2i8_nxv16i8_2(<vscale x 16 x i8> %in) {
|
|||
; CHECK-NEXT: uunpklo z0.s, z0.h
|
||||
; CHECK-NEXT: uunpkhi z0.d, z0.s
|
||||
; CHECK-NEXT: ret
|
||||
%res = call <vscale x 2 x i8> @llvm.experimental.vector.extract.nxv2i8.nxv16i8(<vscale x 16 x i8> %in, i64 2)
|
||||
%res = call <vscale x 2 x i8> @llvm.vector.extract.nxv2i8.nxv16i8(<vscale x 16 x i8> %in, i64 2)
|
||||
ret <vscale x 2 x i8> %res
|
||||
}
|
||||
|
||||
|
@ -347,7 +347,7 @@ define <vscale x 2 x i8> @extract_nxv2i8_nxv16i8_4(<vscale x 16 x i8> %in) {
|
|||
; CHECK-NEXT: uunpkhi z0.s, z0.h
|
||||
; CHECK-NEXT: uunpklo z0.d, z0.s
|
||||
; CHECK-NEXT: ret
|
||||
%res = call <vscale x 2 x i8> @llvm.experimental.vector.extract.nxv2i8.nxv16i8(<vscale x 16 x i8> %in, i64 4)
|
||||
%res = call <vscale x 2 x i8> @llvm.vector.extract.nxv2i8.nxv16i8(<vscale x 16 x i8> %in, i64 4)
|
||||
ret <vscale x 2 x i8> %res
|
||||
}
|
||||
|
||||
|
@ -358,7 +358,7 @@ define <vscale x 2 x i8> @extract_nxv2i8_nxv16i8_6(<vscale x 16 x i8> %in) {
|
|||
; CHECK-NEXT: uunpkhi z0.s, z0.h
|
||||
; CHECK-NEXT: uunpkhi z0.d, z0.s
|
||||
; CHECK-NEXT: ret
|
||||
%res = call <vscale x 2 x i8> @llvm.experimental.vector.extract.nxv2i8.nxv16i8(<vscale x 16 x i8> %in, i64 6)
|
||||
%res = call <vscale x 2 x i8> @llvm.vector.extract.nxv2i8.nxv16i8(<vscale x 16 x i8> %in, i64 6)
|
||||
ret <vscale x 2 x i8> %res
|
||||
}
|
||||
|
||||
|
@ -369,7 +369,7 @@ define <vscale x 2 x i8> @extract_nxv2i8_nxv16i8_8(<vscale x 16 x i8> %in) {
|
|||
; CHECK-NEXT: uunpklo z0.s, z0.h
|
||||
; CHECK-NEXT: uunpklo z0.d, z0.s
|
||||
; CHECK-NEXT: ret
|
||||
%res = call <vscale x 2 x i8> @llvm.experimental.vector.extract.nxv2i8.nxv16i8(<vscale x 16 x i8> %in, i64 8)
|
||||
%res = call <vscale x 2 x i8> @llvm.vector.extract.nxv2i8.nxv16i8(<vscale x 16 x i8> %in, i64 8)
|
||||
ret <vscale x 2 x i8> %res
|
||||
}
|
||||
|
||||
|
@ -380,7 +380,7 @@ define <vscale x 2 x i8> @extract_nxv2i8_nxv16i8_10(<vscale x 16 x i8> %in) {
|
|||
; CHECK-NEXT: uunpklo z0.s, z0.h
|
||||
; CHECK-NEXT: uunpkhi z0.d, z0.s
|
||||
; CHECK-NEXT: ret
|
||||
%res = call <vscale x 2 x i8> @llvm.experimental.vector.extract.nxv2i8.nxv16i8(<vscale x 16 x i8> %in, i64 10)
|
||||
%res = call <vscale x 2 x i8> @llvm.vector.extract.nxv2i8.nxv16i8(<vscale x 16 x i8> %in, i64 10)
|
||||
ret <vscale x 2 x i8> %res
|
||||
}
|
||||
|
||||
|
@ -391,7 +391,7 @@ define <vscale x 2 x i8> @extract_nxv2i8_nxv16i8_12(<vscale x 16 x i8> %in) {
|
|||
; CHECK-NEXT: uunpkhi z0.s, z0.h
|
||||
; CHECK-NEXT: uunpklo z0.d, z0.s
|
||||
; CHECK-NEXT: ret
|
||||
%res = call <vscale x 2 x i8> @llvm.experimental.vector.extract.nxv2i8.nxv16i8(<vscale x 16 x i8> %in, i64 12)
|
||||
%res = call <vscale x 2 x i8> @llvm.vector.extract.nxv2i8.nxv16i8(<vscale x 16 x i8> %in, i64 12)
|
||||
ret <vscale x 2 x i8> %res
|
||||
}
|
||||
|
||||
|
@ -402,11 +402,11 @@ define <vscale x 2 x i8> @extract_nxv2i8_nxv16i8_14(<vscale x 16 x i8> %in) {
|
|||
; CHECK-NEXT: uunpkhi z0.s, z0.h
|
||||
; CHECK-NEXT: uunpkhi z0.d, z0.s
|
||||
; CHECK-NEXT: ret
|
||||
%res = call <vscale x 2 x i8> @llvm.experimental.vector.extract.nxv2i8.nxv16i8(<vscale x 16 x i8> %in, i64 14)
|
||||
%res = call <vscale x 2 x i8> @llvm.vector.extract.nxv2i8.nxv16i8(<vscale x 16 x i8> %in, i64 14)
|
||||
ret <vscale x 2 x i8> %res
|
||||
}
|
||||
|
||||
declare <vscale x 2 x i8> @llvm.experimental.vector.extract.nxv2i8.nxv16i8(<vscale x 16 x i8>, i64)
|
||||
declare <vscale x 2 x i8> @llvm.vector.extract.nxv2i8.nxv16i8(<vscale x 16 x i8>, i64)
|
||||
|
||||
;
|
||||
; Extract i8 vector that needs promotion from one that needs widening.
|
||||
|
@ -417,7 +417,7 @@ define <vscale x 4 x i8> @extract_nxv4i8_nxv12i8_0(<vscale x 12 x i8> %in) {
|
|||
; CHECK-NEXT: uunpklo z0.h, z0.b
|
||||
; CHECK-NEXT: uunpklo z0.s, z0.h
|
||||
; CHECK-NEXT: ret
|
||||
%res = call <vscale x 4 x i8> @llvm.experimental.vector.extract.nxv4i8.nxv12i8(<vscale x 12 x i8> %in, i64 0)
|
||||
%res = call <vscale x 4 x i8> @llvm.vector.extract.nxv4i8.nxv12i8(<vscale x 12 x i8> %in, i64 0)
|
||||
ret <vscale x 4 x i8> %res
|
||||
}
|
||||
|
||||
|
@ -427,7 +427,7 @@ define <vscale x 4 x i8> @extract_nxv4i8_nxv12i8_4(<vscale x 12 x i8> %in) {
|
|||
; CHECK-NEXT: uunpklo z0.h, z0.b
|
||||
; CHECK-NEXT: uunpkhi z0.s, z0.h
|
||||
; CHECK-NEXT: ret
|
||||
%res = call <vscale x 4 x i8> @llvm.experimental.vector.extract.nxv4i8.nxv12i8(<vscale x 12 x i8> %in, i64 4)
|
||||
%res = call <vscale x 4 x i8> @llvm.vector.extract.nxv4i8.nxv12i8(<vscale x 12 x i8> %in, i64 4)
|
||||
ret <vscale x 4 x i8> %res
|
||||
}
|
||||
|
||||
|
@ -437,11 +437,11 @@ define <vscale x 4 x i8> @extract_nxv4i8_nxv12i8_8(<vscale x 12 x i8> %in) {
|
|||
; CHECK-NEXT: uunpkhi z0.h, z0.b
|
||||
; CHECK-NEXT: uunpklo z0.s, z0.h
|
||||
; CHECK-NEXT: ret
|
||||
%res = call <vscale x 4 x i8> @llvm.experimental.vector.extract.nxv4i8.nxv12i8(<vscale x 12 x i8> %in, i64 8)
|
||||
%res = call <vscale x 4 x i8> @llvm.vector.extract.nxv4i8.nxv12i8(<vscale x 12 x i8> %in, i64 8)
|
||||
ret <vscale x 4 x i8> %res
|
||||
}
|
||||
|
||||
declare <vscale x 4 x i8> @llvm.experimental.vector.extract.nxv4i8.nxv12i8(<vscale x 12 x i8>, i64)
|
||||
declare <vscale x 4 x i8> @llvm.vector.extract.nxv4i8.nxv12i8(<vscale x 12 x i8>, i64)
|
||||
|
||||
;
|
||||
; Extract i8 vector that needs both widening + promotion from one that needs widening.
|
||||
|
@ -452,7 +452,7 @@ define <vscale x 6 x i8> @extract_nxv6i8_nxv12i8_0(<vscale x 12 x i8> %in) {
|
|||
; CHECK: // %bb.0:
|
||||
; CHECK-NEXT: uunpklo z0.h, z0.b
|
||||
; CHECK-NEXT: ret
|
||||
%res = call <vscale x 6 x i8> @llvm.experimental.vector.extract.nxv6i8.nxv12i8(<vscale x 12 x i8> %in, i64 0)
|
||||
%res = call <vscale x 6 x i8> @llvm.vector.extract.nxv6i8.nxv12i8(<vscale x 12 x i8> %in, i64 0)
|
||||
ret <vscale x 6 x i8> %res
|
||||
}
|
||||
|
||||
|
@ -470,11 +470,11 @@ define <vscale x 6 x i8> @extract_nxv6i8_nxv12i8_6(<vscale x 12 x i8> %in) {
|
|||
; CHECK-NEXT: uzp1 z0.s, z0.s, z1.s
|
||||
; CHECK-NEXT: uzp1 z0.h, z0.h, z2.h
|
||||
; CHECK-NEXT: ret
|
||||
%res = call <vscale x 6 x i8> @llvm.experimental.vector.extract.nxv6i8.nxv12i8(<vscale x 12 x i8> %in, i64 6)
|
||||
%res = call <vscale x 6 x i8> @llvm.vector.extract.nxv6i8.nxv12i8(<vscale x 12 x i8> %in, i64 6)
|
||||
ret <vscale x 6 x i8> %res
|
||||
}
|
||||
|
||||
declare <vscale x 6 x i8> @llvm.experimental.vector.extract.nxv6i8.nxv12i8(<vscale x 12 x i8>, i64)
|
||||
declare <vscale x 6 x i8> @llvm.vector.extract.nxv6i8.nxv12i8(<vscale x 12 x i8>, i64)
|
||||
|
||||
;
|
||||
; Extract half i8 vector that needs promotion from one that needs splitting.
|
||||
|
@ -484,7 +484,7 @@ define <vscale x 8 x i8> @extract_nxv8i8_nxv32i8_0(<vscale x 32 x i8> %in) {
|
|||
; CHECK: // %bb.0:
|
||||
; CHECK-NEXT: uunpklo z0.h, z0.b
|
||||
; CHECK-NEXT: ret
|
||||
%res = call <vscale x 8 x i8> @llvm.experimental.vector.extract.nxv8i8.nxv32i8(<vscale x 32 x i8> %in, i64 0)
|
||||
%res = call <vscale x 8 x i8> @llvm.vector.extract.nxv8i8.nxv32i8(<vscale x 32 x i8> %in, i64 0)
|
||||
ret <vscale x 8 x i8> %res
|
||||
}
|
||||
|
||||
|
@ -493,7 +493,7 @@ define <vscale x 8 x i8> @extract_nxv8i8_nxv32i8_8(<vscale x 32 x i8> %in) {
|
|||
; CHECK: // %bb.0:
|
||||
; CHECK-NEXT: uunpkhi z0.h, z0.b
|
||||
; CHECK-NEXT: ret
|
||||
%res = call <vscale x 8 x i8> @llvm.experimental.vector.extract.nxv8i8.nxv32i8(<vscale x 32 x i8> %in, i64 8)
|
||||
%res = call <vscale x 8 x i8> @llvm.vector.extract.nxv8i8.nxv32i8(<vscale x 32 x i8> %in, i64 8)
|
||||
ret <vscale x 8 x i8> %res
|
||||
}
|
||||
|
||||
|
@ -502,7 +502,7 @@ define <vscale x 8 x i8> @extract_nxv8i8_nxv32i8_16(<vscale x 32 x i8> %in) {
|
|||
; CHECK: // %bb.0:
|
||||
; CHECK-NEXT: uunpklo z0.h, z1.b
|
||||
; CHECK-NEXT: ret
|
||||
%res = call <vscale x 8 x i8> @llvm.experimental.vector.extract.nxv8i8.nxv32i8(<vscale x 32 x i8> %in, i64 16)
|
||||
%res = call <vscale x 8 x i8> @llvm.vector.extract.nxv8i8.nxv32i8(<vscale x 32 x i8> %in, i64 16)
|
||||
ret <vscale x 8 x i8> %res
|
||||
}
|
||||
|
||||
|
@ -511,11 +511,11 @@ define <vscale x 8 x i8> @extract_nxv8i8_nxv32i8_24(<vscale x 32 x i8> %in) {
|
|||
; CHECK: // %bb.0:
|
||||
; CHECK-NEXT: uunpkhi z0.h, z1.b
|
||||
; CHECK-NEXT: ret
|
||||
%res = call <vscale x 8 x i8> @llvm.experimental.vector.extract.nxv8i8.nxv32i8(<vscale x 32 x i8> %in, i64 24)
|
||||
%res = call <vscale x 8 x i8> @llvm.vector.extract.nxv8i8.nxv32i8(<vscale x 32 x i8> %in, i64 24)
|
||||
ret <vscale x 8 x i8> %res
|
||||
}
|
||||
|
||||
declare <vscale x 8 x i8> @llvm.experimental.vector.extract.nxv8i8.nxv32i8(<vscale x 32 x i8>, i64)
|
||||
declare <vscale x 8 x i8> @llvm.vector.extract.nxv8i8.nxv32i8(<vscale x 32 x i8>, i64)
|
||||
|
||||
;
|
||||
; Extract half i8 vector that needs promotion from legal type.
|
||||
|
@ -525,7 +525,7 @@ define <vscale x 8 x i8> @extract_nxv8i8_nxv16i8_0(<vscale x 16 x i8> %in) {
|
|||
; CHECK: // %bb.0:
|
||||
; CHECK-NEXT: uunpklo z0.h, z0.b
|
||||
; CHECK-NEXT: ret
|
||||
%res = call <vscale x 8 x i8> @llvm.experimental.vector.extract.nxv8i8.nxv16i8(<vscale x 16 x i8> %in, i64 0)
|
||||
%res = call <vscale x 8 x i8> @llvm.vector.extract.nxv8i8.nxv16i8(<vscale x 16 x i8> %in, i64 0)
|
||||
ret <vscale x 8 x i8> %res
|
||||
}
|
||||
|
||||
|
@ -534,11 +534,11 @@ define <vscale x 8 x i8> @extract_nxv8i8_nxv16i8_8(<vscale x 16 x i8> %in) {
|
|||
; CHECK: // %bb.0:
|
||||
; CHECK-NEXT: uunpkhi z0.h, z0.b
|
||||
; CHECK-NEXT: ret
|
||||
%res = call <vscale x 8 x i8> @llvm.experimental.vector.extract.nxv8i8.nxv16i8(<vscale x 16 x i8> %in, i64 8)
|
||||
%res = call <vscale x 8 x i8> @llvm.vector.extract.nxv8i8.nxv16i8(<vscale x 16 x i8> %in, i64 8)
|
||||
ret <vscale x 8 x i8> %res
|
||||
}
|
||||
|
||||
declare <vscale x 8 x i8> @llvm.experimental.vector.extract.nxv8i8.nxv16i8(<vscale x 16 x i8>, i64)
|
||||
declare <vscale x 8 x i8> @llvm.vector.extract.nxv8i8.nxv16i8(<vscale x 16 x i8>, i64)
|
||||
|
||||
;
|
||||
; Extract i8 vector that needs widening from one that needs widening.
|
||||
|
@ -547,7 +547,7 @@ define <vscale x 14 x i8> @extract_nxv14i8_nxv28i8_0(<vscale x 28 x i8> %in) {
|
|||
; CHECK-LABEL: extract_nxv14i8_nxv28i8_0:
|
||||
; CHECK: // %bb.0:
|
||||
; CHECK-NEXT: ret
|
||||
%res = call <vscale x 14 x i8> @llvm.experimental.vector.extract.nxv14i8.nxv28i8(<vscale x 28 x i8> %in, i64 0)
|
||||
%res = call <vscale x 14 x i8> @llvm.vector.extract.nxv14i8.nxv28i8(<vscale x 28 x i8> %in, i64 0)
|
||||
ret <vscale x 14 x i8> %res
|
||||
}
|
||||
|
||||
|
@ -621,11 +621,11 @@ define <vscale x 14 x i8> @extract_nxv14i8_nxv28i8_14(<vscale x 28 x i8> %in) {
|
|||
; CHECK-NEXT: uzp1 z1.h, z1.h, z2.h
|
||||
; CHECK-NEXT: uzp1 z0.b, z0.b, z1.b
|
||||
; CHECK-NEXT: ret
|
||||
%res = call <vscale x 14 x i8> @llvm.experimental.vector.extract.nxv14i8.nxv28i8(<vscale x 28 x i8> %in, i64 14)
|
||||
%res = call <vscale x 14 x i8> @llvm.vector.extract.nxv14i8.nxv28i8(<vscale x 28 x i8> %in, i64 14)
|
||||
ret <vscale x 14 x i8> %res
|
||||
}
|
||||
|
||||
declare <vscale x 14 x i8> @llvm.experimental.vector.extract.nxv14i8.nxv28i8(<vscale x 28 x i8>, i64)
|
||||
declare <vscale x 14 x i8> @llvm.vector.extract.nxv14i8.nxv28i8(<vscale x 28 x i8>, i64)
|
||||
|
||||
;
|
||||
; Extract 1/4th i8 vector that needs promotion from legal type.
|
||||
|
@ -636,7 +636,7 @@ define <vscale x 4 x i8> @extract_nxv4i8_nxv16i8_0(<vscale x 16 x i8> %in) {
|
|||
; CHECK-NEXT: uunpklo z0.h, z0.b
|
||||
; CHECK-NEXT: uunpklo z0.s, z0.h
|
||||
; CHECK-NEXT: ret
|
||||
%res = call <vscale x 4 x i8> @llvm.experimental.vector.extract.nxv4i8.nxv16i8(<vscale x 16 x i8> %in, i64 0)
|
||||
%res = call <vscale x 4 x i8> @llvm.vector.extract.nxv4i8.nxv16i8(<vscale x 16 x i8> %in, i64 0)
|
||||
ret <vscale x 4 x i8> %res
|
||||
}
|
||||
|
||||
|
@ -646,7 +646,7 @@ define <vscale x 4 x i8> @extract_nxv4i8_nxv16i8_4(<vscale x 16 x i8> %in) {
|
|||
; CHECK-NEXT: uunpklo z0.h, z0.b
|
||||
; CHECK-NEXT: uunpkhi z0.s, z0.h
|
||||
; CHECK-NEXT: ret
|
||||
%res = call <vscale x 4 x i8> @llvm.experimental.vector.extract.nxv4i8.nxv16i8(<vscale x 16 x i8> %in, i64 4)
|
||||
%res = call <vscale x 4 x i8> @llvm.vector.extract.nxv4i8.nxv16i8(<vscale x 16 x i8> %in, i64 4)
|
||||
ret <vscale x 4 x i8> %res
|
||||
}
|
||||
|
||||
|
@ -656,7 +656,7 @@ define <vscale x 4 x i8> @extract_nxv4i8_nxv16i8_8(<vscale x 16 x i8> %in) {
|
|||
; CHECK-NEXT: uunpkhi z0.h, z0.b
|
||||
; CHECK-NEXT: uunpklo z0.s, z0.h
|
||||
; CHECK-NEXT: ret
|
||||
%res = call <vscale x 4 x i8> @llvm.experimental.vector.extract.nxv4i8.nxv16i8(<vscale x 16 x i8> %in, i64 8)
|
||||
%res = call <vscale x 4 x i8> @llvm.vector.extract.nxv4i8.nxv16i8(<vscale x 16 x i8> %in, i64 8)
|
||||
ret <vscale x 4 x i8> %res
|
||||
}
|
||||
|
||||
|
@ -666,11 +666,11 @@ define <vscale x 4 x i8> @extract_nxv4i8_nxv16i8_12(<vscale x 16 x i8> %in) {
|
|||
; CHECK-NEXT: uunpkhi z0.h, z0.b
|
||||
; CHECK-NEXT: uunpkhi z0.s, z0.h
|
||||
; CHECK-NEXT: ret
|
||||
%res = call <vscale x 4 x i8> @llvm.experimental.vector.extract.nxv4i8.nxv16i8(<vscale x 16 x i8> %in, i64 12)
|
||||
%res = call <vscale x 4 x i8> @llvm.vector.extract.nxv4i8.nxv16i8(<vscale x 16 x i8> %in, i64 12)
|
||||
ret <vscale x 4 x i8> %res
|
||||
}
|
||||
|
||||
declare <vscale x 4 x i8> @llvm.experimental.vector.extract.nxv4i8.nxv16i8(<vscale x 16 x i8>, i64)
|
||||
declare <vscale x 4 x i8> @llvm.vector.extract.nxv4i8.nxv16i8(<vscale x 16 x i8>, i64)
|
||||
|
||||
;
|
||||
; Extract f16 vector that needs promotion from one that needs widening.
|
||||
|
@ -681,7 +681,7 @@ define <vscale x 2 x half> @extract_nxv2f16_nxv6f16_0(<vscale x 6 x half> %in) {
|
|||
; CHECK-NEXT: uunpklo z0.s, z0.h
|
||||
; CHECK-NEXT: uunpklo z0.d, z0.s
|
||||
; CHECK-NEXT: ret
|
||||
%res = call <vscale x 2 x half> @llvm.experimental.vector.extract.nxv2f16.nxv6f16(<vscale x 6 x half> %in, i64 0)
|
||||
%res = call <vscale x 2 x half> @llvm.vector.extract.nxv2f16.nxv6f16(<vscale x 6 x half> %in, i64 0)
|
||||
ret <vscale x 2 x half> %res
|
||||
}
|
||||
|
||||
|
@ -691,7 +691,7 @@ define <vscale x 2 x half> @extract_nxv2f16_nxv6f16_2(<vscale x 6 x half> %in) {
|
|||
; CHECK-NEXT: uunpklo z0.s, z0.h
|
||||
; CHECK-NEXT: uunpkhi z0.d, z0.s
|
||||
; CHECK-NEXT: ret
|
||||
%res = call <vscale x 2 x half> @llvm.experimental.vector.extract.nxv2f16.nxv6f16(<vscale x 6 x half> %in, i64 2)
|
||||
%res = call <vscale x 2 x half> @llvm.vector.extract.nxv2f16.nxv6f16(<vscale x 6 x half> %in, i64 2)
|
||||
ret <vscale x 2 x half> %res
|
||||
}
|
||||
|
||||
|
@ -701,11 +701,11 @@ define <vscale x 2 x half> @extract_nxv2f16_nxv6f16_4(<vscale x 6 x half> %in) {
|
|||
; CHECK-NEXT: uunpkhi z0.s, z0.h
|
||||
; CHECK-NEXT: uunpklo z0.d, z0.s
|
||||
; CHECK-NEXT: ret
|
||||
%res = call <vscale x 2 x half> @llvm.experimental.vector.extract.nxv2f16.nxv6f16(<vscale x 6 x half> %in, i64 4)
|
||||
%res = call <vscale x 2 x half> @llvm.vector.extract.nxv2f16.nxv6f16(<vscale x 6 x half> %in, i64 4)
|
||||
ret <vscale x 2 x half> %res
|
||||
}
|
||||
|
||||
declare <vscale x 2 x half> @llvm.experimental.vector.extract.nxv2f16.nxv6f16(<vscale x 6 x half>, i64)
|
||||
declare <vscale x 2 x half> @llvm.vector.extract.nxv2f16.nxv6f16(<vscale x 6 x half>, i64)
|
||||
|
||||
;
|
||||
; Extract half f16 vector that needs promotion from legal type.
|
||||
|
@ -715,7 +715,7 @@ define <vscale x 4 x half> @extract_nxv4f16_nxv8f16_0(<vscale x 8 x half> %in) {
|
|||
; CHECK: // %bb.0:
|
||||
; CHECK-NEXT: uunpklo z0.s, z0.h
|
||||
; CHECK-NEXT: ret
|
||||
%res = call <vscale x 4 x half> @llvm.experimental.vector.extract.nxv4f16.nxv8f16(<vscale x 8 x half> %in, i64 0)
|
||||
%res = call <vscale x 4 x half> @llvm.vector.extract.nxv4f16.nxv8f16(<vscale x 8 x half> %in, i64 0)
|
||||
ret <vscale x 4 x half> %res
|
||||
}
|
||||
|
||||
|
@ -724,11 +724,11 @@ define <vscale x 4 x half> @extract_nxv4f16_nxv8f16_4(<vscale x 8 x half> %in) {
|
|||
; CHECK: // %bb.0:
|
||||
; CHECK-NEXT: uunpkhi z0.s, z0.h
|
||||
; CHECK-NEXT: ret
|
||||
%res = call <vscale x 4 x half> @llvm.experimental.vector.extract.nxv4f16.nxv8f16(<vscale x 8 x half> %in, i64 4)
|
||||
%res = call <vscale x 4 x half> @llvm.vector.extract.nxv4f16.nxv8f16(<vscale x 8 x half> %in, i64 4)
|
||||
ret <vscale x 4 x half> %res
|
||||
}
|
||||
|
||||
declare <vscale x 4 x half> @llvm.experimental.vector.extract.nxv4f16.nxv8f16(<vscale x 8 x half>, i64)
|
||||
declare <vscale x 4 x half> @llvm.vector.extract.nxv4f16.nxv8f16(<vscale x 8 x half>, i64)
|
||||
|
||||
;
|
||||
; Extract f16 vector that needs widening from one that needs widening.
|
||||
|
@ -737,7 +737,7 @@ define <vscale x 6 x half> @extract_nxv6f16_nxv12f16_0(<vscale x 12 x half> %in)
|
|||
; CHECK-LABEL: extract_nxv6f16_nxv12f16_0:
|
||||
; CHECK: // %bb.0:
|
||||
; CHECK-NEXT: ret
|
||||
%res = call <vscale x 6 x half> @llvm.experimental.vector.extract.nxv6f16.nxv12f16(<vscale x 12 x half> %in, i64 0)
|
||||
%res = call <vscale x 6 x half> @llvm.vector.extract.nxv6f16.nxv12f16(<vscale x 12 x half> %in, i64 0)
|
||||
ret <vscale x 6 x half> %res
|
||||
}
|
||||
|
||||
|
@ -753,11 +753,11 @@ define <vscale x 6 x half> @extract_nxv6f16_nxv12f16_6(<vscale x 12 x half> %in)
|
|||
; CHECK-NEXT: uzp1 z0.s, z0.s, z1.s
|
||||
; CHECK-NEXT: uzp1 z0.h, z0.h, z2.h
|
||||
; CHECK-NEXT: ret
|
||||
%res = call <vscale x 6 x half> @llvm.experimental.vector.extract.nxv6f16.nxv12f16(<vscale x 12 x half> %in, i64 6)
|
||||
%res = call <vscale x 6 x half> @llvm.vector.extract.nxv6f16.nxv12f16(<vscale x 12 x half> %in, i64 6)
|
||||
ret <vscale x 6 x half> %res
|
||||
}
|
||||
|
||||
declare <vscale x 6 x half> @llvm.experimental.vector.extract.nxv6f16.nxv12f16(<vscale x 12 x half>, i64)
|
||||
declare <vscale x 6 x half> @llvm.vector.extract.nxv6f16.nxv12f16(<vscale x 12 x half>, i64)
|
||||
|
||||
;
|
||||
; Extract half f16 vector that needs promotion from one that needs splitting.
|
||||
|
@ -767,7 +767,7 @@ define <vscale x 4 x half> @extract_nxv4f16_nxv16f16_0(<vscale x 16 x half> %in)
|
|||
; CHECK: // %bb.0:
|
||||
; CHECK-NEXT: uunpklo z0.s, z0.h
|
||||
; CHECK-NEXT: ret
|
||||
%res = call <vscale x 4 x half> @llvm.experimental.vector.extract.nxv4f16.nxv16f16(<vscale x 16 x half> %in, i64 0)
|
||||
%res = call <vscale x 4 x half> @llvm.vector.extract.nxv4f16.nxv16f16(<vscale x 16 x half> %in, i64 0)
|
||||
ret <vscale x 4 x half> %res
|
||||
}
|
||||
|
||||
|
@ -776,7 +776,7 @@ define <vscale x 4 x half> @extract_nxv4f16_nxv16f16_4(<vscale x 16 x half> %in)
|
|||
; CHECK: // %bb.0:
|
||||
; CHECK-NEXT: uunpkhi z0.s, z0.h
|
||||
; CHECK-NEXT: ret
|
||||
%res = call <vscale x 4 x half> @llvm.experimental.vector.extract.nxv4f16.nxv16f16(<vscale x 16 x half> %in, i64 4)
|
||||
%res = call <vscale x 4 x half> @llvm.vector.extract.nxv4f16.nxv16f16(<vscale x 16 x half> %in, i64 4)
|
||||
ret <vscale x 4 x half> %res
|
||||
}
|
||||
|
||||
|
@ -785,7 +785,7 @@ define <vscale x 4 x half> @extract_nxv4f16_nxv16f16_8(<vscale x 16 x half> %in)
|
|||
; CHECK: // %bb.0:
|
||||
; CHECK-NEXT: uunpklo z0.s, z1.h
|
||||
; CHECK-NEXT: ret
|
||||
%res = call <vscale x 4 x half> @llvm.experimental.vector.extract.nxv4f16.nxv16f16(<vscale x 16 x half> %in, i64 8)
|
||||
%res = call <vscale x 4 x half> @llvm.vector.extract.nxv4f16.nxv16f16(<vscale x 16 x half> %in, i64 8)
|
||||
ret <vscale x 4 x half> %res
|
||||
}
|
||||
|
||||
|
@ -794,11 +794,11 @@ define <vscale x 4 x half> @extract_nxv4f16_nxv16f16_12(<vscale x 16 x half> %in
|
|||
; CHECK: // %bb.0:
|
||||
; CHECK-NEXT: uunpkhi z0.s, z1.h
|
||||
; CHECK-NEXT: ret
|
||||
%res = call <vscale x 4 x half> @llvm.experimental.vector.extract.nxv4f16.nxv16f16(<vscale x 16 x half> %in, i64 12)
|
||||
%res = call <vscale x 4 x half> @llvm.vector.extract.nxv4f16.nxv16f16(<vscale x 16 x half> %in, i64 12)
|
||||
ret <vscale x 4 x half> %res
|
||||
}
|
||||
|
||||
declare <vscale x 4 x half> @llvm.experimental.vector.extract.nxv4f16.nxv16f16(<vscale x 16 x half>, i64)
|
||||
declare <vscale x 4 x half> @llvm.vector.extract.nxv4f16.nxv16f16(<vscale x 16 x half>, i64)
|
||||
|
||||
;
|
||||
; Extract 1/4th f16 vector that needs promotion from legal type.
|
||||
|
@ -809,7 +809,7 @@ define <vscale x 2 x half> @extract_nxv2f16_nxv8f16_0(<vscale x 8 x half> %in) {
|
|||
; CHECK-NEXT: uunpklo z0.s, z0.h
|
||||
; CHECK-NEXT: uunpklo z0.d, z0.s
|
||||
; CHECK-NEXT: ret
|
||||
%res = call <vscale x 2 x half> @llvm.experimental.vector.extract.nxv2f16.nxv8f16(<vscale x 8 x half> %in, i64 0)
|
||||
%res = call <vscale x 2 x half> @llvm.vector.extract.nxv2f16.nxv8f16(<vscale x 8 x half> %in, i64 0)
|
||||
ret <vscale x 2 x half> %res
|
||||
}
|
||||
|
||||
|
@ -819,7 +819,7 @@ define <vscale x 2 x half> @extract_nxv2f16_nxv8f16_2(<vscale x 8 x half> %in) {
|
|||
; CHECK-NEXT: uunpklo z0.s, z0.h
|
||||
; CHECK-NEXT: uunpkhi z0.d, z0.s
|
||||
; CHECK-NEXT: ret
|
||||
%res = call <vscale x 2 x half> @llvm.experimental.vector.extract.nxv2f16.nxv8f16(<vscale x 8 x half> %in, i64 2)
|
||||
%res = call <vscale x 2 x half> @llvm.vector.extract.nxv2f16.nxv8f16(<vscale x 8 x half> %in, i64 2)
|
||||
ret <vscale x 2 x half> %res
|
||||
}
|
||||
|
||||
|
@ -829,7 +829,7 @@ define <vscale x 2 x half> @extract_nxv2f16_nxv8f16_4(<vscale x 8 x half> %in) {
|
|||
; CHECK-NEXT: uunpkhi z0.s, z0.h
|
||||
; CHECK-NEXT: uunpklo z0.d, z0.s
|
||||
; CHECK-NEXT: ret
|
||||
%res = call <vscale x 2 x half> @llvm.experimental.vector.extract.nxv2f16.nxv8f16(<vscale x 8 x half> %in, i64 4)
|
||||
%res = call <vscale x 2 x half> @llvm.vector.extract.nxv2f16.nxv8f16(<vscale x 8 x half> %in, i64 4)
|
||||
ret <vscale x 2 x half> %res
|
||||
}
|
||||
|
||||
|
@ -839,11 +839,11 @@ define <vscale x 2 x half> @extract_nxv2f16_nxv8f16_6(<vscale x 8 x half> %in) {
|
|||
; CHECK-NEXT: uunpkhi z0.s, z0.h
|
||||
; CHECK-NEXT: uunpkhi z0.d, z0.s
|
||||
; CHECK-NEXT: ret
|
||||
%res = call <vscale x 2 x half> @llvm.experimental.vector.extract.nxv2f16.nxv8f16(<vscale x 8 x half> %in, i64 6)
|
||||
%res = call <vscale x 2 x half> @llvm.vector.extract.nxv2f16.nxv8f16(<vscale x 8 x half> %in, i64 6)
|
||||
ret <vscale x 2 x half> %res
|
||||
}
|
||||
|
||||
declare <vscale x 2 x half> @llvm.experimental.vector.extract.nxv2f16.nxv8f16(<vscale x 8 x half>, i64)
|
||||
declare <vscale x 2 x half> @llvm.vector.extract.nxv2f16.nxv8f16(<vscale x 8 x half>, i64)
|
||||
|
||||
;
|
||||
; Extract half bf16 vector that needs promotion from legal type.
|
||||
|
@ -853,7 +853,7 @@ define <vscale x 4 x bfloat> @extract_nxv4bf16_nxv8bf16_0(<vscale x 8 x bfloat>
|
|||
; CHECK: // %bb.0:
|
||||
; CHECK-NEXT: uunpklo z0.s, z0.h
|
||||
; CHECK-NEXT: ret
|
||||
%res = call <vscale x 4 x bfloat> @llvm.experimental.vector.extract.nxv4bf16.nxv8bf16(<vscale x 8 x bfloat> %in, i64 0)
|
||||
%res = call <vscale x 4 x bfloat> @llvm.vector.extract.nxv4bf16.nxv8bf16(<vscale x 8 x bfloat> %in, i64 0)
|
||||
ret <vscale x 4 x bfloat> %res
|
||||
}
|
||||
|
||||
|
@ -862,11 +862,11 @@ define <vscale x 4 x bfloat> @extract_nxv4bf16_nxv8bf16_4(<vscale x 8 x bfloat>
|
|||
; CHECK: // %bb.0:
|
||||
; CHECK-NEXT: uunpkhi z0.s, z0.h
|
||||
; CHECK-NEXT: ret
|
||||
%res = call <vscale x 4 x bfloat> @llvm.experimental.vector.extract.nxv4bf16.nxv8bf16(<vscale x 8 x bfloat> %in, i64 4)
|
||||
%res = call <vscale x 4 x bfloat> @llvm.vector.extract.nxv4bf16.nxv8bf16(<vscale x 8 x bfloat> %in, i64 4)
|
||||
ret <vscale x 4 x bfloat> %res
|
||||
}
|
||||
|
||||
declare <vscale x 4 x bfloat> @llvm.experimental.vector.extract.nxv4bf16.nxv8bf16(<vscale x 8 x bfloat>, i64)
|
||||
declare <vscale x 4 x bfloat> @llvm.vector.extract.nxv4bf16.nxv8bf16(<vscale x 8 x bfloat>, i64)
|
||||
|
||||
;
|
||||
; Extract bf16 vector that needs widening from one that needs widening.
|
||||
|
@ -875,7 +875,7 @@ define <vscale x 6 x bfloat> @extract_nxv6bf16_nxv12bf16_0(<vscale x 12 x bfloat
|
|||
; CHECK-LABEL: extract_nxv6bf16_nxv12bf16_0:
|
||||
; CHECK: // %bb.0:
|
||||
; CHECK-NEXT: ret
|
||||
%res = call <vscale x 6 x bfloat> @llvm.experimental.vector.extract.nxv6bf16.nxv12bf16(<vscale x 12 x bfloat> %in, i64 0)
|
||||
%res = call <vscale x 6 x bfloat> @llvm.vector.extract.nxv6bf16.nxv12bf16(<vscale x 12 x bfloat> %in, i64 0)
|
||||
ret <vscale x 6 x bfloat> %res
|
||||
}
|
||||
|
||||
|
@ -891,11 +891,11 @@ define <vscale x 6 x bfloat> @extract_nxv6bf16_nxv12bf16_6(<vscale x 12 x bfloat
|
|||
; CHECK-NEXT: uzp1 z0.s, z0.s, z1.s
|
||||
; CHECK-NEXT: uzp1 z0.h, z0.h, z2.h
|
||||
; CHECK-NEXT: ret
|
||||
%res = call <vscale x 6 x bfloat> @llvm.experimental.vector.extract.nxv6bf16.nxv12bf16(<vscale x 12 x bfloat> %in, i64 6)
|
||||
%res = call <vscale x 6 x bfloat> @llvm.vector.extract.nxv6bf16.nxv12bf16(<vscale x 12 x bfloat> %in, i64 6)
|
||||
ret <vscale x 6 x bfloat> %res
|
||||
}
|
||||
|
||||
declare <vscale x 6 x bfloat> @llvm.experimental.vector.extract.nxv6bf16.nxv12bf16(<vscale x 12 x bfloat>, i64)
|
||||
declare <vscale x 6 x bfloat> @llvm.vector.extract.nxv6bf16.nxv12bf16(<vscale x 12 x bfloat>, i64)
|
||||
|
||||
;
|
||||
; Extract bf16 vector that needs promotion from one that needs widening.
|
||||
|
@ -906,7 +906,7 @@ define <vscale x 2 x bfloat> @extract_nxv2bf16_nxv6bf16_0(<vscale x 6 x bfloat>
|
|||
; CHECK-NEXT: uunpklo z0.s, z0.h
|
||||
; CHECK-NEXT: uunpklo z0.d, z0.s
|
||||
; CHECK-NEXT: ret
|
||||
%res = call <vscale x 2 x bfloat> @llvm.experimental.vector.extract.nxv2bf16.nxv6bf16(<vscale x 6 x bfloat> %in, i64 0)
|
||||
%res = call <vscale x 2 x bfloat> @llvm.vector.extract.nxv2bf16.nxv6bf16(<vscale x 6 x bfloat> %in, i64 0)
|
||||
ret <vscale x 2 x bfloat> %res
|
||||
}
|
||||
|
||||
|
@ -916,7 +916,7 @@ define <vscale x 2 x bfloat> @extract_nxv2bf16_nxv6bf16_2(<vscale x 6 x bfloat>
|
|||
; CHECK-NEXT: uunpklo z0.s, z0.h
|
||||
; CHECK-NEXT: uunpkhi z0.d, z0.s
|
||||
; CHECK-NEXT: ret
|
||||
%res = call <vscale x 2 x bfloat> @llvm.experimental.vector.extract.nxv2bf16.nxv6bf16(<vscale x 6 x bfloat> %in, i64 2)
|
||||
%res = call <vscale x 2 x bfloat> @llvm.vector.extract.nxv2bf16.nxv6bf16(<vscale x 6 x bfloat> %in, i64 2)
|
||||
ret <vscale x 2 x bfloat> %res
|
||||
}
|
||||
|
||||
|
@ -926,11 +926,11 @@ define <vscale x 2 x bfloat> @extract_nxv2bf16_nxv6bf16_4(<vscale x 6 x bfloat>
|
|||
; CHECK-NEXT: uunpkhi z0.s, z0.h
|
||||
; CHECK-NEXT: uunpklo z0.d, z0.s
|
||||
; CHECK-NEXT: ret
|
||||
%res = call <vscale x 2 x bfloat> @llvm.experimental.vector.extract.nxv2bf16.nxv6bf16(<vscale x 6 x bfloat> %in, i64 4)
|
||||
%res = call <vscale x 2 x bfloat> @llvm.vector.extract.nxv2bf16.nxv6bf16(<vscale x 6 x bfloat> %in, i64 4)
|
||||
ret <vscale x 2 x bfloat> %res
|
||||
}
|
||||
|
||||
declare <vscale x 2 x bfloat> @llvm.experimental.vector.extract.nxv2bf16.nxv6bf16(<vscale x 6 x bfloat>, i64)
|
||||
declare <vscale x 2 x bfloat> @llvm.vector.extract.nxv2bf16.nxv6bf16(<vscale x 6 x bfloat>, i64)
|
||||
|
||||
;
|
||||
; Extract 1/4th bf16 vector that needs promotion from legal type.
|
||||
|
@ -941,7 +941,7 @@ define <vscale x 2 x bfloat> @extract_nxv2bf16_nxv8bf16_0(<vscale x 8 x bfloat>
|
|||
; CHECK-NEXT: uunpklo z0.s, z0.h
|
||||
; CHECK-NEXT: uunpklo z0.d, z0.s
|
||||
; CHECK-NEXT: ret
|
||||
%res = call <vscale x 2 x bfloat> @llvm.experimental.vector.extract.nxv2bf16.nxv8bf16(<vscale x 8 x bfloat> %in, i64 0)
|
||||
%res = call <vscale x 2 x bfloat> @llvm.vector.extract.nxv2bf16.nxv8bf16(<vscale x 8 x bfloat> %in, i64 0)
|
||||
ret <vscale x 2 x bfloat> %res
|
||||
}
|
||||
|
||||
|
@ -951,7 +951,7 @@ define <vscale x 2 x bfloat> @extract_nxv2bf16_nxv8bf16_2(<vscale x 8 x bfloat>
|
|||
; CHECK-NEXT: uunpklo z0.s, z0.h
|
||||
; CHECK-NEXT: uunpkhi z0.d, z0.s
|
||||
; CHECK-NEXT: ret
|
||||
%res = call <vscale x 2 x bfloat> @llvm.experimental.vector.extract.nxv2bf16.nxv8bf16(<vscale x 8 x bfloat> %in, i64 2)
|
||||
%res = call <vscale x 2 x bfloat> @llvm.vector.extract.nxv2bf16.nxv8bf16(<vscale x 8 x bfloat> %in, i64 2)
|
||||
ret <vscale x 2 x bfloat> %res
|
||||
}
|
||||
|
||||
|
@ -961,7 +961,7 @@ define <vscale x 2 x bfloat> @extract_nxv2bf16_nxv8bf16_4(<vscale x 8 x bfloat>
|
|||
; CHECK-NEXT: uunpkhi z0.s, z0.h
|
||||
; CHECK-NEXT: uunpklo z0.d, z0.s
|
||||
; CHECK-NEXT: ret
|
||||
%res = call <vscale x 2 x bfloat> @llvm.experimental.vector.extract.nxv2bf16.nxv8bf16(<vscale x 8 x bfloat> %in, i64 4)
|
||||
%res = call <vscale x 2 x bfloat> @llvm.vector.extract.nxv2bf16.nxv8bf16(<vscale x 8 x bfloat> %in, i64 4)
|
||||
ret <vscale x 2 x bfloat> %res
|
||||
}
|
||||
|
||||
|
@ -971,11 +971,11 @@ define <vscale x 2 x bfloat> @extract_nxv2bf16_nxv8bf16_6(<vscale x 8 x bfloat>
|
|||
; CHECK-NEXT: uunpkhi z0.s, z0.h
|
||||
; CHECK-NEXT: uunpkhi z0.d, z0.s
|
||||
; CHECK-NEXT: ret
|
||||
%res = call <vscale x 2 x bfloat> @llvm.experimental.vector.extract.nxv2bf16.nxv8bf16(<vscale x 8 x bfloat> %in, i64 6)
|
||||
%res = call <vscale x 2 x bfloat> @llvm.vector.extract.nxv2bf16.nxv8bf16(<vscale x 8 x bfloat> %in, i64 6)
|
||||
ret <vscale x 2 x bfloat> %res
|
||||
}
|
||||
|
||||
declare <vscale x 2 x bfloat> @llvm.experimental.vector.extract.nxv2bf16.nxv8bf16(<vscale x 8 x bfloat>, i64)
|
||||
declare <vscale x 2 x bfloat> @llvm.vector.extract.nxv2bf16.nxv8bf16(<vscale x 8 x bfloat>, i64)
|
||||
|
||||
;
|
||||
; Extract half bf16 vector that needs promotion from one that needs splitting.
|
||||
|
@ -985,7 +985,7 @@ define <vscale x 4 x bfloat> @extract_nxv4bf16_nxv16bf16_0(<vscale x 16 x bfloat
|
|||
; CHECK: // %bb.0:
|
||||
; CHECK-NEXT: uunpklo z0.s, z0.h
|
||||
; CHECK-NEXT: ret
|
||||
%res = call <vscale x 4 x bfloat> @llvm.experimental.vector.extract.nxv4bf16.nxv16bf16(<vscale x 16 x bfloat> %in, i64 0)
|
||||
%res = call <vscale x 4 x bfloat> @llvm.vector.extract.nxv4bf16.nxv16bf16(<vscale x 16 x bfloat> %in, i64 0)
|
||||
ret <vscale x 4 x bfloat> %res
|
||||
}
|
||||
|
||||
|
@ -994,7 +994,7 @@ define <vscale x 4 x bfloat> @extract_nxv4bf16_nxv16bf16_4(<vscale x 16 x bfloat
|
|||
; CHECK: // %bb.0:
|
||||
; CHECK-NEXT: uunpkhi z0.s, z0.h
|
||||
; CHECK-NEXT: ret
|
||||
%res = call <vscale x 4 x bfloat> @llvm.experimental.vector.extract.nxv4bf16.nxv16bf16(<vscale x 16 x bfloat> %in, i64 4)
|
||||
%res = call <vscale x 4 x bfloat> @llvm.vector.extract.nxv4bf16.nxv16bf16(<vscale x 16 x bfloat> %in, i64 4)
|
||||
ret <vscale x 4 x bfloat> %res
|
||||
}
|
||||
|
||||
|
@ -1003,7 +1003,7 @@ define <vscale x 4 x bfloat> @extract_nxv4bf16_nxv16bf16_8(<vscale x 16 x bfloat
|
|||
; CHECK: // %bb.0:
|
||||
; CHECK-NEXT: uunpklo z0.s, z1.h
|
||||
; CHECK-NEXT: ret
|
||||
%res = call <vscale x 4 x bfloat> @llvm.experimental.vector.extract.nxv4bf16.nxv16bf16(<vscale x 16 x bfloat> %in, i64 8)
|
||||
%res = call <vscale x 4 x bfloat> @llvm.vector.extract.nxv4bf16.nxv16bf16(<vscale x 16 x bfloat> %in, i64 8)
|
||||
ret <vscale x 4 x bfloat> %res
|
||||
}
|
||||
|
||||
|
@ -1012,11 +1012,11 @@ define <vscale x 4 x bfloat> @extract_nxv4bf16_nxv16bf16_12(<vscale x 16 x bfloa
|
|||
; CHECK: // %bb.0:
|
||||
; CHECK-NEXT: uunpkhi z0.s, z1.h
|
||||
; CHECK-NEXT: ret
|
||||
%res = call <vscale x 4 x bfloat> @llvm.experimental.vector.extract.nxv4bf16.nxv16bf16(<vscale x 16 x bfloat> %in, i64 12)
|
||||
%res = call <vscale x 4 x bfloat> @llvm.vector.extract.nxv4bf16.nxv16bf16(<vscale x 16 x bfloat> %in, i64 12)
|
||||
ret <vscale x 4 x bfloat> %res
|
||||
}
|
||||
|
||||
declare <vscale x 4 x bfloat> @llvm.experimental.vector.extract.nxv4bf16.nxv16bf16(<vscale x 16 x bfloat>, i64)
|
||||
declare <vscale x 4 x bfloat> @llvm.vector.extract.nxv4bf16.nxv16bf16(<vscale x 16 x bfloat>, i64)
;

@ -1030,7 +1030,7 @@ define <vscale x 2 x float> @extract_nxv2f32_nxv4f32_splat(float %f) {
; CHECK-NEXT: ret
%ins = insertelement <vscale x 4 x float> poison, float %f, i32 0
%splat = shufflevector <vscale x 4 x float> %ins, <vscale x 4 x float> poison, <vscale x 4 x i32> zeroinitializer
%ext = call <vscale x 2 x float> @llvm.experimental.vector.extract.nxv2f32.nxv4f32(<vscale x 4 x float> %splat, i64 0)
%ext = call <vscale x 2 x float> @llvm.vector.extract.nxv2f32.nxv4f32(<vscale x 4 x float> %splat, i64 0)
ret <vscale x 2 x float> %ext
}

@ -1041,7 +1041,7 @@ define <vscale x 2 x float> @extract_nxv2f32_nxv4f32_splat_const() {
; CHECK-NEXT: ret
%ins = insertelement <vscale x 4 x float> poison, float 1.0, i32 0
%splat = shufflevector <vscale x 4 x float> %ins, <vscale x 4 x float> poison, <vscale x 4 x i32> zeroinitializer
%ext = call <vscale x 2 x float> @llvm.experimental.vector.extract.nxv2f32.nxv4f32(<vscale x 4 x float> %splat, i64 0)
%ext = call <vscale x 2 x float> @llvm.vector.extract.nxv2f32.nxv4f32(<vscale x 4 x float> %splat, i64 0)
ret <vscale x 2 x float> %ext
}

@ -1052,7 +1052,7 @@ define <vscale x 4 x i32> @extract_nxv4i32_nxv8i32_splat_const() {
; CHECK-NEXT: ret
%ins = insertelement <vscale x 8 x i32> poison, i32 1, i32 0
%splat = shufflevector <vscale x 8 x i32> %ins, <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer
%ext = call <vscale x 4 x i32> @llvm.experimental.vector.extract.nxv4i32.nxv8i32(<vscale x 8 x i32> %splat, i64 0)
%ext = call <vscale x 4 x i32> @llvm.vector.extract.nxv4i32.nxv8i32(<vscale x 8 x i32> %splat, i64 0)
ret <vscale x 4 x i32> %ext
}

@ -1063,7 +1063,7 @@ define <vscale x 2 x i1> @extract_nxv2i1_nxv16i1_all_ones() {
; CHECK-NEXT: ret
%ins = insertelement <vscale x 16 x i1> poison, i1 1, i32 0
%splat = shufflevector <vscale x 16 x i1> %ins, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
%ext = call <vscale x 2 x i1> @llvm.experimental.vector.extract.nxv2i1.nxv16i1(<vscale x 16 x i1> %splat, i64 0)
%ext = call <vscale x 2 x i1> @llvm.vector.extract.nxv2i1.nxv16i1(<vscale x 16 x i1> %splat, i64 0)
ret <vscale x 2 x i1> %ext
}

@ -1072,9 +1072,9 @@ define <vscale x 2 x i1> @extract_nxv2i1_nxv16i1_all_zero() {
; CHECK: // %bb.0:
; CHECK-NEXT: pfalse p0.b
; CHECK-NEXT: ret
%ext = call <vscale x 2 x i1> @llvm.experimental.vector.extract.nxv2i1.nxv16i1(<vscale x 16 x i1> zeroinitializer, i64 0)
%ext = call <vscale x 2 x i1> @llvm.vector.extract.nxv2i1.nxv16i1(<vscale x 16 x i1> zeroinitializer, i64 0)
ret <vscale x 2 x i1> %ext
}

declare <vscale x 2 x float> @llvm.experimental.vector.extract.nxv2f32.nxv4f32(<vscale x 4 x float>, i64)
declare <vscale x 4 x i32> @llvm.experimental.vector.extract.nxv4i32.nxv8i32(<vscale x 8 x i32>, i64)
declare <vscale x 2 x float> @llvm.vector.extract.nxv2f32.nxv4f32(<vscale x 4 x float>, i64)
declare <vscale x 4 x i32> @llvm.vector.extract.nxv4i32.nxv8i32(<vscale x 8 x i32>, i64)

@ -8,7 +8,7 @@ define void @pred_store_v2i8(<vscale x 16 x i1> %pred, <2 x i8>* %addr) #0 {
; CHECK-NEXT: store <vscale x 16 x i1> %pred, <vscale x 16 x i1>* [[TMP1]]
; CHECK-NEXT: ret void
%bitcast = bitcast <vscale x 16 x i1> %pred to <vscale x 2 x i8>
%extract = tail call <2 x i8> @llvm.experimental.vector.extract.v2i8.nxv2i8(<vscale x 2 x i8> %bitcast, i64 0)
%extract = tail call <2 x i8> @llvm.vector.extract.v2i8.nxv2i8(<vscale x 2 x i8> %bitcast, i64 0)
store <2 x i8> %extract, <2 x i8>* %addr, align 4
ret void
}

@ -19,7 +19,7 @@ define void @pred_store_v4i8(<vscale x 16 x i1> %pred, <4 x i8>* %addr) #1 {
; CHECK-NEXT: store <vscale x 16 x i1> %pred, <vscale x 16 x i1>* [[TMP1]]
; CHECK-NEXT: ret void
%bitcast = bitcast <vscale x 16 x i1> %pred to <vscale x 2 x i8>
%extract = tail call <4 x i8> @llvm.experimental.vector.extract.v4i8.nxv2i8(<vscale x 2 x i8> %bitcast, i64 0)
%extract = tail call <4 x i8> @llvm.vector.extract.v4i8.nxv2i8(<vscale x 2 x i8> %bitcast, i64 0)
store <4 x i8> %extract, <4 x i8>* %addr, align 4
ret void
}

@ -30,7 +30,7 @@ define void @pred_store_v8i8(<vscale x 16 x i1> %pred, <8 x i8>* %addr) #2 {
; CHECK-NEXT: store <vscale x 16 x i1> %pred, <vscale x 16 x i1>* [[TMP1]]
; CHECK-NEXT: ret void
%bitcast = bitcast <vscale x 16 x i1> %pred to <vscale x 2 x i8>
%extract = tail call <8 x i8> @llvm.experimental.vector.extract.v8i8.nxv2i8(<vscale x 2 x i8> %bitcast, i64 0)
%extract = tail call <8 x i8> @llvm.vector.extract.v8i8.nxv2i8(<vscale x 2 x i8> %bitcast, i64 0)
store <8 x i8> %extract, <8 x i8>* %addr, align 4
ret void
}

@ -39,9 +39,9 @@ define void @pred_store_v8i8(<vscale x 16 x i1> %pred, <8 x i8>* %addr) #2 {
; Check that too small of a vscale prevents optimization
define void @pred_store_neg1(<vscale x 16 x i1> %pred, <4 x i8>* %addr) #0 {
; CHECK-LABEL: @pred_store_neg1(
; CHECK: call <4 x i8> @llvm.experimental.vector.extract
; CHECK: call <4 x i8> @llvm.vector.extract
%bitcast = bitcast <vscale x 16 x i1> %pred to <vscale x 2 x i8>
%extract = tail call <4 x i8> @llvm.experimental.vector.extract.v4i8.nxv2i8(<vscale x 2 x i8> %bitcast, i64 0)
%extract = tail call <4 x i8> @llvm.vector.extract.v4i8.nxv2i8(<vscale x 2 x i8> %bitcast, i64 0)
store <4 x i8> %extract, <4 x i8>* %addr, align 4
ret void
}

@ -49,9 +49,9 @@ define void @pred_store_neg1(<vscale x 16 x i1> %pred, <4 x i8>* %addr) #0 {
; Check that too large of a vscale prevents optimization
define void @pred_store_neg2(<vscale x 16 x i1> %pred, <4 x i8>* %addr) #2 {
; CHECK-LABEL: @pred_store_neg2(
; CHECK: call <4 x i8> @llvm.experimental.vector.extract
; CHECK: call <4 x i8> @llvm.vector.extract
%bitcast = bitcast <vscale x 16 x i1> %pred to <vscale x 2 x i8>
%extract = tail call <4 x i8> @llvm.experimental.vector.extract.v4i8.nxv2i8(<vscale x 2 x i8> %bitcast, i64 0)
%extract = tail call <4 x i8> @llvm.vector.extract.v4i8.nxv2i8(<vscale x 2 x i8> %bitcast, i64 0)
store <4 x i8> %extract, <4 x i8>* %addr, align 4
ret void
}

@ -59,9 +59,9 @@ define void @pred_store_neg2(<vscale x 16 x i1> %pred, <4 x i8>* %addr) #2 {
; Check that a non-zero index prevents optimization
define void @pred_store_neg3(<vscale x 16 x i1> %pred, <4 x i8>* %addr) #1 {
; CHECK-LABEL: @pred_store_neg3(
; CHECK: call <4 x i8> @llvm.experimental.vector.extract
; CHECK: call <4 x i8> @llvm.vector.extract
%bitcast = bitcast <vscale x 16 x i1> %pred to <vscale x 2 x i8>
%extract = tail call <4 x i8> @llvm.experimental.vector.extract.v4i8.nxv2i8(<vscale x 2 x i8> %bitcast, i64 4)
%extract = tail call <4 x i8> @llvm.vector.extract.v4i8.nxv2i8(<vscale x 2 x i8> %bitcast, i64 4)
store <4 x i8> %extract, <4 x i8>* %addr, align 4
ret void
}

@ -69,16 +69,16 @@ define void @pred_store_neg3(<vscale x 16 x i1> %pred, <4 x i8>* %addr) #1 {
; Check that differing vscale min/max prevents optimization
define void @pred_store_neg4(<vscale x 16 x i1> %pred, <4 x i8>* %addr) #3 {
; CHECK-LABEL: @pred_store_neg4(
; CHECK: call <4 x i8> @llvm.experimental.vector.extract
; CHECK: call <4 x i8> @llvm.vector.extract
%bitcast = bitcast <vscale x 16 x i1> %pred to <vscale x 2 x i8>
%extract = tail call <4 x i8> @llvm.experimental.vector.extract.v4i8.nxv2i8(<vscale x 2 x i8> %bitcast, i64 0)
%extract = tail call <4 x i8> @llvm.vector.extract.v4i8.nxv2i8(<vscale x 2 x i8> %bitcast, i64 0)
store <4 x i8> %extract, <4 x i8>* %addr, align 4
ret void
}

declare <2 x i8> @llvm.experimental.vector.extract.v2i8.nxv2i8(<vscale x 2 x i8>, i64)
declare <4 x i8> @llvm.experimental.vector.extract.v4i8.nxv2i8(<vscale x 2 x i8>, i64)
declare <8 x i8> @llvm.experimental.vector.extract.v8i8.nxv2i8(<vscale x 2 x i8>, i64)
declare <2 x i8> @llvm.vector.extract.v2i8.nxv2i8(<vscale x 2 x i8>, i64)
declare <4 x i8> @llvm.vector.extract.v4i8.nxv2i8(<vscale x 2 x i8>, i64)
declare <8 x i8> @llvm.vector.extract.v8i8.nxv2i8(<vscale x 2 x i8>, i64)

attributes #0 = { "target-features"="+sve" vscale_range(1,1) }
attributes #1 = { "target-features"="+sve" vscale_range(2,2) }

@@ -13,7 +13,7 @@ define <4 x i8> @extract_subvector_v8i8(<8 x i8> %op) vscale_range(2,0) #0 {
; CHECK: // %bb.0:
; CHECK-NEXT: zip2 v0.8b, v0.8b, v0.8b
; CHECK-NEXT: ret
%ret = call <4 x i8> @llvm.experimental.vector.extract.v4i8.v8i8(<8 x i8> %op, i64 4)
%ret = call <4 x i8> @llvm.vector.extract.v4i8.v8i8(<8 x i8> %op, i64 4)
ret <4 x i8> %ret
}

@@ -24,7 +24,7 @@ define <8 x i8> @extract_subvector_v16i8(<16 x i8> %op) vscale_range(2,0) #0 {
; CHECK-NEXT: ext v0.16b, v0.16b, v0.16b, #8
; CHECK-NEXT: // kill: def $d0 killed $d0 killed $q0
; CHECK-NEXT: ret
%ret = call <8 x i8> @llvm.experimental.vector.extract.v8i8.v16i8(<16 x i8> %op, i64 8)
%ret = call <8 x i8> @llvm.vector.extract.v8i8.v16i8(<16 x i8> %op, i64 8)
ret <8 x i8> %ret
}

@@ -37,7 +37,7 @@ define void @extract_subvector_v32i8(<32 x i8>* %a, <16 x i8>* %b) vscale_range(
; CHECK-NEXT: str q0, [x1]
; CHECK-NEXT: ret
%op = load <32 x i8>, <32 x i8>* %a
%ret = call <16 x i8> @llvm.experimental.vector.extract.v16i8.v32i8(<32 x i8> %op, i64 16)
%ret = call <16 x i8> @llvm.vector.extract.v16i8.v32i8(<32 x i8> %op, i64 16)
store <16 x i8> %ret, <16 x i8>* %b
ret void
}

@ -60,7 +60,7 @@ define void @extract_subvector_v64i8(<64 x i8>* %a, <32 x i8>* %b) #0 {
|
|||
; VBITS_GE_512-NEXT: st1b { z0.b }, p0, [x1]
|
||||
; VBITS_GE_512-NEXT: ret
|
||||
%op = load <64 x i8>, <64 x i8>* %a
|
||||
%ret = call <32 x i8> @llvm.experimental.vector.extract.v32i8.v64i8(<64 x i8> %op, i64 32)
|
||||
%ret = call <32 x i8> @llvm.vector.extract.v32i8.v64i8(<64 x i8> %op, i64 32)
|
||||
store <32 x i8> %ret, <32 x i8>* %b
|
||||
ret void
|
||||
}
|
||||
|
@ -75,7 +75,7 @@ define void @extract_subvector_v128i8(<128 x i8>* %a, <64 x i8>* %b) vscale_rang
|
|||
; CHECK-NEXT: st1b { z0.b }, p0, [x1]
|
||||
; CHECK-NEXT: ret
|
||||
%op = load <128 x i8>, <128 x i8>* %a
|
||||
%ret = call <64 x i8> @llvm.experimental.vector.extract.v64i8.v128i8(<128 x i8> %op, i64 64)
|
||||
%ret = call <64 x i8> @llvm.vector.extract.v64i8.v128i8(<128 x i8> %op, i64 64)
|
||||
store <64 x i8> %ret, <64 x i8>* %b
|
||||
ret void
|
||||
}
|
||||
|
@ -90,7 +90,7 @@ define void @extract_subvector_v256i8(<256 x i8>* %a, <128 x i8>* %b) vscale_ran
|
|||
; CHECK-NEXT: st1b { z0.b }, p0, [x1]
|
||||
; CHECK-NEXT: ret
|
||||
%op = load <256 x i8>, <256 x i8>* %a
|
||||
%ret = call <128 x i8> @llvm.experimental.vector.extract.v128i8.v256i8(<256 x i8> %op, i64 128)
|
||||
%ret = call <128 x i8> @llvm.vector.extract.v128i8.v256i8(<256 x i8> %op, i64 128)
|
||||
store <128 x i8> %ret, <128 x i8>* %b
|
||||
ret void
|
||||
}
|
||||
|
@ -108,7 +108,7 @@ define <2 x i16> @extract_subvector_v4i16(<4 x i16> %op) vscale_range(2,0) #0 {
|
|||
; CHECK-NEXT: mov v0.s[1], w9
|
||||
; CHECK-NEXT: // kill: def $d0 killed $d0 killed $q0
|
||||
; CHECK-NEXT: ret
|
||||
%ret = call <2 x i16> @llvm.experimental.vector.extract.v2i16.v4i16(<4 x i16> %op, i64 2)
|
||||
%ret = call <2 x i16> @llvm.vector.extract.v2i16.v4i16(<4 x i16> %op, i64 2)
|
||||
ret <2 x i16> %ret
|
||||
}
|
||||
|
||||
|
@ -119,7 +119,7 @@ define <4 x i16> @extract_subvector_v8i16(<8 x i16> %op) vscale_range(2,0) #0 {
|
|||
; CHECK-NEXT: ext v0.16b, v0.16b, v0.16b, #8
|
||||
; CHECK-NEXT: // kill: def $d0 killed $d0 killed $q0
|
||||
; CHECK-NEXT: ret
|
||||
%ret = call <4 x i16> @llvm.experimental.vector.extract.v4i16.v8i16(<8 x i16> %op, i64 4)
|
||||
%ret = call <4 x i16> @llvm.vector.extract.v4i16.v8i16(<8 x i16> %op, i64 4)
|
||||
ret <4 x i16> %ret
|
||||
}
|
||||
|
||||
|
@ -132,7 +132,7 @@ define void @extract_subvector_v16i16(<16 x i16>* %a, <8 x i16>* %b) vscale_rang
|
|||
; CHECK-NEXT: str q0, [x1]
|
||||
; CHECK-NEXT: ret
|
||||
%op = load <16 x i16>, <16 x i16>* %a
|
||||
%ret = call <8 x i16> @llvm.experimental.vector.extract.v8i16.v16i16(<16 x i16> %op, i64 8)
|
||||
%ret = call <8 x i16> @llvm.vector.extract.v8i16.v16i16(<16 x i16> %op, i64 8)
|
||||
store <8 x i16> %ret, <8 x i16>* %b
|
||||
ret void
|
||||
}
|
||||
|
@ -155,7 +155,7 @@ define void @extract_subvector_v32i16(<32 x i16>* %a, <16 x i16>* %b) #0 {
|
|||
; VBITS_GE_512-NEXT: st1h { z0.h }, p0, [x1]
|
||||
; VBITS_GE_512-NEXT: ret
|
||||
%op = load <32 x i16>, <32 x i16>* %a
|
||||
%ret = call <16 x i16> @llvm.experimental.vector.extract.v16i16.v32i16(<32 x i16> %op, i64 16)
|
||||
%ret = call <16 x i16> @llvm.vector.extract.v16i16.v32i16(<32 x i16> %op, i64 16)
|
||||
store <16 x i16> %ret, <16 x i16>* %b
|
||||
ret void
|
||||
}
|
||||
|
@ -170,7 +170,7 @@ define void @extract_subvector_v64i16(<64 x i16>* %a, <32 x i16>* %b) vscale_ran
|
|||
; CHECK-NEXT: st1h { z0.h }, p0, [x1]
|
||||
; CHECK-NEXT: ret
|
||||
%op = load <64 x i16>, <64 x i16>* %a
|
||||
%ret = call <32 x i16> @llvm.experimental.vector.extract.v32i16.v64i16(<64 x i16> %op, i64 32)
|
||||
%ret = call <32 x i16> @llvm.vector.extract.v32i16.v64i16(<64 x i16> %op, i64 32)
|
||||
store <32 x i16> %ret, <32 x i16>* %b
|
||||
ret void
|
||||
}
|
||||
|
@ -185,7 +185,7 @@ define void @extract_subvector_v128i16(<128 x i16>* %a, <64 x i16>* %b) vscale_r
|
|||
; CHECK-NEXT: st1h { z0.h }, p0, [x1]
|
||||
; CHECK-NEXT: ret
|
||||
%op = load <128 x i16>, <128 x i16>* %a
|
||||
%ret = call <64 x i16> @llvm.experimental.vector.extract.v64i16.v128i16(<128 x i16> %op, i64 64)
|
||||
%ret = call <64 x i16> @llvm.vector.extract.v64i16.v128i16(<128 x i16> %op, i64 64)
|
||||
store <64 x i16> %ret, <64 x i16>* %b
|
||||
ret void
|
||||
}
|
||||
|
@ -199,7 +199,7 @@ define <1 x i32> @extract_subvector_v2i32(<2 x i32> %op) vscale_range(2,0) #0 {
|
|||
; CHECK-NEXT: // kill: def $d0 killed $d0 def $q0
|
||||
; CHECK-NEXT: dup v0.2s, v0.s[1]
|
||||
; CHECK-NEXT: ret
|
||||
%ret = call <1 x i32> @llvm.experimental.vector.extract.v1i32.v2i32(<2 x i32> %op, i64 1)
|
||||
%ret = call <1 x i32> @llvm.vector.extract.v1i32.v2i32(<2 x i32> %op, i64 1)
|
||||
ret <1 x i32> %ret
|
||||
}
|
||||
|
||||
|
@ -210,7 +210,7 @@ define <2 x i32> @extract_subvector_v4i32(<4 x i32> %op) vscale_range(2,0) #0 {
|
|||
; CHECK-NEXT: ext v0.16b, v0.16b, v0.16b, #8
|
||||
; CHECK-NEXT: // kill: def $d0 killed $d0 killed $q0
|
||||
; CHECK-NEXT: ret
|
||||
%ret = call <2 x i32> @llvm.experimental.vector.extract.v2i32.v4i32(<4 x i32> %op, i64 2)
|
||||
%ret = call <2 x i32> @llvm.vector.extract.v2i32.v4i32(<4 x i32> %op, i64 2)
|
||||
ret <2 x i32> %ret
|
||||
}
|
||||
|
||||
|
@ -223,7 +223,7 @@ define void @extract_subvector_v8i32(<8 x i32>* %a, <4 x i32>* %b) vscale_range(
|
|||
; CHECK-NEXT: str q0, [x1]
|
||||
; CHECK-NEXT: ret
|
||||
%op = load <8 x i32>, <8 x i32>* %a
|
||||
%ret = call <4 x i32> @llvm.experimental.vector.extract.v4i32.v8i32(<8 x i32> %op, i64 4)
|
||||
%ret = call <4 x i32> @llvm.vector.extract.v4i32.v8i32(<8 x i32> %op, i64 4)
|
||||
store <4 x i32> %ret, <4 x i32>* %b
|
||||
ret void
|
||||
}
|
||||
|
@ -246,7 +246,7 @@ define void @extract_subvector_v16i32(<16 x i32>* %a, <8 x i32>* %b) #0 {
|
|||
; VBITS_GE_512-NEXT: st1w { z0.s }, p0, [x1]
|
||||
; VBITS_GE_512-NEXT: ret
|
||||
%op = load <16 x i32>, <16 x i32>* %a
|
||||
%ret = call <8 x i32> @llvm.experimental.vector.extract.v8i32.v16i32(<16 x i32> %op, i64 8)
|
||||
%ret = call <8 x i32> @llvm.vector.extract.v8i32.v16i32(<16 x i32> %op, i64 8)
|
||||
store <8 x i32> %ret, <8 x i32>* %b
|
||||
ret void
|
||||
}
|
||||
|
@ -261,7 +261,7 @@ define void @extract_subvector_v32i32(<32 x i32>* %a, <16 x i32>* %b) vscale_ran
|
|||
; CHECK-NEXT: st1w { z0.s }, p0, [x1]
|
||||
; CHECK-NEXT: ret
|
||||
%op = load <32 x i32>, <32 x i32>* %a
|
||||
%ret = call <16 x i32> @llvm.experimental.vector.extract.v16i32.v32i32(<32 x i32> %op, i64 16)
|
||||
%ret = call <16 x i32> @llvm.vector.extract.v16i32.v32i32(<32 x i32> %op, i64 16)
|
||||
store <16 x i32> %ret, <16 x i32>* %b
|
||||
ret void
|
||||
}
|
||||
|
@ -276,7 +276,7 @@ define void @extract_subvector_v64i32(<64 x i32>* %a, <32 x i32>* %b) vscale_ran
|
|||
; CHECK-NEXT: st1w { z0.s }, p0, [x1]
|
||||
; CHECK-NEXT: ret
|
||||
%op = load <64 x i32>, <64 x i32>* %a
|
||||
%ret = call <32 x i32> @llvm.experimental.vector.extract.v32i32.v64i32(<64 x i32> %op, i64 32)
|
||||
%ret = call <32 x i32> @llvm.vector.extract.v32i32.v64i32(<64 x i32> %op, i64 32)
|
||||
store <32 x i32> %ret, <32 x i32>* %b
|
||||
ret void
|
||||
}
|
||||
|
@ -290,7 +290,7 @@ define <1 x i64> @extract_subvector_v2i64(<2 x i64> %op) vscale_range(2,0) #0 {
|
|||
; CHECK-NEXT: ext v0.16b, v0.16b, v0.16b, #8
|
||||
; CHECK-NEXT: // kill: def $d0 killed $d0 killed $q0
|
||||
; CHECK-NEXT: ret
|
||||
%ret = call <1 x i64> @llvm.experimental.vector.extract.v1i64.v2i64(<2 x i64> %op, i64 1)
|
||||
%ret = call <1 x i64> @llvm.vector.extract.v1i64.v2i64(<2 x i64> %op, i64 1)
|
||||
ret <1 x i64> %ret
|
||||
}
|
||||
|
||||
|
@ -303,7 +303,7 @@ define void @extract_subvector_v4i64(<4 x i64>* %a, <2 x i64>* %b) vscale_range(
|
|||
; CHECK-NEXT: str q0, [x1]
|
||||
; CHECK-NEXT: ret
|
||||
%op = load <4 x i64>, <4 x i64>* %a
|
||||
%ret = call <2 x i64> @llvm.experimental.vector.extract.v2i64.v4i64(<4 x i64> %op, i64 2)
|
||||
%ret = call <2 x i64> @llvm.vector.extract.v2i64.v4i64(<4 x i64> %op, i64 2)
|
||||
store <2 x i64> %ret, <2 x i64>* %b
|
||||
ret void
|
||||
}
|
||||
|
@ -317,7 +317,7 @@ define void @extract_subvector_v8i64(<8 x i64>* %a, <4 x i64>* %b) vscale_range(
|
|||
; CHECK-NEXT: st1d { z0.d }, p0, [x1]
|
||||
; CHECK-NEXT: ret
|
||||
%op = load <8 x i64>, <8 x i64>* %a
|
||||
%ret = call <4 x i64> @llvm.experimental.vector.extract.v4i64.v8i64(<8 x i64> %op, i64 4)
|
||||
%ret = call <4 x i64> @llvm.vector.extract.v4i64.v8i64(<8 x i64> %op, i64 4)
|
||||
store <4 x i64> %ret, <4 x i64>* %b
|
||||
ret void
|
||||
}
|
||||
|
@ -335,7 +335,7 @@ define void @extract_subvector_v16i64(<16 x i64>* %a, <8 x i64>* %b) #0 {
|
|||
; VBITS_GE_256-NEXT: st1d { z0.d }, p0, [x1]
|
||||
; VBITS_GE_256-NEXT: ret
|
||||
%op = load <16 x i64>, <16 x i64>* %a
|
||||
%ret = call <8 x i64> @llvm.experimental.vector.extract.v8i64.v16i64(<16 x i64> %op, i64 8)
|
||||
%ret = call <8 x i64> @llvm.vector.extract.v8i64.v16i64(<16 x i64> %op, i64 8)
|
||||
store <8 x i64> %ret, <8 x i64>* %b
|
||||
ret void
|
||||
}
|
||||
|
@ -349,7 +349,7 @@ define void @extract_subvector_v32i64(<32 x i64>* %a, <16 x i64>* %b) vscale_ran
|
|||
; CHECK-NEXT: st1d { z0.d }, p0, [x1]
|
||||
; CHECK-NEXT: ret
|
||||
%op = load <32 x i64>, <32 x i64>* %a
|
||||
%ret = call <16 x i64> @llvm.experimental.vector.extract.v16i64.v32i64(<32 x i64> %op, i64 16)
|
||||
%ret = call <16 x i64> @llvm.vector.extract.v16i64.v32i64(<32 x i64> %op, i64 16)
|
||||
store <16 x i64> %ret, <16 x i64>* %b
|
||||
ret void
|
||||
}
|
||||
|
@ -363,7 +363,7 @@ define <2 x half> @extract_subvector_v4f16(<4 x half> %op) vscale_range(16,0) #0
|
|||
; CHECK-NEXT: // kill: def $d0 killed $d0 def $q0
|
||||
; CHECK-NEXT: dup v0.2s, v0.s[1]
|
||||
; CHECK-NEXT: ret
|
||||
%ret = call <2 x half> @llvm.experimental.vector.extract.v2f16.v4f16(<4 x half> %op, i64 2)
|
||||
%ret = call <2 x half> @llvm.vector.extract.v2f16.v4f16(<4 x half> %op, i64 2)
|
||||
ret <2 x half> %ret
|
||||
}
|
||||
|
||||
|
@ -374,7 +374,7 @@ define <4 x half> @extract_subvector_v8f16(<8 x half> %op) vscale_range(2,0) #0
|
|||
; CHECK-NEXT: ext v0.16b, v0.16b, v0.16b, #8
|
||||
; CHECK-NEXT: // kill: def $d0 killed $d0 killed $q0
|
||||
; CHECK-NEXT: ret
|
||||
%ret = call <4 x half> @llvm.experimental.vector.extract.v4f16.v8f16(<8 x half> %op, i64 4)
|
||||
%ret = call <4 x half> @llvm.vector.extract.v4f16.v8f16(<8 x half> %op, i64 4)
|
||||
ret <4 x half> %ret
|
||||
}
|
||||
|
||||
|
@ -387,7 +387,7 @@ define void @extract_subvector_v16f16(<16 x half>* %a, <8 x half>* %b) vscale_ra
|
|||
; CHECK-NEXT: str q0, [x1]
|
||||
; CHECK-NEXT: ret
|
||||
%op = load <16 x half>, <16 x half>* %a
|
||||
%ret = call <8 x half> @llvm.experimental.vector.extract.v8f16.v16f16(<16 x half> %op, i64 8)
|
||||
%ret = call <8 x half> @llvm.vector.extract.v8f16.v16f16(<16 x half> %op, i64 8)
|
||||
store <8 x half> %ret, <8 x half>* %b
|
||||
ret void
|
||||
}
|
||||
|
@ -410,7 +410,7 @@ define void @extract_subvector_v32f16(<32 x half>* %a, <16 x half>* %b) #0 {
|
|||
; VBITS_GE_512-NEXT: st1h { z0.h }, p0, [x1]
|
||||
; VBITS_GE_512-NEXT: ret
|
||||
%op = load <32 x half>, <32 x half>* %a
|
||||
%ret = call <16 x half> @llvm.experimental.vector.extract.v16f16.v32f16(<32 x half> %op, i64 16)
|
||||
%ret = call <16 x half> @llvm.vector.extract.v16f16.v32f16(<32 x half> %op, i64 16)
|
||||
store <16 x half> %ret, <16 x half>* %b
|
||||
ret void
|
||||
}
|
||||
|
@ -425,7 +425,7 @@ define void @extract_subvector_v64f16(<64 x half>* %a, <32 x half>* %b) vscale_r
|
|||
; CHECK-NEXT: st1h { z0.h }, p0, [x1]
|
||||
; CHECK-NEXT: ret
|
||||
%op = load <64 x half>, <64 x half>* %a
|
||||
%ret = call <32 x half> @llvm.experimental.vector.extract.v32f16.v64f16(<64 x half> %op, i64 32)
|
||||
%ret = call <32 x half> @llvm.vector.extract.v32f16.v64f16(<64 x half> %op, i64 32)
|
||||
store <32 x half> %ret, <32 x half>* %b
|
||||
ret void
|
||||
}
|
||||
|
@ -440,7 +440,7 @@ define void @extract_subvector_v128f16(<128 x half>* %a, <64 x half>* %b) vscale
|
|||
; CHECK-NEXT: st1h { z0.h }, p0, [x1]
|
||||
; CHECK-NEXT: ret
|
||||
%op = load <128 x half>, <128 x half>* %a
|
||||
%ret = call <64 x half> @llvm.experimental.vector.extract.v64f16.v128f16(<128 x half> %op, i64 64)
|
||||
%ret = call <64 x half> @llvm.vector.extract.v64f16.v128f16(<128 x half> %op, i64 64)
|
||||
store <64 x half> %ret, <64 x half>* %b
|
||||
ret void
|
||||
}
|
||||
|
@ -454,7 +454,7 @@ define <1 x float> @extract_subvector_v2f32(<2 x float> %op) vscale_range(2,0) #
|
|||
; CHECK-NEXT: // kill: def $d0 killed $d0 def $q0
|
||||
; CHECK-NEXT: dup v0.2s, v0.s[1]
|
||||
; CHECK-NEXT: ret
|
||||
%ret = call <1 x float> @llvm.experimental.vector.extract.v1f32.v2f32(<2 x float> %op, i64 1)
|
||||
%ret = call <1 x float> @llvm.vector.extract.v1f32.v2f32(<2 x float> %op, i64 1)
|
||||
ret <1 x float> %ret
|
||||
}
|
||||
|
||||
|
@ -465,7 +465,7 @@ define <2 x float> @extract_subvector_v4f32(<4 x float> %op) vscale_range(2,0) #
|
|||
; CHECK-NEXT: ext v0.16b, v0.16b, v0.16b, #8
|
||||
; CHECK-NEXT: // kill: def $d0 killed $d0 killed $q0
|
||||
; CHECK-NEXT: ret
|
||||
%ret = call <2 x float> @llvm.experimental.vector.extract.v2f32.v4f32(<4 x float> %op, i64 2)
|
||||
%ret = call <2 x float> @llvm.vector.extract.v2f32.v4f32(<4 x float> %op, i64 2)
|
||||
ret <2 x float> %ret
|
||||
}
|
||||
|
||||
|
@ -478,7 +478,7 @@ define void @extract_subvector_v8f32(<8 x float>* %a, <4 x float>* %b) vscale_ra
|
|||
; CHECK-NEXT: str q0, [x1]
|
||||
; CHECK-NEXT: ret
|
||||
%op = load <8 x float>, <8 x float>* %a
|
||||
%ret = call <4 x float> @llvm.experimental.vector.extract.v4f32.v8f32(<8 x float> %op, i64 4)
|
||||
%ret = call <4 x float> @llvm.vector.extract.v4f32.v8f32(<8 x float> %op, i64 4)
|
||||
store <4 x float> %ret, <4 x float>* %b
|
||||
ret void
|
||||
}
|
||||
|
@ -501,7 +501,7 @@ define void @extract_subvector_v16f32(<16 x float>* %a, <8 x float>* %b) #0 {
|
|||
; VBITS_GE_512-NEXT: st1w { z0.s }, p0, [x1]
|
||||
; VBITS_GE_512-NEXT: ret
|
||||
%op = load <16 x float>, <16 x float>* %a
|
||||
%ret = call <8 x float> @llvm.experimental.vector.extract.v8f32.v16f32(<16 x float> %op, i64 8)
|
||||
%ret = call <8 x float> @llvm.vector.extract.v8f32.v16f32(<16 x float> %op, i64 8)
|
||||
store <8 x float> %ret, <8 x float>* %b
|
||||
ret void
|
||||
}
|
||||
|
@ -516,7 +516,7 @@ define void @extract_subvector_v32f32(<32 x float>* %a, <16 x float>* %b) vscale
|
|||
; CHECK-NEXT: st1w { z0.s }, p0, [x1]
|
||||
; CHECK-NEXT: ret
|
||||
%op = load <32 x float>, <32 x float>* %a
|
||||
%ret = call <16 x float> @llvm.experimental.vector.extract.v16f32.v32f32(<32 x float> %op, i64 16)
|
||||
%ret = call <16 x float> @llvm.vector.extract.v16f32.v32f32(<32 x float> %op, i64 16)
|
||||
store <16 x float> %ret, <16 x float>* %b
|
||||
ret void
|
||||
}
|
||||
|
@ -531,7 +531,7 @@ define void @extract_subvector_v64f32(<64 x float>* %a, <32 x float>* %b) vscale
|
|||
; CHECK-NEXT: st1w { z0.s }, p0, [x1]
|
||||
; CHECK-NEXT: ret
|
||||
%op = load <64 x float>, <64 x float>* %a
|
||||
%ret = call <32 x float> @llvm.experimental.vector.extract.v32f32.v64f32(<64 x float> %op, i64 32)
|
||||
%ret = call <32 x float> @llvm.vector.extract.v32f32.v64f32(<64 x float> %op, i64 32)
|
||||
store <32 x float> %ret, <32 x float>* %b
|
||||
ret void
|
||||
}
|
||||
|
@ -545,7 +545,7 @@ define <1 x double> @extract_subvector_v2f64(<2 x double> %op) vscale_range(2,0)
|
|||
; CHECK-NEXT: ext v0.16b, v0.16b, v0.16b, #8
|
||||
; CHECK-NEXT: // kill: def $d0 killed $d0 killed $q0
|
||||
; CHECK-NEXT: ret
|
||||
%ret = call <1 x double> @llvm.experimental.vector.extract.v1f64.v2f64(<2 x double> %op, i64 1)
|
||||
%ret = call <1 x double> @llvm.vector.extract.v1f64.v2f64(<2 x double> %op, i64 1)
|
||||
ret <1 x double> %ret
|
||||
}
|
||||
|
||||
|
@ -558,7 +558,7 @@ define void @extract_subvector_v4f64(<4 x double>* %a, <2 x double>* %b) vscale_
|
|||
; CHECK-NEXT: str q0, [x1]
|
||||
; CHECK-NEXT: ret
|
||||
%op = load <4 x double>, <4 x double>* %a
|
||||
%ret = call <2 x double> @llvm.experimental.vector.extract.v2f64.v4f64(<4 x double> %op, i64 2)
|
||||
%ret = call <2 x double> @llvm.vector.extract.v2f64.v4f64(<4 x double> %op, i64 2)
|
||||
store <2 x double> %ret, <2 x double>* %b
|
||||
ret void
|
||||
}
|
||||
|
@ -581,7 +581,7 @@ define void @extract_subvector_v8f64(<8 x double>* %a, <4 x double>* %b) #0 {
|
|||
; VBITS_GE_512-NEXT: st1d { z0.d }, p0, [x1]
|
||||
; VBITS_GE_512-NEXT: ret
|
||||
%op = load <8 x double>, <8 x double>* %a
|
||||
%ret = call <4 x double> @llvm.experimental.vector.extract.v4f64.v8f64(<8 x double> %op, i64 4)
|
||||
%ret = call <4 x double> @llvm.vector.extract.v4f64.v8f64(<8 x double> %op, i64 4)
|
||||
store <4 x double> %ret, <4 x double>* %b
|
||||
ret void
|
||||
}
|
||||
|
@ -596,7 +596,7 @@ define void @extract_subvector_v16f64(<16 x double>* %a, <8 x double>* %b) vscal
|
|||
; CHECK-NEXT: st1d { z0.d }, p0, [x1]
|
||||
; CHECK-NEXT: ret
|
||||
%op = load <16 x double>, <16 x double>* %a
|
||||
%ret = call <8 x double> @llvm.experimental.vector.extract.v8f64.v16f64(<16 x double> %op, i64 8)
|
||||
%ret = call <8 x double> @llvm.vector.extract.v8f64.v16f64(<16 x double> %op, i64 8)
|
||||
store <8 x double> %ret, <8 x double>* %b
|
||||
ret void
|
||||
}
|
||||
|
@@ -611,56 +611,56 @@ define void @extract_subvector_v32f64(<32 x double>* %a, <16 x double>* %b) vsca
; CHECK-NEXT: st1d { z0.d }, p0, [x1]
; CHECK-NEXT: ret
%op = load <32 x double>, <32 x double>* %a
%ret = call <16 x double> @llvm.experimental.vector.extract.v16f64.v32f64(<32 x double> %op, i64 16)
%ret = call <16 x double> @llvm.vector.extract.v16f64.v32f64(<32 x double> %op, i64 16)
store <16 x double> %ret, <16 x double>* %b
ret void
}

declare <4 x i8> @llvm.experimental.vector.extract.v4i8.v8i8(<8 x i8>, i64)
declare <8 x i8> @llvm.experimental.vector.extract.v8i8.v16i8(<16 x i8>, i64)
declare <16 x i8> @llvm.experimental.vector.extract.v16i8.v32i8(<32 x i8>, i64)
declare <32 x i8> @llvm.experimental.vector.extract.v32i8.v64i8(<64 x i8>, i64)
declare <64 x i8> @llvm.experimental.vector.extract.v64i8.v128i8(<128 x i8>, i64)
declare <128 x i8> @llvm.experimental.vector.extract.v128i8.v256i8(<256 x i8>, i64)
declare <4 x i8> @llvm.vector.extract.v4i8.v8i8(<8 x i8>, i64)
declare <8 x i8> @llvm.vector.extract.v8i8.v16i8(<16 x i8>, i64)
declare <16 x i8> @llvm.vector.extract.v16i8.v32i8(<32 x i8>, i64)
declare <32 x i8> @llvm.vector.extract.v32i8.v64i8(<64 x i8>, i64)
declare <64 x i8> @llvm.vector.extract.v64i8.v128i8(<128 x i8>, i64)
declare <128 x i8> @llvm.vector.extract.v128i8.v256i8(<256 x i8>, i64)

declare <2 x i16> @llvm.experimental.vector.extract.v2i16.v4i16(<4 x i16>, i64)
declare <4 x i16> @llvm.experimental.vector.extract.v4i16.v8i16(<8 x i16>, i64)
declare <8 x i16> @llvm.experimental.vector.extract.v8i16.v16i16(<16 x i16>, i64)
declare <16 x i16> @llvm.experimental.vector.extract.v16i16.v32i16(<32 x i16>, i64)
declare <32 x i16> @llvm.experimental.vector.extract.v32i16.v64i16(<64 x i16>, i64)
declare <64 x i16> @llvm.experimental.vector.extract.v64i16.v128i16(<128 x i16>, i64)
declare <2 x i16> @llvm.vector.extract.v2i16.v4i16(<4 x i16>, i64)
declare <4 x i16> @llvm.vector.extract.v4i16.v8i16(<8 x i16>, i64)
declare <8 x i16> @llvm.vector.extract.v8i16.v16i16(<16 x i16>, i64)
declare <16 x i16> @llvm.vector.extract.v16i16.v32i16(<32 x i16>, i64)
declare <32 x i16> @llvm.vector.extract.v32i16.v64i16(<64 x i16>, i64)
declare <64 x i16> @llvm.vector.extract.v64i16.v128i16(<128 x i16>, i64)

declare <1 x i32> @llvm.experimental.vector.extract.v1i32.v2i32(<2 x i32>, i64)
declare <2 x i32> @llvm.experimental.vector.extract.v2i32.v4i32(<4 x i32>, i64)
declare <4 x i32> @llvm.experimental.vector.extract.v4i32.v8i32(<8 x i32>, i64)
declare <8 x i32> @llvm.experimental.vector.extract.v8i32.v16i32(<16 x i32>, i64)
declare <16 x i32> @llvm.experimental.vector.extract.v16i32.v32i32(<32 x i32>, i64)
declare <32 x i32> @llvm.experimental.vector.extract.v32i32.v64i32(<64 x i32>, i64)
declare <1 x i32> @llvm.vector.extract.v1i32.v2i32(<2 x i32>, i64)
declare <2 x i32> @llvm.vector.extract.v2i32.v4i32(<4 x i32>, i64)
declare <4 x i32> @llvm.vector.extract.v4i32.v8i32(<8 x i32>, i64)
declare <8 x i32> @llvm.vector.extract.v8i32.v16i32(<16 x i32>, i64)
declare <16 x i32> @llvm.vector.extract.v16i32.v32i32(<32 x i32>, i64)
declare <32 x i32> @llvm.vector.extract.v32i32.v64i32(<64 x i32>, i64)

declare <1 x i64> @llvm.experimental.vector.extract.v1i64.v2i64(<2 x i64>, i64)
declare <2 x i64> @llvm.experimental.vector.extract.v2i64.v4i64(<4 x i64>, i64)
declare <4 x i64> @llvm.experimental.vector.extract.v4i64.v8i64(<8 x i64>, i64)
declare <8 x i64> @llvm.experimental.vector.extract.v8i64.v16i64(<16 x i64>, i64)
declare <16 x i64> @llvm.experimental.vector.extract.v16i64.v32i64(<32 x i64>, i64)
declare <1 x i64> @llvm.vector.extract.v1i64.v2i64(<2 x i64>, i64)
declare <2 x i64> @llvm.vector.extract.v2i64.v4i64(<4 x i64>, i64)
declare <4 x i64> @llvm.vector.extract.v4i64.v8i64(<8 x i64>, i64)
declare <8 x i64> @llvm.vector.extract.v8i64.v16i64(<16 x i64>, i64)
declare <16 x i64> @llvm.vector.extract.v16i64.v32i64(<32 x i64>, i64)

declare <2 x half> @llvm.experimental.vector.extract.v2f16.v4f16(<4 x half>, i64)
declare <4 x half> @llvm.experimental.vector.extract.v4f16.v8f16(<8 x half>, i64)
declare <8 x half> @llvm.experimental.vector.extract.v8f16.v16f16(<16 x half>, i64)
declare <16 x half> @llvm.experimental.vector.extract.v16f16.v32f16(<32 x half>, i64)
declare <32 x half> @llvm.experimental.vector.extract.v32f16.v64f16(<64 x half>, i64)
declare <64 x half> @llvm.experimental.vector.extract.v64f16.v128f16(<128 x half>, i64)
declare <2 x half> @llvm.vector.extract.v2f16.v4f16(<4 x half>, i64)
declare <4 x half> @llvm.vector.extract.v4f16.v8f16(<8 x half>, i64)
declare <8 x half> @llvm.vector.extract.v8f16.v16f16(<16 x half>, i64)
declare <16 x half> @llvm.vector.extract.v16f16.v32f16(<32 x half>, i64)
declare <32 x half> @llvm.vector.extract.v32f16.v64f16(<64 x half>, i64)
declare <64 x half> @llvm.vector.extract.v64f16.v128f16(<128 x half>, i64)

declare <1 x float> @llvm.experimental.vector.extract.v1f32.v2f32(<2 x float>, i64)
declare <2 x float> @llvm.experimental.vector.extract.v2f32.v4f32(<4 x float>, i64)
declare <4 x float> @llvm.experimental.vector.extract.v4f32.v8f32(<8 x float>, i64)
declare <8 x float> @llvm.experimental.vector.extract.v8f32.v16f32(<16 x float>, i64)
declare <16 x float> @llvm.experimental.vector.extract.v16f32.v32f32(<32 x float>, i64)
declare <32 x float> @llvm.experimental.vector.extract.v32f32.v64f32(<64 x float>, i64)
declare <1 x float> @llvm.vector.extract.v1f32.v2f32(<2 x float>, i64)
declare <2 x float> @llvm.vector.extract.v2f32.v4f32(<4 x float>, i64)
declare <4 x float> @llvm.vector.extract.v4f32.v8f32(<8 x float>, i64)
declare <8 x float> @llvm.vector.extract.v8f32.v16f32(<16 x float>, i64)
declare <16 x float> @llvm.vector.extract.v16f32.v32f32(<32 x float>, i64)
declare <32 x float> @llvm.vector.extract.v32f32.v64f32(<64 x float>, i64)

declare <1 x double> @llvm.experimental.vector.extract.v1f64.v2f64(<2 x double>, i64)
declare <2 x double> @llvm.experimental.vector.extract.v2f64.v4f64(<4 x double>, i64)
declare <4 x double> @llvm.experimental.vector.extract.v4f64.v8f64(<8 x double>, i64)
declare <8 x double> @llvm.experimental.vector.extract.v8f64.v16f64(<16 x double>, i64)
declare <16 x double> @llvm.experimental.vector.extract.v16f64.v32f64(<32 x double>, i64)
declare <1 x double> @llvm.vector.extract.v1f64.v2f64(<2 x double>, i64)
declare <2 x double> @llvm.vector.extract.v2f64.v4f64(<4 x double>, i64)
declare <4 x double> @llvm.vector.extract.v4f64.v8f64(<8 x double>, i64)
declare <8 x double> @llvm.vector.extract.v8f64.v16f64(<16 x double>, i64)
declare <16 x double> @llvm.vector.extract.v16f64.v32f64(<32 x double>, i64)

attributes #0 = { "target-features"="+sve" }

@@ -8,7 +8,7 @@ define <vscale x 16 x i1> @pred_load_v2i8(<2 x i8>* %addr) #0 {
; CHECK-NEXT: [[TMP2:%.*]] = load <vscale x 16 x i1>, <vscale x 16 x i1>* [[TMP1]]
; CHECK-NEXT: ret <vscale x 16 x i1> [[TMP2]]
%load = load <2 x i8>, <2 x i8>* %addr, align 4
%insert = tail call <vscale x 2 x i8> @llvm.experimental.vector.insert.nxv2i8.v2i8(<vscale x 2 x i8> undef, <2 x i8> %load, i64 0)
%insert = tail call <vscale x 2 x i8> @llvm.vector.insert.nxv2i8.v2i8(<vscale x 2 x i8> undef, <2 x i8> %load, i64 0)
%ret = bitcast <vscale x 2 x i8> %insert to <vscale x 16 x i1>
ret <vscale x 16 x i1> %ret
}

@@ -19,7 +19,7 @@ define <vscale x 16 x i1> @pred_load_v4i8(<4 x i8>* %addr) #1 {
; CHECK-NEXT: [[TMP2:%.*]] = load <vscale x 16 x i1>, <vscale x 16 x i1>* [[TMP1]]
; CHECK-NEXT: ret <vscale x 16 x i1> [[TMP2]]
%load = load <4 x i8>, <4 x i8>* %addr, align 4
%insert = tail call <vscale x 2 x i8> @llvm.experimental.vector.insert.nxv2i8.v4i8(<vscale x 2 x i8> undef, <4 x i8> %load, i64 0)
%insert = tail call <vscale x 2 x i8> @llvm.vector.insert.nxv2i8.v4i8(<vscale x 2 x i8> undef, <4 x i8> %load, i64 0)
%ret = bitcast <vscale x 2 x i8> %insert to <vscale x 16 x i1>
ret <vscale x 16 x i1> %ret
}

@@ -30,7 +30,7 @@ define <vscale x 16 x i1> @pred_load_v8i8(<8 x i8>* %addr) #2 {
; CHECK-NEXT: [[TMP2:%.*]] = load <vscale x 16 x i1>, <vscale x 16 x i1>* [[TMP1]]
; CHECK-NEXT: ret <vscale x 16 x i1> [[TMP2]]
%load = load <8 x i8>, <8 x i8>* %addr, align 4
%insert = tail call <vscale x 2 x i8> @llvm.experimental.vector.insert.nxv2i8.v8i8(<vscale x 2 x i8> undef, <8 x i8> %load, i64 0)
%insert = tail call <vscale x 2 x i8> @llvm.vector.insert.nxv2i8.v8i8(<vscale x 2 x i8> undef, <8 x i8> %load, i64 0)
%ret = bitcast <vscale x 2 x i8> %insert to <vscale x 16 x i1>
ret <vscale x 16 x i1> %ret
}

@@ -49,7 +49,7 @@ entry:
br label %bb1

bb1:
%insert = tail call <vscale x 2 x i8> @llvm.experimental.vector.insert.nxv2i8.v2i8(<vscale x 2 x i8> undef, <2 x i8> %load, i64 0)
%insert = tail call <vscale x 2 x i8> @llvm.vector.insert.nxv2i8.v2i8(<vscale x 2 x i8> undef, <2 x i8> %load, i64 0)
%ret = bitcast <vscale x 2 x i8> %insert to <vscale x 16 x i1>
ret <vscale x 16 x i1> %ret
}

@@ -57,9 +57,9 @@ bb1:
; Check that too small of a vscale prevents optimization
define <vscale x 16 x i1> @pred_load_neg1(<4 x i8>* %addr) #0 {
; CHECK-LABEL: @pred_load_neg1(
; CHECK: call <vscale x 2 x i8> @llvm.experimental.vector.insert
; CHECK: call <vscale x 2 x i8> @llvm.vector.insert
%load = load <4 x i8>, <4 x i8>* %addr, align 4
%insert = tail call <vscale x 2 x i8> @llvm.experimental.vector.insert.nxv2i8.v4i8(<vscale x 2 x i8> undef, <4 x i8> %load, i64 0)
%insert = tail call <vscale x 2 x i8> @llvm.vector.insert.nxv2i8.v4i8(<vscale x 2 x i8> undef, <4 x i8> %load, i64 0)
%ret = bitcast <vscale x 2 x i8> %insert to <vscale x 16 x i1>
ret <vscale x 16 x i1> %ret
}

@@ -67,9 +67,9 @@ define <vscale x 16 x i1> @pred_load_neg1(<4 x i8>* %addr) #0 {
; Check that too large of a vscale prevents optimization
define <vscale x 16 x i1> @pred_load_neg2(<4 x i8>* %addr) #2 {
; CHECK-LABEL: @pred_load_neg2(
; CHECK: call <vscale x 2 x i8> @llvm.experimental.vector.insert
; CHECK: call <vscale x 2 x i8> @llvm.vector.insert
%load = load <4 x i8>, <4 x i8>* %addr, align 4
%insert = tail call <vscale x 2 x i8> @llvm.experimental.vector.insert.nxv2i8.v4i8(<vscale x 2 x i8> undef, <4 x i8> %load, i64 0)
%insert = tail call <vscale x 2 x i8> @llvm.vector.insert.nxv2i8.v4i8(<vscale x 2 x i8> undef, <4 x i8> %load, i64 0)
%ret = bitcast <vscale x 2 x i8> %insert to <vscale x 16 x i1>
ret <vscale x 16 x i1> %ret
}

@@ -77,9 +77,9 @@ define <vscale x 16 x i1> @pred_load_neg2(<4 x i8>* %addr) #2 {
; Check that a non-zero index prevents optimization
define <vscale x 16 x i1> @pred_load_neg3(<4 x i8>* %addr) #1 {
; CHECK-LABEL: @pred_load_neg3(
; CHECK: call <vscale x 2 x i8> @llvm.experimental.vector.insert
; CHECK: call <vscale x 2 x i8> @llvm.vector.insert
%load = load <4 x i8>, <4 x i8>* %addr, align 4
%insert = tail call <vscale x 2 x i8> @llvm.experimental.vector.insert.nxv2i8.v4i8(<vscale x 2 x i8> undef, <4 x i8> %load, i64 4)
%insert = tail call <vscale x 2 x i8> @llvm.vector.insert.nxv2i8.v4i8(<vscale x 2 x i8> undef, <4 x i8> %load, i64 4)
%ret = bitcast <vscale x 2 x i8> %insert to <vscale x 16 x i1>
ret <vscale x 16 x i1> %ret
}

@@ -87,9 +87,9 @@ define <vscale x 16 x i1> @pred_load_neg3(<4 x i8>* %addr) #1 {
; Check that differing vscale min/max prevents optimization
define <vscale x 16 x i1> @pred_load_neg4(<4 x i8>* %addr) #3 {
; CHECK-LABEL: @pred_load_neg4(
; CHECK: call <vscale x 2 x i8> @llvm.experimental.vector.insert
; CHECK: call <vscale x 2 x i8> @llvm.vector.insert
%load = load <4 x i8>, <4 x i8>* %addr, align 4
%insert = tail call <vscale x 2 x i8> @llvm.experimental.vector.insert.nxv2i8.v4i8(<vscale x 2 x i8> undef, <4 x i8> %load, i64 0)
%insert = tail call <vscale x 2 x i8> @llvm.vector.insert.nxv2i8.v4i8(<vscale x 2 x i8> undef, <4 x i8> %load, i64 0)
%ret = bitcast <vscale x 2 x i8> %insert to <vscale x 16 x i1>
ret <vscale x 16 x i1> %ret
}

@@ -97,16 +97,16 @@ define <vscale x 16 x i1> @pred_load_neg4(<4 x i8>* %addr) #3 {
; Check that insertion into a non-undef vector prevents optimization
define <vscale x 16 x i1> @pred_load_neg5(<4 x i8>* %addr, <vscale x 2 x i8> %passthru) #1 {
; CHECK-LABEL: @pred_load_neg5(
; CHECK: call <vscale x 2 x i8> @llvm.experimental.vector.insert
; CHECK: call <vscale x 2 x i8> @llvm.vector.insert
%load = load <4 x i8>, <4 x i8>* %addr, align 4
%insert = tail call <vscale x 2 x i8> @llvm.experimental.vector.insert.nxv2i8.v4i8(<vscale x 2 x i8> %passthru, <4 x i8> %load, i64 0)
%insert = tail call <vscale x 2 x i8> @llvm.vector.insert.nxv2i8.v4i8(<vscale x 2 x i8> %passthru, <4 x i8> %load, i64 0)
%ret = bitcast <vscale x 2 x i8> %insert to <vscale x 16 x i1>
ret <vscale x 16 x i1> %ret
}

declare <vscale x 2 x i8> @llvm.experimental.vector.insert.nxv2i8.v2i8(<vscale x 2 x i8>, <2 x i8>, i64)
declare <vscale x 2 x i8> @llvm.experimental.vector.insert.nxv2i8.v4i8(<vscale x 2 x i8>, <4 x i8>, i64)
declare <vscale x 2 x i8> @llvm.experimental.vector.insert.nxv2i8.v8i8(<vscale x 2 x i8>, <8 x i8>, i64)
declare <vscale x 2 x i8> @llvm.vector.insert.nxv2i8.v2i8(<vscale x 2 x i8>, <2 x i8>, i64)
declare <vscale x 2 x i8> @llvm.vector.insert.nxv2i8.v4i8(<vscale x 2 x i8>, <4 x i8>, i64)
declare <vscale x 2 x i8> @llvm.vector.insert.nxv2i8.v8i8(<vscale x 2 x i8>, <8 x i8>, i64)

attributes #0 = { "target-features"="+sve" vscale_range(1,1) }
attributes #1 = { "target-features"="+sve" vscale_range(2,2) }

@ -8,7 +8,7 @@ define <vscale x 2 x i64> @insert_v2i64_nxv2i64(<vscale x 2 x i64> %vec, <2 x i6
|
|||
; CHECK-NEXT: // kill: def $q1 killed $q1 def $z1
|
||||
; CHECK-NEXT: mov z0.d, p0/m, z1.d
|
||||
; CHECK-NEXT: ret
|
||||
%retval = call <vscale x 2 x i64> @llvm.experimental.vector.insert.nxv2i64.v2i64(<vscale x 2 x i64> %vec, <2 x i64> %subvec, i64 0)
|
||||
%retval = call <vscale x 2 x i64> @llvm.vector.insert.nxv2i64.v2i64(<vscale x 2 x i64> %vec, <2 x i64> %subvec, i64 0)
|
||||
ret <vscale x 2 x i64> %retval
|
||||
}
|
||||
|
||||
|
@ -31,7 +31,7 @@ define <vscale x 2 x i64> @insert_v2i64_nxv2i64_idx2(<vscale x 2 x i64> %vec, <2
|
|||
; CHECK-NEXT: addvl sp, sp, #1
|
||||
; CHECK-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload
|
||||
; CHECK-NEXT: ret
|
||||
%retval = call <vscale x 2 x i64> @llvm.experimental.vector.insert.nxv2i64.v2i64(<vscale x 2 x i64> %vec, <2 x i64> %subvec, i64 2)
|
||||
%retval = call <vscale x 2 x i64> @llvm.vector.insert.nxv2i64.v2i64(<vscale x 2 x i64> %vec, <2 x i64> %subvec, i64 2)
|
||||
ret <vscale x 2 x i64> %retval
|
||||
}
|
||||
|
||||
|
@ -42,7 +42,7 @@ define <vscale x 4 x i32> @insert_v4i32_nxv4i32(<vscale x 4 x i32> %vec, <4 x i3
|
|||
; CHECK-NEXT: // kill: def $q1 killed $q1 def $z1
|
||||
; CHECK-NEXT: mov z0.s, p0/m, z1.s
|
||||
; CHECK-NEXT: ret
|
||||
%retval = call <vscale x 4 x i32> @llvm.experimental.vector.insert.nxv4i32.v4i32(<vscale x 4 x i32> %vec, <4 x i32> %subvec, i64 0)
|
||||
%retval = call <vscale x 4 x i32> @llvm.vector.insert.nxv4i32.v4i32(<vscale x 4 x i32> %vec, <4 x i32> %subvec, i64 0)
|
||||
ret <vscale x 4 x i32> %retval
|
||||
}
|
||||
|
||||
|
@ -65,7 +65,7 @@ define <vscale x 4 x i32> @insert_v4i32_nxv4i32_idx4(<vscale x 4 x i32> %vec, <4
|
|||
; CHECK-NEXT: addvl sp, sp, #1
|
||||
; CHECK-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload
|
||||
; CHECK-NEXT: ret
|
||||
%retval = call <vscale x 4 x i32> @llvm.experimental.vector.insert.nxv4i32.v4i32(<vscale x 4 x i32> %vec, <4 x i32> %subvec, i64 4)
|
||||
%retval = call <vscale x 4 x i32> @llvm.vector.insert.nxv4i32.v4i32(<vscale x 4 x i32> %vec, <4 x i32> %subvec, i64 4)
|
||||
ret <vscale x 4 x i32> %retval
|
||||
}
|
||||
|
||||
|
@ -76,7 +76,7 @@ define <vscale x 8 x i16> @insert_v8i16_nxv8i16(<vscale x 8 x i16> %vec, <8 x i1
|
|||
; CHECK-NEXT: // kill: def $q1 killed $q1 def $z1
|
||||
; CHECK-NEXT: mov z0.h, p0/m, z1.h
|
||||
; CHECK-NEXT: ret
|
||||
%retval = call <vscale x 8 x i16> @llvm.experimental.vector.insert.nxv8i16.v8i16(<vscale x 8 x i16> %vec, <8 x i16> %subvec, i64 0)
|
||||
%retval = call <vscale x 8 x i16> @llvm.vector.insert.nxv8i16.v8i16(<vscale x 8 x i16> %vec, <8 x i16> %subvec, i64 0)
|
||||
ret <vscale x 8 x i16> %retval
|
||||
}
|
||||
|
||||
|
@ -99,7 +99,7 @@ define <vscale x 8 x i16> @insert_v8i16_nxv8i16_idx8(<vscale x 8 x i16> %vec, <8
|
|||
; CHECK-NEXT: addvl sp, sp, #1
|
||||
; CHECK-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload
|
||||
; CHECK-NEXT: ret
|
||||
%retval = call <vscale x 8 x i16> @llvm.experimental.vector.insert.nxv8i16.v8i16(<vscale x 8 x i16> %vec, <8 x i16> %subvec, i64 8)
|
||||
%retval = call <vscale x 8 x i16> @llvm.vector.insert.nxv8i16.v8i16(<vscale x 8 x i16> %vec, <8 x i16> %subvec, i64 8)
|
||||
ret <vscale x 8 x i16> %retval
|
||||
}
|
||||
|
||||
|
@ -110,7 +110,7 @@ define <vscale x 16 x i8> @insert_v16i8_nxv16i8(<vscale x 16 x i8> %vec, <16 x i
|
|||
; CHECK-NEXT: // kill: def $q1 killed $q1 def $z1
|
||||
; CHECK-NEXT: mov z0.b, p0/m, z1.b
|
||||
; CHECK-NEXT: ret
|
||||
%retval = call <vscale x 16 x i8> @llvm.experimental.vector.insert.nxv16i8.v16i8(<vscale x 16 x i8> %vec, <16 x i8> %subvec, i64 0)
|
||||
%retval = call <vscale x 16 x i8> @llvm.vector.insert.nxv16i8.v16i8(<vscale x 16 x i8> %vec, <16 x i8> %subvec, i64 0)
|
||||
ret <vscale x 16 x i8> %retval
|
||||
}
|
||||
|
||||
|
@ -132,7 +132,7 @@ define <vscale x 16 x i8> @insert_v16i8_nxv16i8_idx16(<vscale x 16 x i8> %vec, <
|
|||
; CHECK-NEXT: addvl sp, sp, #1
|
||||
; CHECK-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload
|
||||
; CHECK-NEXT: ret
|
||||
%retval = call <vscale x 16 x i8> @llvm.experimental.vector.insert.nxv16i8.v16i8(<vscale x 16 x i8> %vec, <16 x i8> %subvec, i64 16)
|
||||
%retval = call <vscale x 16 x i8> @llvm.vector.insert.nxv16i8.v16i8(<vscale x 16 x i8> %vec, <16 x i8> %subvec, i64 16)
|
||||
ret <vscale x 16 x i8> %retval
|
||||
}
|
||||
|
||||
|
@ -152,8 +152,8 @@ define void @insert_nxv8i64_nxv16i64(<vscale x 8 x i64> %sv0, <vscale x 8 x i64>
|
|||
; CHECK-NEXT: st1d { z1.d }, p0, [x0, #1, mul vl]
|
||||
; CHECK-NEXT: st1d { z0.d }, p0, [x0]
|
||||
; CHECK-NEXT: ret
|
||||
%v0 = call <vscale x 16 x i64> @llvm.experimental.vector.insert.nxv8i64.nxv16i64(<vscale x 16 x i64> undef, <vscale x 8 x i64> %sv0, i64 0)
|
||||
%v = call <vscale x 16 x i64> @llvm.experimental.vector.insert.nxv8i64.nxv16i64(<vscale x 16 x i64> %v0, <vscale x 8 x i64> %sv1, i64 8)
|
||||
%v0 = call <vscale x 16 x i64> @llvm.vector.insert.nxv8i64.nxv16i64(<vscale x 16 x i64> undef, <vscale x 8 x i64> %sv0, i64 0)
|
||||
%v = call <vscale x 16 x i64> @llvm.vector.insert.nxv8i64.nxv16i64(<vscale x 16 x i64> %v0, <vscale x 8 x i64> %sv1, i64 8)
|
||||
store <vscale x 16 x i64> %v, <vscale x 16 x i64>* %out
|
||||
ret void
|
||||
}
|
||||
|
@ -167,7 +167,7 @@ define void @insert_nxv8i64_nxv16i64_lo(<vscale x 8 x i64> %sv0, <vscale x 16 x
|
|||
; CHECK-NEXT: st1d { z1.d }, p0, [x0, #1, mul vl]
|
||||
; CHECK-NEXT: st1d { z0.d }, p0, [x0]
|
||||
; CHECK-NEXT: ret
|
||||
%v = call <vscale x 16 x i64> @llvm.experimental.vector.insert.nxv8i64.nxv16i64(<vscale x 16 x i64> undef, <vscale x 8 x i64> %sv0, i64 0)
|
||||
%v = call <vscale x 16 x i64> @llvm.vector.insert.nxv8i64.nxv16i64(<vscale x 16 x i64> undef, <vscale x 8 x i64> %sv0, i64 0)
|
||||
store <vscale x 16 x i64> %v, <vscale x 16 x i64>* %out
|
||||
ret void
|
||||
}
|
||||
|
@ -181,7 +181,7 @@ define void @insert_nxv8i64_nxv16i64_hi(<vscale x 8 x i64> %sv0, <vscale x 16 x
|
|||
; CHECK-NEXT: st1d { z1.d }, p0, [x0, #5, mul vl]
|
||||
; CHECK-NEXT: st1d { z0.d }, p0, [x0, #4, mul vl]
|
||||
; CHECK-NEXT: ret
|
||||
%v = call <vscale x 16 x i64> @llvm.experimental.vector.insert.nxv8i64.nxv16i64(<vscale x 16 x i64> undef, <vscale x 8 x i64> %sv0, i64 8)
|
||||
%v = call <vscale x 16 x i64> @llvm.vector.insert.nxv8i64.nxv16i64(<vscale x 16 x i64> undef, <vscale x 8 x i64> %sv0, i64 8)
|
||||
store <vscale x 16 x i64> %v, <vscale x 16 x i64>* %out
|
||||
ret void
|
||||
}
|
||||
|
@ -212,8 +212,8 @@ define void @insert_v2i64_nxv16i64(<2 x i64> %sv0, <2 x i64> %sv1, <vscale x 16
|
|||
; CHECK-NEXT: .cfi_def_cfa_offset 0
|
||||
; CHECK-NEXT: .cfi_restore w29
|
||||
; CHECK-NEXT: ret
|
||||
%v0 = call <vscale x 16 x i64> @llvm.experimental.vector.insert.v2i64.nxv16i64(<vscale x 16 x i64> undef, <2 x i64> %sv0, i64 0)
|
||||
%v = call <vscale x 16 x i64> @llvm.experimental.vector.insert.v2i64.nxv16i64(<vscale x 16 x i64> %v0, <2 x i64> %sv1, i64 4)
|
||||
%v0 = call <vscale x 16 x i64> @llvm.vector.insert.v2i64.nxv16i64(<vscale x 16 x i64> undef, <2 x i64> %sv0, i64 0)
|
||||
%v = call <vscale x 16 x i64> @llvm.vector.insert.v2i64.nxv16i64(<vscale x 16 x i64> %v0, <2 x i64> %sv1, i64 4)
|
||||
store <vscale x 16 x i64> %v, <vscale x 16 x i64>* %out
|
||||
ret void
|
||||
}
|
||||
|
@ -226,7 +226,7 @@ define void @insert_v2i64_nxv16i64_lo0(<2 x i64>* %psv, <vscale x 16 x i64>* %ou
|
|||
; CHECK-NEXT: st1d { z0.d }, p0, [x1]
|
||||
; CHECK-NEXT: ret
|
||||
%sv = load <2 x i64>, <2 x i64>* %psv
|
||||
%v = call <vscale x 16 x i64> @llvm.experimental.vector.insert.v2i64.nxv16i64(<vscale x 16 x i64> undef, <2 x i64> %sv, i64 0)
|
||||
%v = call <vscale x 16 x i64> @llvm.vector.insert.v2i64.nxv16i64(<vscale x 16 x i64> undef, <2 x i64> %sv, i64 0)
|
||||
store <vscale x 16 x i64> %v, <vscale x 16 x i64>* %out
|
||||
ret void
|
||||
}
|
||||
|
@ -253,7 +253,7 @@ define void @insert_v2i64_nxv16i64_lo2(<2 x i64>* %psv, <vscale x 16 x i64>* %ou
|
|||
; CHECK-NEXT: .cfi_restore w29
|
||||
; CHECK-NEXT: ret
|
||||
%sv = load <2 x i64>, <2 x i64>* %psv
|
||||
%v = call <vscale x 16 x i64> @llvm.experimental.vector.insert.v2i64.nxv16i64(<vscale x 16 x i64> undef, <2 x i64> %sv, i64 2)
|
||||
%v = call <vscale x 16 x i64> @llvm.vector.insert.v2i64.nxv16i64(<vscale x 16 x i64> undef, <2 x i64> %sv, i64 2)
|
||||
store <vscale x 16 x i64> %v, <vscale x 16 x i64>* %out
|
||||
ret void
|
||||
}
|
||||
|
@ -269,7 +269,7 @@ define <vscale x 4 x i32> @insert_nxv1i32_nxv4i32_undef() nounwind {
|
|||
entry:
|
||||
%0 = insertelement <vscale x 1 x i32> undef, i32 1, i32 0
|
||||
%subvec = shufflevector <vscale x 1 x i32> %0, <vscale x 1 x i32> undef, <vscale x 1 x i32> zeroinitializer
|
||||
%retval = call <vscale x 4 x i32> @llvm.experimental.vector.insert.nxv4i32.nxv1i32(<vscale x 4 x i32> undef, <vscale x 1 x i32> %subvec, i64 0)
|
||||
%retval = call <vscale x 4 x i32> @llvm.vector.insert.nxv4i32.nxv1i32(<vscale x 4 x i32> undef, <vscale x 1 x i32> %subvec, i64 0)
|
||||
ret <vscale x 4 x i32> %retval
|
||||
}
|
||||
|
||||
|
@ -281,7 +281,7 @@ define <vscale x 6 x i16> @insert_nxv1i16_nxv6i16_undef() nounwind {
|
|||
entry:
|
||||
%0 = insertelement <vscale x 1 x i16> undef, i16 1, i32 0
|
||||
%subvec = shufflevector <vscale x 1 x i16> %0, <vscale x 1 x i16> undef, <vscale x 1 x i32> zeroinitializer
|
||||
%retval = call <vscale x 6 x i16> @llvm.experimental.vector.insert.nxv6i16.nxv1i16(<vscale x 6 x i16> undef, <vscale x 1 x i16> %subvec, i64 0)
|
||||
%retval = call <vscale x 6 x i16> @llvm.vector.insert.nxv6i16.nxv1i16(<vscale x 6 x i16> undef, <vscale x 1 x i16> %subvec, i64 0)
|
||||
ret <vscale x 6 x i16> %retval
|
||||
}
|
||||
|
||||
|
@ -291,7 +291,7 @@ define <vscale x 4 x float> @insert_nxv1f32_nxv4f32_undef(<vscale x 1 x float> %
|
|||
; CHECK-NEXT: uzp1 z0.s, z0.s, z0.s
|
||||
; CHECK-NEXT: ret
|
||||
entry:
|
||||
%retval = call <vscale x 4 x float> @llvm.experimental.vector.insert.nxv4f32.nxv1f32(<vscale x 4 x float> undef, <vscale x 1 x float> %subvec, i64 0)
|
||||
%retval = call <vscale x 4 x float> @llvm.vector.insert.nxv4f32.nxv1f32(<vscale x 4 x float> undef, <vscale x 1 x float> %subvec, i64 0)
|
||||
ret <vscale x 4 x float> %retval
|
||||
}
|
||||
|
||||
|
@ -305,7 +305,7 @@ define <vscale x 8 x i16> @insert_nxv8i16_nxv2i16(<vscale x 8 x i16> %vec, <vsca
|
|||
; CHECK-NEXT: uzp1 z1.s, z2.s, z1.s
|
||||
; CHECK-NEXT: uzp1 z0.h, z1.h, z0.h
|
||||
; CHECK-NEXT: ret
|
||||
%r = call <vscale x 8 x i16> @llvm.experimental.vector.insert.nxv8i16.nxv2i16(<vscale x 8 x i16> %vec, <vscale x 2 x i16> %in, i64 2)
|
||||
%r = call <vscale x 8 x i16> @llvm.vector.insert.nxv8i16.nxv2i16(<vscale x 8 x i16> %vec, <vscale x 2 x i16> %in, i64 2)
|
||||
ret <vscale x 8 x i16> %r
|
||||
}
|
||||
|
||||
|
@ -315,7 +315,7 @@ define <vscale x 4 x half> @insert_nxv4f16_nxv2f16_0(<vscale x 4 x half> %sv0, <
|
|||
; CHECK-NEXT: uunpkhi z0.d, z0.s
|
||||
; CHECK-NEXT: uzp1 z0.s, z1.s, z0.s
|
||||
; CHECK-NEXT: ret
|
||||
%v0 = call <vscale x 4 x half> @llvm.experimental.vector.insert.nxv4f16.nxv2f16(<vscale x 4 x half> %sv0, <vscale x 2 x half> %sv1, i64 0)
|
||||
%v0 = call <vscale x 4 x half> @llvm.vector.insert.nxv4f16.nxv2f16(<vscale x 4 x half> %sv0, <vscale x 2 x half> %sv1, i64 0)
|
||||
ret <vscale x 4 x half> %v0
|
||||
}
|
||||
|
||||
|
@ -325,7 +325,7 @@ define <vscale x 4 x half> @insert_nxv4f16_nxv2f16_2(<vscale x 4 x half> %sv0, <
|
|||
; CHECK-NEXT: uunpklo z0.d, z0.s
|
||||
; CHECK-NEXT: uzp1 z0.s, z0.s, z1.s
|
||||
; CHECK-NEXT: ret
|
||||
%v0 = call <vscale x 4 x half> @llvm.experimental.vector.insert.nxv4f16.nxv2f16(<vscale x 4 x half> %sv0, <vscale x 2 x half> %sv1, i64 2)
|
||||
%v0 = call <vscale x 4 x half> @llvm.vector.insert.nxv4f16.nxv2f16(<vscale x 4 x half> %sv0, <vscale x 2 x half> %sv1, i64 2)
|
||||
ret <vscale x 4 x half> %v0
|
||||
}
|
||||
|
||||
|
@ -343,7 +343,7 @@ define <vscale x 8 x half> @insert_nxv8f16_nxv2f16(<vscale x 8 x half> %vec, <vs
|
|||
; CHECK-NEXT: addvl sp, sp, #1
|
||||
; CHECK-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload
|
||||
; CHECK-NEXT: ret
|
||||
%r = call <vscale x 8 x half> @llvm.experimental.vector.insert.nxv8f16.nxv2f16(<vscale x 8 x half> %vec, <vscale x 2 x half> %in, i64 2)
|
||||
%r = call <vscale x 8 x half> @llvm.vector.insert.nxv8f16.nxv2f16(<vscale x 8 x half> %vec, <vscale x 2 x half> %in, i64 2)
|
||||
ret <vscale x 8 x half> %r
|
||||
}
|
||||
|
||||
|
@ -353,7 +353,7 @@ define <vscale x 8 x half> @insert_nxv8f16_nxv4f16_0(<vscale x 8 x half> %sv0, <
|
|||
; CHECK-NEXT: uunpkhi z0.s, z0.h
|
||||
; CHECK-NEXT: uzp1 z0.h, z1.h, z0.h
|
||||
; CHECK-NEXT: ret
|
||||
%v0 = call <vscale x 8 x half> @llvm.experimental.vector.insert.nxv8f16.nxv4f16(<vscale x 8 x half> %sv0, <vscale x 4 x half> %sv1, i64 0)
|
||||
%v0 = call <vscale x 8 x half> @llvm.vector.insert.nxv8f16.nxv4f16(<vscale x 8 x half> %sv0, <vscale x 4 x half> %sv1, i64 0)
|
||||
ret <vscale x 8 x half> %v0
|
||||
}
|
||||
|
||||
|
@ -363,7 +363,7 @@ define <vscale x 8 x half> @insert_nxv8f16_nxv4f16_4(<vscale x 8 x half> %sv0, <
|
|||
; CHECK-NEXT: uunpklo z0.s, z0.h
|
||||
; CHECK-NEXT: uzp1 z0.h, z0.h, z1.h
|
||||
; CHECK-NEXT: ret
|
||||
%v0 = call <vscale x 8 x half> @llvm.experimental.vector.insert.nxv8f16.nxv4f16(<vscale x 8 x half> %sv0, <vscale x 4 x half> %sv1, i64 4)
|
||||
%v0 = call <vscale x 8 x half> @llvm.vector.insert.nxv8f16.nxv4f16(<vscale x 8 x half> %sv0, <vscale x 4 x half> %sv1, i64 4)
|
||||
ret <vscale x 8 x half> %v0
|
||||
}
|
||||
|
||||
|
@ -388,7 +388,7 @@ define <vscale x 2 x i64> @insert_fixed_v2i64_nxv2i64(<vscale x 2 x i64> %vec, <
|
|||
; CHECK-NEXT: addvl sp, sp, #1
|
||||
; CHECK-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload
|
||||
; CHECK-NEXT: ret
|
||||
%retval = call <vscale x 2 x i64> @llvm.experimental.vector.insert.nxv2i64.v2i64(<vscale x 2 x i64> %vec, <2 x i64> %subvec, i64 2)
|
||||
%retval = call <vscale x 2 x i64> @llvm.vector.insert.nxv2i64.v2i64(<vscale x 2 x i64> %vec, <2 x i64> %subvec, i64 2)
|
||||
ret <vscale x 2 x i64> %retval
|
||||
}
|
||||
|
||||
|
@ -413,7 +413,7 @@ define <vscale x 2 x i64> @insert_fixed_v4i64_nxv2i64(<vscale x 2 x i64> %vec, <
|
|||
; CHECK-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload
|
||||
; CHECK-NEXT: ret
|
||||
%subvec = load <4 x i64>, <4 x i64>* %ptr
|
||||
%retval = call <vscale x 2 x i64> @llvm.experimental.vector.insert.nxv2i64.v4i64(<vscale x 2 x i64> %vec, <4 x i64> %subvec, i64 4)
|
||||
%retval = call <vscale x 2 x i64> @llvm.vector.insert.nxv2i64.v4i64(<vscale x 2 x i64> %vec, <4 x i64> %subvec, i64 4)
|
||||
ret <vscale x 2 x i64> %retval
|
||||
}
|
||||
|
||||
|
@ -426,7 +426,7 @@ define <vscale x 3 x i32> @insert_nxv3i32_nxv2i32(<vscale x 2 x i32> %sv0) {
|
|||
; CHECK: // %bb.0:
|
||||
; CHECK-NEXT: uzp1 z0.s, z0.s, z0.s
|
||||
; CHECK-NEXT: ret
|
||||
%v0 = call <vscale x 3 x i32> @llvm.experimental.vector.insert.nxv3i32.nxv2i32(<vscale x 3 x i32> undef, <vscale x 2 x i32> %sv0, i64 0)
|
||||
%v0 = call <vscale x 3 x i32> @llvm.vector.insert.nxv3i32.nxv2i32(<vscale x 3 x i32> undef, <vscale x 2 x i32> %sv0, i64 0)
|
||||
ret <vscale x 3 x i32> %v0
|
||||
}
|
||||
|
||||
|
@ -437,7 +437,7 @@ define <vscale x 3 x i32> @insert_nxv3i32_nxv2i32_2(<vscale x 3 x i32> %sv0, <vs
|
|||
; CHECK-NEXT: uunpkhi z0.d, z0.s
|
||||
; CHECK-NEXT: uzp1 z0.s, z1.s, z0.s
|
||||
; CHECK-NEXT: ret
|
||||
%v0 = call <vscale x 3 x i32> @llvm.experimental.vector.insert.nxv3i32.nxv2i32(<vscale x 3 x i32> %sv0, <vscale x 2 x i32> %sv1, i64 0)
|
||||
%v0 = call <vscale x 3 x i32> @llvm.vector.insert.nxv3i32.nxv2i32(<vscale x 3 x i32> %sv0, <vscale x 2 x i32> %sv1, i64 0)
|
||||
ret <vscale x 3 x i32> %v0
|
||||
}
|
||||
|
||||
|
@ -446,7 +446,7 @@ define <vscale x 3 x float> @insert_nxv3f32_nxv2f32(<vscale x 2 x float> %sv0) n
|
|||
; CHECK: // %bb.0:
|
||||
; CHECK-NEXT: uzp1 z0.s, z0.s, z0.s
|
||||
; CHECK-NEXT: ret
|
||||
%v0 = call <vscale x 3 x float> @llvm.experimental.vector.insert.nxv3f32.nxv2f32(<vscale x 3 x float> undef, <vscale x 2 x float> %sv0, i64 0)
|
||||
%v0 = call <vscale x 3 x float> @llvm.vector.insert.nxv3f32.nxv2f32(<vscale x 3 x float> undef, <vscale x 2 x float> %sv0, i64 0)
|
||||
ret <vscale x 3 x float> %v0
|
||||
}
|
||||
|
||||
|
@ -456,7 +456,7 @@ define <vscale x 4 x float> @insert_nxv4f32_nxv2f32_0(<vscale x 4 x float> %sv0,
|
|||
; CHECK-NEXT: uunpkhi z0.d, z0.s
|
||||
; CHECK-NEXT: uzp1 z0.s, z1.s, z0.s
|
||||
; CHECK-NEXT: ret
|
||||
%v0 = call <vscale x 4 x float> @llvm.experimental.vector.insert.nxv4f32.nxv2f32(<vscale x 4 x float> %sv0, <vscale x 2 x float> %sv1, i64 0)
|
||||
%v0 = call <vscale x 4 x float> @llvm.vector.insert.nxv4f32.nxv2f32(<vscale x 4 x float> %sv0, <vscale x 2 x float> %sv1, i64 0)
|
||||
ret <vscale x 4 x float> %v0
|
||||
}
|
||||
|
||||
|
@ -466,7 +466,7 @@ define <vscale x 4 x float> @insert_nxv4f32_nxv2f32_2(<vscale x 4 x float> %sv0,
|
|||
; CHECK-NEXT: uunpklo z0.d, z0.s
|
||||
; CHECK-NEXT: uzp1 z0.s, z0.s, z1.s
|
||||
; CHECK-NEXT: ret
|
||||
%v0 = call <vscale x 4 x float> @llvm.experimental.vector.insert.nxv4f32.nxv2f32(<vscale x 4 x float> %sv0, <vscale x 2 x float> %sv1, i64 2)
|
||||
%v0 = call <vscale x 4 x float> @llvm.vector.insert.nxv4f32.nxv2f32(<vscale x 4 x float> %sv0, <vscale x 2 x float> %sv1, i64 2)
|
||||
ret <vscale x 4 x float> %v0
|
||||
}
|
||||
|
||||
|
@ -482,8 +482,8 @@ define <vscale x 6 x i32> @insert_nxv6i32_nxv2i32(<vscale x 2 x i32> %sv0, <vsc
|
|||
; CHECK-NEXT: addvl sp, sp, #2
|
||||
; CHECK-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload
; CHECK-NEXT: ret
%v0 = call <vscale x 6 x i32> @llvm.experimental.vector.insert.nxv6i32.nxv2i32(<vscale x 6 x i32> undef, <vscale x 2 x i32> %sv0, i64 0)
%v1 = call <vscale x 6 x i32> @llvm.experimental.vector.insert.nxv6i32.nxv2i32(<vscale x 6 x i32> %v0, <vscale x 2 x i32> %sv1, i64 2)
%v0 = call <vscale x 6 x i32> @llvm.vector.insert.nxv6i32.nxv2i32(<vscale x 6 x i32> undef, <vscale x 2 x i32> %sv0, i64 0)
%v1 = call <vscale x 6 x i32> @llvm.vector.insert.nxv6i32.nxv2i32(<vscale x 6 x i32> %v0, <vscale x 2 x i32> %sv1, i64 2)
ret <vscale x 6 x i32> %v1
}

@ -492,7 +492,7 @@ define <vscale x 6 x i32> @insert_nxv6i32_nxv3i32(<vscale x 3 x i32> %sv0) {
; CHECK-LABEL: insert_nxv6i32_nxv3i32:
; CHECK: // %bb.0:
; CHECK-NEXT: ret
%v0 = call <vscale x 6 x i32> @llvm.experimental.vector.insert.nxv6i32.nxv3i32(<vscale x 6 x i32> undef, <vscale x 3 x i32> %sv0, i64 0)
%v0 = call <vscale x 6 x i32> @llvm.vector.insert.nxv6i32.nxv3i32(<vscale x 6 x i32> undef, <vscale x 3 x i32> %sv0, i64 0)
ret <vscale x 6 x i32> %v0
}

@ -500,9 +500,9 @@ define <vscale x 12 x i32> @insert_nxv12i32_nxv4i32(<vscale x 4 x i32> %sv0, <vs
; CHECK-LABEL: insert_nxv12i32_nxv4i32:
; CHECK: // %bb.0:
; CHECK-NEXT: ret
%v0 = call <vscale x 12 x i32> @llvm.experimental.vector.insert.nxv4i32.nxv12i32(<vscale x 12 x i32> undef, <vscale x 4 x i32> %sv0, i64 0)
%v1 = call <vscale x 12 x i32> @llvm.experimental.vector.insert.nxv4i32.nxv12i32(<vscale x 12 x i32> %v0, <vscale x 4 x i32> %sv1, i64 4)
%v2 = call <vscale x 12 x i32> @llvm.experimental.vector.insert.nxv4i32.nxv12i32(<vscale x 12 x i32> %v1, <vscale x 4 x i32> %sv2, i64 8)
%v0 = call <vscale x 12 x i32> @llvm.vector.insert.nxv4i32.nxv12i32(<vscale x 12 x i32> undef, <vscale x 4 x i32> %sv0, i64 0)
%v1 = call <vscale x 12 x i32> @llvm.vector.insert.nxv4i32.nxv12i32(<vscale x 12 x i32> %v0, <vscale x 4 x i32> %sv1, i64 4)
%v2 = call <vscale x 12 x i32> @llvm.vector.insert.nxv4i32.nxv12i32(<vscale x 12 x i32> %v1, <vscale x 4 x i32> %sv2, i64 8)
ret <vscale x 12 x i32> %v2
}

@ -511,7 +511,7 @@ define <vscale x 2 x bfloat> @insert_nxv2bf16_nxv2bf16(<vscale x 2 x bfloat> %sv
; CHECK: // %bb.0:
; CHECK-NEXT: mov z0.d, z1.d
; CHECK-NEXT: ret
%v0 = call <vscale x 2 x bfloat> @llvm.experimental.vector.insert.nxv2bf16.nxv2bf16(<vscale x 2 x bfloat> %sv0, <vscale x 2 x bfloat> %sv1, i64 0)
%v0 = call <vscale x 2 x bfloat> @llvm.vector.insert.nxv2bf16.nxv2bf16(<vscale x 2 x bfloat> %sv0, <vscale x 2 x bfloat> %sv1, i64 0)
ret <vscale x 2 x bfloat> %v0
}

@ -520,7 +520,7 @@ define <vscale x 4 x bfloat> @insert_nxv4bf16_nxv4bf16(<vscale x 4 x bfloat> %sv
; CHECK: // %bb.0:
; CHECK-NEXT: mov z0.d, z1.d
; CHECK-NEXT: ret
%v0 = call <vscale x 4 x bfloat> @llvm.experimental.vector.insert.nxv4bf16.nxv4bf16(<vscale x 4 x bfloat> %sv0, <vscale x 4 x bfloat> %sv1, i64 0)
%v0 = call <vscale x 4 x bfloat> @llvm.vector.insert.nxv4bf16.nxv4bf16(<vscale x 4 x bfloat> %sv0, <vscale x 4 x bfloat> %sv1, i64 0)
ret <vscale x 4 x bfloat> %v0
}

@ -537,7 +537,7 @@ define <vscale x 4 x bfloat> @insert_nxv4bf16_v4bf16(<vscale x 4 x bfloat> %sv0,
; CHECK-NEXT: addvl sp, sp, #1
; CHECK-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload
; CHECK-NEXT: ret
%v0 = call <vscale x 4 x bfloat> @llvm.experimental.vector.insert.nxv4bf16.v4bf16(<vscale x 4 x bfloat> %sv0, <4 x bfloat> %v1, i64 0)
%v0 = call <vscale x 4 x bfloat> @llvm.vector.insert.nxv4bf16.v4bf16(<vscale x 4 x bfloat> %sv0, <4 x bfloat> %v1, i64 0)
ret <vscale x 4 x bfloat> %v0
}

@ -546,7 +546,7 @@ define <vscale x 8 x bfloat> @insert_nxv8bf16_nxv8bf16(<vscale x 8 x bfloat> %sv
; CHECK: // %bb.0:
; CHECK-NEXT: mov z0.d, z1.d
; CHECK-NEXT: ret
%v0 = call <vscale x 8 x bfloat> @llvm.experimental.vector.insert.nxv8bf16.nxv8bf16(<vscale x 8 x bfloat> %sv0, <vscale x 8 x bfloat> %sv1, i64 0)
%v0 = call <vscale x 8 x bfloat> @llvm.vector.insert.nxv8bf16.nxv8bf16(<vscale x 8 x bfloat> %sv0, <vscale x 8 x bfloat> %sv1, i64 0)
ret <vscale x 8 x bfloat> %v0
}

@ -557,7 +557,7 @@ define <vscale x 8 x bfloat> @insert_nxv8bf16_v8bf16(<vscale x 8 x bfloat> %sv0,
; CHECK-NEXT: // kill: def $q1 killed $q1 def $z1
; CHECK-NEXT: mov z0.h, p0/m, z1.h
; CHECK-NEXT: ret
%v0 = call <vscale x 8 x bfloat> @llvm.experimental.vector.insert.nxv8bf16.v8bf16(<vscale x 8 x bfloat> %sv0, <8 x bfloat> %v1, i64 0)
%v0 = call <vscale x 8 x bfloat> @llvm.vector.insert.nxv8bf16.v8bf16(<vscale x 8 x bfloat> %sv0, <8 x bfloat> %v1, i64 0)
ret <vscale x 8 x bfloat> %v0
}

@ -567,7 +567,7 @@ define <vscale x 8 x bfloat> @insert_nxv8bf16_nxv4bf16_0(<vscale x 8 x bfloat> %
; CHECK-NEXT: uunpkhi z0.s, z0.h
; CHECK-NEXT: uzp1 z0.h, z1.h, z0.h
; CHECK-NEXT: ret
%v0 = call <vscale x 8 x bfloat> @llvm.experimental.vector.insert.nxv8bf16.nxv4bf16(<vscale x 8 x bfloat> %sv0, <vscale x 4 x bfloat> %sv1, i64 0)
%v0 = call <vscale x 8 x bfloat> @llvm.vector.insert.nxv8bf16.nxv4bf16(<vscale x 8 x bfloat> %sv0, <vscale x 4 x bfloat> %sv1, i64 0)
ret <vscale x 8 x bfloat> %v0
}

@ -577,7 +577,7 @@ define <vscale x 8 x bfloat> @insert_nxv8bf16_nxv4bf16_4(<vscale x 8 x bfloat> %
; CHECK-NEXT: uunpklo z0.s, z0.h
; CHECK-NEXT: uzp1 z0.h, z0.h, z1.h
; CHECK-NEXT: ret
%v0 = call <vscale x 8 x bfloat> @llvm.experimental.vector.insert.nxv8bf16.nxv4bf16(<vscale x 8 x bfloat> %sv0, <vscale x 4 x bfloat> %sv1, i64 4)
%v0 = call <vscale x 8 x bfloat> @llvm.vector.insert.nxv8bf16.nxv4bf16(<vscale x 8 x bfloat> %sv0, <vscale x 4 x bfloat> %sv1, i64 4)
ret <vscale x 8 x bfloat> %v0
}

@ -587,7 +587,7 @@ define <vscale x 4 x bfloat> @insert_nxv4bf16_nxv2bf16_0(<vscale x 4 x bfloat> %
; CHECK-NEXT: uunpkhi z0.d, z0.s
; CHECK-NEXT: uzp1 z0.s, z1.s, z0.s
; CHECK-NEXT: ret
%v0 = call <vscale x 4 x bfloat> @llvm.experimental.vector.insert.nxv4bf16.nxv2bf16(<vscale x 4 x bfloat> %sv0, <vscale x 2 x bfloat> %sv1, i64 0)
%v0 = call <vscale x 4 x bfloat> @llvm.vector.insert.nxv4bf16.nxv2bf16(<vscale x 4 x bfloat> %sv0, <vscale x 2 x bfloat> %sv1, i64 0)
ret <vscale x 4 x bfloat> %v0
}

@ -597,7 +597,7 @@ define <vscale x 4 x bfloat> @insert_nxv4bf16_nxv2bf16_2(<vscale x 4 x bfloat> %
; CHECK-NEXT: uunpklo z0.d, z0.s
; CHECK-NEXT: uzp1 z0.s, z0.s, z1.s
; CHECK-NEXT: ret
%v0 = call <vscale x 4 x bfloat> @llvm.experimental.vector.insert.nxv4bf16.nxv2bf16(<vscale x 4 x bfloat> %sv0, <vscale x 2 x bfloat> %sv1, i64 2)
%v0 = call <vscale x 4 x bfloat> @llvm.vector.insert.nxv4bf16.nxv2bf16(<vscale x 4 x bfloat> %sv0, <vscale x 2 x bfloat> %sv1, i64 2)
ret <vscale x 4 x bfloat> %v0
}

@ -608,7 +608,7 @@ define <vscale x 16 x i1> @insert_nxv16i1_nxv8i1_0(<vscale x 16 x i1> %vec, <vsc
; CHECK-NEXT: punpkhi p0.h, p0.b
; CHECK-NEXT: uzp1 p0.b, p1.b, p0.b
; CHECK-NEXT: ret
%v0 = call <vscale x 16 x i1> @llvm.experimental.vector.insert.nx16i1.nxv8i1(<vscale x 16 x i1> %vec, <vscale x 8 x i1> %sv, i64 0)
%v0 = call <vscale x 16 x i1> @llvm.vector.insert.nx16i1.nxv8i1(<vscale x 16 x i1> %vec, <vscale x 8 x i1> %sv, i64 0)
ret <vscale x 16 x i1> %v0
}

@ -618,7 +618,7 @@ define <vscale x 16 x i1> @insert_nxv16i1_nxv8i1_8(<vscale x 16 x i1> %vec, <vsc
; CHECK-NEXT: punpklo p0.h, p0.b
; CHECK-NEXT: uzp1 p0.b, p0.b, p1.b
; CHECK-NEXT: ret
%v0 = call <vscale x 16 x i1> @llvm.experimental.vector.insert.nx16i1.nxv8i1(<vscale x 16 x i1> %vec, <vscale x 8 x i1> %sv, i64 8)
%v0 = call <vscale x 16 x i1> @llvm.vector.insert.nx16i1.nxv8i1(<vscale x 16 x i1> %vec, <vscale x 8 x i1> %sv, i64 8)
ret <vscale x 16 x i1> %v0
}

@ -632,7 +632,7 @@ define <vscale x 16 x i1> @insert_nxv16i1_nxv4i1_0(<vscale x 16 x i1> %vec, <vsc
; CHECK-NEXT: uzp1 p1.h, p1.h, p2.h
; CHECK-NEXT: uzp1 p0.b, p1.b, p0.b
; CHECK-NEXT: ret
%v0 = call <vscale x 16 x i1> @llvm.experimental.vector.insert.nx16i1.nxv4i1(<vscale x 16 x i1> %vec, <vscale x 4 x i1> %sv, i64 0)
%v0 = call <vscale x 16 x i1> @llvm.vector.insert.nx16i1.nxv4i1(<vscale x 16 x i1> %vec, <vscale x 4 x i1> %sv, i64 0)
ret <vscale x 16 x i1> %v0
}

@ -645,7 +645,7 @@ define <vscale x 16 x i1> @insert_nxv16i1_nxv4i1_12(<vscale x 16 x i1> %vec, <vs
; CHECK-NEXT: uzp1 p1.h, p2.h, p1.h
; CHECK-NEXT: uzp1 p0.b, p0.b, p1.b
; CHECK-NEXT: ret
%v0 = call <vscale x 16 x i1> @llvm.experimental.vector.insert.nx16i1.nxv4i1(<vscale x 16 x i1> %vec, <vscale x 4 x i1> %sv, i64 12)
%v0 = call <vscale x 16 x i1> @llvm.vector.insert.nx16i1.nxv4i1(<vscale x 16 x i1> %vec, <vscale x 4 x i1> %sv, i64 12)
ret <vscale x 16 x i1> %v0
}

@ -657,7 +657,7 @@ define <vscale x 16 x i1> @insert_nxv16i1_nxv4i1_into_zero(<vscale x 4 x i1> %sv
; CHECK-NEXT: uzp1 p0.h, p0.h, p1.h
; CHECK-NEXT: uzp1 p0.b, p0.b, p1.b
; CHECK-NEXT: ret
%v0 = call <vscale x 16 x i1> @llvm.experimental.vector.insert.nx16i1.nxv4i1(<vscale x 16 x i1> zeroinitializer, <vscale x 4 x i1> %sv, i64 0)
%v0 = call <vscale x 16 x i1> @llvm.vector.insert.nx16i1.nxv4i1(<vscale x 16 x i1> zeroinitializer, <vscale x 4 x i1> %sv, i64 0)
ret <vscale x 16 x i1> %v0
}

@ -667,7 +667,7 @@ define <vscale x 16 x i1> @insert_nxv16i1_nxv4i1_into_poison(<vscale x 4 x i1> %
; CHECK-NEXT: uzp1 p0.h, p0.h, p0.h
; CHECK-NEXT: uzp1 p0.b, p0.b, p0.b
; CHECK-NEXT: ret
%v0 = call <vscale x 16 x i1> @llvm.experimental.vector.insert.nx16i1.nxv4i1(<vscale x 16 x i1> poison, <vscale x 4 x i1> %sv, i64 0)
%v0 = call <vscale x 16 x i1> @llvm.vector.insert.nx16i1.nxv4i1(<vscale x 16 x i1> poison, <vscale x 4 x i1> %sv, i64 0)
ret <vscale x 16 x i1> %v0
}

@ -677,7 +677,7 @@ define <vscale x 2 x i1> @insert_nxv2i1_v8i1_const_true_into_undef() vscale_rang
; CHECK: // %bb.0:
; CHECK-NEXT: ptrue p0.d
; CHECK-NEXT: ret
%v0 = call <vscale x 2 x i1> @llvm.experimental.vector.insert.nxv2i1.v8i1 (<vscale x 2 x i1> undef, <8 x i1> <i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1>, i64 0)
%v0 = call <vscale x 2 x i1> @llvm.vector.insert.nxv2i1.v8i1 (<vscale x 2 x i1> undef, <8 x i1> <i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1>, i64 0)
ret <vscale x 2 x i1> %v0
}

@ -686,7 +686,7 @@ define <vscale x 4 x i1> @insert_nxv4i1_v16i1_const_true_into_undef() vscale_ran
; CHECK: // %bb.0:
; CHECK-NEXT: ptrue p0.s
; CHECK-NEXT: ret
%v0 = call <vscale x 4 x i1> @llvm.experimental.vector.insert.nxv4i1.v16i1 (<vscale x 4 x i1> undef, <16 x i1> <i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1>, i64 0)
%v0 = call <vscale x 4 x i1> @llvm.vector.insert.nxv4i1.v16i1 (<vscale x 4 x i1> undef, <16 x i1> <i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1>, i64 0)
ret <vscale x 4 x i1> %v0
}

@ -695,7 +695,7 @@ define <vscale x 8 x i1> @insert_nxv8i1_v32i1_const_true_into_undef() vscale_ran
; CHECK: // %bb.0:
; CHECK-NEXT: ptrue p0.h
; CHECK-NEXT: ret
%v0 = call <vscale x 8 x i1> @llvm.experimental.vector.insert.nxv8i1.v32i1 (<vscale x 8 x i1> undef, <32 x i1> <i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1>, i64 0)
%v0 = call <vscale x 8 x i1> @llvm.vector.insert.nxv8i1.v32i1 (<vscale x 8 x i1> undef, <32 x i1> <i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1>, i64 0)
ret <vscale x 8 x i1> %v0
}

@ -704,49 +704,49 @@ define <vscale x 16 x i1> @insert_nxv16i1_v64i1_const_true_into_undef() vscale_r
; CHECK: // %bb.0:
; CHECK-NEXT: ptrue p0.b
; CHECK-NEXT: ret
%v0 = call <vscale x 16 x i1> @llvm.experimental.vector.insert.nxv16i1.v64i1 (<vscale x 16 x i1> undef, <64 x i1> <i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1>, i64 0)
%v0 = call <vscale x 16 x i1> @llvm.vector.insert.nxv16i1.v64i1 (<vscale x 16 x i1> undef, <64 x i1> <i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1>, i64 0)
ret <vscale x 16 x i1> %v0
}

attributes #0 = { vscale_range(2,2) }

declare <vscale x 16 x i8> @llvm.experimental.vector.insert.nxv16i8.v16i8(<vscale x 16 x i8>, <16 x i8>, i64)
declare <vscale x 16 x i8> @llvm.vector.insert.nxv16i8.v16i8(<vscale x 16 x i8>, <16 x i8>, i64)

declare <vscale x 6 x i16> @llvm.experimental.vector.insert.nxv6i16.nxv1i16(<vscale x 6 x i16>, <vscale x 1 x i16>, i64)
declare <vscale x 8 x i16> @llvm.experimental.vector.insert.nxv8i16.nxv2i16(<vscale x 8 x i16>, <vscale x 2 x i16>, i64)
declare <vscale x 8 x i16> @llvm.experimental.vector.insert.nxv8i16.v8i16(<vscale x 8 x i16>, <8 x i16>, i64)
declare <vscale x 6 x i16> @llvm.vector.insert.nxv6i16.nxv1i16(<vscale x 6 x i16>, <vscale x 1 x i16>, i64)
declare <vscale x 8 x i16> @llvm.vector.insert.nxv8i16.nxv2i16(<vscale x 8 x i16>, <vscale x 2 x i16>, i64)
declare <vscale x 8 x i16> @llvm.vector.insert.nxv8i16.v8i16(<vscale x 8 x i16>, <8 x i16>, i64)

declare <vscale x 3 x i32> @llvm.experimental.vector.insert.nxv3i32.nxv2i32(<vscale x 3 x i32>, <vscale x 2 x i32>, i64)
declare <vscale x 4 x i32> @llvm.experimental.vector.insert.nxv4i32.nxv1i32(<vscale x 4 x i32>, <vscale x 1 x i32>, i64)
declare <vscale x 4 x i32> @llvm.experimental.vector.insert.nxv4i32.v4i32(<vscale x 4 x i32>, <4 x i32>, i64)
declare <vscale x 12 x i32> @llvm.experimental.vector.insert.nxv4i32.nxv12i32(<vscale x 12 x i32>, <vscale x 4 x i32>, i64)
declare <vscale x 6 x i32> @llvm.experimental.vector.insert.nxv6i32.nxv2i32(<vscale x 6 x i32>, <vscale x 2 x i32>, i64)
declare <vscale x 6 x i32> @llvm.experimental.vector.insert.nxv6i32.nxv3i32(<vscale x 6 x i32>, <vscale x 3 x i32>, i64)
declare <vscale x 3 x i32> @llvm.vector.insert.nxv3i32.nxv2i32(<vscale x 3 x i32>, <vscale x 2 x i32>, i64)
declare <vscale x 4 x i32> @llvm.vector.insert.nxv4i32.nxv1i32(<vscale x 4 x i32>, <vscale x 1 x i32>, i64)
declare <vscale x 4 x i32> @llvm.vector.insert.nxv4i32.v4i32(<vscale x 4 x i32>, <4 x i32>, i64)
declare <vscale x 12 x i32> @llvm.vector.insert.nxv4i32.nxv12i32(<vscale x 12 x i32>, <vscale x 4 x i32>, i64)
declare <vscale x 6 x i32> @llvm.vector.insert.nxv6i32.nxv2i32(<vscale x 6 x i32>, <vscale x 2 x i32>, i64)
declare <vscale x 6 x i32> @llvm.vector.insert.nxv6i32.nxv3i32(<vscale x 6 x i32>, <vscale x 3 x i32>, i64)

declare <vscale x 2 x bfloat> @llvm.experimental.vector.insert.nxv2bf16.nxv2bf16(<vscale x 2 x bfloat>, <vscale x 2 x bfloat>, i64)
declare <vscale x 4 x bfloat> @llvm.experimental.vector.insert.nxv4bf16.nxv2bf16(<vscale x 4 x bfloat>, <vscale x 2 x bfloat>, i64)
declare <vscale x 4 x bfloat> @llvm.experimental.vector.insert.nxv4bf16.nxv4bf16(<vscale x 4 x bfloat>, <vscale x 4 x bfloat>, i64)
declare <vscale x 4 x bfloat> @llvm.experimental.vector.insert.nxv4bf16.v4bf16(<vscale x 4 x bfloat>, <4 x bfloat>, i64)
declare <vscale x 8 x bfloat> @llvm.experimental.vector.insert.nxv8bf16.nxv8bf16(<vscale x 8 x bfloat>, <vscale x 8 x bfloat>, i64)
declare <vscale x 8 x bfloat> @llvm.experimental.vector.insert.nxv8bf16.nxv4bf16(<vscale x 8 x bfloat>, <vscale x 4 x bfloat>, i64)
declare <vscale x 8 x bfloat> @llvm.experimental.vector.insert.nxv8bf16.v8bf16(<vscale x 8 x bfloat>, <8 x bfloat>, i64)
declare <vscale x 2 x bfloat> @llvm.vector.insert.nxv2bf16.nxv2bf16(<vscale x 2 x bfloat>, <vscale x 2 x bfloat>, i64)
declare <vscale x 4 x bfloat> @llvm.vector.insert.nxv4bf16.nxv2bf16(<vscale x 4 x bfloat>, <vscale x 2 x bfloat>, i64)
declare <vscale x 4 x bfloat> @llvm.vector.insert.nxv4bf16.nxv4bf16(<vscale x 4 x bfloat>, <vscale x 4 x bfloat>, i64)
declare <vscale x 4 x bfloat> @llvm.vector.insert.nxv4bf16.v4bf16(<vscale x 4 x bfloat>, <4 x bfloat>, i64)
declare <vscale x 8 x bfloat> @llvm.vector.insert.nxv8bf16.nxv8bf16(<vscale x 8 x bfloat>, <vscale x 8 x bfloat>, i64)
declare <vscale x 8 x bfloat> @llvm.vector.insert.nxv8bf16.nxv4bf16(<vscale x 8 x bfloat>, <vscale x 4 x bfloat>, i64)
declare <vscale x 8 x bfloat> @llvm.vector.insert.nxv8bf16.v8bf16(<vscale x 8 x bfloat>, <8 x bfloat>, i64)

declare <vscale x 2 x i64> @llvm.experimental.vector.insert.nxv2i64.v2i64(<vscale x 2 x i64>, <2 x i64>, i64)
declare <vscale x 2 x i64> @llvm.experimental.vector.insert.nxv2i64.v4i64(<vscale x 2 x i64>, <4 x i64>, i64)
declare <vscale x 16 x i64> @llvm.experimental.vector.insert.nxv8i64.nxv16i64(<vscale x 16 x i64>, <vscale x 8 x i64>, i64)
declare <vscale x 16 x i64> @llvm.experimental.vector.insert.v2i64.nxv16i64(<vscale x 16 x i64>, <2 x i64>, i64)
declare <vscale x 2 x i64> @llvm.vector.insert.nxv2i64.v2i64(<vscale x 2 x i64>, <2 x i64>, i64)
declare <vscale x 2 x i64> @llvm.vector.insert.nxv2i64.v4i64(<vscale x 2 x i64>, <4 x i64>, i64)
declare <vscale x 16 x i64> @llvm.vector.insert.nxv8i64.nxv16i64(<vscale x 16 x i64>, <vscale x 8 x i64>, i64)
declare <vscale x 16 x i64> @llvm.vector.insert.v2i64.nxv16i64(<vscale x 16 x i64>, <2 x i64>, i64)

declare <vscale x 4 x half> @llvm.experimental.vector.insert.nxv4f16.nxv2f16(<vscale x 4 x half>, <vscale x 2 x half>, i64)
declare <vscale x 8 x half> @llvm.experimental.vector.insert.nxv8f16.nxv2f16(<vscale x 8 x half>, <vscale x 2 x half>, i64)
declare <vscale x 8 x half> @llvm.experimental.vector.insert.nxv8f16.nxv4f16(<vscale x 8 x half>, <vscale x 4 x half>, i64)
declare <vscale x 4 x half> @llvm.vector.insert.nxv4f16.nxv2f16(<vscale x 4 x half>, <vscale x 2 x half>, i64)
declare <vscale x 8 x half> @llvm.vector.insert.nxv8f16.nxv2f16(<vscale x 8 x half>, <vscale x 2 x half>, i64)
declare <vscale x 8 x half> @llvm.vector.insert.nxv8f16.nxv4f16(<vscale x 8 x half>, <vscale x 4 x half>, i64)

declare <vscale x 3 x float> @llvm.experimental.vector.insert.nxv3f32.nxv2f32(<vscale x 3 x float>, <vscale x 2 x float>, i64)
declare <vscale x 4 x float> @llvm.experimental.vector.insert.nxv4f32.nxv1f32(<vscale x 4 x float>, <vscale x 1 x float>, i64)
declare <vscale x 4 x float> @llvm.experimental.vector.insert.nxv4f32.nxv2f32(<vscale x 4 x float>, <vscale x 2 x float>, i64)
declare <vscale x 3 x float> @llvm.vector.insert.nxv3f32.nxv2f32(<vscale x 3 x float>, <vscale x 2 x float>, i64)
declare <vscale x 4 x float> @llvm.vector.insert.nxv4f32.nxv1f32(<vscale x 4 x float>, <vscale x 1 x float>, i64)
declare <vscale x 4 x float> @llvm.vector.insert.nxv4f32.nxv2f32(<vscale x 4 x float>, <vscale x 2 x float>, i64)

declare <vscale x 2 x i1> @llvm.experimental.vector.insert.nxv2i1.v8i1(<vscale x 2 x i1>, <8 x i1>, i64)
declare <vscale x 4 x i1> @llvm.experimental.vector.insert.nxv4i1.v16i1(<vscale x 4 x i1>, <16 x i1>, i64)
declare <vscale x 8 x i1> @llvm.experimental.vector.insert.nxv8i1.v32i1(<vscale x 8 x i1>, <32 x i1>, i64)
declare <vscale x 16 x i1> @llvm.experimental.vector.insert.nx16i1.nxv4i1(<vscale x 16 x i1>, <vscale x 4 x i1>, i64)
declare <vscale x 16 x i1> @llvm.experimental.vector.insert.nx16i1.nxv8i1(<vscale x 16 x i1>, <vscale x 8 x i1>, i64)
declare <vscale x 16 x i1> @llvm.experimental.vector.insert.nxv16i1.v64i1(<vscale x 16 x i1>, <64 x i1>, i64)
declare <vscale x 2 x i1> @llvm.vector.insert.nxv2i1.v8i1(<vscale x 2 x i1>, <8 x i1>, i64)
declare <vscale x 4 x i1> @llvm.vector.insert.nxv4i1.v16i1(<vscale x 4 x i1>, <16 x i1>, i64)
declare <vscale x 8 x i1> @llvm.vector.insert.nxv8i1.v32i1(<vscale x 8 x i1>, <32 x i1>, i64)
declare <vscale x 16 x i1> @llvm.vector.insert.nx16i1.nxv4i1(<vscale x 16 x i1>, <vscale x 4 x i1>, i64)
declare <vscale x 16 x i1> @llvm.vector.insert.nx16i1.nxv8i1(<vscale x 16 x i1>, <vscale x 8 x i1>, i64)
declare <vscale x 16 x i1> @llvm.vector.insert.nxv16i1.v64i1(<vscale x 16 x i1>, <64 x i1>, i64)

@ -588,7 +588,7 @@ define dso_local <vscale x 2 x double> @dupq_ld1rqd_f64() {
; CHECK-NEXT: ldr q0, [x8, :lo12:.LCPI49_0]
; CHECK-NEXT: mov z0.q, q0
; CHECK-NEXT: ret
%1 = tail call fast <vscale x 2 x double> @llvm.experimental.vector.insert.nxv2f64.v2f64(<vscale x 2 x double> undef, <2 x double> <double 1.000000e+00, double 2.000000e+00>, i64 0)
%1 = tail call fast <vscale x 2 x double> @llvm.vector.insert.nxv2f64.v2f64(<vscale x 2 x double> undef, <2 x double> <double 1.000000e+00, double 2.000000e+00>, i64 0)
%2 = tail call fast <vscale x 2 x double> @llvm.aarch64.sve.dupq.lane.nxv2f64(<vscale x 2 x double> %1, i64 0)
ret <vscale x 2 x double> %2
}

@ -600,7 +600,7 @@ define dso_local <vscale x 4 x float> @dupq_ld1rqw_f32() {
; CHECK-NEXT: ldr q0, [x8, :lo12:.LCPI50_0]
; CHECK-NEXT: mov z0.q, q0
; CHECK-NEXT: ret
%1 = tail call fast <vscale x 4 x float> @llvm.experimental.vector.insert.nxv4f32.v4f32(<vscale x 4 x float> undef, <4 x float> <float 1.000000e+00, float 2.000000e+00, float 3.000000e+00, float 4.000000e+00>, i64 0)
%1 = tail call fast <vscale x 4 x float> @llvm.vector.insert.nxv4f32.v4f32(<vscale x 4 x float> undef, <4 x float> <float 1.000000e+00, float 2.000000e+00, float 3.000000e+00, float 4.000000e+00>, i64 0)
%2 = tail call fast <vscale x 4 x float> @llvm.aarch64.sve.dupq.lane.nxv4f32(<vscale x 4 x float> %1, i64 0)
ret <vscale x 4 x float> %2
}

@ -612,7 +612,7 @@ define dso_local <vscale x 8 x half> @dupq_ld1rqh_f16() {
; CHECK-NEXT: ldr q0, [x8, :lo12:.LCPI51_0]
; CHECK-NEXT: mov z0.q, q0
; CHECK-NEXT: ret
%1 = tail call fast <vscale x 8 x half> @llvm.experimental.vector.insert.nxv8f16.v8f16(<vscale x 8 x half> undef, <8 x half> <half 0xH3C00, half 0xH4000, half 0xH4200, half 0xH4400, half 0xH4500, half 0xH4600, half 0xH4700, half 0xH4800>, i64 0)
%1 = tail call fast <vscale x 8 x half> @llvm.vector.insert.nxv8f16.v8f16(<vscale x 8 x half> undef, <8 x half> <half 0xH3C00, half 0xH4000, half 0xH4200, half 0xH4400, half 0xH4500, half 0xH4600, half 0xH4700, half 0xH4800>, i64 0)
%2 = tail call fast <vscale x 8 x half> @llvm.aarch64.sve.dupq.lane.nxv8f16(<vscale x 8 x half> %1, i64 0)
ret <vscale x 8 x half> %2
}

@ -624,7 +624,7 @@ define dso_local <vscale x 8 x bfloat> @dupq_ld1rqh_bf16() #0 {
; CHECK-NEXT: ldr q0, [x8, :lo12:.LCPI52_0]
; CHECK-NEXT: mov z0.q, q0
; CHECK-NEXT: ret
%1 = call <vscale x 8 x bfloat> @llvm.experimental.vector.insert.nxv8bf16.v8bf16(<vscale x 8 x bfloat> undef, <8 x bfloat> <bfloat 1.000e+00, bfloat 2.000e+00, bfloat 3.000e+00, bfloat 4.000e+00, bfloat 5.000e+00, bfloat 6.000e+00, bfloat 7.000e+00, bfloat 8.000e+00>, i64 0)
%1 = call <vscale x 8 x bfloat> @llvm.vector.insert.nxv8bf16.v8bf16(<vscale x 8 x bfloat> undef, <8 x bfloat> <bfloat 1.000e+00, bfloat 2.000e+00, bfloat 3.000e+00, bfloat 4.000e+00, bfloat 5.000e+00, bfloat 6.000e+00, bfloat 7.000e+00, bfloat 8.000e+00>, i64 0)
%2 = call <vscale x 8 x bfloat> @llvm.aarch64.sve.dupq.lane.nxv8bf16(<vscale x 8 x bfloat> %1, i64 0)
ret <vscale x 8 x bfloat> %2
}

@ -636,7 +636,7 @@ define dso_local <vscale x 2 x i64> @dupq_ld1rqd_i64() {
; CHECK-NEXT: ldr q0, [x8, :lo12:.LCPI53_0]
; CHECK-NEXT: mov z0.q, q0
; CHECK-NEXT: ret
%1 = tail call <vscale x 2 x i64> @llvm.experimental.vector.insert.nxv2i64.v2i64(<vscale x 2 x i64> undef, <2 x i64> <i64 1, i64 2>, i64 0)
%1 = tail call <vscale x 2 x i64> @llvm.vector.insert.nxv2i64.v2i64(<vscale x 2 x i64> undef, <2 x i64> <i64 1, i64 2>, i64 0)
%2 = tail call <vscale x 2 x i64> @llvm.aarch64.sve.dupq.lane.nxv2i64(<vscale x 2 x i64> %1, i64 0)
ret <vscale x 2 x i64> %2
}

@ -648,7 +648,7 @@ define dso_local <vscale x 4 x i32> @dupq_ld1rqd_i32() {
; CHECK-NEXT: ldr q0, [x8, :lo12:.LCPI54_0]
; CHECK-NEXT: mov z0.q, q0
; CHECK-NEXT: ret
%1 = tail call <vscale x 4 x i32> @llvm.experimental.vector.insert.nxv4i32.v4i32(<vscale x 4 x i32> undef, <4 x i32> <i32 1, i32 2, i32 3, i32 4>, i64 0)
%1 = tail call <vscale x 4 x i32> @llvm.vector.insert.nxv4i32.v4i32(<vscale x 4 x i32> undef, <4 x i32> <i32 1, i32 2, i32 3, i32 4>, i64 0)
%2 = tail call <vscale x 4 x i32> @llvm.aarch64.sve.dupq.lane.nxv4i32(<vscale x 4 x i32> %1, i64 0)
ret <vscale x 4 x i32> %2
}

@ -660,7 +660,7 @@ define dso_local <vscale x 8 x i16> @dupq_ld1rqd_i16() {
; CHECK-NEXT: ldr q0, [x8, :lo12:.LCPI55_0]
; CHECK-NEXT: mov z0.q, q0
; CHECK-NEXT: ret
%1 = tail call <vscale x 8 x i16> @llvm.experimental.vector.insert.nxv8i16.v8i16(<vscale x 8 x i16> undef, <8 x i16> <i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7, i16 8>, i64 0)
%1 = tail call <vscale x 8 x i16> @llvm.vector.insert.nxv8i16.v8i16(<vscale x 8 x i16> undef, <8 x i16> <i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7, i16 8>, i64 0)
%2 = tail call <vscale x 8 x i16> @llvm.aarch64.sve.dupq.lane.nxv8i16(<vscale x 8 x i16> %1, i64 0)
ret <vscale x 8 x i16> %2
}

@ -672,7 +672,7 @@ define dso_local <vscale x 16 x i8> @dupq_ld1rqd_i8() {
; CHECK-NEXT: ldr q0, [x8, :lo12:.LCPI56_0]
; CHECK-NEXT: mov z0.q, q0
; CHECK-NEXT: ret
%1 = tail call <vscale x 16 x i8> @llvm.experimental.vector.insert.nxv16i8.v16i8(<vscale x 16 x i8> undef, <16 x i8> <i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15, i8 16>, i64 0)
%1 = tail call <vscale x 16 x i8> @llvm.vector.insert.nxv16i8.v16i8(<vscale x 16 x i8> undef, <16 x i8> <i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15, i8 16>, i64 0)
%2 = tail call <vscale x 16 x i8> @llvm.aarch64.sve.dupq.lane.nxv16i8(<vscale x 16 x i8> %1, i64 0)
ret <vscale x 16 x i8> %2
}

@ -2559,14 +2559,14 @@ declare <vscale x 2 x float> @llvm.aarch64.sve.zip2.nxv2f32(<vscale x 2 x float>
declare <vscale x 4 x float> @llvm.aarch64.sve.zip2.nxv4f32(<vscale x 4 x float>, <vscale x 4 x float>)
declare <vscale x 2 x double> @llvm.aarch64.sve.zip2.nxv2f64(<vscale x 2 x double>, <vscale x 2 x double>)

declare <vscale x 2 x double> @llvm.experimental.vector.insert.nxv2f64.v2f64(<vscale x 2 x double>, <2 x double>, i64)
declare <vscale x 4 x float> @llvm.experimental.vector.insert.nxv4f32.v4f32(<vscale x 4 x float>, <4 x float>, i64)
declare <vscale x 8 x half> @llvm.experimental.vector.insert.nxv8f16.v8f16(<vscale x 8 x half>, <8 x half>, i64)
declare <vscale x 2 x i64> @llvm.experimental.vector.insert.nxv2i64.v2i64(<vscale x 2 x i64>, <2 x i64>, i64)
declare <vscale x 4 x i32> @llvm.experimental.vector.insert.nxv4i32.v4i32(<vscale x 4 x i32>, <4 x i32>, i64)
declare <vscale x 8 x i16> @llvm.experimental.vector.insert.nxv8i16.v8i16(<vscale x 8 x i16>, <8 x i16>, i64)
declare <vscale x 16 x i8> @llvm.experimental.vector.insert.nxv16i8.v16i8(<vscale x 16 x i8>, <16 x i8>, i64)
declare <vscale x 8 x bfloat> @llvm.experimental.vector.insert.nxv8bf16.v8bf16(<vscale x 8 x bfloat>, <8 x bfloat>, i64)
declare <vscale x 2 x double> @llvm.vector.insert.nxv2f64.v2f64(<vscale x 2 x double>, <2 x double>, i64)
declare <vscale x 4 x float> @llvm.vector.insert.nxv4f32.v4f32(<vscale x 4 x float>, <4 x float>, i64)
declare <vscale x 8 x half> @llvm.vector.insert.nxv8f16.v8f16(<vscale x 8 x half>, <8 x half>, i64)
declare <vscale x 2 x i64> @llvm.vector.insert.nxv2i64.v2i64(<vscale x 2 x i64>, <2 x i64>, i64)
declare <vscale x 4 x i32> @llvm.vector.insert.nxv4i32.v4i32(<vscale x 4 x i32>, <4 x i32>, i64)
declare <vscale x 8 x i16> @llvm.vector.insert.nxv8i16.v8i16(<vscale x 8 x i16>, <8 x i16>, i64)
declare <vscale x 16 x i8> @llvm.vector.insert.nxv16i8.v16i8(<vscale x 16 x i8>, <16 x i8>, i64)
declare <vscale x 8 x bfloat> @llvm.vector.insert.nxv8bf16.v8bf16(<vscale x 8 x bfloat>, <8 x bfloat>, i64)

; +bf16 is required for the bfloat version.
attributes #0 = { "target-features"="+sve,+bf16" }

@ -8,13 +8,13 @@ define <4 x i32> @sve_no_typesize_warning(<vscale x 8 x i16> %a, <4 x i16> %b) #
; CHECK: // %bb.0:
; CHECK-NEXT: uaddl v0.4s, v0.4h, v1.4h
; CHECK-NEXT: ret
%a.lo = call <4 x i16> @llvm.experimental.vector.extract.v4i16.nxv8i16(<vscale x 8 x i16> %a, i64 0)
%a.lo = call <4 x i16> @llvm.vector.extract.v4i16.nxv8i16(<vscale x 8 x i16> %a, i64 0)
%a.lo.zext = zext <4 x i16> %a.lo to <4 x i32>
%b.zext = zext <4 x i16> %b to <4 x i32>
%add = add <4 x i32> %a.lo.zext, %b.zext
ret <4 x i32> %add
}

declare <4 x i16> @llvm.experimental.vector.extract.v4i16.nxv8i16(<vscale x 8 x i16>, i64)
declare <4 x i16> @llvm.vector.extract.v4i16.nxv8i16(<vscale x 8 x i16>, i64)

attributes #0 = { "target-features"="+sve" }

@ -11,7 +11,7 @@ define <vscale x 8 x i1> @masked_load_sext_i8i16(i8* %ap, <vscale x 16 x i8> %b)
; CHECK-NEXT: ret
%p0 = call <vscale x 16 x i1> @llvm.aarch64.sve.ptrue.nxv16i1(i32 10)
%cmp = call <vscale x 16 x i1> @llvm.aarch64.sve.cmpeq.nxv16i8(<vscale x 16 x i1> %p0, <vscale x 16 x i8> %b, <vscale x 16 x i8> zeroinitializer)
%extract = call <vscale x 8 x i1> @llvm.experimental.vector.extract.nxv8i1.nxv16i1(<vscale x 16 x i1> %cmp, i64 0)
%extract = call <vscale x 8 x i1> @llvm.vector.extract.nxv8i1.nxv16i1(<vscale x 16 x i1> %cmp, i64 0)
%ext1 = sext <vscale x 8 x i1> %extract to <vscale x 8 x i16>
%p1 = call <vscale x 8 x i1> @llvm.aarch64.sve.ptrue.nxv8i1(i32 10)
%cmp1 = call <vscale x 8 x i1> @llvm.aarch64.sve.cmpne.nxv8i16(<vscale x 8 x i1> %p1, <vscale x 8 x i16> %ext1, <vscale x 8 x i16> zeroinitializer)

@ -30,7 +30,7 @@ define <vscale x 8 x i1> @masked_load_sext_i8i16_ptrue_vl(i8* %ap, <vscale x 16
; CHECK-NEXT: ret
%p0 = call <vscale x 16 x i1> @llvm.aarch64.sve.ptrue.nxv16i1(i32 11)
%cmp = call <vscale x 16 x i1> @llvm.aarch64.sve.cmpeq.nxv16i8(<vscale x 16 x i1> %p0, <vscale x 16 x i8> %b, <vscale x 16 x i8> zeroinitializer)
%extract = call <vscale x 8 x i1> @llvm.experimental.vector.extract.nxv8i1.nxv16i1(<vscale x 16 x i1> %cmp, i64 0)
%extract = call <vscale x 8 x i1> @llvm.vector.extract.nxv8i1.nxv16i1(<vscale x 16 x i1> %cmp, i64 0)
%ext1 = sext <vscale x 8 x i1> %extract to <vscale x 8 x i16>
%p1 = call <vscale x 8 x i1> @llvm.aarch64.sve.ptrue.nxv8i1(i32 10)
%cmp1 = call <vscale x 8 x i1> @llvm.aarch64.sve.cmpne.nxv8i16(<vscale x 8 x i1> %p1, <vscale x 8 x i16> %ext1, <vscale x 8 x i16> zeroinitializer)

@ -47,7 +47,7 @@ define <vscale x 8 x i1> @masked_load_sext_i8i16_parg(i8* %ap, <vscale x 16 x i8
; CHECK-NEXT: and p0.b, p0/z, p0.b, p1.b
; CHECK-NEXT: ret
%cmp = call <vscale x 16 x i1> @llvm.aarch64.sve.cmpeq.nxv16i8(<vscale x 16 x i1> %p0, <vscale x 16 x i8> %b, <vscale x 16 x i8> zeroinitializer)
%extract = call <vscale x 8 x i1> @llvm.experimental.vector.extract.nxv8i1.nxv16i1(<vscale x 16 x i1> %cmp, i64 0)
%extract = call <vscale x 8 x i1> @llvm.vector.extract.nxv8i1.nxv16i1(<vscale x 16 x i1> %cmp, i64 0)
%ext1 = sext <vscale x 8 x i1> %extract to <vscale x 8 x i16>
%p1 = call <vscale x 8 x i1> @llvm.aarch64.sve.ptrue.nxv8i1(i32 10)
%cmp1 = call <vscale x 8 x i1> @llvm.aarch64.sve.cmpne.nxv8i16(<vscale x 8 x i1> %p1, <vscale x 8 x i16> %ext1, <vscale x 8 x i16> zeroinitializer)

@ -64,7 +64,7 @@ define <vscale x 4 x i1> @masked_load_sext_i8i32(i8* %ap, <vscale x 16 x i8> %b)
; CHECK-NEXT: ret
%p0 = call <vscale x 16 x i1> @llvm.aarch64.sve.ptrue.nxv16i1(i32 10)
%cmp = call <vscale x 16 x i1> @llvm.aarch64.sve.cmpeq.nxv16i8(<vscale x 16 x i1> %p0, <vscale x 16 x i8> %b, <vscale x 16 x i8> zeroinitializer)
%extract = call <vscale x 4 x i1> @llvm.experimental.vector.extract.nxv4i1.nxv16i1(<vscale x 16 x i1> %cmp, i64 0)
%extract = call <vscale x 4 x i1> @llvm.vector.extract.nxv4i1.nxv16i1(<vscale x 16 x i1> %cmp, i64 0)
%ext1 = sext <vscale x 4 x i1> %extract to <vscale x 4 x i32>
%p1 = call <vscale x 4 x i1> @llvm.aarch64.sve.ptrue.nxv4i1(i32 10)
%cmp1 = call <vscale x 4 x i1> @llvm.aarch64.sve.cmpne.nxv4i32(<vscale x 4 x i1> %p1, <vscale x 4 x i32> %ext1, <vscale x 4 x i32> zeroinitializer)

@ -84,7 +84,7 @@ define <vscale x 4 x i1> @masked_load_sext_i8i32_ptrue_vl(i8* %ap, <vscale x 16
; CHECK-NEXT: ret
%p0 = call <vscale x 16 x i1> @llvm.aarch64.sve.ptrue.nxv16i1(i32 11)
%cmp = call <vscale x 16 x i1> @llvm.aarch64.sve.cmpeq.nxv16i8(<vscale x 16 x i1> %p0, <vscale x 16 x i8> %b, <vscale x 16 x i8> zeroinitializer)
%extract = call <vscale x 4 x i1> @llvm.experimental.vector.extract.nxv4i1.nxv16i1(<vscale x 16 x i1> %cmp, i64 0)
%extract = call <vscale x 4 x i1> @llvm.vector.extract.nxv4i1.nxv16i1(<vscale x 16 x i1> %cmp, i64 0)
%ext1 = sext <vscale x 4 x i1> %extract to <vscale x 4 x i32>
%p1 = call <vscale x 4 x i1> @llvm.aarch64.sve.ptrue.nxv4i1(i32 10)
%cmp1 = call <vscale x 4 x i1> @llvm.aarch64.sve.cmpne.nxv4i32(<vscale x 4 x i1> %p1, <vscale x 4 x i32> %ext1, <vscale x 4 x i32> zeroinitializer)

@ -102,7 +102,7 @@ define <vscale x 4 x i1> @masked_load_sext_i8i32_parg(i8* %ap, <vscale x 16 x i8
; CHECK-NEXT: and p0.b, p0/z, p0.b, p1.b
; CHECK-NEXT: ret
%cmp = call <vscale x 16 x i1> @llvm.aarch64.sve.cmpeq.nxv16i8(<vscale x 16 x i1> %p0, <vscale x 16 x i8> %b, <vscale x 16 x i8> zeroinitializer)
%extract = call <vscale x 4 x i1> @llvm.experimental.vector.extract.nxv4i1.nxv16i1(<vscale x 16 x i1> %cmp, i64 0)
%extract = call <vscale x 4 x i1> @llvm.vector.extract.nxv4i1.nxv16i1(<vscale x 16 x i1> %cmp, i64 0)
%ext1 = sext <vscale x 4 x i1> %extract to <vscale x 4 x i32>
%p1 = call <vscale x 4 x i1> @llvm.aarch64.sve.ptrue.nxv4i1(i32 10)
%cmp1 = call <vscale x 4 x i1> @llvm.aarch64.sve.cmpne.nxv4i32(<vscale x 4 x i1> %p1, <vscale x 4 x i32> %ext1, <vscale x 4 x i32> zeroinitializer)

@ -120,7 +120,7 @@ define <vscale x 2 x i1> @masked_load_sext_i8i64(i8* %ap, <vscale x 16 x i8> %b)
; CHECK-NEXT: ret
%p0 = call <vscale x 16 x i1> @llvm.aarch64.sve.ptrue.nxv16i1(i32 10)
%cmp = call <vscale x 16 x i1> @llvm.aarch64.sve.cmpeq.nxv16i8(<vscale x 16 x i1> %p0, <vscale x 16 x i8> %b, <vscale x 16 x i8> zeroinitializer)
%extract = call <vscale x 2 x i1> @llvm.experimental.vector.extract.nxv2i1.nxv16i1(<vscale x 16 x i1> %cmp, i64 0)
%extract = call <vscale x 2 x i1> @llvm.vector.extract.nxv2i1.nxv16i1(<vscale x 16 x i1> %cmp, i64 0)
%ext1 = sext <vscale x 2 x i1> %extract to <vscale x 2 x i64>
%p1 = call <vscale x 2 x i1> @llvm.aarch64.sve.ptrue.nxv2i1(i32 10)
%cmp1 = call <vscale x 2 x i1> @llvm.aarch64.sve.cmpne.nxv2i64(<vscale x 2 x i1> %p1, <vscale x 2 x i64> %ext1, <vscale x 2 x i64> zeroinitializer)

@ -141,7 +141,7 @@ define <vscale x 2 x i1> @masked_load_sext_i8i64_ptrue_vl(i8* %ap, <vscale x 16
; CHECK-NEXT: ret
%p0 = call <vscale x 16 x i1> @llvm.aarch64.sve.ptrue.nxv16i1(i32 11)
%cmp = call <vscale x 16 x i1> @llvm.aarch64.sve.cmpeq.nxv16i8(<vscale x 16 x i1> %p0, <vscale x 16 x i8> %b, <vscale x 16 x i8> zeroinitializer)
%extract = call <vscale x 2 x i1> @llvm.experimental.vector.extract.nxv2i1.nxv16i1(<vscale x 16 x i1> %cmp, i64 0)
%extract = call <vscale x 2 x i1> @llvm.vector.extract.nxv2i1.nxv16i1(<vscale x 16 x i1> %cmp, i64 0)
%ext1 = sext <vscale x 2 x i1> %extract to <vscale x 2 x i64>
%p1 = call <vscale x 2 x i1> @llvm.aarch64.sve.ptrue.nxv2i1(i32 10)
%cmp1 = call <vscale x 2 x i1> @llvm.aarch64.sve.cmpne.nxv2i64(<vscale x 2 x i1> %p1, <vscale x 2 x i64> %ext1, <vscale x 2 x i64> zeroinitializer)

@ -160,7 +160,7 @@ define <vscale x 2 x i1> @masked_load_sext_i8i64_parg(i8* %ap, <vscale x 16 x i8
; CHECK-NEXT: and p0.b, p0/z, p0.b, p1.b
; CHECK-NEXT: ret
%cmp = call <vscale x 16 x i1> @llvm.aarch64.sve.cmpeq.nxv16i8(<vscale x 16 x i1> %p0, <vscale x 16 x i8> %b, <vscale x 16 x i8> zeroinitializer)
%extract = call <vscale x 2 x i1> @llvm.experimental.vector.extract.nxv2i1.nxv16i1(<vscale x 16 x i1> %cmp, i64 0)
%extract = call <vscale x 2 x i1> @llvm.vector.extract.nxv2i1.nxv16i1(<vscale x 16 x i1> %cmp, i64 0)
%ext1 = sext <vscale x 2 x i1> %extract to <vscale x 2 x i64>
%p1 = call <vscale x 2 x i1> @llvm.aarch64.sve.ptrue.nxv2i1(i32 10)
%cmp1 = call <vscale x 2 x i1> @llvm.aarch64.sve.cmpne.nxv2i64(<vscale x 2 x i1> %p1, <vscale x 2 x i64> %ext1, <vscale x 2 x i64> zeroinitializer)

@ -179,7 +179,7 @@ define <vscale x 8 x i1> @masked_load_sext_i8i16_ptrue_all(i8* %ap, <vscale x 16
; CHECK-NEXT: ret
%p0 = call <vscale x 16 x i1> @llvm.aarch64.sve.ptrue.nxv16i1(i32 11)
%cmp = call <vscale x 16 x i1> @llvm.aarch64.sve.cmpeq.nxv16i8(<vscale x 16 x i1> %p0, <vscale x 16 x i8> %b, <vscale x 16 x i8> zeroinitializer)
%extract = call <vscale x 8 x i1> @llvm.experimental.vector.extract.nxv8i1.nxv16i1(<vscale x 16 x i1> %cmp, i64 0)
%extract = call <vscale x 8 x i1> @llvm.vector.extract.nxv8i1.nxv16i1(<vscale x 16 x i1> %cmp, i64 0)
%ext1 = sext <vscale x 8 x i1> %extract to <vscale x 8 x i16>
%p1 = call <vscale x 8 x i1> @llvm.aarch64.sve.ptrue.nxv8i1(i32 10)
%cmp1 = call <vscale x 8 x i1> @llvm.aarch64.sve.cmpne.nxv8i16(<vscale x 8 x i1> %p1, <vscale x 8 x i16> %ext1, <vscale x 8 x i16> zeroinitializer)

@ -199,7 +199,7 @@ define <vscale x 4 x i1> @masked_load_sext_i8i32_ptrue_all(i8* %ap, <vscale x 16
; CHECK-NEXT: ret
%p0 = call <vscale x 16 x i1> @llvm.aarch64.sve.ptrue.nxv16i1(i32 11)
%cmp = call <vscale x 16 x i1> @llvm.aarch64.sve.cmpeq.nxv16i8(<vscale x 16 x i1> %p0, <vscale x 16 x i8> %b, <vscale x 16 x i8> zeroinitializer)
%extract = call <vscale x 4 x i1> @llvm.experimental.vector.extract.nxv4i1.nxv16i1(<vscale x 16 x i1> %cmp, i64 0)
%extract = call <vscale x 4 x i1> @llvm.vector.extract.nxv4i1.nxv16i1(<vscale x 16 x i1> %cmp, i64 0)
%ext1 = sext <vscale x 4 x i1> %extract to <vscale x 4 x i32>
%p1 = call <vscale x 4 x i1> @llvm.aarch64.sve.ptrue.nxv4i1(i32 10)
%cmp1 = call <vscale x 4 x i1> @llvm.aarch64.sve.cmpne.nxv4i32(<vscale x 4 x i1> %p1, <vscale x 4 x i32> %ext1, <vscale x 4 x i32> zeroinitializer)

@ -218,7 +218,7 @@ define <vscale x 2 x i1> @masked_load_sext_i8i64_ptrue_all(i8* %ap, <vscale x 16
; CHECK-NEXT: ret
%p0 = call <vscale x 16 x i1> @llvm.aarch64.sve.ptrue.nxv16i1(i32 31)
%cmp = call <vscale x 16 x i1> @llvm.aarch64.sve.cmpeq.nxv16i8(<vscale x 16 x i1> %p0, <vscale x 16 x i8> %b, <vscale x 16 x i8> zeroinitializer)
%extract = call <vscale x 2 x i1> @llvm.experimental.vector.extract.nxv2i1.nxv16i1(<vscale x 16 x i1> %cmp, i64 0)
%extract = call <vscale x 2 x i1> @llvm.vector.extract.nxv2i1.nxv16i1(<vscale x 16 x i1> %cmp, i64 0)
%ext1 = sext <vscale x 2 x i1> %extract to <vscale x 2 x i64>
%p1 = call <vscale x 2 x i1> @llvm.aarch64.sve.ptrue.nxv2i1(i32 31)
%cmp1 = call <vscale x 2 x i1> @llvm.aarch64.sve.cmpne.nxv2i64(<vscale x 2 x i1> %p1, <vscale x 2 x i64> %ext1, <vscale x 2 x i64> zeroinitializer)

@ -232,9 +232,9 @@ declare <vscale x 8 x i1> @llvm.aarch64.sve.ptrue.nxv8i1(i32)
declare <vscale x 4 x i1> @llvm.aarch64.sve.ptrue.nxv4i1(i32)
declare <vscale x 2 x i1> @llvm.aarch64.sve.ptrue.nxv2i1(i32)

declare <vscale x 8 x i1> @llvm.experimental.vector.extract.nxv8i1.nxv16i1(<vscale x 16 x i1>, i64)
declare <vscale x 4 x i1> @llvm.experimental.vector.extract.nxv4i1.nxv16i1(<vscale x 16 x i1>, i64)
declare <vscale x 2 x i1> @llvm.experimental.vector.extract.nxv2i1.nxv16i1(<vscale x 16 x i1>, i64)
declare <vscale x 8 x i1> @llvm.vector.extract.nxv8i1.nxv16i1(<vscale x 16 x i1>, i64)
declare <vscale x 4 x i1> @llvm.vector.extract.nxv4i1.nxv16i1(<vscale x 16 x i1>, i64)
declare <vscale x 2 x i1> @llvm.vector.extract.nxv2i1.nxv16i1(<vscale x 16 x i1>, i64)

declare <vscale x 8 x i1> @llvm.aarch64.sve.cmpne.nxv8i16(<vscale x 8 x i1>, <vscale x 8 x i16>, <vscale x 8 x i16>)
declare <vscale x 4 x i1> @llvm.aarch64.sve.cmpne.nxv4i32(<vscale x 4 x i1>, <vscale x 4 x i32>, <vscale x 4 x i32>)

@ -11,7 +11,7 @@ define i1 @reduce_or_insert_subvec_into_zero(<vscale x 4 x i1> %in) {
; CHECK-NEXT: ptest p0, p0.b
; CHECK-NEXT: cset w0, ne
; CHECK-NEXT: ret
%t = call <vscale x 16 x i1> @llvm.experimental.vector.insert.nxv16i1.nxv4i1(<vscale x 16 x i1> zeroinitializer, <vscale x 4 x i1> %in, i64 0)
%t = call <vscale x 16 x i1> @llvm.vector.insert.nxv16i1.nxv4i1(<vscale x 16 x i1> zeroinitializer, <vscale x 4 x i1> %in, i64 0)
%res = call i1 @llvm.vector.reduce.or.nxv16i1(<vscale x 16 x i1> %t)
ret i1 %res
}

@ -22,7 +22,7 @@ define i1 @reduce_or_insert_subvec_into_poison(<vscale x 4 x i1> %in) {
; CHECK-NEXT: ptest p0, p0.b
; CHECK-NEXT: cset w0, ne
; CHECK-NEXT: ret
%t = call <vscale x 16 x i1> @llvm.experimental.vector.insert.nxv16i1.nxv4i1(<vscale x 16 x i1> poison, <vscale x 4 x i1> %in, i64 0)
%t = call <vscale x 16 x i1> @llvm.vector.insert.nxv16i1.nxv4i1(<vscale x 16 x i1> poison, <vscale x 4 x i1> %in, i64 0)
%res = call i1 @llvm.vector.reduce.or.nxv16i1(<vscale x 16 x i1> %t)
ret i1 %res
}

@ -38,7 +38,7 @@ define i1 @reduce_or_insert_subvec_into_nonzero(<vscale x 4 x i1> %in, <vscale x
; CHECK-NEXT: ptest p0, p0.b
; CHECK-NEXT: cset w0, ne
; CHECK-NEXT: ret
%t = call <vscale x 16 x i1> @llvm.experimental.vector.insert.nxv16i1.nxv4i1(<vscale x 16 x i1> %vec, <vscale x 4 x i1> %in, i64 0)
%t = call <vscale x 16 x i1> @llvm.vector.insert.nxv16i1.nxv4i1(<vscale x 16 x i1> %vec, <vscale x 4 x i1> %in, i64 0)
%res = call i1 @llvm.vector.reduce.or.nxv16i1(<vscale x 16 x i1> %t)
ret i1 %res
}

@ -57,7 +57,7 @@ define i1 @reduce_and_insert_subvec_into_ones(<vscale x 4 x i1> %in) {
; CHECK-NEXT: ret
%allones.ins = insertelement <vscale x 16 x i1> poison, i1 1, i32 0
%allones = shufflevector <vscale x 16 x i1> %allones.ins, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
%t = call <vscale x 16 x i1> @llvm.experimental.vector.insert.nxv16i1.nxv4i1(<vscale x 16 x i1> %allones, <vscale x 4 x i1> %in, i64 0)
%t = call <vscale x 16 x i1> @llvm.vector.insert.nxv16i1.nxv4i1(<vscale x 16 x i1> %allones, <vscale x 4 x i1> %in, i64 0)
%res = call i1 @llvm.vector.reduce.and.nxv16i1(<vscale x 16 x i1> %t)
ret i1 %res
}

@ -70,7 +70,7 @@ define i1 @reduce_and_insert_subvec_into_poison(<vscale x 4 x i1> %in) {
; CHECK-NEXT: ptest p1, p0.b
; CHECK-NEXT: cset w0, eq
; CHECK-NEXT: ret
%t = call <vscale x 16 x i1> @llvm.experimental.vector.insert.nxv16i1.nxv4i1(<vscale x 16 x i1> poison, <vscale x 4 x i1> %in, i64 0)
%t = call <vscale x 16 x i1> @llvm.vector.insert.nxv16i1.nxv4i1(<vscale x 16 x i1> poison, <vscale x 4 x i1> %in, i64 0)
%res = call i1 @llvm.vector.reduce.and.nxv16i1(<vscale x 16 x i1> %t)
ret i1 %res
}

@ -88,11 +88,11 @@ define i1 @reduce_and_insert_subvec_into_var(<vscale x 4 x i1> %in, <vscale x 16
; CHECK-NEXT: ptest p2, p0.b
; CHECK-NEXT: cset w0, eq
; CHECK-NEXT: ret
%t = call <vscale x 16 x i1> @llvm.experimental.vector.insert.nxv16i1.nxv4i1(<vscale x 16 x i1> %vec, <vscale x 4 x i1> %in, i64 0)
%t = call <vscale x 16 x i1> @llvm.vector.insert.nxv16i1.nxv4i1(<vscale x 16 x i1> %vec, <vscale x 4 x i1> %in, i64 0)
%res = call i1 @llvm.vector.reduce.and.nxv16i1(<vscale x 16 x i1> %t)
ret i1 %res
}

declare i1 @llvm.vector.reduce.and.nxv16i1(<vscale x 16 x i1>)
declare i1 @llvm.vector.reduce.or.nxv16i1(<vscale x 16 x i1>)
declare <vscale x 16 x i1> @llvm.experimental.vector.insert.nxv16i1.nxv4i1(<vscale x 16 x i1>, <vscale x 4 x i1>, i64)
declare <vscale x 16 x i1> @llvm.vector.insert.nxv16i1.nxv4i1(<vscale x 16 x i1>, <vscale x 4 x i1>, i64)

@ -6,7 +6,7 @@ define <vscale x 4 x i32> @extract_nxv8i32_nxv4i32_0(<vscale x 8 x i32> %vec) {
|
|||
; CHECK-LABEL: extract_nxv8i32_nxv4i32_0:
|
||||
; CHECK: # %bb.0:
|
||||
; CHECK-NEXT: ret
|
||||
%c = call <vscale x 4 x i32> @llvm.experimental.vector.extract.nxv4i32.nxv8i32(<vscale x 8 x i32> %vec, i64 0)
|
||||
%c = call <vscale x 4 x i32> @llvm.vector.extract.nxv4i32.nxv8i32(<vscale x 8 x i32> %vec, i64 0)
|
||||
ret <vscale x 4 x i32> %c
|
||||
}
|
||||
|
||||
|
@ -15,7 +15,7 @@ define <vscale x 4 x i32> @extract_nxv8i32_nxv4i32_4(<vscale x 8 x i32> %vec) {
|
|||
; CHECK: # %bb.0:
|
||||
; CHECK-NEXT: vmv2r.v v8, v10
|
||||
; CHECK-NEXT: ret
|
||||
%c = call <vscale x 4 x i32> @llvm.experimental.vector.extract.nxv4i32.nxv8i32(<vscale x 8 x i32> %vec, i64 4)
|
||||
%c = call <vscale x 4 x i32> @llvm.vector.extract.nxv4i32.nxv8i32(<vscale x 8 x i32> %vec, i64 4)
|
||||
ret <vscale x 4 x i32> %c
|
||||
}
|
||||
|
||||
|
@ -23,7 +23,7 @@ define <vscale x 2 x i32> @extract_nxv8i32_nxv2i32_0(<vscale x 8 x i32> %vec) {
|
|||
; CHECK-LABEL: extract_nxv8i32_nxv2i32_0:
|
||||
; CHECK: # %bb.0:
|
||||
; CHECK-NEXT: ret
|
||||
%c = call <vscale x 2 x i32> @llvm.experimental.vector.extract.nxv2i32.nxv8i32(<vscale x 8 x i32> %vec, i64 0)
|
||||
%c = call <vscale x 2 x i32> @llvm.vector.extract.nxv2i32.nxv8i32(<vscale x 8 x i32> %vec, i64 0)
|
||||
ret <vscale x 2 x i32> %c
|
||||
}
|
||||
|
||||
|
@ -32,7 +32,7 @@ define <vscale x 2 x i32> @extract_nxv8i32_nxv2i32_2(<vscale x 8 x i32> %vec) {
|
|||
; CHECK: # %bb.0:
|
||||
; CHECK-NEXT: vmv1r.v v8, v9
|
||||
; CHECK-NEXT: ret
|
||||
%c = call <vscale x 2 x i32> @llvm.experimental.vector.extract.nxv2i32.nxv8i32(<vscale x 8 x i32> %vec, i64 2)
|
||||
%c = call <vscale x 2 x i32> @llvm.vector.extract.nxv2i32.nxv8i32(<vscale x 8 x i32> %vec, i64 2)
|
||||
ret <vscale x 2 x i32> %c
|
||||
}
|
||||
|
||||
|
@ -41,7 +41,7 @@ define <vscale x 2 x i32> @extract_nxv8i32_nxv2i32_4(<vscale x 8 x i32> %vec) {
|
|||
; CHECK: # %bb.0:
|
||||
; CHECK-NEXT: vmv1r.v v8, v10
|
||||
; CHECK-NEXT: ret
|
||||
%c = call <vscale x 2 x i32> @llvm.experimental.vector.extract.nxv2i32.nxv8i32(<vscale x 8 x i32> %vec, i64 4)
|
||||
%c = call <vscale x 2 x i32> @llvm.vector.extract.nxv2i32.nxv8i32(<vscale x 8 x i32> %vec, i64 4)
|
||||
ret <vscale x 2 x i32> %c
|
||||
}
|
||||
|
||||
|
@ -50,7 +50,7 @@ define <vscale x 2 x i32> @extract_nxv8i32_nxv2i32_6(<vscale x 8 x i32> %vec) {
|
|||
; CHECK: # %bb.0:
|
||||
; CHECK-NEXT: vmv1r.v v8, v11
|
||||
; CHECK-NEXT: ret
|
||||
%c = call <vscale x 2 x i32> @llvm.experimental.vector.extract.nxv2i32.nxv8i32(<vscale x 8 x i32> %vec, i64 6)
|
||||
%c = call <vscale x 2 x i32> @llvm.vector.extract.nxv2i32.nxv8i32(<vscale x 8 x i32> %vec, i64 6)
|
||||
ret <vscale x 2 x i32> %c
|
||||
}
|
||||
|
||||
|
@ -58,7 +58,7 @@ define <vscale x 8 x i32> @extract_nxv16i32_nxv8i32_0(<vscale x 16 x i32> %vec)
|
|||
; CHECK-LABEL: extract_nxv16i32_nxv8i32_0:
|
||||
; CHECK: # %bb.0:
|
||||
; CHECK-NEXT: ret
|
||||
%c = call <vscale x 8 x i32> @llvm.experimental.vector.extract.nxv8i32.nxv16i32(<vscale x 16 x i32> %vec, i64 0)
|
||||
%c = call <vscale x 8 x i32> @llvm.vector.extract.nxv8i32.nxv16i32(<vscale x 16 x i32> %vec, i64 0)
|
||||
ret <vscale x 8 x i32> %c
|
||||
}
|
||||
|
||||
|
@ -67,7 +67,7 @@ define <vscale x 8 x i32> @extract_nxv16i32_nxv8i32_8(<vscale x 16 x i32> %vec)
|
|||
; CHECK: # %bb.0:
|
||||
; CHECK-NEXT: vmv4r.v v8, v12
|
||||
; CHECK-NEXT: ret
|
||||
%c = call <vscale x 8 x i32> @llvm.experimental.vector.extract.nxv8i32.nxv16i32(<vscale x 16 x i32> %vec, i64 8)
|
||||
%c = call <vscale x 8 x i32> @llvm.vector.extract.nxv8i32.nxv16i32(<vscale x 16 x i32> %vec, i64 8)
|
||||
ret <vscale x 8 x i32> %c
|
||||
}
|
||||
|
||||
|
@ -75,7 +75,7 @@ define <vscale x 4 x i32> @extract_nxv16i32_nxv4i32_0(<vscale x 16 x i32> %vec)
|
|||
; CHECK-LABEL: extract_nxv16i32_nxv4i32_0:
|
||||
; CHECK: # %bb.0:
|
||||
; CHECK-NEXT: ret
|
||||
%c = call <vscale x 4 x i32> @llvm.experimental.vector.extract.nxv4i32.nxv16i32(<vscale x 16 x i32> %vec, i64 0)
|
||||
%c = call <vscale x 4 x i32> @llvm.vector.extract.nxv4i32.nxv16i32(<vscale x 16 x i32> %vec, i64 0)
|
||||
ret <vscale x 4 x i32> %c
|
||||
}
|
||||
|
||||
|
@ -84,7 +84,7 @@ define <vscale x 4 x i32> @extract_nxv16i32_nxv4i32_4(<vscale x 16 x i32> %vec)
|
|||
; CHECK: # %bb.0:
|
||||
; CHECK-NEXT: vmv2r.v v8, v10
|
||||
; CHECK-NEXT: ret
|
||||
%c = call <vscale x 4 x i32> @llvm.experimental.vector.extract.nxv4i32.nxv16i32(<vscale x 16 x i32> %vec, i64 4)
|
||||
%c = call <vscale x 4 x i32> @llvm.vector.extract.nxv4i32.nxv16i32(<vscale x 16 x i32> %vec, i64 4)
|
||||
ret <vscale x 4 x i32> %c
|
||||
}
|
||||
|
||||
|
@ -93,7 +93,7 @@ define <vscale x 4 x i32> @extract_nxv16i32_nxv4i32_8(<vscale x 16 x i32> %vec)
|
|||
; CHECK: # %bb.0:
|
||||
; CHECK-NEXT: vmv2r.v v8, v12
|
||||
; CHECK-NEXT: ret
|
||||
%c = call <vscale x 4 x i32> @llvm.experimental.vector.extract.nxv4i32.nxv16i32(<vscale x 16 x i32> %vec, i64 8)
|
||||
%c = call <vscale x 4 x i32> @llvm.vector.extract.nxv4i32.nxv16i32(<vscale x 16 x i32> %vec, i64 8)
|
||||
ret <vscale x 4 x i32> %c
|
||||
}
|
||||
|
||||
|
@ -102,7 +102,7 @@ define <vscale x 4 x i32> @extract_nxv16i32_nxv4i32_12(<vscale x 16 x i32> %vec)
|
|||
; CHECK: # %bb.0:
|
||||
; CHECK-NEXT: vmv2r.v v8, v14
|
||||
; CHECK-NEXT: ret
|
||||
%c = call <vscale x 4 x i32> @llvm.experimental.vector.extract.nxv4i32.nxv16i32(<vscale x 16 x i32> %vec, i64 12)
|
||||
%c = call <vscale x 4 x i32> @llvm.vector.extract.nxv4i32.nxv16i32(<vscale x 16 x i32> %vec, i64 12)
|
||||
ret <vscale x 4 x i32> %c
|
||||
}
|
||||
|
||||
|
@ -110,7 +110,7 @@ define <vscale x 2 x i32> @extract_nxv16i32_nxv2i32_0(<vscale x 16 x i32> %vec)
|
|||
; CHECK-LABEL: extract_nxv16i32_nxv2i32_0:
|
||||
; CHECK: # %bb.0:
|
||||
; CHECK-NEXT: ret
|
||||
%c = call <vscale x 2 x i32> @llvm.experimental.vector.extract.nxv2i32.nxv16i32(<vscale x 16 x i32> %vec, i64 0)
|
||||
%c = call <vscale x 2 x i32> @llvm.vector.extract.nxv2i32.nxv16i32(<vscale x 16 x i32> %vec, i64 0)
|
||||
ret <vscale x 2 x i32> %c
|
||||
}
|
||||
|
||||
|
@ -119,7 +119,7 @@ define <vscale x 2 x i32> @extract_nxv16i32_nxv2i32_2(<vscale x 16 x i32> %vec)
|
|||
; CHECK: # %bb.0:
|
||||
; CHECK-NEXT: vmv1r.v v8, v9
|
||||
; CHECK-NEXT: ret
|
||||
%c = call <vscale x 2 x i32> @llvm.experimental.vector.extract.nxv2i32.nxv16i32(<vscale x 16 x i32> %vec, i64 2)
|
||||
%c = call <vscale x 2 x i32> @llvm.vector.extract.nxv2i32.nxv16i32(<vscale x 16 x i32> %vec, i64 2)
|
||||
ret <vscale x 2 x i32> %c
|
||||
}
|
||||
|
||||
|
@ -128,7 +128,7 @@ define <vscale x 2 x i32> @extract_nxv16i32_nxv2i32_4(<vscale x 16 x i32> %vec)
|
|||
; CHECK: # %bb.0:
|
||||
; CHECK-NEXT: vmv1r.v v8, v10
|
||||
; CHECK-NEXT: ret
|
||||
%c = call <vscale x 2 x i32> @llvm.experimental.vector.extract.nxv2i32.nxv16i32(<vscale x 16 x i32> %vec, i64 4)
|
||||
%c = call <vscale x 2 x i32> @llvm.vector.extract.nxv2i32.nxv16i32(<vscale x 16 x i32> %vec, i64 4)
|
||||
ret <vscale x 2 x i32> %c
|
||||
}
|
||||
|
||||
|
@ -137,7 +137,7 @@ define <vscale x 2 x i32> @extract_nxv16i32_nxv2i32_6(<vscale x 16 x i32> %vec)
|
|||
; CHECK: # %bb.0:
|
||||
; CHECK-NEXT: vmv1r.v v8, v11
|
||||
; CHECK-NEXT: ret
|
||||
%c = call <vscale x 2 x i32> @llvm.experimental.vector.extract.nxv2i32.nxv16i32(<vscale x 16 x i32> %vec, i64 6)
|
||||
%c = call <vscale x 2 x i32> @llvm.vector.extract.nxv2i32.nxv16i32(<vscale x 16 x i32> %vec, i64 6)
|
||||
ret <vscale x 2 x i32> %c
|
||||
}
|
||||
|
||||
|
@ -146,7 +146,7 @@ define <vscale x 2 x i32> @extract_nxv16i32_nxv2i32_8(<vscale x 16 x i32> %vec)
|
|||
; CHECK: # %bb.0:
|
||||
; CHECK-NEXT: vmv1r.v v8, v12
|
||||
; CHECK-NEXT: ret
|
||||
%c = call <vscale x 2 x i32> @llvm.experimental.vector.extract.nxv2i32.nxv16i32(<vscale x 16 x i32> %vec, i64 8)
|
||||
%c = call <vscale x 2 x i32> @llvm.vector.extract.nxv2i32.nxv16i32(<vscale x 16 x i32> %vec, i64 8)
|
||||
ret <vscale x 2 x i32> %c
|
||||
}
|
||||
|
||||
|
@ -155,7 +155,7 @@ define <vscale x 2 x i32> @extract_nxv16i32_nxv2i32_10(<vscale x 16 x i32> %vec)
|
|||
; CHECK: # %bb.0:
|
||||
; CHECK-NEXT: vmv1r.v v8, v13
|
||||
; CHECK-NEXT: ret
|
||||
%c = call <vscale x 2 x i32> @llvm.experimental.vector.extract.nxv2i32.nxv16i32(<vscale x 16 x i32> %vec, i64 10)
|
||||
%c = call <vscale x 2 x i32> @llvm.vector.extract.nxv2i32.nxv16i32(<vscale x 16 x i32> %vec, i64 10)
|
||||
ret <vscale x 2 x i32> %c
|
||||
}
|
||||
|
||||
|
@ -164,7 +164,7 @@ define <vscale x 2 x i32> @extract_nxv16i32_nxv2i32_12(<vscale x 16 x i32> %vec)
|
|||
; CHECK: # %bb.0:
|
||||
; CHECK-NEXT: vmv1r.v v8, v14
|
||||
; CHECK-NEXT: ret
|
||||
%c = call <vscale x 2 x i32> @llvm.experimental.vector.extract.nxv2i32.nxv16i32(<vscale x 16 x i32> %vec, i64 12)
|
||||
%c = call <vscale x 2 x i32> @llvm.vector.extract.nxv2i32.nxv16i32(<vscale x 16 x i32> %vec, i64 12)
|
||||
ret <vscale x 2 x i32> %c
|
||||
}
|
||||
|
||||
|
@ -173,7 +173,7 @@ define <vscale x 2 x i32> @extract_nxv16i32_nxv2i32_14(<vscale x 16 x i32> %vec)
|
|||
; CHECK: # %bb.0:
|
||||
; CHECK-NEXT: vmv1r.v v8, v15
|
||||
; CHECK-NEXT: ret
|
||||
%c = call <vscale x 2 x i32> @llvm.experimental.vector.extract.nxv2i32.nxv16i32(<vscale x 16 x i32> %vec, i64 14)
|
||||
%c = call <vscale x 2 x i32> @llvm.vector.extract.nxv2i32.nxv16i32(<vscale x 16 x i32> %vec, i64 14)
|
||||
ret <vscale x 2 x i32> %c
|
||||
}
|
||||
|
||||
|
@ -181,7 +181,7 @@ define <vscale x 1 x i32> @extract_nxv16i32_nxv1i32_0(<vscale x 16 x i32> %vec)
|
|||
; CHECK-LABEL: extract_nxv16i32_nxv1i32_0:
|
||||
; CHECK: # %bb.0:
|
||||
; CHECK-NEXT: ret
|
||||
%c = call <vscale x 1 x i32> @llvm.experimental.vector.extract.nxv1i32.nxv16i32(<vscale x 16 x i32> %vec, i64 0)
|
||||
%c = call <vscale x 1 x i32> @llvm.vector.extract.nxv1i32.nxv16i32(<vscale x 16 x i32> %vec, i64 0)
|
||||
ret <vscale x 1 x i32> %c
|
||||
}
|
||||
|
||||
|
@ -193,7 +193,7 @@ define <vscale x 1 x i32> @extract_nxv16i32_nxv1i32_1(<vscale x 16 x i32> %vec)
|
|||
; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, mu
|
||||
; CHECK-NEXT: vslidedown.vx v8, v8, a0
|
||||
; CHECK-NEXT: ret
|
||||
%c = call <vscale x 1 x i32> @llvm.experimental.vector.extract.nxv1i32.nxv16i32(<vscale x 16 x i32> %vec, i64 1)
|
||||
%c = call <vscale x 1 x i32> @llvm.vector.extract.nxv1i32.nxv16i32(<vscale x 16 x i32> %vec, i64 1)
|
||||
ret <vscale x 1 x i32> %c
|
||||
}
|
||||
|
||||
|
@ -205,7 +205,7 @@ define <vscale x 1 x i32> @extract_nxv16i32_nxv1i32_3(<vscale x 16 x i32> %vec)
|
|||
; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, mu
|
||||
; CHECK-NEXT: vslidedown.vx v8, v9, a0
|
||||
; CHECK-NEXT: ret
|
||||
%c = call <vscale x 1 x i32> @llvm.experimental.vector.extract.nxv1i32.nxv16i32(<vscale x 16 x i32> %vec, i64 3)
|
||||
%c = call <vscale x 1 x i32> @llvm.vector.extract.nxv1i32.nxv16i32(<vscale x 16 x i32> %vec, i64 3)
|
||||
ret <vscale x 1 x i32> %c
|
||||
}
|
||||
|
||||
|
@ -217,7 +217,7 @@ define <vscale x 1 x i32> @extract_nxv16i32_nxv1i32_15(<vscale x 16 x i32> %vec)
|
|||
; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, mu
|
||||
; CHECK-NEXT: vslidedown.vx v8, v15, a0
|
||||
; CHECK-NEXT: ret
|
||||
%c = call <vscale x 1 x i32> @llvm.experimental.vector.extract.nxv1i32.nxv16i32(<vscale x 16 x i32> %vec, i64 15)
|
||||
%c = call <vscale x 1 x i32> @llvm.vector.extract.nxv1i32.nxv16i32(<vscale x 16 x i32> %vec, i64 15)
|
||||
ret <vscale x 1 x i32> %c
|
||||
}
|
||||
|
||||
|
@ -226,7 +226,7 @@ define <vscale x 1 x i32> @extract_nxv16i32_nxv1i32_2(<vscale x 16 x i32> %vec)
|
|||
; CHECK: # %bb.0:
|
||||
; CHECK-NEXT: vmv1r.v v8, v9
|
||||
; CHECK-NEXT: ret
|
||||
%c = call <vscale x 1 x i32> @llvm.experimental.vector.extract.nxv1i32.nxv16i32(<vscale x 16 x i32> %vec, i64 2)
|
||||
%c = call <vscale x 1 x i32> @llvm.vector.extract.nxv1i32.nxv16i32(<vscale x 16 x i32> %vec, i64 2)
|
||||
ret <vscale x 1 x i32> %c
|
||||
}
|
||||
|
||||
|
@ -234,7 +234,7 @@ define <vscale x 1 x i32> @extract_nxv2i32_nxv1i32_0(<vscale x 2 x i32> %vec) {
|
|||
; CHECK-LABEL: extract_nxv2i32_nxv1i32_0:
|
||||
; CHECK: # %bb.0:
|
||||
; CHECK-NEXT: ret
|
||||
%c = call <vscale x 1 x i32> @llvm.experimental.vector.extract.nxv1i32.nxv2i32(<vscale x 2 x i32> %vec, i64 0)
|
||||
%c = call <vscale x 1 x i32> @llvm.vector.extract.nxv1i32.nxv2i32(<vscale x 2 x i32> %vec, i64 0)
|
||||
ret <vscale x 1 x i32> %c
|
||||
}
|
||||
|
||||
|
@ -242,7 +242,7 @@ define <vscale x 2 x i8> @extract_nxv32i8_nxv2i8_0(<vscale x 32 x i8> %vec) {
|
|||
; CHECK-LABEL: extract_nxv32i8_nxv2i8_0:
|
||||
; CHECK: # %bb.0:
|
||||
; CHECK-NEXT: ret
|
||||
%c = call <vscale x 2 x i8> @llvm.experimental.vector.extract.nxv2i8.nxv32i8(<vscale x 32 x i8> %vec, i64 0)
|
||||
%c = call <vscale x 2 x i8> @llvm.vector.extract.nxv2i8.nxv32i8(<vscale x 32 x i8> %vec, i64 0)
|
||||
ret <vscale x 2 x i8> %c
|
||||
}
|
||||
|
||||
|
@ -254,7 +254,7 @@ define <vscale x 2 x i8> @extract_nxv32i8_nxv2i8_2(<vscale x 32 x i8> %vec) {
|
|||
; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, mu
|
||||
; CHECK-NEXT: vslidedown.vx v8, v8, a0
|
||||
; CHECK-NEXT: ret
|
||||
%c = call <vscale x 2 x i8> @llvm.experimental.vector.extract.nxv2i8.nxv32i8(<vscale x 32 x i8> %vec, i64 2)
|
||||
%c = call <vscale x 2 x i8> @llvm.vector.extract.nxv2i8.nxv32i8(<vscale x 32 x i8> %vec, i64 2)
|
||||
ret <vscale x 2 x i8> %c
|
||||
}
|
||||
|
||||
|
@ -266,7 +266,7 @@ define <vscale x 2 x i8> @extract_nxv32i8_nxv2i8_4(<vscale x 32 x i8> %vec) {
|
|||
; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, mu
|
||||
; CHECK-NEXT: vslidedown.vx v8, v8, a0
|
||||
; CHECK-NEXT: ret
|
||||
%c = call <vscale x 2 x i8> @llvm.experimental.vector.extract.nxv2i8.nxv32i8(<vscale x 32 x i8> %vec, i64 4)
|
||||
%c = call <vscale x 2 x i8> @llvm.vector.extract.nxv2i8.nxv32i8(<vscale x 32 x i8> %vec, i64 4)
|
||||
ret <vscale x 2 x i8> %c
|
||||
}
@ -280,7 +280,7 @@ define <vscale x 2 x i8> @extract_nxv32i8_nxv2i8_6(<vscale x 32 x i8> %vec) {
|
|||
; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, mu
|
||||
; CHECK-NEXT: vslidedown.vx v8, v8, a0
|
||||
; CHECK-NEXT: ret
|
||||
%c = call <vscale x 2 x i8> @llvm.experimental.vector.extract.nxv2i8.nxv32i8(<vscale x 32 x i8> %vec, i64 6)
|
||||
%c = call <vscale x 2 x i8> @llvm.vector.extract.nxv2i8.nxv32i8(<vscale x 32 x i8> %vec, i64 6)
|
||||
ret <vscale x 2 x i8> %c
|
||||
}
@ -289,7 +289,7 @@ define <vscale x 2 x i8> @extract_nxv32i8_nxv2i8_8(<vscale x 32 x i8> %vec) {
|
|||
; CHECK: # %bb.0:
|
||||
; CHECK-NEXT: vmv1r.v v8, v9
|
||||
; CHECK-NEXT: ret
|
||||
%c = call <vscale x 2 x i8> @llvm.experimental.vector.extract.nxv2i8.nxv32i8(<vscale x 32 x i8> %vec, i64 8)
|
||||
%c = call <vscale x 2 x i8> @llvm.vector.extract.nxv2i8.nxv32i8(<vscale x 32 x i8> %vec, i64 8)
|
||||
ret <vscale x 2 x i8> %c
|
||||
}
@ -303,7 +303,7 @@ define <vscale x 2 x i8> @extract_nxv32i8_nxv2i8_22(<vscale x 32 x i8> %vec) {
|
|||
; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, mu
|
||||
; CHECK-NEXT: vslidedown.vx v8, v10, a0
|
||||
; CHECK-NEXT: ret
|
||||
%c = call <vscale x 2 x i8> @llvm.experimental.vector.extract.nxv2i8.nxv32i8(<vscale x 32 x i8> %vec, i64 22)
|
||||
%c = call <vscale x 2 x i8> @llvm.vector.extract.nxv2i8.nxv32i8(<vscale x 32 x i8> %vec, i64 22)
|
||||
ret <vscale x 2 x i8> %c
|
||||
}
@ -316,7 +316,7 @@ define <vscale x 1 x i8> @extract_nxv8i8_nxv1i8_7(<vscale x 8 x i8> %vec) {
|
|||
; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, mu
|
||||
; CHECK-NEXT: vslidedown.vx v8, v8, a0
|
||||
; CHECK-NEXT: ret
|
||||
%c = call <vscale x 1 x i8> @llvm.experimental.vector.extract.nxv1i8.nxv8i8(<vscale x 8 x i8> %vec, i64 7)
|
||||
%c = call <vscale x 1 x i8> @llvm.vector.extract.nxv1i8.nxv8i8(<vscale x 8 x i8> %vec, i64 7)
|
||||
ret <vscale x 1 x i8> %c
|
||||
}
@ -330,7 +330,7 @@ define <vscale x 1 x i8> @extract_nxv4i8_nxv1i8_3(<vscale x 4 x i8> %vec) {
|
|||
; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, mu
|
||||
; CHECK-NEXT: vslidedown.vx v8, v8, a0
|
||||
; CHECK-NEXT: ret
|
||||
%c = call <vscale x 1 x i8> @llvm.experimental.vector.extract.nxv1i8.nxv4i8(<vscale x 4 x i8> %vec, i64 3)
|
||||
%c = call <vscale x 1 x i8> @llvm.vector.extract.nxv1i8.nxv4i8(<vscale x 4 x i8> %vec, i64 3)
|
||||
ret <vscale x 1 x i8> %c
|
||||
}
@ -338,7 +338,7 @@ define <vscale x 2 x half> @extract_nxv2f16_nxv16f16_0(<vscale x 16 x half> %vec
|
|||
; CHECK-LABEL: extract_nxv2f16_nxv16f16_0:
|
||||
; CHECK: # %bb.0:
|
||||
; CHECK-NEXT: ret
|
||||
%c = call <vscale x 2 x half> @llvm.experimental.vector.extract.nxv2f16.nxv16f16(<vscale x 16 x half> %vec, i64 0)
|
||||
%c = call <vscale x 2 x half> @llvm.vector.extract.nxv2f16.nxv16f16(<vscale x 16 x half> %vec, i64 0)
|
||||
ret <vscale x 2 x half> %c
|
||||
}
@ -350,7 +350,7 @@ define <vscale x 2 x half> @extract_nxv2f16_nxv16f16_2(<vscale x 16 x half> %vec
|
|||
; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, mu
|
||||
; CHECK-NEXT: vslidedown.vx v8, v8, a0
|
||||
; CHECK-NEXT: ret
|
||||
%c = call <vscale x 2 x half> @llvm.experimental.vector.extract.nxv2f16.nxv16f16(<vscale x 16 x half> %vec, i64 2)
|
||||
%c = call <vscale x 2 x half> @llvm.vector.extract.nxv2f16.nxv16f16(<vscale x 16 x half> %vec, i64 2)
|
||||
ret <vscale x 2 x half> %c
|
||||
}
@ -359,7 +359,7 @@ define <vscale x 2 x half> @extract_nxv2f16_nxv16f16_4(<vscale x 16 x half> %vec
|
|||
; CHECK: # %bb.0:
|
||||
; CHECK-NEXT: vmv1r.v v8, v9
|
||||
; CHECK-NEXT: ret
|
||||
%c = call <vscale x 2 x half> @llvm.experimental.vector.extract.nxv2f16.nxv16f16(<vscale x 16 x half> %vec, i64 4)
|
||||
%c = call <vscale x 2 x half> @llvm.vector.extract.nxv2f16.nxv16f16(<vscale x 16 x half> %vec, i64 4)
|
||||
ret <vscale x 2 x half> %c
|
||||
}
@ -367,7 +367,7 @@ define <vscale x 8 x i1> @extract_nxv64i1_nxv8i1_0(<vscale x 64 x i1> %mask) {
|
|||
; CHECK-LABEL: extract_nxv64i1_nxv8i1_0:
|
||||
; CHECK: # %bb.0:
|
||||
; CHECK-NEXT: ret
|
||||
%c = call <vscale x 8 x i1> @llvm.experimental.vector.extract.nxv8i1(<vscale x 64 x i1> %mask, i64 0)
|
||||
%c = call <vscale x 8 x i1> @llvm.vector.extract.nxv8i1(<vscale x 64 x i1> %mask, i64 0)
|
||||
ret <vscale x 8 x i1> %c
|
||||
}
@ -379,7 +379,7 @@ define <vscale x 8 x i1> @extract_nxv64i1_nxv8i1_8(<vscale x 64 x i1> %mask) {
|
|||
; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, mu
|
||||
; CHECK-NEXT: vslidedown.vx v0, v0, a0
|
||||
; CHECK-NEXT: ret
|
||||
%c = call <vscale x 8 x i1> @llvm.experimental.vector.extract.nxv8i1(<vscale x 64 x i1> %mask, i64 8)
|
||||
%c = call <vscale x 8 x i1> @llvm.vector.extract.nxv8i1(<vscale x 64 x i1> %mask, i64 8)
|
||||
ret <vscale x 8 x i1> %c
|
||||
}
@ -387,7 +387,7 @@ define <vscale x 2 x i1> @extract_nxv64i1_nxv2i1_0(<vscale x 64 x i1> %mask) {
|
|||
; CHECK-LABEL: extract_nxv64i1_nxv2i1_0:
|
||||
; CHECK: # %bb.0:
|
||||
; CHECK-NEXT: ret
|
||||
%c = call <vscale x 2 x i1> @llvm.experimental.vector.extract.nxv2i1(<vscale x 64 x i1> %mask, i64 0)
|
||||
%c = call <vscale x 2 x i1> @llvm.vector.extract.nxv2i1(<vscale x 64 x i1> %mask, i64 0)
|
||||
ret <vscale x 2 x i1> %c
|
||||
}
@ -404,7 +404,7 @@ define <vscale x 2 x i1> @extract_nxv64i1_nxv2i1_2(<vscale x 64 x i1> %mask) {
|
|||
; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, mu
|
||||
; CHECK-NEXT: vmsne.vi v0, v8, 0
|
||||
; CHECK-NEXT: ret
|
||||
%c = call <vscale x 2 x i1> @llvm.experimental.vector.extract.nxv2i1(<vscale x 64 x i1> %mask, i64 2)
|
||||
%c = call <vscale x 2 x i1> @llvm.vector.extract.nxv2i1(<vscale x 64 x i1> %mask, i64 2)
|
||||
ret <vscale x 2 x i1> %c
|
||||
}
@ -412,7 +412,7 @@ define <vscale x 4 x i1> @extract_nxv4i1_nxv32i1_0(<vscale x 32 x i1> %x) {
|
|||
; CHECK-LABEL: extract_nxv4i1_nxv32i1_0:
|
||||
; CHECK: # %bb.0:
|
||||
; CHECK-NEXT: ret
|
||||
%c = call <vscale x 4 x i1> @llvm.experimental.vector.extract.nxv4i1(<vscale x 32 x i1> %x, i64 0)
|
||||
%c = call <vscale x 4 x i1> @llvm.vector.extract.nxv4i1(<vscale x 32 x i1> %x, i64 0)
|
||||
ret <vscale x 4 x i1> %c
|
||||
}
@ -429,7 +429,7 @@ define <vscale x 4 x i1> @extract_nxv4i1_nxv32i1_4(<vscale x 32 x i1> %x) {
|
|||
; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, mu
|
||||
; CHECK-NEXT: vmsne.vi v0, v8, 0
|
||||
; CHECK-NEXT: ret
|
||||
%c = call <vscale x 4 x i1> @llvm.experimental.vector.extract.nxv4i1(<vscale x 32 x i1> %x, i64 4)
|
||||
%c = call <vscale x 4 x i1> @llvm.vector.extract.nxv4i1(<vscale x 32 x i1> %x, i64 4)
|
||||
ret <vscale x 4 x i1> %c
|
||||
}
@ -437,7 +437,7 @@ define <vscale x 16 x i1> @extract_nxv16i1_nxv32i1_0(<vscale x 32 x i1> %x) {
|
|||
; CHECK-LABEL: extract_nxv16i1_nxv32i1_0:
|
||||
; CHECK: # %bb.0:
|
||||
; CHECK-NEXT: ret
|
||||
%c = call <vscale x 16 x i1> @llvm.experimental.vector.extract.nxv16i1(<vscale x 32 x i1> %x, i64 0)
|
||||
%c = call <vscale x 16 x i1> @llvm.vector.extract.nxv16i1(<vscale x 32 x i1> %x, i64 0)
|
||||
ret <vscale x 16 x i1> %c
|
||||
}
@ -449,7 +449,7 @@ define <vscale x 16 x i1> @extract_nxv16i1_nxv32i1_16(<vscale x 32 x i1> %x) {
|
|||
; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, mu
|
||||
; CHECK-NEXT: vslidedown.vx v0, v0, a0
|
||||
; CHECK-NEXT: ret
|
||||
%c = call <vscale x 16 x i1> @llvm.experimental.vector.extract.nxv16i1(<vscale x 32 x i1> %x, i64 16)
|
||||
%c = call <vscale x 16 x i1> @llvm.vector.extract.nxv16i1(<vscale x 32 x i1> %x, i64 16)
|
||||
ret <vscale x 16 x i1> %c
|
||||
}
@ -460,7 +460,7 @@ define <vscale x 6 x half> @extract_nxv6f16_nxv12f16_0(<vscale x 12 x half> %in)
|
|||
; CHECK-LABEL: extract_nxv6f16_nxv12f16_0:
|
||||
; CHECK: # %bb.0:
|
||||
; CHECK-NEXT: ret
|
||||
%res = call <vscale x 6 x half> @llvm.experimental.vector.extract.nxv6f16.nxv12f16(<vscale x 12 x half> %in, i64 0)
|
||||
%res = call <vscale x 6 x half> @llvm.vector.extract.nxv6f16.nxv12f16(<vscale x 12 x half> %in, i64 0)
|
||||
ret <vscale x 6 x half> %res
|
||||
}
@ -478,31 +478,31 @@ define <vscale x 6 x half> @extract_nxv6f16_nxv12f16_6(<vscale x 12 x half> %in)
|
|||
; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu
|
||||
; CHECK-NEXT: vslideup.vx v8, v10, a0
|
||||
; CHECK-NEXT: ret
|
||||
%res = call <vscale x 6 x half> @llvm.experimental.vector.extract.nxv6f16.nxv12f16(<vscale x 12 x half> %in, i64 6)
|
||||
%res = call <vscale x 6 x half> @llvm.vector.extract.nxv6f16.nxv12f16(<vscale x 12 x half> %in, i64 6)
|
||||
ret <vscale x 6 x half> %res
|
||||
}

declare <vscale x 6 x half> @llvm.experimental.vector.extract.nxv6f16.nxv12f16(<vscale x 12 x half>, i64)
declare <vscale x 6 x half> @llvm.vector.extract.nxv6f16.nxv12f16(<vscale x 12 x half>, i64)

declare <vscale x 1 x i8> @llvm.experimental.vector.extract.nxv1i8.nxv4i8(<vscale x 4 x i8> %vec, i64 %idx)
declare <vscale x 1 x i8> @llvm.experimental.vector.extract.nxv1i8.nxv8i8(<vscale x 8 x i8> %vec, i64 %idx)
declare <vscale x 1 x i8> @llvm.vector.extract.nxv1i8.nxv4i8(<vscale x 4 x i8> %vec, i64 %idx)
declare <vscale x 1 x i8> @llvm.vector.extract.nxv1i8.nxv8i8(<vscale x 8 x i8> %vec, i64 %idx)

declare <vscale x 2 x i8> @llvm.experimental.vector.extract.nxv2i8.nxv32i8(<vscale x 32 x i8> %vec, i64 %idx)
declare <vscale x 2 x i8> @llvm.vector.extract.nxv2i8.nxv32i8(<vscale x 32 x i8> %vec, i64 %idx)

declare <vscale x 1 x i32> @llvm.experimental.vector.extract.nxv1i32.nxv2i32(<vscale x 2 x i32> %vec, i64 %idx)
declare <vscale x 1 x i32> @llvm.vector.extract.nxv1i32.nxv2i32(<vscale x 2 x i32> %vec, i64 %idx)

declare <vscale x 2 x i32> @llvm.experimental.vector.extract.nxv2i32.nxv8i32(<vscale x 8 x i32> %vec, i64 %idx)
declare <vscale x 4 x i32> @llvm.experimental.vector.extract.nxv4i32.nxv8i32(<vscale x 8 x i32> %vec, i64 %idx)
declare <vscale x 2 x i32> @llvm.vector.extract.nxv2i32.nxv8i32(<vscale x 8 x i32> %vec, i64 %idx)
declare <vscale x 4 x i32> @llvm.vector.extract.nxv4i32.nxv8i32(<vscale x 8 x i32> %vec, i64 %idx)

declare <vscale x 1 x i32> @llvm.experimental.vector.extract.nxv1i32.nxv16i32(<vscale x 16 x i32> %vec, i64 %idx)
declare <vscale x 2 x i32> @llvm.experimental.vector.extract.nxv2i32.nxv16i32(<vscale x 16 x i32> %vec, i64 %idx)
declare <vscale x 4 x i32> @llvm.experimental.vector.extract.nxv4i32.nxv16i32(<vscale x 16 x i32> %vec, i64 %idx)
declare <vscale x 8 x i32> @llvm.experimental.vector.extract.nxv8i32.nxv16i32(<vscale x 16 x i32> %vec, i64 %idx)
declare <vscale x 1 x i32> @llvm.vector.extract.nxv1i32.nxv16i32(<vscale x 16 x i32> %vec, i64 %idx)
declare <vscale x 2 x i32> @llvm.vector.extract.nxv2i32.nxv16i32(<vscale x 16 x i32> %vec, i64 %idx)
declare <vscale x 4 x i32> @llvm.vector.extract.nxv4i32.nxv16i32(<vscale x 16 x i32> %vec, i64 %idx)
declare <vscale x 8 x i32> @llvm.vector.extract.nxv8i32.nxv16i32(<vscale x 16 x i32> %vec, i64 %idx)

declare <vscale x 2 x half> @llvm.experimental.vector.extract.nxv2f16.nxv16f16(<vscale x 16 x half> %vec, i64 %idx)
declare <vscale x 2 x half> @llvm.vector.extract.nxv2f16.nxv16f16(<vscale x 16 x half> %vec, i64 %idx)

declare <vscale x 4 x i1> @llvm.experimental.vector.extract.nxv4i1(<vscale x 32 x i1> %vec, i64 %idx)
declare <vscale x 16 x i1> @llvm.experimental.vector.extract.nxv16i1(<vscale x 32 x i1> %vec, i64 %idx)
declare <vscale x 4 x i1> @llvm.vector.extract.nxv4i1(<vscale x 32 x i1> %vec, i64 %idx)
declare <vscale x 16 x i1> @llvm.vector.extract.nxv16i1(<vscale x 32 x i1> %vec, i64 %idx)

declare <vscale x 2 x i1> @llvm.experimental.vector.extract.nxv2i1(<vscale x 64 x i1> %vec, i64 %idx)
declare <vscale x 8 x i1> @llvm.experimental.vector.extract.nxv8i1(<vscale x 64 x i1> %vec, i64 %idx)
declare <vscale x 2 x i1> @llvm.vector.extract.nxv2i1(<vscale x 64 x i1> %vec, i64 %idx)
declare <vscale x 8 x i1> @llvm.vector.extract.nxv8i1(<vscale x 64 x i1> %vec, i64 %idx)

@ -11,7 +11,7 @@ define void @extract_v2i8_v4i8_0(<4 x i8>* %x, <2 x i8>* %y) {
; CHECK-NEXT: vse8.v v8, (a1)
; CHECK-NEXT: ret
%a = load <4 x i8>, <4 x i8>* %x
%c = call <2 x i8> @llvm.experimental.vector.extract.v2i8.v4i8(<4 x i8> %a, i64 0)
%c = call <2 x i8> @llvm.vector.extract.v2i8.v4i8(<4 x i8> %a, i64 0)
store <2 x i8> %c, <2 x i8>* %y
ret void
}
@ -27,7 +27,7 @@ define void @extract_v2i8_v4i8_2(<4 x i8>* %x, <2 x i8>* %y) {
|
|||
; CHECK-NEXT: vse8.v v8, (a1)
|
||||
; CHECK-NEXT: ret
|
||||
%a = load <4 x i8>, <4 x i8>* %x
|
||||
%c = call <2 x i8> @llvm.experimental.vector.extract.v2i8.v4i8(<4 x i8> %a, i64 2)
|
||||
%c = call <2 x i8> @llvm.vector.extract.v2i8.v4i8(<4 x i8> %a, i64 2)
|
||||
store <2 x i8> %c, <2 x i8>* %y
|
||||
ret void
|
||||
}
|
||||
|
@ -41,7 +41,7 @@ define void @extract_v2i8_v8i8_0(<8 x i8>* %x, <2 x i8>* %y) {
|
|||
; CHECK-NEXT: vse8.v v8, (a1)
|
||||
; CHECK-NEXT: ret
|
||||
%a = load <8 x i8>, <8 x i8>* %x
|
||||
%c = call <2 x i8> @llvm.experimental.vector.extract.v2i8.v8i8(<8 x i8> %a, i64 0)
|
||||
%c = call <2 x i8> @llvm.vector.extract.v2i8.v8i8(<8 x i8> %a, i64 0)
|
||||
store <2 x i8> %c, <2 x i8>* %y
|
||||
ret void
|
||||
}
|
||||
|
@ -57,7 +57,7 @@ define void @extract_v2i8_v8i8_6(<8 x i8>* %x, <2 x i8>* %y) {
|
|||
; CHECK-NEXT: vse8.v v8, (a1)
|
||||
; CHECK-NEXT: ret
|
||||
%a = load <8 x i8>, <8 x i8>* %x
|
||||
%c = call <2 x i8> @llvm.experimental.vector.extract.v2i8.v8i8(<8 x i8> %a, i64 6)
|
||||
%c = call <2 x i8> @llvm.vector.extract.v2i8.v8i8(<8 x i8> %a, i64 6)
|
||||
store <2 x i8> %c, <2 x i8>* %y
|
||||
ret void
|
||||
}
|
||||
|
@ -79,7 +79,7 @@ define void @extract_v2i32_v8i32_0(<8 x i32>* %x, <2 x i32>* %y) {
|
|||
; LMULMAX1-NEXT: vse32.v v8, (a1)
|
||||
; LMULMAX1-NEXT: ret
|
||||
%a = load <8 x i32>, <8 x i32>* %x
|
||||
%c = call <2 x i32> @llvm.experimental.vector.extract.v2i32.v8i32(<8 x i32> %a, i64 0)
|
||||
%c = call <2 x i32> @llvm.vector.extract.v2i32.v8i32(<8 x i32> %a, i64 0)
|
||||
store <2 x i32> %c, <2 x i32>* %y
|
||||
ret void
|
||||
}
|
||||
|
@ -105,7 +105,7 @@ define void @extract_v2i32_v8i32_2(<8 x i32>* %x, <2 x i32>* %y) {
|
|||
; LMULMAX1-NEXT: vse32.v v8, (a1)
|
||||
; LMULMAX1-NEXT: ret
|
||||
%a = load <8 x i32>, <8 x i32>* %x
|
||||
%c = call <2 x i32> @llvm.experimental.vector.extract.v2i32.v8i32(<8 x i32> %a, i64 2)
|
||||
%c = call <2 x i32> @llvm.vector.extract.v2i32.v8i32(<8 x i32> %a, i64 2)
|
||||
store <2 x i32> %c, <2 x i32>* %y
|
||||
ret void
|
||||
}
|
||||
|
@ -132,7 +132,7 @@ define void @extract_v2i32_v8i32_6(<8 x i32>* %x, <2 x i32>* %y) {
|
|||
; LMULMAX1-NEXT: vse32.v v8, (a1)
|
||||
; LMULMAX1-NEXT: ret
|
||||
%a = load <8 x i32>, <8 x i32>* %x
|
||||
%c = call <2 x i32> @llvm.experimental.vector.extract.v2i32.v8i32(<8 x i32> %a, i64 6)
|
||||
%c = call <2 x i32> @llvm.vector.extract.v2i32.v8i32(<8 x i32> %a, i64 6)
|
||||
store <2 x i32> %c, <2 x i32>* %y
|
||||
ret void
|
||||
}
|
||||
|
@ -143,7 +143,7 @@ define void @extract_v2i32_nxv16i32_0(<vscale x 16 x i32> %x, <2 x i32>* %y) {
|
|||
; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, mu
|
||||
; CHECK-NEXT: vse32.v v8, (a0)
|
||||
; CHECK-NEXT: ret
|
||||
%c = call <2 x i32> @llvm.experimental.vector.extract.v2i32.nxv16i32(<vscale x 16 x i32> %x, i64 0)
|
||||
%c = call <2 x i32> @llvm.vector.extract.v2i32.nxv16i32(<vscale x 16 x i32> %x, i64 0)
|
||||
store <2 x i32> %c, <2 x i32>* %y
|
||||
ret void
|
||||
}
|
||||
|
@ -156,7 +156,7 @@ define void @extract_v2i32_nxv16i32_8(<vscale x 16 x i32> %x, <2 x i32>* %y) {
|
|||
; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, mu
|
||||
; CHECK-NEXT: vse32.v v8, (a0)
|
||||
; CHECK-NEXT: ret
|
||||
%c = call <2 x i32> @llvm.experimental.vector.extract.v2i32.nxv16i32(<vscale x 16 x i32> %x, i64 6)
|
||||
%c = call <2 x i32> @llvm.vector.extract.v2i32.nxv16i32(<vscale x 16 x i32> %x, i64 6)
|
||||
store <2 x i32> %c, <2 x i32>* %y
|
||||
ret void
|
||||
}
|
||||
|
@ -167,7 +167,7 @@ define void @extract_v2i8_nxv2i8_0(<vscale x 2 x i8> %x, <2 x i8>* %y) {
|
|||
; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, mu
|
||||
; CHECK-NEXT: vse8.v v8, (a0)
|
||||
; CHECK-NEXT: ret
|
||||
%c = call <2 x i8> @llvm.experimental.vector.extract.v2i8.nxv2i8(<vscale x 2 x i8> %x, i64 0)
|
||||
%c = call <2 x i8> @llvm.vector.extract.v2i8.nxv2i8(<vscale x 2 x i8> %x, i64 0)
|
||||
store <2 x i8> %c, <2 x i8>* %y
|
||||
ret void
|
||||
}
|
||||
|
@ -180,7 +180,7 @@ define void @extract_v2i8_nxv2i8_2(<vscale x 2 x i8> %x, <2 x i8>* %y) {
|
|||
; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, mu
|
||||
; CHECK-NEXT: vse8.v v8, (a0)
|
||||
; CHECK-NEXT: ret
|
||||
%c = call <2 x i8> @llvm.experimental.vector.extract.v2i8.nxv2i8(<vscale x 2 x i8> %x, i64 2)
|
||||
%c = call <2 x i8> @llvm.vector.extract.v2i8.nxv2i8(<vscale x 2 x i8> %x, i64 2)
|
||||
store <2 x i8> %c, <2 x i8>* %y
|
||||
ret void
|
||||
}
|
||||
|
@ -204,7 +204,7 @@ define void @extract_v8i32_nxv16i32_8(<vscale x 16 x i32> %x, <8 x i32>* %y) {
|
|||
; LMULMAX1-NEXT: vse32.v v8, (a1)
|
||||
; LMULMAX1-NEXT: vse32.v v16, (a0)
|
||||
; LMULMAX1-NEXT: ret
|
||||
%c = call <8 x i32> @llvm.experimental.vector.extract.v8i32.nxv16i32(<vscale x 16 x i32> %x, i64 8)
|
||||
%c = call <8 x i32> @llvm.vector.extract.v8i32.nxv16i32(<vscale x 16 x i32> %x, i64 8)
|
||||
store <8 x i32> %c, <8 x i32>* %y
|
||||
ret void
|
||||
}
|
||||
|
@ -227,7 +227,7 @@ define void @extract_v8i1_v64i1_0(<64 x i1>* %x, <8 x i1>* %y) {
|
|||
; LMULMAX1-NEXT: vsm.v v8, (a1)
|
||||
; LMULMAX1-NEXT: ret
|
||||
%a = load <64 x i1>, <64 x i1>* %x
|
||||
%c = call <8 x i1> @llvm.experimental.vector.extract.v8i1.v64i1(<64 x i1> %a, i64 0)
|
||||
%c = call <8 x i1> @llvm.vector.extract.v8i1.v64i1(<64 x i1> %a, i64 0)
|
||||
store <8 x i1> %c, <8 x i1>* %y
|
||||
ret void
|
||||
}
|
||||
|
@ -254,7 +254,7 @@ define void @extract_v8i1_v64i1_8(<64 x i1>* %x, <8 x i1>* %y) {
|
|||
; LMULMAX1-NEXT: vsm.v v8, (a1)
|
||||
; LMULMAX1-NEXT: ret
|
||||
%a = load <64 x i1>, <64 x i1>* %x
|
||||
%c = call <8 x i1> @llvm.experimental.vector.extract.v8i1.v64i1(<64 x i1> %a, i64 8)
|
||||
%c = call <8 x i1> @llvm.vector.extract.v8i1.v64i1(<64 x i1> %a, i64 8)
|
||||
store <8 x i1> %c, <8 x i1>* %y
|
||||
ret void
|
||||
}
|
||||
|
@ -281,7 +281,7 @@ define void @extract_v8i1_v64i1_48(<64 x i1>* %x, <8 x i1>* %y) {
|
|||
; LMULMAX1-NEXT: vsm.v v8, (a1)
|
||||
; LMULMAX1-NEXT: ret
|
||||
%a = load <64 x i1>, <64 x i1>* %x
|
||||
%c = call <8 x i1> @llvm.experimental.vector.extract.v8i1.v64i1(<64 x i1> %a, i64 48)
|
||||
%c = call <8 x i1> @llvm.vector.extract.v8i1.v64i1(<64 x i1> %a, i64 48)
|
||||
store <8 x i1> %c, <8 x i1>* %y
|
||||
ret void
|
||||
}
|
||||
|
@ -292,7 +292,7 @@ define void @extract_v8i1_nxv2i1_0(<vscale x 2 x i1> %x, <8 x i1>* %y) {
|
|||
; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
|
||||
; CHECK-NEXT: vsm.v v0, (a0)
|
||||
; CHECK-NEXT: ret
|
||||
%c = call <8 x i1> @llvm.experimental.vector.extract.v8i1.nxv2i1(<vscale x 2 x i1> %x, i64 0)
|
||||
%c = call <8 x i1> @llvm.vector.extract.v8i1.nxv2i1(<vscale x 2 x i1> %x, i64 0)
|
||||
store <8 x i1> %c, <8 x i1>* %y
|
||||
ret void
|
||||
}
|
||||
|
@ -303,7 +303,7 @@ define void @extract_v8i1_nxv64i1_0(<vscale x 64 x i1> %x, <8 x i1>* %y) {
|
|||
; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
|
||||
; CHECK-NEXT: vsm.v v0, (a0)
|
||||
; CHECK-NEXT: ret
|
||||
%c = call <8 x i1> @llvm.experimental.vector.extract.v8i1.nxv64i1(<vscale x 64 x i1> %x, i64 0)
|
||||
%c = call <8 x i1> @llvm.vector.extract.v8i1.nxv64i1(<vscale x 64 x i1> %x, i64 0)
|
||||
store <8 x i1> %c, <8 x i1>* %y
|
||||
ret void
|
||||
}
|
||||
|
@ -316,7 +316,7 @@ define void @extract_v8i1_nxv64i1_8(<vscale x 64 x i1> %x, <8 x i1>* %y) {
|
|||
; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
|
||||
; CHECK-NEXT: vsm.v v8, (a0)
|
||||
; CHECK-NEXT: ret
|
||||
%c = call <8 x i1> @llvm.experimental.vector.extract.v8i1.nxv64i1(<vscale x 64 x i1> %x, i64 8)
|
||||
%c = call <8 x i1> @llvm.vector.extract.v8i1.nxv64i1(<vscale x 64 x i1> %x, i64 8)
|
||||
store <8 x i1> %c, <8 x i1>* %y
|
||||
ret void
|
||||
}
|
||||
|
@ -329,7 +329,7 @@ define void @extract_v8i1_nxv64i1_48(<vscale x 64 x i1> %x, <8 x i1>* %y) {
|
|||
; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
|
||||
; CHECK-NEXT: vsm.v v8, (a0)
|
||||
; CHECK-NEXT: ret
|
||||
%c = call <8 x i1> @llvm.experimental.vector.extract.v8i1.nxv64i1(<vscale x 64 x i1> %x, i64 48)
|
||||
%c = call <8 x i1> @llvm.vector.extract.v8i1.nxv64i1(<vscale x 64 x i1> %x, i64 48)
|
||||
store <8 x i1> %c, <8 x i1>* %y
|
||||
ret void
|
||||
}
|
||||
|
@ -369,7 +369,7 @@ define void @extract_v2i1_v64i1_0(<64 x i1>* %x, <2 x i1>* %y) {
|
|||
; LMULMAX1-NEXT: vsm.v v8, (a1)
|
||||
; LMULMAX1-NEXT: ret
|
||||
%a = load <64 x i1>, <64 x i1>* %x
|
||||
%c = call <2 x i1> @llvm.experimental.vector.extract.v2i1.v64i1(<64 x i1> %a, i64 0)
|
||||
%c = call <2 x i1> @llvm.vector.extract.v2i1.v64i1(<64 x i1> %a, i64 0)
|
||||
store <2 x i1> %c, <2 x i1>* %y
|
||||
ret void
|
||||
}
|
||||
|
@ -418,7 +418,7 @@ define void @extract_v2i1_v64i1_2(<64 x i1>* %x, <2 x i1>* %y) {
|
|||
; LMULMAX1-NEXT: vsm.v v8, (a1)
|
||||
; LMULMAX1-NEXT: ret
|
||||
%a = load <64 x i1>, <64 x i1>* %x
|
||||
%c = call <2 x i1> @llvm.experimental.vector.extract.v2i1.v64i1(<64 x i1> %a, i64 2)
|
||||
%c = call <2 x i1> @llvm.vector.extract.v2i1.v64i1(<64 x i1> %a, i64 2)
|
||||
store <2 x i1> %c, <2 x i1>* %y
|
||||
ret void
|
||||
}
|
||||
|
@ -469,7 +469,7 @@ define void @extract_v2i1_v64i1_42(<64 x i1>* %x, <2 x i1>* %y) {
|
|||
; LMULMAX1-NEXT: vsm.v v8, (a1)
|
||||
; LMULMAX1-NEXT: ret
|
||||
%a = load <64 x i1>, <64 x i1>* %x
|
||||
%c = call <2 x i1> @llvm.experimental.vector.extract.v2i1.v64i1(<64 x i1> %a, i64 42)
|
||||
%c = call <2 x i1> @llvm.vector.extract.v2i1.v64i1(<64 x i1> %a, i64 42)
|
||||
store <2 x i1> %c, <2 x i1>* %y
|
||||
ret void
|
||||
}
|
||||
|
@ -488,7 +488,7 @@ define void @extract_v2i1_nxv2i1_0(<vscale x 2 x i1> %x, <2 x i1>* %y) {
|
|||
; CHECK-NEXT: vmsne.vi v8, v9, 0
|
||||
; CHECK-NEXT: vsm.v v8, (a0)
|
||||
; CHECK-NEXT: ret
|
||||
%c = call <2 x i1> @llvm.experimental.vector.extract.v2i1.nxv2i1(<vscale x 2 x i1> %x, i64 0)
|
||||
%c = call <2 x i1> @llvm.vector.extract.v2i1.nxv2i1(<vscale x 2 x i1> %x, i64 0)
|
||||
store <2 x i1> %c, <2 x i1>* %y
|
||||
ret void
|
||||
}
|
||||
|
@ -513,7 +513,7 @@ define void @extract_v2i1_nxv2i1_2(<vscale x 2 x i1> %x, <2 x i1>* %y) {
|
|||
; CHECK-NEXT: vmsne.vi v8, v9, 0
|
||||
; CHECK-NEXT: vsm.v v8, (a0)
|
||||
; CHECK-NEXT: ret
|
||||
%c = call <2 x i1> @llvm.experimental.vector.extract.v2i1.nxv2i1(<vscale x 2 x i1> %x, i64 2)
|
||||
%c = call <2 x i1> @llvm.vector.extract.v2i1.nxv2i1(<vscale x 2 x i1> %x, i64 2)
|
||||
store <2 x i1> %c, <2 x i1>* %y
|
||||
ret void
|
||||
}
|
||||
|
@ -532,7 +532,7 @@ define void @extract_v2i1_nxv64i1_0(<vscale x 64 x i1> %x, <2 x i1>* %y) {
|
|||
; CHECK-NEXT: vmsne.vi v8, v9, 0
|
||||
; CHECK-NEXT: vsm.v v8, (a0)
|
||||
; CHECK-NEXT: ret
|
||||
%c = call <2 x i1> @llvm.experimental.vector.extract.v2i1.nxv64i1(<vscale x 64 x i1> %x, i64 0)
|
||||
%c = call <2 x i1> @llvm.vector.extract.v2i1.nxv64i1(<vscale x 64 x i1> %x, i64 0)
|
||||
store <2 x i1> %c, <2 x i1>* %y
|
||||
ret void
|
||||
}
|
||||
|
@ -557,7 +557,7 @@ define void @extract_v2i1_nxv64i1_2(<vscale x 64 x i1> %x, <2 x i1>* %y) {
|
|||
; CHECK-NEXT: vmsne.vi v8, v9, 0
|
||||
; CHECK-NEXT: vsm.v v8, (a0)
|
||||
; CHECK-NEXT: ret
|
||||
%c = call <2 x i1> @llvm.experimental.vector.extract.v2i1.nxv64i1(<vscale x 64 x i1> %x, i64 2)
|
||||
%c = call <2 x i1> @llvm.vector.extract.v2i1.nxv64i1(<vscale x 64 x i1> %x, i64 2)
|
||||
store <2 x i1> %c, <2 x i1>* %y
|
||||
ret void
|
||||
}
|
||||
|
@ -583,7 +583,7 @@ define void @extract_v2i1_nxv64i1_42(<vscale x 64 x i1> %x, <2 x i1>* %y) {
|
|||
; CHECK-NEXT: vmsne.vi v8, v9, 0
|
||||
; CHECK-NEXT: vsm.v v8, (a0)
|
||||
; CHECK-NEXT: ret
|
||||
%c = call <2 x i1> @llvm.experimental.vector.extract.v2i1.nxv64i1(<vscale x 64 x i1> %x, i64 42)
|
||||
%c = call <2 x i1> @llvm.vector.extract.v2i1.nxv64i1(<vscale x 64 x i1> %x, i64 42)
|
||||
store <2 x i1> %c, <2 x i1>* %y
|
||||
ret void
|
||||
}
|
||||
|
@ -608,7 +608,7 @@ define void @extract_v2i1_nxv32i1_26(<vscale x 32 x i1> %x, <2 x i1>* %y) {
|
|||
; CHECK-NEXT: vmsne.vi v8, v9, 0
|
||||
; CHECK-NEXT: vsm.v v8, (a0)
|
||||
; CHECK-NEXT: ret
|
||||
%c = call <2 x i1> @llvm.experimental.vector.extract.v2i1.nxv32i1(<vscale x 32 x i1> %x, i64 26)
|
||||
%c = call <2 x i1> @llvm.vector.extract.v2i1.nxv32i1(<vscale x 32 x i1> %x, i64 26)
|
||||
store <2 x i1> %c, <2 x i1>* %y
|
||||
ret void
|
||||
}
|
||||
|
@ -621,28 +621,28 @@ define void @extract_v8i1_nxv32i1_16(<vscale x 32 x i1> %x, <8 x i1>* %y) {
; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
; CHECK-NEXT: vsm.v v8, (a0)
; CHECK-NEXT: ret
%c = call <8 x i1> @llvm.experimental.vector.extract.v8i1.nxv32i1(<vscale x 32 x i1> %x, i64 16)
%c = call <8 x i1> @llvm.vector.extract.v8i1.nxv32i1(<vscale x 32 x i1> %x, i64 16)
store <8 x i1> %c, <8 x i1>* %y
ret void
}

declare <2 x i1> @llvm.experimental.vector.extract.v2i1.v64i1(<64 x i1> %vec, i64 %idx)
declare <8 x i1> @llvm.experimental.vector.extract.v8i1.v64i1(<64 x i1> %vec, i64 %idx)
declare <2 x i1> @llvm.vector.extract.v2i1.v64i1(<64 x i1> %vec, i64 %idx)
declare <8 x i1> @llvm.vector.extract.v8i1.v64i1(<64 x i1> %vec, i64 %idx)

declare <2 x i1> @llvm.experimental.vector.extract.v2i1.nxv2i1(<vscale x 2 x i1> %vec, i64 %idx)
declare <8 x i1> @llvm.experimental.vector.extract.v8i1.nxv2i1(<vscale x 2 x i1> %vec, i64 %idx)
declare <2 x i1> @llvm.vector.extract.v2i1.nxv2i1(<vscale x 2 x i1> %vec, i64 %idx)
declare <8 x i1> @llvm.vector.extract.v8i1.nxv2i1(<vscale x 2 x i1> %vec, i64 %idx)

declare <2 x i1> @llvm.experimental.vector.extract.v2i1.nxv32i1(<vscale x 32 x i1> %vec, i64 %idx)
declare <8 x i1> @llvm.experimental.vector.extract.v8i1.nxv32i1(<vscale x 32 x i1> %vec, i64 %idx)
declare <2 x i1> @llvm.vector.extract.v2i1.nxv32i1(<vscale x 32 x i1> %vec, i64 %idx)
declare <8 x i1> @llvm.vector.extract.v8i1.nxv32i1(<vscale x 32 x i1> %vec, i64 %idx)

declare <2 x i1> @llvm.experimental.vector.extract.v2i1.nxv64i1(<vscale x 64 x i1> %vec, i64 %idx)
declare <8 x i1> @llvm.experimental.vector.extract.v8i1.nxv64i1(<vscale x 64 x i1> %vec, i64 %idx)
declare <2 x i1> @llvm.vector.extract.v2i1.nxv64i1(<vscale x 64 x i1> %vec, i64 %idx)
declare <8 x i1> @llvm.vector.extract.v8i1.nxv64i1(<vscale x 64 x i1> %vec, i64 %idx)

declare <2 x i8> @llvm.experimental.vector.extract.v2i8.v4i8(<4 x i8> %vec, i64 %idx)
declare <2 x i8> @llvm.experimental.vector.extract.v2i8.v8i8(<8 x i8> %vec, i64 %idx)
declare <2 x i32> @llvm.experimental.vector.extract.v2i32.v8i32(<8 x i32> %vec, i64 %idx)
declare <2 x i8> @llvm.vector.extract.v2i8.v4i8(<4 x i8> %vec, i64 %idx)
declare <2 x i8> @llvm.vector.extract.v2i8.v8i8(<8 x i8> %vec, i64 %idx)
declare <2 x i32> @llvm.vector.extract.v2i32.v8i32(<8 x i32> %vec, i64 %idx)

declare <2 x i8> @llvm.experimental.vector.extract.v2i8.nxv2i8(<vscale x 2 x i8> %vec, i64 %idx)
declare <2 x i8> @llvm.vector.extract.v2i8.nxv2i8(<vscale x 2 x i8> %vec, i64 %idx)

declare <2 x i32> @llvm.experimental.vector.extract.v2i32.nxv16i32(<vscale x 16 x i32> %vec, i64 %idx)
declare <8 x i32> @llvm.experimental.vector.extract.v8i32.nxv16i32(<vscale x 16 x i32> %vec, i64 %idx)
declare <2 x i32> @llvm.vector.extract.v2i32.nxv16i32(<vscale x 16 x i32> %vec, i64 %idx)
declare <8 x i32> @llvm.vector.extract.v8i32.nxv16i32(<vscale x 16 x i32> %vec, i64 %idx)

@ -18,7 +18,7 @@ define <vscale x 8 x i32> @insert_nxv8i32_v2i32_0(<vscale x 8 x i32> %vec, <2 x
; CHECK-NEXT: vslideup.vi v8, v12, 0
; CHECK-NEXT: ret
%sv = load <2 x i32>, <2 x i32>* %svp
%v = call <vscale x 8 x i32> @llvm.experimental.vector.insert.v2i32.nxv8i32(<vscale x 8 x i32> %vec, <2 x i32> %sv, i64 0)
%v = call <vscale x 8 x i32> @llvm.vector.insert.v2i32.nxv8i32(<vscale x 8 x i32> %vec, <2 x i32> %sv, i64 0)
ret <vscale x 8 x i32> %v
}
@ -31,7 +31,7 @@ define <vscale x 8 x i32> @insert_nxv8i32_v2i32_2(<vscale x 8 x i32> %vec, <2 x
|
|||
; CHECK-NEXT: vslideup.vi v8, v12, 2
|
||||
; CHECK-NEXT: ret
|
||||
%sv = load <2 x i32>, <2 x i32>* %svp
|
||||
%v = call <vscale x 8 x i32> @llvm.experimental.vector.insert.v2i32.nxv8i32(<vscale x 8 x i32> %vec, <2 x i32> %sv, i64 2)
|
||||
%v = call <vscale x 8 x i32> @llvm.vector.insert.v2i32.nxv8i32(<vscale x 8 x i32> %vec, <2 x i32> %sv, i64 2)
|
||||
ret <vscale x 8 x i32> %v
|
||||
}
|
||||
|
||||
|
@ -44,7 +44,7 @@ define <vscale x 8 x i32> @insert_nxv8i32_v2i32_6(<vscale x 8 x i32> %vec, <2 x
|
|||
; CHECK-NEXT: vslideup.vi v8, v12, 6
|
||||
; CHECK-NEXT: ret
|
||||
%sv = load <2 x i32>, <2 x i32>* %svp
|
||||
%v = call <vscale x 8 x i32> @llvm.experimental.vector.insert.v2i32.nxv8i32(<vscale x 8 x i32> %vec, <2 x i32> %sv, i64 6)
|
||||
%v = call <vscale x 8 x i32> @llvm.vector.insert.v2i32.nxv8i32(<vscale x 8 x i32> %vec, <2 x i32> %sv, i64 6)
|
||||
ret <vscale x 8 x i32> %v
|
||||
}
|
||||
|
||||
|
@ -69,7 +69,7 @@ define <vscale x 8 x i32> @insert_nxv8i32_v8i32_0(<vscale x 8 x i32> %vec, <8 x
|
|||
; LMULMAX1-NEXT: vslideup.vi v8, v16, 4
|
||||
; LMULMAX1-NEXT: ret
|
||||
%sv = load <8 x i32>, <8 x i32>* %svp
|
||||
%v = call <vscale x 8 x i32> @llvm.experimental.vector.insert.v8i32.nxv8i32(<vscale x 8 x i32> %vec, <8 x i32> %sv, i64 0)
|
||||
%v = call <vscale x 8 x i32> @llvm.vector.insert.v8i32.nxv8i32(<vscale x 8 x i32> %vec, <8 x i32> %sv, i64 0)
|
||||
ret <vscale x 8 x i32> %v
|
||||
}
|
||||
|
||||
|
@ -94,7 +94,7 @@ define <vscale x 8 x i32> @insert_nxv8i32_v8i32_8(<vscale x 8 x i32> %vec, <8 x
|
|||
; LMULMAX1-NEXT: vslideup.vi v8, v16, 12
|
||||
; LMULMAX1-NEXT: ret
|
||||
%sv = load <8 x i32>, <8 x i32>* %svp
|
||||
%v = call <vscale x 8 x i32> @llvm.experimental.vector.insert.v8i32.nxv8i32(<vscale x 8 x i32> %vec, <8 x i32> %sv, i64 8)
|
||||
%v = call <vscale x 8 x i32> @llvm.vector.insert.v8i32.nxv8i32(<vscale x 8 x i32> %vec, <8 x i32> %sv, i64 8)
|
||||
ret <vscale x 8 x i32> %v
|
||||
}
|
||||
|
||||
|
@ -105,7 +105,7 @@ define <vscale x 8 x i32> @insert_nxv8i32_undef_v2i32_0(<2 x i32>* %svp) {
|
|||
; CHECK-NEXT: vle32.v v8, (a0)
|
||||
; CHECK-NEXT: ret
|
||||
%sv = load <2 x i32>, <2 x i32>* %svp
|
||||
%v = call <vscale x 8 x i32> @llvm.experimental.vector.insert.v2i32.nxv8i32(<vscale x 8 x i32> undef, <2 x i32> %sv, i64 0)
|
||||
%v = call <vscale x 8 x i32> @llvm.vector.insert.v2i32.nxv8i32(<vscale x 8 x i32> undef, <2 x i32> %sv, i64 0)
|
||||
ret <vscale x 8 x i32> %v
|
||||
}
|
||||
|
||||
|
@ -123,7 +123,7 @@ define void @insert_v4i32_v2i32_0(<4 x i32>* %vp, <2 x i32>* %svp) {
|
|||
; CHECK-NEXT: ret
|
||||
%sv = load <2 x i32>, <2 x i32>* %svp
|
||||
%vec = load <4 x i32>, <4 x i32>* %vp
|
||||
%v = call <4 x i32> @llvm.experimental.vector.insert.v2i32.v4i32(<4 x i32> %vec, <2 x i32> %sv, i64 0)
|
||||
%v = call <4 x i32> @llvm.vector.insert.v2i32.v4i32(<4 x i32> %vec, <2 x i32> %sv, i64 0)
|
||||
store <4 x i32> %v, <4 x i32>* %vp
|
||||
ret void
|
||||
}
|
||||
|
@ -141,7 +141,7 @@ define void @insert_v4i32_v2i32_2(<4 x i32>* %vp, <2 x i32>* %svp) {
|
|||
; CHECK-NEXT: ret
|
||||
%sv = load <2 x i32>, <2 x i32>* %svp
|
||||
%vec = load <4 x i32>, <4 x i32>* %vp
|
||||
%v = call <4 x i32> @llvm.experimental.vector.insert.v2i32.v4i32(<4 x i32> %vec, <2 x i32> %sv, i64 2)
|
||||
%v = call <4 x i32> @llvm.vector.insert.v2i32.v4i32(<4 x i32> %vec, <2 x i32> %sv, i64 2)
|
||||
store <4 x i32> %v, <4 x i32>* %vp
|
||||
ret void
|
||||
}
|
||||
|
@ -155,7 +155,7 @@ define void @insert_v4i32_undef_v2i32_0(<4 x i32>* %vp, <2 x i32>* %svp) {
|
|||
; CHECK-NEXT: vse32.v v8, (a0)
|
||||
; CHECK-NEXT: ret
|
||||
%sv = load <2 x i32>, <2 x i32>* %svp
|
||||
%v = call <4 x i32> @llvm.experimental.vector.insert.v2i32.v4i32(<4 x i32> undef, <2 x i32> %sv, i64 0)
|
||||
%v = call <4 x i32> @llvm.vector.insert.v2i32.v4i32(<4 x i32> undef, <2 x i32> %sv, i64 0)
|
||||
store <4 x i32> %v, <4 x i32>* %vp
|
||||
ret void
|
||||
}
|
||||
|
@ -186,7 +186,7 @@ define void @insert_v8i32_v2i32_0(<8 x i32>* %vp, <2 x i32>* %svp) {
|
|||
; LMULMAX1-NEXT: ret
|
||||
%sv = load <2 x i32>, <2 x i32>* %svp
|
||||
%vec = load <8 x i32>, <8 x i32>* %vp
|
||||
%v = call <8 x i32> @llvm.experimental.vector.insert.v2i32.v8i32(<8 x i32> %vec, <2 x i32> %sv, i64 0)
|
||||
%v = call <8 x i32> @llvm.vector.insert.v2i32.v8i32(<8 x i32> %vec, <2 x i32> %sv, i64 0)
|
||||
store <8 x i32> %v, <8 x i32>* %vp
|
||||
ret void
|
||||
}
|
||||
|
@ -216,7 +216,7 @@ define void @insert_v8i32_v2i32_2(<8 x i32>* %vp, <2 x i32>* %svp) {
|
|||
; LMULMAX1-NEXT: ret
|
||||
%sv = load <2 x i32>, <2 x i32>* %svp
|
||||
%vec = load <8 x i32>, <8 x i32>* %vp
|
||||
%v = call <8 x i32> @llvm.experimental.vector.insert.v2i32.v8i32(<8 x i32> %vec, <2 x i32> %sv, i64 2)
|
||||
%v = call <8 x i32> @llvm.vector.insert.v2i32.v8i32(<8 x i32> %vec, <2 x i32> %sv, i64 2)
|
||||
store <8 x i32> %v, <8 x i32>* %vp
|
||||
ret void
|
||||
}
|
||||
|
@ -246,7 +246,7 @@ define void @insert_v8i32_v2i32_6(<8 x i32>* %vp, <2 x i32>* %svp) {
|
|||
; LMULMAX1-NEXT: ret
|
||||
%sv = load <2 x i32>, <2 x i32>* %svp
|
||||
%vec = load <8 x i32>, <8 x i32>* %vp
|
||||
%v = call <8 x i32> @llvm.experimental.vector.insert.v2i32.v8i32(<8 x i32> %vec, <2 x i32> %sv, i64 6)
|
||||
%v = call <8 x i32> @llvm.vector.insert.v2i32.v8i32(<8 x i32> %vec, <2 x i32> %sv, i64 6)
|
||||
store <8 x i32> %v, <8 x i32>* %vp
|
||||
ret void
|
||||
}
|
||||
|
@ -271,7 +271,7 @@ define void @insert_v8i32_undef_v2i32_6(<8 x i32>* %vp, <2 x i32>* %svp) {
|
|||
; LMULMAX1-NEXT: vse32.v v9, (a0)
|
||||
; LMULMAX1-NEXT: ret
|
||||
%sv = load <2 x i32>, <2 x i32>* %svp
|
||||
%v = call <8 x i32> @llvm.experimental.vector.insert.v2i32.v8i32(<8 x i32> undef, <2 x i32> %sv, i64 6)
|
||||
%v = call <8 x i32> @llvm.vector.insert.v2i32.v8i32(<8 x i32> undef, <2 x i32> %sv, i64 6)
|
||||
store <8 x i32> %v, <8 x i32>* %vp
|
||||
ret void
|
||||
}
|
||||
|
@ -290,7 +290,7 @@ define void @insert_v4i16_v2i16_0(<4 x i16>* %vp, <2 x i16>* %svp) {
|
|||
; CHECK-NEXT: ret
|
||||
%v = load <4 x i16>, <4 x i16>* %vp
|
||||
%sv = load <2 x i16>, <2 x i16>* %svp
|
||||
%c = call <4 x i16> @llvm.experimental.vector.insert.v2i16.v4i16(<4 x i16> %v, <2 x i16> %sv, i64 0)
|
||||
%c = call <4 x i16> @llvm.vector.insert.v2i16.v4i16(<4 x i16> %v, <2 x i16> %sv, i64 0)
|
||||
store <4 x i16> %c, <4 x i16>* %vp
|
||||
ret void
|
||||
}
|
||||
|
@ -308,7 +308,7 @@ define void @insert_v4i16_v2i16_2(<4 x i16>* %vp, <2 x i16>* %svp) {
|
|||
; CHECK-NEXT: ret
|
||||
%v = load <4 x i16>, <4 x i16>* %vp
|
||||
%sv = load <2 x i16>, <2 x i16>* %svp
|
||||
%c = call <4 x i16> @llvm.experimental.vector.insert.v2i16.v4i16(<4 x i16> %v, <2 x i16> %sv, i64 2)
|
||||
%c = call <4 x i16> @llvm.vector.insert.v2i16.v4i16(<4 x i16> %v, <2 x i16> %sv, i64 2)
|
||||
store <4 x i16> %c, <4 x i16>* %vp
|
||||
ret void
|
||||
}
|
||||
|
@ -340,7 +340,7 @@ define void @insert_v32i1_v8i1_0(<32 x i1>* %vp, <8 x i1>* %svp) {
|
|||
; LMULMAX1-NEXT: ret
|
||||
%v = load <32 x i1>, <32 x i1>* %vp
|
||||
%sv = load <8 x i1>, <8 x i1>* %svp
|
||||
%c = call <32 x i1> @llvm.experimental.vector.insert.v8i1.v32i1(<32 x i1> %v, <8 x i1> %sv, i64 0)
|
||||
%c = call <32 x i1> @llvm.vector.insert.v8i1.v32i1(<32 x i1> %v, <8 x i1> %sv, i64 0)
|
||||
store <32 x i1> %c, <32 x i1>* %vp
|
||||
ret void
|
||||
}
|
||||
|
@ -373,7 +373,7 @@ define void @insert_v32i1_v8i1_16(<32 x i1>* %vp, <8 x i1>* %svp) {
|
|||
; LMULMAX1-NEXT: ret
|
||||
%v = load <32 x i1>, <32 x i1>* %vp
|
||||
%sv = load <8 x i1>, <8 x i1>* %svp
|
||||
%c = call <32 x i1> @llvm.experimental.vector.insert.v8i1.v32i1(<32 x i1> %v, <8 x i1> %sv, i64 16)
|
||||
%c = call <32 x i1> @llvm.vector.insert.v8i1.v32i1(<32 x i1> %v, <8 x i1> %sv, i64 16)
|
||||
store <32 x i1> %c, <32 x i1>* %vp
|
||||
ret void
|
||||
}
|
||||
|
@ -400,7 +400,7 @@ define void @insert_v8i1_v4i1_0(<8 x i1>* %vp, <4 x i1>* %svp) {
|
|||
; CHECK-NEXT: ret
|
||||
%v = load <8 x i1>, <8 x i1>* %vp
|
||||
%sv = load <4 x i1>, <4 x i1>* %svp
|
||||
%c = call <8 x i1> @llvm.experimental.vector.insert.v4i1.v8i1(<8 x i1> %v, <4 x i1> %sv, i64 0)
|
||||
%c = call <8 x i1> @llvm.vector.insert.v4i1.v8i1(<8 x i1> %v, <4 x i1> %sv, i64 0)
|
||||
store <8 x i1> %c, <8 x i1>* %vp
|
||||
ret void
|
||||
}
|
||||
|
@ -427,7 +427,7 @@ define void @insert_v8i1_v4i1_4(<8 x i1>* %vp, <4 x i1>* %svp) {
|
|||
; CHECK-NEXT: ret
|
||||
%v = load <8 x i1>, <8 x i1>* %vp
|
||||
%sv = load <4 x i1>, <4 x i1>* %svp
|
||||
%c = call <8 x i1> @llvm.experimental.vector.insert.v4i1.v8i1(<8 x i1> %v, <4 x i1> %sv, i64 4)
|
||||
%c = call <8 x i1> @llvm.vector.insert.v4i1.v8i1(<8 x i1> %v, <4 x i1> %sv, i64 4)
|
||||
store <8 x i1> %c, <8 x i1>* %vp
|
||||
ret void
|
||||
}
|
||||
|
@ -441,7 +441,7 @@ define <vscale x 2 x i16> @insert_nxv2i16_v2i16_0(<vscale x 2 x i16> %v, <2 x i1
|
|||
; CHECK-NEXT: vslideup.vi v8, v9, 0
|
||||
; CHECK-NEXT: ret
|
||||
%sv = load <2 x i16>, <2 x i16>* %svp
|
||||
%c = call <vscale x 2 x i16> @llvm.experimental.vector.insert.v2i16.nxv2i16(<vscale x 2 x i16> %v, <2 x i16> %sv, i64 0)
|
||||
%c = call <vscale x 2 x i16> @llvm.vector.insert.v2i16.nxv2i16(<vscale x 2 x i16> %v, <2 x i16> %sv, i64 0)
|
||||
ret <vscale x 2 x i16> %c
|
||||
}
|
||||
|
||||
|
@ -454,7 +454,7 @@ define <vscale x 2 x i16> @insert_nxv2i16_v2i16_2(<vscale x 2 x i16> %v, <2 x i1
|
|||
; CHECK-NEXT: vslideup.vi v8, v9, 4
|
||||
; CHECK-NEXT: ret
|
||||
%sv = load <2 x i16>, <2 x i16>* %svp
|
||||
%c = call <vscale x 2 x i16> @llvm.experimental.vector.insert.v2i16.nxv2i16(<vscale x 2 x i16> %v, <2 x i16> %sv, i64 4)
|
||||
%c = call <vscale x 2 x i16> @llvm.vector.insert.v2i16.nxv2i16(<vscale x 2 x i16> %v, <2 x i16> %sv, i64 4)
|
||||
ret <vscale x 2 x i16> %c
|
||||
}
|
||||
|
||||
|
@ -476,7 +476,7 @@ define <vscale x 2 x i1> @insert_nxv2i1_v4i1_0(<vscale x 2 x i1> %v, <4 x i1>* %
|
|||
; CHECK-NEXT: vmsne.vi v0, v9, 0
|
||||
; CHECK-NEXT: ret
|
||||
%sv = load <4 x i1>, <4 x i1>* %svp
|
||||
%c = call <vscale x 2 x i1> @llvm.experimental.vector.insert.v4i1.nxv2i1(<vscale x 2 x i1> %v, <4 x i1> %sv, i64 0)
|
||||
%c = call <vscale x 2 x i1> @llvm.vector.insert.v4i1.nxv2i1(<vscale x 2 x i1> %v, <4 x i1> %sv, i64 0)
|
||||
ret <vscale x 2 x i1> %c
|
||||
}
|
||||
|
||||
|
@ -489,7 +489,7 @@ define <vscale x 8 x i1> @insert_nxv8i1_v4i1_0(<vscale x 8 x i1> %v, <8 x i1>* %
|
|||
; CHECK-NEXT: vslideup.vi v0, v8, 0
|
||||
; CHECK-NEXT: ret
|
||||
%sv = load <8 x i1>, <8 x i1>* %svp
|
||||
%c = call <vscale x 8 x i1> @llvm.experimental.vector.insert.v8i1.nxv8i1(<vscale x 8 x i1> %v, <8 x i1> %sv, i64 0)
|
||||
%c = call <vscale x 8 x i1> @llvm.vector.insert.v8i1.nxv8i1(<vscale x 8 x i1> %v, <8 x i1> %sv, i64 0)
|
||||
ret <vscale x 8 x i1> %c
|
||||
}
|
||||
|
||||
|
@ -502,11 +502,11 @@ define <vscale x 8 x i1> @insert_nxv8i1_v8i1_16(<vscale x 8 x i1> %v, <8 x i1>*
|
|||
; CHECK-NEXT: vslideup.vi v0, v8, 2
|
||||
; CHECK-NEXT: ret
|
||||
%sv = load <8 x i1>, <8 x i1>* %svp
|
||||
%c = call <vscale x 8 x i1> @llvm.experimental.vector.insert.v8i1.nxv8i1(<vscale x 8 x i1> %v, <8 x i1> %sv, i64 16)
|
||||
%c = call <vscale x 8 x i1> @llvm.vector.insert.v8i1.nxv8i1(<vscale x 8 x i1> %v, <8 x i1> %sv, i64 16)
|
||||
ret <vscale x 8 x i1> %c
|
||||
}

declare <vscale x 16 x i64> @llvm.experimental.vector.insert.v2i64.nxv16i64(<vscale x 16 x i64>, <2 x i64>, i64)
declare <vscale x 16 x i64> @llvm.vector.insert.v2i64.nxv16i64(<vscale x 16 x i64>, <2 x i64>, i64)

define void @insert_v2i64_nxv16i64(<2 x i64>* %psv0, <2 x i64>* %psv1, <vscale x 16 x i64>* %out) {
; CHECK-LABEL: insert_v2i64_nxv16i64:
@ -520,8 +520,8 @@ define void @insert_v2i64_nxv16i64(<2 x i64>* %psv0, <2 x i64>* %psv1, <vscale x
|
|||
; CHECK-NEXT: ret
|
||||
%sv0 = load <2 x i64>, <2 x i64>* %psv0
|
||||
%sv1 = load <2 x i64>, <2 x i64>* %psv1
|
||||
%v0 = call <vscale x 16 x i64> @llvm.experimental.vector.insert.v2i64.nxv16i64(<vscale x 16 x i64> undef, <2 x i64> %sv0, i64 0)
|
||||
%v = call <vscale x 16 x i64> @llvm.experimental.vector.insert.v2i64.nxv16i64(<vscale x 16 x i64> %v0, <2 x i64> %sv1, i64 4)
|
||||
%v0 = call <vscale x 16 x i64> @llvm.vector.insert.v2i64.nxv16i64(<vscale x 16 x i64> undef, <2 x i64> %sv0, i64 0)
|
||||
%v = call <vscale x 16 x i64> @llvm.vector.insert.v2i64.nxv16i64(<vscale x 16 x i64> %v0, <2 x i64> %sv1, i64 4)
|
||||
store <vscale x 16 x i64> %v, <vscale x 16 x i64>* %out
|
||||
ret void
|
||||
}
|
||||
|
@ -534,7 +534,7 @@ define void @insert_v2i64_nxv16i64_lo0(<2 x i64>* %psv, <vscale x 16 x i64>* %ou
|
|||
; CHECK-NEXT: vs8r.v v8, (a1)
|
||||
; CHECK-NEXT: ret
|
||||
%sv = load <2 x i64>, <2 x i64>* %psv
|
||||
%v = call <vscale x 16 x i64> @llvm.experimental.vector.insert.v2i64.nxv16i64(<vscale x 16 x i64> undef, <2 x i64> %sv, i64 0)
|
||||
%v = call <vscale x 16 x i64> @llvm.vector.insert.v2i64.nxv16i64(<vscale x 16 x i64> undef, <2 x i64> %sv, i64 0)
|
||||
store <vscale x 16 x i64> %v, <vscale x 16 x i64>* %out
|
||||
ret void
|
||||
}
|
||||
|
@ -549,7 +549,7 @@ define void @insert_v2i64_nxv16i64_lo2(<2 x i64>* %psv, <vscale x 16 x i64>* %ou
|
|||
; CHECK-NEXT: vs8r.v v16, (a1)
|
||||
; CHECK-NEXT: ret
|
||||
%sv = load <2 x i64>, <2 x i64>* %psv
|
||||
%v = call <vscale x 16 x i64> @llvm.experimental.vector.insert.v2i64.nxv16i64(<vscale x 16 x i64> undef, <2 x i64> %sv, i64 2)
|
||||
%v = call <vscale x 16 x i64> @llvm.vector.insert.v2i64.nxv16i64(<vscale x 16 x i64> undef, <2 x i64> %sv, i64 2)
|
||||
store <vscale x 16 x i64> %v, <vscale x 16 x i64>* %out
|
||||
ret void
|
||||
}
|
||||
|
@ -585,24 +585,24 @@ define void @insert_v2i64_nxv16i64_hi(<2 x i64>* %psv, <vscale x 16 x i64>* %out
|
|||
; CHECK-NEXT: addi sp, sp, 64
|
||||
; CHECK-NEXT: ret
|
||||
%sv = load <2 x i64>, <2 x i64>* %psv
|
||||
%v = call <vscale x 16 x i64> @llvm.experimental.vector.insert.v2i64.nxv16i64(<vscale x 16 x i64> undef, <2 x i64> %sv, i64 8)
|
||||
%v = call <vscale x 16 x i64> @llvm.vector.insert.v2i64.nxv16i64(<vscale x 16 x i64> undef, <2 x i64> %sv, i64 8)
|
||||
store <vscale x 16 x i64> %v, <vscale x 16 x i64>* %out
|
||||
ret void
|
||||
}

declare <8 x i1> @llvm.experimental.vector.insert.v4i1.v8i1(<8 x i1>, <4 x i1>, i64)
declare <32 x i1> @llvm.experimental.vector.insert.v8i1.v32i1(<32 x i1>, <8 x i1>, i64)
declare <8 x i1> @llvm.vector.insert.v4i1.v8i1(<8 x i1>, <4 x i1>, i64)
declare <32 x i1> @llvm.vector.insert.v8i1.v32i1(<32 x i1>, <8 x i1>, i64)

declare <4 x i16> @llvm.experimental.vector.insert.v2i16.v4i16(<4 x i16>, <2 x i16>, i64)
declare <4 x i16> @llvm.vector.insert.v2i16.v4i16(<4 x i16>, <2 x i16>, i64)

declare <4 x i32> @llvm.experimental.vector.insert.v2i32.v4i32(<4 x i32>, <2 x i32>, i64)
declare <8 x i32> @llvm.experimental.vector.insert.v2i32.v8i32(<8 x i32>, <2 x i32>, i64)
declare <4 x i32> @llvm.vector.insert.v2i32.v4i32(<4 x i32>, <2 x i32>, i64)
declare <8 x i32> @llvm.vector.insert.v2i32.v8i32(<8 x i32>, <2 x i32>, i64)

declare <vscale x 2 x i1> @llvm.experimental.vector.insert.v4i1.nxv2i1(<vscale x 2 x i1>, <4 x i1>, i64)
declare <vscale x 8 x i1> @llvm.experimental.vector.insert.v8i1.nxv8i1(<vscale x 8 x i1>, <8 x i1>, i64)
declare <vscale x 2 x i1> @llvm.vector.insert.v4i1.nxv2i1(<vscale x 2 x i1>, <4 x i1>, i64)
declare <vscale x 8 x i1> @llvm.vector.insert.v8i1.nxv8i1(<vscale x 8 x i1>, <8 x i1>, i64)

declare <vscale x 2 x i16> @llvm.experimental.vector.insert.v2i16.nxv2i16(<vscale x 2 x i16>, <2 x i16>, i64)
declare <vscale x 2 x i16> @llvm.vector.insert.v2i16.nxv2i16(<vscale x 2 x i16>, <2 x i16>, i64)

declare <vscale x 8 x i32> @llvm.experimental.vector.insert.v2i32.nxv8i32(<vscale x 8 x i32>, <2 x i32>, i64)
declare <vscale x 8 x i32> @llvm.experimental.vector.insert.v4i32.nxv8i32(<vscale x 8 x i32>, <4 x i32>, i64)
declare <vscale x 8 x i32> @llvm.experimental.vector.insert.v8i32.nxv8i32(<vscale x 8 x i32>, <8 x i32>, i64)
declare <vscale x 8 x i32> @llvm.vector.insert.v2i32.nxv8i32(<vscale x 8 x i32>, <2 x i32>, i64)
declare <vscale x 8 x i32> @llvm.vector.insert.v4i32.nxv8i32(<vscale x 8 x i32>, <4 x i32>, i64)
declare <vscale x 8 x i32> @llvm.vector.insert.v8i32.nxv8i32(<vscale x 8 x i32>, <8 x i32>, i64)

@ -7,7 +7,7 @@ define <vscale x 8 x i32> @insert_nxv8i32_nxv4i32_0(<vscale x 8 x i32> %vec, <vs
; CHECK: # %bb.0:
; CHECK-NEXT: vmv2r.v v8, v12
; CHECK-NEXT: ret
%v = call <vscale x 8 x i32> @llvm.experimental.vector.insert.nxv4i32.nxv8i32(<vscale x 8 x i32> %vec, <vscale x 4 x i32> %subvec, i64 0)
%v = call <vscale x 8 x i32> @llvm.vector.insert.nxv4i32.nxv8i32(<vscale x 8 x i32> %vec, <vscale x 4 x i32> %subvec, i64 0)
ret <vscale x 8 x i32> %v
}
@ -16,7 +16,7 @@ define <vscale x 8 x i32> @insert_nxv8i32_nxv4i32_4(<vscale x 8 x i32> %vec, <vs
|
|||
; CHECK: # %bb.0:
|
||||
; CHECK-NEXT: vmv2r.v v10, v12
|
||||
; CHECK-NEXT: ret
|
||||
%v = call <vscale x 8 x i32> @llvm.experimental.vector.insert.nxv4i32.nxv8i32(<vscale x 8 x i32> %vec, <vscale x 4 x i32> %subvec, i64 4)
|
||||
%v = call <vscale x 8 x i32> @llvm.vector.insert.nxv4i32.nxv8i32(<vscale x 8 x i32> %vec, <vscale x 4 x i32> %subvec, i64 4)
|
||||
ret <vscale x 8 x i32> %v
|
||||
}
|
||||
|
||||
|
@ -25,7 +25,7 @@ define <vscale x 8 x i32> @insert_nxv8i32_nxv2i32_0(<vscale x 8 x i32> %vec, <vs
|
|||
; CHECK: # %bb.0:
|
||||
; CHECK-NEXT: vmv1r.v v8, v12
|
||||
; CHECK-NEXT: ret
|
||||
%v = call <vscale x 8 x i32> @llvm.experimental.vector.insert.nxv2i32.nxv8i32(<vscale x 8 x i32> %vec, <vscale x 2 x i32> %subvec, i64 0)
|
||||
%v = call <vscale x 8 x i32> @llvm.vector.insert.nxv2i32.nxv8i32(<vscale x 8 x i32> %vec, <vscale x 2 x i32> %subvec, i64 0)
|
||||
ret <vscale x 8 x i32> %v
|
||||
}
|
||||
|
||||
|
@ -34,7 +34,7 @@ define <vscale x 8 x i32> @insert_nxv8i32_nxv2i32_2(<vscale x 8 x i32> %vec, <vs
|
|||
; CHECK: # %bb.0:
|
||||
; CHECK-NEXT: vmv1r.v v9, v12
|
||||
; CHECK-NEXT: ret
|
||||
%v = call <vscale x 8 x i32> @llvm.experimental.vector.insert.nxv2i32.nxv8i32(<vscale x 8 x i32> %vec, <vscale x 2 x i32> %subvec, i64 2)
|
||||
%v = call <vscale x 8 x i32> @llvm.vector.insert.nxv2i32.nxv8i32(<vscale x 8 x i32> %vec, <vscale x 2 x i32> %subvec, i64 2)
|
||||
ret <vscale x 8 x i32> %v
|
||||
}
|
||||
|
||||
|
@ -43,7 +43,7 @@ define <vscale x 8 x i32> @insert_nxv8i32_nxv2i32_4(<vscale x 8 x i32> %vec, <vs
|
|||
; CHECK: # %bb.0:
|
||||
; CHECK-NEXT: vmv1r.v v10, v12
|
||||
; CHECK-NEXT: ret
|
||||
%v = call <vscale x 8 x i32> @llvm.experimental.vector.insert.nxv2i32.nxv8i32(<vscale x 8 x i32> %vec, <vscale x 2 x i32> %subvec, i64 4)
|
||||
%v = call <vscale x 8 x i32> @llvm.vector.insert.nxv2i32.nxv8i32(<vscale x 8 x i32> %vec, <vscale x 2 x i32> %subvec, i64 4)
|
||||
ret <vscale x 8 x i32> %v
|
||||
}
|
||||
|
||||
|
@ -52,7 +52,7 @@ define <vscale x 8 x i32> @insert_nxv8i32_nxv2i32_6(<vscale x 8 x i32> %vec, <vs
|
|||
; CHECK: # %bb.0:
|
||||
; CHECK-NEXT: vmv1r.v v11, v12
|
||||
; CHECK-NEXT: ret
|
||||
%v = call <vscale x 8 x i32> @llvm.experimental.vector.insert.nxv2i32.nxv8i32(<vscale x 8 x i32> %vec, <vscale x 2 x i32> %subvec, i64 6)
|
||||
%v = call <vscale x 8 x i32> @llvm.vector.insert.nxv2i32.nxv8i32(<vscale x 8 x i32> %vec, <vscale x 2 x i32> %subvec, i64 6)
|
||||
ret <vscale x 8 x i32> %v
|
||||
}
|
||||
|
||||
|
@ -64,7 +64,7 @@ define <vscale x 4 x i8> @insert_nxv1i8_nxv4i8_0(<vscale x 4 x i8> %vec, <vscale
|
|||
; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, mu
|
||||
; CHECK-NEXT: vslideup.vi v8, v9, 0
|
||||
; CHECK-NEXT: ret
|
||||
%v = call <vscale x 4 x i8> @llvm.experimental.vector.insert.nxv1i8.nxv4i8(<vscale x 4 x i8> %vec, <vscale x 1 x i8> %subvec, i64 0)
|
||||
%v = call <vscale x 4 x i8> @llvm.vector.insert.nxv1i8.nxv4i8(<vscale x 4 x i8> %vec, <vscale x 1 x i8> %subvec, i64 0)
|
||||
ret <vscale x 4 x i8> %v
|
||||
}
|
||||
|
||||
|
@ -79,7 +79,7 @@ define <vscale x 4 x i8> @insert_nxv1i8_nxv4i8_3(<vscale x 4 x i8> %vec, <vscale
|
|||
; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, mu
|
||||
; CHECK-NEXT: vslideup.vx v8, v9, a1
|
||||
; CHECK-NEXT: ret
|
||||
%v = call <vscale x 4 x i8> @llvm.experimental.vector.insert.nxv1i8.nxv4i8(<vscale x 4 x i8> %vec, <vscale x 1 x i8> %subvec, i64 3)
|
||||
%v = call <vscale x 4 x i8> @llvm.vector.insert.nxv1i8.nxv4i8(<vscale x 4 x i8> %vec, <vscale x 1 x i8> %subvec, i64 3)
|
||||
ret <vscale x 4 x i8> %v
|
||||
}
|
||||
|
||||
|
@ -88,7 +88,7 @@ define <vscale x 16 x i32> @insert_nxv16i32_nxv8i32_0(<vscale x 16 x i32> %vec,
|
|||
; CHECK: # %bb.0:
|
||||
; CHECK-NEXT: vmv4r.v v8, v16
|
||||
; CHECK-NEXT: ret
|
||||
%v = call <vscale x 16 x i32> @llvm.experimental.vector.insert.nxv8i32.nxv16i32(<vscale x 16 x i32> %vec, <vscale x 8 x i32> %subvec, i64 0)
|
||||
%v = call <vscale x 16 x i32> @llvm.vector.insert.nxv8i32.nxv16i32(<vscale x 16 x i32> %vec, <vscale x 8 x i32> %subvec, i64 0)
|
||||
ret <vscale x 16 x i32> %v
|
||||
}
|
||||
|
||||
|
@ -97,7 +97,7 @@ define <vscale x 16 x i32> @insert_nxv16i32_nxv8i32_8(<vscale x 16 x i32> %vec,
|
|||
; CHECK: # %bb.0:
|
||||
; CHECK-NEXT: vmv4r.v v12, v16
|
||||
; CHECK-NEXT: ret
|
||||
%v = call <vscale x 16 x i32> @llvm.experimental.vector.insert.nxv8i32.nxv16i32(<vscale x 16 x i32> %vec, <vscale x 8 x i32> %subvec, i64 8)
|
||||
%v = call <vscale x 16 x i32> @llvm.vector.insert.nxv8i32.nxv16i32(<vscale x 16 x i32> %vec, <vscale x 8 x i32> %subvec, i64 8)
|
||||
ret <vscale x 16 x i32> %v
|
||||
}
|
||||
|
||||
|
@ -106,7 +106,7 @@ define <vscale x 16 x i32> @insert_nxv16i32_nxv4i32_0(<vscale x 16 x i32> %vec,
|
|||
; CHECK: # %bb.0:
|
||||
; CHECK-NEXT: vmv2r.v v8, v16
|
||||
; CHECK-NEXT: ret
|
||||
%v = call <vscale x 16 x i32> @llvm.experimental.vector.insert.nxv4i32.nxv16i32(<vscale x 16 x i32> %vec, <vscale x 4 x i32> %subvec, i64 0)
|
||||
%v = call <vscale x 16 x i32> @llvm.vector.insert.nxv4i32.nxv16i32(<vscale x 16 x i32> %vec, <vscale x 4 x i32> %subvec, i64 0)
|
||||
ret <vscale x 16 x i32> %v
|
||||
}
|
||||
|
||||
|
@ -115,7 +115,7 @@ define <vscale x 16 x i32> @insert_nxv16i32_nxv4i32_4(<vscale x 16 x i32> %vec,
|
|||
; CHECK: # %bb.0:
|
||||
; CHECK-NEXT: vmv2r.v v10, v16
|
||||
; CHECK-NEXT: ret
|
||||
%v = call <vscale x 16 x i32> @llvm.experimental.vector.insert.nxv4i32.nxv16i32(<vscale x 16 x i32> %vec, <vscale x 4 x i32> %subvec, i64 4)
|
||||
%v = call <vscale x 16 x i32> @llvm.vector.insert.nxv4i32.nxv16i32(<vscale x 16 x i32> %vec, <vscale x 4 x i32> %subvec, i64 4)
|
||||
ret <vscale x 16 x i32> %v
|
||||
}
|
||||
|
||||
|
@ -124,7 +124,7 @@ define <vscale x 16 x i32> @insert_nxv16i32_nxv4i32_8(<vscale x 16 x i32> %vec,
|
|||
; CHECK: # %bb.0:
|
||||
; CHECK-NEXT: vmv2r.v v12, v16
|
||||
; CHECK-NEXT: ret
|
||||
%v = call <vscale x 16 x i32> @llvm.experimental.vector.insert.nxv4i32.nxv16i32(<vscale x 16 x i32> %vec, <vscale x 4 x i32> %subvec, i64 8)
|
||||
%v = call <vscale x 16 x i32> @llvm.vector.insert.nxv4i32.nxv16i32(<vscale x 16 x i32> %vec, <vscale x 4 x i32> %subvec, i64 8)
|
||||
ret <vscale x 16 x i32> %v
|
||||
}
|
||||
|
||||
|
@ -133,7 +133,7 @@ define <vscale x 16 x i32> @insert_nxv16i32_nxv4i32_12(<vscale x 16 x i32> %vec,
|
|||
; CHECK: # %bb.0:
|
||||
; CHECK-NEXT: vmv2r.v v14, v16
|
||||
; CHECK-NEXT: ret
|
||||
%v = call <vscale x 16 x i32> @llvm.experimental.vector.insert.nxv4i32.nxv16i32(<vscale x 16 x i32> %vec, <vscale x 4 x i32> %subvec, i64 12)
|
||||
%v = call <vscale x 16 x i32> @llvm.vector.insert.nxv4i32.nxv16i32(<vscale x 16 x i32> %vec, <vscale x 4 x i32> %subvec, i64 12)
|
||||
ret <vscale x 16 x i32> %v
|
||||
}
|
||||
|
||||
|
@ -142,7 +142,7 @@ define <vscale x 16 x i32> @insert_nxv16i32_nxv2i32_0(<vscale x 16 x i32> %vec,
|
|||
; CHECK: # %bb.0:
|
||||
; CHECK-NEXT: vmv1r.v v8, v16
|
||||
; CHECK-NEXT: ret
|
||||
%v = call <vscale x 16 x i32> @llvm.experimental.vector.insert.nxv2i32.nxv16i32(<vscale x 16 x i32> %vec, <vscale x 2 x i32> %subvec, i64 0)
|
||||
%v = call <vscale x 16 x i32> @llvm.vector.insert.nxv2i32.nxv16i32(<vscale x 16 x i32> %vec, <vscale x 2 x i32> %subvec, i64 0)
|
||||
ret <vscale x 16 x i32> %v
|
||||
}
|
||||
|
||||
|
@ -151,7 +151,7 @@ define <vscale x 16 x i32> @insert_nxv16i32_nxv2i32_2(<vscale x 16 x i32> %vec,
|
|||
; CHECK: # %bb.0:
|
||||
; CHECK-NEXT: vmv1r.v v9, v16
; CHECK-NEXT: ret
%v = call <vscale x 16 x i32> @llvm.experimental.vector.insert.nxv2i32.nxv16i32(<vscale x 16 x i32> %vec, <vscale x 2 x i32> %subvec, i64 2)
%v = call <vscale x 16 x i32> @llvm.vector.insert.nxv2i32.nxv16i32(<vscale x 16 x i32> %vec, <vscale x 2 x i32> %subvec, i64 2)
ret <vscale x 16 x i32> %v
}

@@ -160,7 +160,7 @@ define <vscale x 16 x i32> @insert_nxv16i32_nxv2i32_4(<vscale x 16 x i32> %vec,
; CHECK: # %bb.0:
; CHECK-NEXT: vmv1r.v v10, v16
; CHECK-NEXT: ret
%v = call <vscale x 16 x i32> @llvm.experimental.vector.insert.nxv2i32.nxv16i32(<vscale x 16 x i32> %vec, <vscale x 2 x i32> %subvec, i64 4)
%v = call <vscale x 16 x i32> @llvm.vector.insert.nxv2i32.nxv16i32(<vscale x 16 x i32> %vec, <vscale x 2 x i32> %subvec, i64 4)
ret <vscale x 16 x i32> %v
}

@@ -169,7 +169,7 @@ define <vscale x 16 x i32> @insert_nxv16i32_nxv2i32_6(<vscale x 16 x i32> %vec,
; CHECK: # %bb.0:
; CHECK-NEXT: vmv1r.v v11, v16
; CHECK-NEXT: ret
%v = call <vscale x 16 x i32> @llvm.experimental.vector.insert.nxv2i32.nxv16i32(<vscale x 16 x i32> %vec, <vscale x 2 x i32> %subvec, i64 6)
%v = call <vscale x 16 x i32> @llvm.vector.insert.nxv2i32.nxv16i32(<vscale x 16 x i32> %vec, <vscale x 2 x i32> %subvec, i64 6)
ret <vscale x 16 x i32> %v
}

@@ -178,7 +178,7 @@ define <vscale x 16 x i32> @insert_nxv16i32_nxv2i32_8(<vscale x 16 x i32> %vec,
; CHECK: # %bb.0:
; CHECK-NEXT: vmv1r.v v12, v16
; CHECK-NEXT: ret
%v = call <vscale x 16 x i32> @llvm.experimental.vector.insert.nxv2i32.nxv16i32(<vscale x 16 x i32> %vec, <vscale x 2 x i32> %subvec, i64 8)
%v = call <vscale x 16 x i32> @llvm.vector.insert.nxv2i32.nxv16i32(<vscale x 16 x i32> %vec, <vscale x 2 x i32> %subvec, i64 8)
ret <vscale x 16 x i32> %v
}

@@ -187,7 +187,7 @@ define <vscale x 16 x i32> @insert_nxv16i32_nxv2i32_10(<vscale x 16 x i32> %vec,
; CHECK: # %bb.0:
; CHECK-NEXT: vmv1r.v v13, v16
; CHECK-NEXT: ret
%v = call <vscale x 16 x i32> @llvm.experimental.vector.insert.nxv2i32.nxv16i32(<vscale x 16 x i32> %vec, <vscale x 2 x i32> %subvec, i64 10)
%v = call <vscale x 16 x i32> @llvm.vector.insert.nxv2i32.nxv16i32(<vscale x 16 x i32> %vec, <vscale x 2 x i32> %subvec, i64 10)
ret <vscale x 16 x i32> %v
}

@@ -196,7 +196,7 @@ define <vscale x 16 x i32> @insert_nxv16i32_nxv2i32_12(<vscale x 16 x i32> %vec,
; CHECK: # %bb.0:
; CHECK-NEXT: vmv1r.v v14, v16
; CHECK-NEXT: ret
%v = call <vscale x 16 x i32> @llvm.experimental.vector.insert.nxv2i32.nxv16i32(<vscale x 16 x i32> %vec, <vscale x 2 x i32> %subvec, i64 12)
%v = call <vscale x 16 x i32> @llvm.vector.insert.nxv2i32.nxv16i32(<vscale x 16 x i32> %vec, <vscale x 2 x i32> %subvec, i64 12)
ret <vscale x 16 x i32> %v
}

@@ -205,7 +205,7 @@ define <vscale x 16 x i32> @insert_nxv16i32_nxv2i32_14(<vscale x 16 x i32> %vec,
; CHECK: # %bb.0:
; CHECK-NEXT: vmv1r.v v15, v16
; CHECK-NEXT: ret
%v = call <vscale x 16 x i32> @llvm.experimental.vector.insert.nxv2i32.nxv16i32(<vscale x 16 x i32> %vec, <vscale x 2 x i32> %subvec, i64 14)
%v = call <vscale x 16 x i32> @llvm.vector.insert.nxv2i32.nxv16i32(<vscale x 16 x i32> %vec, <vscale x 2 x i32> %subvec, i64 14)
ret <vscale x 16 x i32> %v
}

@@ -217,7 +217,7 @@ define <vscale x 16 x i32> @insert_nxv16i32_nxv1i32_0(<vscale x 16 x i32> %vec,
; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu
; CHECK-NEXT: vslideup.vi v8, v16, 0
; CHECK-NEXT: ret
%v = call <vscale x 16 x i32> @llvm.experimental.vector.insert.nxv1i32.nxv16i32(<vscale x 16 x i32> %vec, <vscale x 1 x i32> %subvec, i64 0)
%v = call <vscale x 16 x i32> @llvm.vector.insert.nxv1i32.nxv16i32(<vscale x 16 x i32> %vec, <vscale x 1 x i32> %subvec, i64 0)
ret <vscale x 16 x i32> %v
}

@@ -230,7 +230,7 @@ define <vscale x 16 x i32> @insert_nxv16i32_nxv1i32_1(<vscale x 16 x i32> %vec,
; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu
; CHECK-NEXT: vslideup.vx v8, v16, a0
; CHECK-NEXT: ret
%v = call <vscale x 16 x i32> @llvm.experimental.vector.insert.nxv1i32.nxv16i32(<vscale x 16 x i32> %vec, <vscale x 1 x i32> %subvec, i64 1)
%v = call <vscale x 16 x i32> @llvm.vector.insert.nxv1i32.nxv16i32(<vscale x 16 x i32> %vec, <vscale x 1 x i32> %subvec, i64 1)
ret <vscale x 16 x i32> %v
}

@@ -242,7 +242,7 @@ define <vscale x 16 x i32> @insert_nxv16i32_nxv1i32_6(<vscale x 16 x i32> %vec,
; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu
; CHECK-NEXT: vslideup.vi v11, v16, 0
; CHECK-NEXT: ret
%v = call <vscale x 16 x i32> @llvm.experimental.vector.insert.nxv1i32.nxv16i32(<vscale x 16 x i32> %vec, <vscale x 1 x i32> %subvec, i64 6)
%v = call <vscale x 16 x i32> @llvm.vector.insert.nxv1i32.nxv16i32(<vscale x 16 x i32> %vec, <vscale x 1 x i32> %subvec, i64 6)
ret <vscale x 16 x i32> %v
}

@@ -254,7 +254,7 @@ define <vscale x 16 x i8> @insert_nxv16i8_nxv1i8_0(<vscale x 16 x i8> %vec, <vsc
; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, mu
; CHECK-NEXT: vslideup.vi v8, v10, 0
; CHECK-NEXT: ret
%v = call <vscale x 16 x i8> @llvm.experimental.vector.insert.nxv1i8.nxv16i8(<vscale x 16 x i8> %vec, <vscale x 1 x i8> %subvec, i64 0)
%v = call <vscale x 16 x i8> @llvm.vector.insert.nxv1i8.nxv16i8(<vscale x 16 x i8> %vec, <vscale x 1 x i8> %subvec, i64 0)
ret <vscale x 16 x i8> %v
}

@@ -267,7 +267,7 @@ define <vscale x 16 x i8> @insert_nxv16i8_nxv1i8_1(<vscale x 16 x i8> %vec, <vsc
; CHECK-NEXT: vsetvli zero, a1, e8, m1, tu, mu
; CHECK-NEXT: vslideup.vx v8, v10, a0
; CHECK-NEXT: ret
%v = call <vscale x 16 x i8> @llvm.experimental.vector.insert.nxv1i8.nxv16i8(<vscale x 16 x i8> %vec, <vscale x 1 x i8> %subvec, i64 1)
%v = call <vscale x 16 x i8> @llvm.vector.insert.nxv1i8.nxv16i8(<vscale x 16 x i8> %vec, <vscale x 1 x i8> %subvec, i64 1)
ret <vscale x 16 x i8> %v
}

@@ -281,7 +281,7 @@ define <vscale x 16 x i8> @insert_nxv16i8_nxv1i8_2(<vscale x 16 x i8> %vec, <vsc
; CHECK-NEXT: vsetvli zero, a1, e8, m1, tu, mu
; CHECK-NEXT: vslideup.vx v8, v10, a0
; CHECK-NEXT: ret
%v = call <vscale x 16 x i8> @llvm.experimental.vector.insert.nxv1i8.nxv16i8(<vscale x 16 x i8> %vec, <vscale x 1 x i8> %subvec, i64 2)
%v = call <vscale x 16 x i8> @llvm.vector.insert.nxv1i8.nxv16i8(<vscale x 16 x i8> %vec, <vscale x 1 x i8> %subvec, i64 2)
ret <vscale x 16 x i8> %v
}

@@ -296,7 +296,7 @@ define <vscale x 16 x i8> @insert_nxv16i8_nxv1i8_3(<vscale x 16 x i8> %vec, <vsc
; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, mu
; CHECK-NEXT: vslideup.vx v8, v10, a1
; CHECK-NEXT: ret
%v = call <vscale x 16 x i8> @llvm.experimental.vector.insert.nxv1i8.nxv16i8(<vscale x 16 x i8> %vec, <vscale x 1 x i8> %subvec, i64 3)
%v = call <vscale x 16 x i8> @llvm.vector.insert.nxv1i8.nxv16i8(<vscale x 16 x i8> %vec, <vscale x 1 x i8> %subvec, i64 3)
ret <vscale x 16 x i8> %v
}

@@ -309,7 +309,7 @@ define <vscale x 16 x i8> @insert_nxv16i8_nxv1i8_7(<vscale x 16 x i8> %vec, <vsc
; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, mu
; CHECK-NEXT: vslideup.vx v8, v10, a1
; CHECK-NEXT: ret
%v = call <vscale x 16 x i8> @llvm.experimental.vector.insert.nxv1i8.nxv16i8(<vscale x 16 x i8> %vec, <vscale x 1 x i8> %subvec, i64 7)
%v = call <vscale x 16 x i8> @llvm.vector.insert.nxv1i8.nxv16i8(<vscale x 16 x i8> %vec, <vscale x 1 x i8> %subvec, i64 7)
ret <vscale x 16 x i8> %v
}

@@ -322,7 +322,7 @@ define <vscale x 16 x i8> @insert_nxv16i8_nxv1i8_15(<vscale x 16 x i8> %vec, <vs
; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, mu
; CHECK-NEXT: vslideup.vx v9, v10, a1
; CHECK-NEXT: ret
%v = call <vscale x 16 x i8> @llvm.experimental.vector.insert.nxv1i8.nxv16i8(<vscale x 16 x i8> %vec, <vscale x 1 x i8> %subvec, i64 15)
%v = call <vscale x 16 x i8> @llvm.vector.insert.nxv1i8.nxv16i8(<vscale x 16 x i8> %vec, <vscale x 1 x i8> %subvec, i64 15)
ret <vscale x 16 x i8> %v
}

@@ -334,7 +334,7 @@ define <vscale x 32 x half> @insert_nxv32f16_nxv2f16_0(<vscale x 32 x half> %vec
; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu
; CHECK-NEXT: vslideup.vi v8, v16, 0
; CHECK-NEXT: ret
%v = call <vscale x 32 x half> @llvm.experimental.vector.insert.nxv2f16.nxv32f16(<vscale x 32 x half> %vec, <vscale x 2 x half> %subvec, i64 0)
%v = call <vscale x 32 x half> @llvm.vector.insert.nxv2f16.nxv32f16(<vscale x 32 x half> %vec, <vscale x 2 x half> %subvec, i64 0)
ret <vscale x 32 x half> %v
}

@@ -347,7 +347,7 @@ define <vscale x 32 x half> @insert_nxv32f16_nxv2f16_2(<vscale x 32 x half> %vec
; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu
; CHECK-NEXT: vslideup.vx v8, v16, a0
; CHECK-NEXT: ret
%v = call <vscale x 32 x half> @llvm.experimental.vector.insert.nxv2f16.nxv32f16(<vscale x 32 x half> %vec, <vscale x 2 x half> %subvec, i64 2)
%v = call <vscale x 32 x half> @llvm.vector.insert.nxv2f16.nxv32f16(<vscale x 32 x half> %vec, <vscale x 2 x half> %subvec, i64 2)
ret <vscale x 32 x half> %v
}

@@ -360,7 +360,7 @@ define <vscale x 32 x half> @insert_nxv32f16_nxv2f16_26(<vscale x 32 x half> %ve
; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu
; CHECK-NEXT: vslideup.vx v14, v16, a0
; CHECK-NEXT: ret
%v = call <vscale x 32 x half> @llvm.experimental.vector.insert.nxv2f16.nxv32f16(<vscale x 32 x half> %vec, <vscale x 2 x half> %subvec, i64 26)
%v = call <vscale x 32 x half> @llvm.vector.insert.nxv2f16.nxv32f16(<vscale x 32 x half> %vec, <vscale x 2 x half> %subvec, i64 26)
ret <vscale x 32 x half> %v
}

@@ -368,7 +368,7 @@ define <vscale x 32 x half> @insert_nxv32f16_undef_nxv1f16_0(<vscale x 1 x half>
; CHECK-LABEL: insert_nxv32f16_undef_nxv1f16_0:
; CHECK: # %bb.0:
; CHECK-NEXT: ret
%v = call <vscale x 32 x half> @llvm.experimental.vector.insert.nxv1f16.nxv32f16(<vscale x 32 x half> undef, <vscale x 1 x half> %subvec, i64 0)
%v = call <vscale x 32 x half> @llvm.vector.insert.nxv1f16.nxv32f16(<vscale x 32 x half> undef, <vscale x 1 x half> %subvec, i64 0)
ret <vscale x 32 x half> %v
}

@@ -382,7 +382,7 @@ define <vscale x 32 x half> @insert_nxv32f16_undef_nxv1f16_26(<vscale x 1 x half
; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu
; CHECK-NEXT: vslideup.vx v14, v8, a0
; CHECK-NEXT: ret
%v = call <vscale x 32 x half> @llvm.experimental.vector.insert.nxv1f16.nxv32f16(<vscale x 32 x half> undef, <vscale x 1 x half> %subvec, i64 26)
%v = call <vscale x 32 x half> @llvm.vector.insert.nxv1f16.nxv32f16(<vscale x 32 x half> undef, <vscale x 1 x half> %subvec, i64 26)
ret <vscale x 32 x half> %v
}

@@ -394,7 +394,7 @@ define <vscale x 32 x i1> @insert_nxv32i1_nxv8i1_0(<vscale x 32 x i1> %v, <vscal
; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, mu
; CHECK-NEXT: vslideup.vi v0, v8, 0
; CHECK-NEXT: ret
%vec = call <vscale x 32 x i1> @llvm.experimental.vector.insert.nxv8i1.nxv32i1(<vscale x 32 x i1> %v, <vscale x 8 x i1> %sv, i64 0)
%vec = call <vscale x 32 x i1> @llvm.vector.insert.nxv8i1.nxv32i1(<vscale x 32 x i1> %v, <vscale x 8 x i1> %sv, i64 0)
ret <vscale x 32 x i1> %vec
}

@@ -407,7 +407,7 @@ define <vscale x 32 x i1> @insert_nxv32i1_nxv8i1_8(<vscale x 32 x i1> %v, <vscal
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, mu
; CHECK-NEXT: vslideup.vx v0, v8, a0
; CHECK-NEXT: ret
%vec = call <vscale x 32 x i1> @llvm.experimental.vector.insert.nxv8i1.nxv32i1(<vscale x 32 x i1> %v, <vscale x 8 x i1> %sv, i64 8)
%vec = call <vscale x 32 x i1> @llvm.vector.insert.nxv8i1.nxv32i1(<vscale x 32 x i1> %v, <vscale x 8 x i1> %sv, i64 8)
ret <vscale x 32 x i1> %vec
}

@@ -428,7 +428,7 @@ define <vscale x 4 x i1> @insert_nxv4i1_nxv1i1_0(<vscale x 4 x i1> %v, <vscale x
; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, mu
; CHECK-NEXT: vmsne.vi v0, v9, 0
; CHECK-NEXT: ret
%vec = call <vscale x 4 x i1> @llvm.experimental.vector.insert.nxv1i1.nxv4i1(<vscale x 4 x i1> %v, <vscale x 1 x i1> %sv, i64 0)
%vec = call <vscale x 4 x i1> @llvm.vector.insert.nxv1i1.nxv4i1(<vscale x 4 x i1> %v, <vscale x 1 x i1> %sv, i64 0)
ret <vscale x 4 x i1> %vec
}

@@ -451,11 +451,11 @@ define <vscale x 4 x i1> @insert_nxv4i1_nxv1i1_2(<vscale x 4 x i1> %v, <vscale x
; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, mu
; CHECK-NEXT: vmsne.vi v0, v9, 0
; CHECK-NEXT: ret
%vec = call <vscale x 4 x i1> @llvm.experimental.vector.insert.nxv1i1.nxv4i1(<vscale x 4 x i1> %v, <vscale x 1 x i1> %sv, i64 2)
%vec = call <vscale x 4 x i1> @llvm.vector.insert.nxv1i1.nxv4i1(<vscale x 4 x i1> %v, <vscale x 1 x i1> %sv, i64 2)
ret <vscale x 4 x i1> %vec
}

declare <vscale x 16 x i64> @llvm.experimental.vector.insert.nxv8i64.nxv16i64(<vscale x 16 x i64>, <vscale x 8 x i64>, i64)
declare <vscale x 16 x i64> @llvm.vector.insert.nxv8i64.nxv16i64(<vscale x 16 x i64>, <vscale x 8 x i64>, i64)

define void @insert_nxv8i64_nxv16i64(<vscale x 8 x i64> %sv0, <vscale x 8 x i64> %sv1, <vscale x 16 x i64>* %out) {
; CHECK-LABEL: insert_nxv8i64_nxv16i64:

@@ -466,8 +466,8 @@ define void @insert_nxv8i64_nxv16i64(<vscale x 8 x i64> %sv0, <vscale x 8 x i64>
; CHECK-NEXT: add a0, a0, a1
; CHECK-NEXT: vs8r.v v16, (a0)
; CHECK-NEXT: ret
%v0 = call <vscale x 16 x i64> @llvm.experimental.vector.insert.nxv8i64.nxv16i64(<vscale x 16 x i64> undef, <vscale x 8 x i64> %sv0, i64 0)
%v = call <vscale x 16 x i64> @llvm.experimental.vector.insert.nxv8i64.nxv16i64(<vscale x 16 x i64> %v0, <vscale x 8 x i64> %sv1, i64 8)
%v0 = call <vscale x 16 x i64> @llvm.vector.insert.nxv8i64.nxv16i64(<vscale x 16 x i64> undef, <vscale x 8 x i64> %sv0, i64 0)
%v = call <vscale x 16 x i64> @llvm.vector.insert.nxv8i64.nxv16i64(<vscale x 16 x i64> %v0, <vscale x 8 x i64> %sv1, i64 8)
store <vscale x 16 x i64> %v, <vscale x 16 x i64>* %out
ret void
}

@@ -477,7 +477,7 @@ define void @insert_nxv8i64_nxv16i64_lo(<vscale x 8 x i64> %sv0, <vscale x 16 x
; CHECK: # %bb.0:
; CHECK-NEXT: vs8r.v v8, (a0)
; CHECK-NEXT: ret
%v = call <vscale x 16 x i64> @llvm.experimental.vector.insert.nxv8i64.nxv16i64(<vscale x 16 x i64> undef, <vscale x 8 x i64> %sv0, i64 0)
%v = call <vscale x 16 x i64> @llvm.vector.insert.nxv8i64.nxv16i64(<vscale x 16 x i64> undef, <vscale x 8 x i64> %sv0, i64 0)
store <vscale x 16 x i64> %v, <vscale x 16 x i64>* %out
ret void
}

@@ -490,25 +490,25 @@ define void @insert_nxv8i64_nxv16i64_hi(<vscale x 8 x i64> %sv0, <vscale x 16 x
; CHECK-NEXT: add a0, a0, a1
; CHECK-NEXT: vs8r.v v8, (a0)
; CHECK-NEXT: ret
%v = call <vscale x 16 x i64> @llvm.experimental.vector.insert.nxv8i64.nxv16i64(<vscale x 16 x i64> undef, <vscale x 8 x i64> %sv0, i64 8)
%v = call <vscale x 16 x i64> @llvm.vector.insert.nxv8i64.nxv16i64(<vscale x 16 x i64> undef, <vscale x 8 x i64> %sv0, i64 8)
store <vscale x 16 x i64> %v, <vscale x 16 x i64>* %out
ret void
}

declare <vscale x 4 x i1> @llvm.experimental.vector.insert.nxv1i1.nxv4i1(<vscale x 4 x i1>, <vscale x 1 x i1>, i64)
declare <vscale x 32 x i1> @llvm.experimental.vector.insert.nxv8i1.nxv32i1(<vscale x 32 x i1>, <vscale x 8 x i1>, i64)
declare <vscale x 4 x i1> @llvm.vector.insert.nxv1i1.nxv4i1(<vscale x 4 x i1>, <vscale x 1 x i1>, i64)
declare <vscale x 32 x i1> @llvm.vector.insert.nxv8i1.nxv32i1(<vscale x 32 x i1>, <vscale x 8 x i1>, i64)

declare <vscale x 16 x i8> @llvm.experimental.vector.insert.nxv1i8.nxv16i8(<vscale x 16 x i8>, <vscale x 1 x i8>, i64)
declare <vscale x 16 x i8> @llvm.vector.insert.nxv1i8.nxv16i8(<vscale x 16 x i8>, <vscale x 1 x i8>, i64)

declare <vscale x 32 x half> @llvm.experimental.vector.insert.nxv1f16.nxv32f16(<vscale x 32 x half>, <vscale x 1 x half>, i64)
declare <vscale x 32 x half> @llvm.experimental.vector.insert.nxv2f16.nxv32f16(<vscale x 32 x half>, <vscale x 2 x half>, i64)
declare <vscale x 32 x half> @llvm.vector.insert.nxv1f16.nxv32f16(<vscale x 32 x half>, <vscale x 1 x half>, i64)
declare <vscale x 32 x half> @llvm.vector.insert.nxv2f16.nxv32f16(<vscale x 32 x half>, <vscale x 2 x half>, i64)

declare <vscale x 4 x i8> @llvm.experimental.vector.insert.nxv1i8.nxv4i8(<vscale x 4 x i8>, <vscale x 1 x i8>, i64 %idx)
declare <vscale x 4 x i8> @llvm.vector.insert.nxv1i8.nxv4i8(<vscale x 4 x i8>, <vscale x 1 x i8>, i64 %idx)

declare <vscale x 8 x i32> @llvm.experimental.vector.insert.nxv2i32.nxv8i32(<vscale x 8 x i32>, <vscale x 2 x i32>, i64 %idx)
declare <vscale x 8 x i32> @llvm.experimental.vector.insert.nxv4i32.nxv8i32(<vscale x 8 x i32>, <vscale x 4 x i32>, i64 %idx)
declare <vscale x 8 x i32> @llvm.vector.insert.nxv2i32.nxv8i32(<vscale x 8 x i32>, <vscale x 2 x i32>, i64 %idx)
declare <vscale x 8 x i32> @llvm.vector.insert.nxv4i32.nxv8i32(<vscale x 8 x i32>, <vscale x 4 x i32>, i64 %idx)

declare <vscale x 16 x i32> @llvm.experimental.vector.insert.nxv1i32.nxv16i32(<vscale x 16 x i32>, <vscale x 1 x i32>, i64 %idx)
declare <vscale x 16 x i32> @llvm.experimental.vector.insert.nxv2i32.nxv16i32(<vscale x 16 x i32>, <vscale x 2 x i32>, i64 %idx)
declare <vscale x 16 x i32> @llvm.experimental.vector.insert.nxv4i32.nxv16i32(<vscale x 16 x i32>, <vscale x 4 x i32>, i64 %idx)
declare <vscale x 16 x i32> @llvm.experimental.vector.insert.nxv8i32.nxv16i32(<vscale x 16 x i32>, <vscale x 8 x i32>, i64 %idx)
declare <vscale x 16 x i32> @llvm.vector.insert.nxv1i32.nxv16i32(<vscale x 16 x i32>, <vscale x 1 x i32>, i64 %idx)
declare <vscale x 16 x i32> @llvm.vector.insert.nxv2i32.nxv16i32(<vscale x 16 x i32>, <vscale x 2 x i32>, i64 %idx)
declare <vscale x 16 x i32> @llvm.vector.insert.nxv4i32.nxv16i32(<vscale x 16 x i32>, <vscale x 4 x i32>, i64 %idx)
declare <vscale x 16 x i32> @llvm.vector.insert.nxv8i32.nxv16i32(<vscale x 16 x i32>, <vscale x 8 x i32>, i64 %idx)
@@ -1266,8 +1266,8 @@ define <vscale x 8 x i64> @mgather_baseidx_nxv8i64(i64* %base, <vscale x 8 x i64
declare <vscale x 16 x i64> @llvm.masked.gather.nxv16i64.nxv16p0f64(<vscale x 16 x i64*>, i32, <vscale x 16 x i1>, <vscale x 16 x i64>)

declare <vscale x 16 x i64> @llvm.experimental.vector.insert.nxv8i64.nxv16i64(<vscale x 16 x i64>, <vscale x 8 x i64>, i64 %idx)
declare <vscale x 16 x i64*> @llvm.experimental.vector.insert.nxv8p0i64.nxv16p0i64(<vscale x 16 x i64*>, <vscale x 8 x i64*>, i64 %idx)
declare <vscale x 16 x i64> @llvm.vector.insert.nxv8i64.nxv16i64(<vscale x 16 x i64>, <vscale x 8 x i64>, i64 %idx)
declare <vscale x 16 x i64*> @llvm.vector.insert.nxv8p0i64.nxv16p0i64(<vscale x 16 x i64*>, <vscale x 8 x i64*>, i64 %idx)

define void @mgather_nxv16i64(<vscale x 8 x i64*> %ptrs0, <vscale x 8 x i64*> %ptrs1, <vscale x 16 x i1> %m, <vscale x 8 x i64> %passthru0, <vscale x 8 x i64> %passthru1, <vscale x 16 x i64>* %out) {
; RV32-LABEL: mgather_nxv16i64:

@@ -1318,11 +1318,11 @@ define void @mgather_nxv16i64(<vscale x 8 x i64*> %ptrs0, <vscale x 8 x i64*> %p
; RV64-NEXT: add sp, sp, a0
; RV64-NEXT: addi sp, sp, 16
; RV64-NEXT: ret
%p0 = call <vscale x 16 x i64*> @llvm.experimental.vector.insert.nxv8p0i64.nxv16p0i64(<vscale x 16 x i64*> undef, <vscale x 8 x i64*> %ptrs0, i64 0)
%p1 = call <vscale x 16 x i64*> @llvm.experimental.vector.insert.nxv8p0i64.nxv16p0i64(<vscale x 16 x i64*> %p0, <vscale x 8 x i64*> %ptrs1, i64 8)
%p0 = call <vscale x 16 x i64*> @llvm.vector.insert.nxv8p0i64.nxv16p0i64(<vscale x 16 x i64*> undef, <vscale x 8 x i64*> %ptrs0, i64 0)
%p1 = call <vscale x 16 x i64*> @llvm.vector.insert.nxv8p0i64.nxv16p0i64(<vscale x 16 x i64*> %p0, <vscale x 8 x i64*> %ptrs1, i64 8)

%pt0 = call <vscale x 16 x i64> @llvm.experimental.vector.insert.nxv8i64.nxv16i64(<vscale x 16 x i64> undef, <vscale x 8 x i64> %passthru0, i64 0)
%pt1 = call <vscale x 16 x i64> @llvm.experimental.vector.insert.nxv8i64.nxv16i64(<vscale x 16 x i64> %pt0, <vscale x 8 x i64> %passthru1, i64 8)
%pt0 = call <vscale x 16 x i64> @llvm.vector.insert.nxv8i64.nxv16i64(<vscale x 16 x i64> undef, <vscale x 8 x i64> %passthru0, i64 0)
%pt1 = call <vscale x 16 x i64> @llvm.vector.insert.nxv8i64.nxv16i64(<vscale x 16 x i64> %pt0, <vscale x 8 x i64> %passthru1, i64 8)

%v = call <vscale x 16 x i64> @llvm.masked.gather.nxv16i64.nxv16p0f64(<vscale x 16 x i64*> %p1, i32 8, <vscale x 16 x i1> %m, <vscale x 16 x i64> %pt1)
store <vscale x 16 x i64> %v, <vscale x 16 x i64>* %out
@@ -1819,8 +1819,8 @@ define void @mscatter_baseidx_nxv8f64(<vscale x 8 x double> %val, double* %base,
declare void @llvm.masked.scatter.nxv16f64.nxv16p0f64(<vscale x 16 x double>, <vscale x 16 x double*>, i32, <vscale x 16 x i1>)

declare <vscale x 16 x double> @llvm.experimental.vector.insert.nxv8f64.nxv16f64(<vscale x 16 x double>, <vscale x 8 x double>, i64)
declare <vscale x 16 x double*> @llvm.experimental.vector.insert.nxv8p0f64.nxv16p0f64(<vscale x 16 x double*>, <vscale x 8 x double*>, i64)
declare <vscale x 16 x double> @llvm.vector.insert.nxv8f64.nxv16f64(<vscale x 16 x double>, <vscale x 8 x double>, i64)
declare <vscale x 16 x double*> @llvm.vector.insert.nxv8p0f64.nxv16p0f64(<vscale x 16 x double*>, <vscale x 8 x double*>, i64)

define void @mscatter_nxv16f64(<vscale x 8 x double> %val0, <vscale x 8 x double> %val1, <vscale x 8 x double*> %ptrs0, <vscale x 8 x double*> %ptrs1, <vscale x 16 x i1> %m) {
; RV32-LABEL: mscatter_nxv16f64:

@@ -1863,10 +1863,10 @@ define void @mscatter_nxv16f64(<vscale x 8 x double> %val0, <vscale x 8 x double
; RV64-NEXT: add sp, sp, a0
; RV64-NEXT: addi sp, sp, 16
; RV64-NEXT: ret
%p0 = call <vscale x 16 x double*> @llvm.experimental.vector.insert.nxv8p0f64.nxv16p0f64(<vscale x 16 x double*> undef, <vscale x 8 x double*> %ptrs0, i64 0)
%p1 = call <vscale x 16 x double*> @llvm.experimental.vector.insert.nxv8p0f64.nxv16p0f64(<vscale x 16 x double*> %p0, <vscale x 8 x double*> %ptrs1, i64 8)
%v0 = call <vscale x 16 x double> @llvm.experimental.vector.insert.nxv8f64.nxv16f64(<vscale x 16 x double> undef, <vscale x 8 x double> %val0, i64 0)
%v1 = call <vscale x 16 x double> @llvm.experimental.vector.insert.nxv8f64.nxv16f64(<vscale x 16 x double> %v0, <vscale x 8 x double> %val1, i64 8)
%p0 = call <vscale x 16 x double*> @llvm.vector.insert.nxv8p0f64.nxv16p0f64(<vscale x 16 x double*> undef, <vscale x 8 x double*> %ptrs0, i64 0)
%p1 = call <vscale x 16 x double*> @llvm.vector.insert.nxv8p0f64.nxv16p0f64(<vscale x 16 x double*> %p0, <vscale x 8 x double*> %ptrs1, i64 8)
%v0 = call <vscale x 16 x double> @llvm.vector.insert.nxv8f64.nxv16f64(<vscale x 16 x double> undef, <vscale x 8 x double> %val0, i64 0)
%v1 = call <vscale x 16 x double> @llvm.vector.insert.nxv8f64.nxv16f64(<vscale x 16 x double> %v0, <vscale x 8 x double> %val1, i64 8)
call void @llvm.masked.scatter.nxv16f64.nxv16p0f64(<vscale x 16 x double> %v1, <vscale x 16 x double*> %p1, i32 8, <vscale x 16 x i1> %m)
ret void
}

@@ -1905,8 +1905,8 @@ define void @mscatter_baseidx_nxv16i8_nxv16f64(<vscale x 8 x double> %val0, <vsc
; RV64-NEXT: vsoxei64.v v16, (a0), v8, v0.t
; RV64-NEXT: ret
%ptrs = getelementptr inbounds double, double* %base, <vscale x 16 x i8> %idxs
%v0 = call <vscale x 16 x double> @llvm.experimental.vector.insert.nxv8f64.nxv16f64(<vscale x 16 x double> undef, <vscale x 8 x double> %val0, i64 0)
%v1 = call <vscale x 16 x double> @llvm.experimental.vector.insert.nxv8f64.nxv16f64(<vscale x 16 x double> %v0, <vscale x 8 x double> %val1, i64 8)
%v0 = call <vscale x 16 x double> @llvm.vector.insert.nxv8f64.nxv16f64(<vscale x 16 x double> undef, <vscale x 8 x double> %val0, i64 0)
%v1 = call <vscale x 16 x double> @llvm.vector.insert.nxv8f64.nxv16f64(<vscale x 16 x double> %v0, <vscale x 8 x double> %val1, i64 8)
call void @llvm.masked.scatter.nxv16f64.nxv16p0f64(<vscale x 16 x double> %v1, <vscale x 16 x double*> %ptrs, i32 8, <vscale x 16 x i1> %m)
ret void
}

@@ -1945,8 +1945,8 @@ define void @mscatter_baseidx_nxv16i16_nxv16f64(<vscale x 8 x double> %val0, <vs
; RV64-NEXT: vsoxei64.v v16, (a0), v8, v0.t
; RV64-NEXT: ret
%ptrs = getelementptr inbounds double, double* %base, <vscale x 16 x i16> %idxs
%v0 = call <vscale x 16 x double> @llvm.experimental.vector.insert.nxv8f64.nxv16f64(<vscale x 16 x double> undef, <vscale x 8 x double> %val0, i64 0)
%v1 = call <vscale x 16 x double> @llvm.experimental.vector.insert.nxv8f64.nxv16f64(<vscale x 16 x double> %v0, <vscale x 8 x double> %val1, i64 8)
%v0 = call <vscale x 16 x double> @llvm.vector.insert.nxv8f64.nxv16f64(<vscale x 16 x double> undef, <vscale x 8 x double> %val0, i64 0)
%v1 = call <vscale x 16 x double> @llvm.vector.insert.nxv8f64.nxv16f64(<vscale x 16 x double> %v0, <vscale x 8 x double> %val1, i64 8)
call void @llvm.masked.scatter.nxv16f64.nxv16p0f64(<vscale x 16 x double> %v1, <vscale x 16 x double*> %ptrs, i32 8, <vscale x 16 x i1> %m)
ret void
}
@@ -482,8 +482,8 @@ define <vscale x 16 x double> @vpload_nxv16f64(<vscale x 16 x double>* %ptr, <vs
declare <vscale x 17 x double> @llvm.vp.load.nxv17f64.p0nxv17f64(<vscale x 17 x double>*, <vscale x 17 x i1>, i32)

declare <vscale x 1 x double> @llvm.experimental.vector.extract.nxv1f64(<vscale x 17 x double> %vec, i64 %idx)
declare <vscale x 16 x double> @llvm.experimental.vector.extract.nxv16f64(<vscale x 17 x double> %vec, i64 %idx)
declare <vscale x 1 x double> @llvm.vector.extract.nxv1f64(<vscale x 17 x double> %vec, i64 %idx)
declare <vscale x 16 x double> @llvm.vector.extract.nxv16f64(<vscale x 17 x double> %vec, i64 %idx)

; Note: We can't return <vscale x 17 x double> as that introduces a vector
; store can't yet be legalized through widening. In order to test purely the

@@ -542,8 +542,8 @@ define <vscale x 16 x double> @vpload_nxv17f64(<vscale x 17 x double>* %ptr, <vs
; CHECK-NEXT: vs1r.v v24, (a1)
; CHECK-NEXT: ret
%load = call <vscale x 17 x double> @llvm.vp.load.nxv17f64.p0nxv17f64(<vscale x 17 x double>* %ptr, <vscale x 17 x i1> %m, i32 %evl)
%lo = call <vscale x 16 x double> @llvm.experimental.vector.extract.nxv16f64(<vscale x 17 x double> %load, i64 0)
%hi = call <vscale x 1 x double> @llvm.experimental.vector.extract.nxv1f64(<vscale x 17 x double> %load, i64 16)
%lo = call <vscale x 16 x double> @llvm.vector.extract.nxv16f64(<vscale x 17 x double> %load, i64 0)
%hi = call <vscale x 1 x double> @llvm.vector.extract.nxv1f64(<vscale x 17 x double> %load, i64 16)
store <vscale x 1 x double> %hi, <vscale x 1 x double>* %out
ret <vscale x 16 x double> %lo
}
@@ -8,7 +8,7 @@ define <vscale x 16 x i1> @dupq_b_0() #0 {
; CHECK-LABEL: @dupq_b_0(
; CHECK: ret <vscale x 16 x i1> zeroinitializer
%1 = tail call <vscale x 16 x i1> @llvm.aarch64.sve.ptrue.nxv16i1(i32 31)
%2 = tail call <vscale x 16 x i8> @llvm.experimental.vector.insert.nxv16i8.v16i8(<vscale x 16 x i8> undef,
%2 = tail call <vscale x 16 x i8> @llvm.vector.insert.nxv16i8.v16i8(<vscale x 16 x i8> undef,
<16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0,
i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, i64 0)
%3 = tail call <vscale x 16 x i8> @llvm.aarch64.sve.dupq.lane.nxv16i8(<vscale x 16 x i8> %2 , i64 0)

@@ -23,7 +23,7 @@ define <vscale x 16 x i1> @dupq_b_d() #0 {
; CHECK-NEXT: %2 = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv2i1(<vscale x 2 x i1> %1)
; CHECK-NEXT: ret <vscale x 16 x i1> %2
%1 = tail call <vscale x 16 x i1> @llvm.aarch64.sve.ptrue.nxv16i1(i32 31)
%2 = tail call <vscale x 16 x i8> @llvm.experimental.vector.insert.nxv16i8.v16i8(<vscale x 16 x i8> undef,
%2 = tail call <vscale x 16 x i8> @llvm.vector.insert.nxv16i8.v16i8(<vscale x 16 x i8> undef,
<16 x i8> <i8 1, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0,
i8 1, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, i64 0)
%3 = tail call <vscale x 16 x i8> @llvm.aarch64.sve.dupq.lane.nxv16i8(<vscale x 16 x i8> %2 , i64 0)

@@ -38,7 +38,7 @@ define <vscale x 16 x i1> @dupq_b_w() #0 {
; CHECK-NEXT: %2 = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv4i1(<vscale x 4 x i1> %1)
; CHECK-NEXT: ret <vscale x 16 x i1> %2
%1 = tail call <vscale x 16 x i1> @llvm.aarch64.sve.ptrue.nxv16i1(i32 31)
%2 = tail call <vscale x 16 x i8> @llvm.experimental.vector.insert.nxv16i8.v16i8(<vscale x 16 x i8> undef,
%2 = tail call <vscale x 16 x i8> @llvm.vector.insert.nxv16i8.v16i8(<vscale x 16 x i8> undef,
<16 x i8> <i8 1, i8 0, i8 0, i8 0, i8 1, i8 0, i8 0, i8 0,
i8 1, i8 0, i8 0, i8 0, i8 1, i8 0, i8 0, i8 0>, i64 0)
%3 = tail call <vscale x 16 x i8> @llvm.aarch64.sve.dupq.lane.nxv16i8(<vscale x 16 x i8> %2 , i64 0)

@@ -53,7 +53,7 @@ define <vscale x 16 x i1> @dupq_b_h() #0 {
; CHECK-NEXT: %2 = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv8i1(<vscale x 8 x i1> %1)
; CHECK-NEXT: ret <vscale x 16 x i1> %2
%1 = tail call <vscale x 16 x i1> @llvm.aarch64.sve.ptrue.nxv16i1(i32 31)
%2 = tail call <vscale x 16 x i8> @llvm.experimental.vector.insert.nxv16i8.v16i8(<vscale x 16 x i8> undef,
%2 = tail call <vscale x 16 x i8> @llvm.vector.insert.nxv16i8.v16i8(<vscale x 16 x i8> undef,
<16 x i8> <i8 1, i8 0, i8 1, i8 0, i8 1, i8 0, i8 1, i8 0,
i8 1, i8 0, i8 1, i8 0, i8 1, i8 0, i8 1, i8 0>, i64 0)
%3 = tail call <vscale x 16 x i8> @llvm.aarch64.sve.dupq.lane.nxv16i8(<vscale x 16 x i8> %2 , i64 0)

@@ -67,7 +67,7 @@ define <vscale x 16 x i1> @dupq_b_b() #0 {
; CHECK: %1 = call <vscale x 16 x i1> @llvm.aarch64.sve.ptrue.nxv16i1(i32 31)
; CHECK-NEXT: ret <vscale x 16 x i1> %1
%1 = tail call <vscale x 16 x i1> @llvm.aarch64.sve.ptrue.nxv16i1(i32 31)
%2 = tail call <vscale x 16 x i8> @llvm.experimental.vector.insert.nxv16i8.v16i8(<vscale x 16 x i8> undef,
%2 = tail call <vscale x 16 x i8> @llvm.vector.insert.nxv16i8.v16i8(<vscale x 16 x i8> undef,
<16 x i8> <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1,
i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>, i64 0)
%3 = tail call <vscale x 16 x i8> @llvm.aarch64.sve.dupq.lane.nxv16i8(<vscale x 16 x i8> %2 , i64 0)

@@ -82,7 +82,7 @@ define <vscale x 8 x i1> @dupq_h_0() #0 {
; CHECK-LABEL: @dupq_h_0(
; CHECK: ret <vscale x 8 x i1> zeroinitializer
%1 = tail call <vscale x 8 x i1> @llvm.aarch64.sve.ptrue.nxv8i1(i32 31)
%2 = tail call <vscale x 8 x i16> @llvm.experimental.vector.insert.nxv8i16.v8i16(<vscale x 8 x i16> undef,
%2 = tail call <vscale x 8 x i16> @llvm.vector.insert.nxv8i16.v8i16(<vscale x 8 x i16> undef,
<8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, i64 0)
%3 = tail call <vscale x 8 x i16> @llvm.aarch64.sve.dupq.lane.nxv8i16(<vscale x 8 x i16> %2 , i64 0)
%4 = tail call <vscale x 2 x i64> @llvm.aarch64.sve.dup.x.nxv2i64(i64 0)

@@ -97,7 +97,7 @@ define <vscale x 8 x i1> @dupq_h_d() #0 {
; CHECK-NEXT: %3 = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %2)
; CHECK-NEXT: ret <vscale x 8 x i1> %3
%1 = tail call <vscale x 8 x i1> @llvm.aarch64.sve.ptrue.nxv8i1(i32 31)
%2 = tail call <vscale x 8 x i16> @llvm.experimental.vector.insert.nxv8i16.v8i16(<vscale x 8 x i16> undef,
%2 = tail call <vscale x 8 x i16> @llvm.vector.insert.nxv8i16.v8i16(<vscale x 8 x i16> undef,
<8 x i16> <i16 1, i16 0, i16 0, i16 0, i16 1, i16 0, i16 0, i16 0>, i64 0)
%3 = tail call <vscale x 8 x i16> @llvm.aarch64.sve.dupq.lane.nxv8i16(<vscale x 8 x i16> %2 , i64 0)
%4 = tail call <vscale x 2 x i64> @llvm.aarch64.sve.dup.x.nxv2i64(i64 0)

@@ -112,7 +112,7 @@ define <vscale x 8 x i1> @dupq_h_w() #0 {
; CHECK-NEXT: %3 = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %2)
; CHECK-NEXT: ret <vscale x 8 x i1> %3
%1 = tail call <vscale x 8 x i1> @llvm.aarch64.sve.ptrue.nxv8i1(i32 31)
%2 = tail call <vscale x 8 x i16> @llvm.experimental.vector.insert.nxv8i16.v8i16(<vscale x 8 x i16> undef,
%2 = tail call <vscale x 8 x i16> @llvm.vector.insert.nxv8i16.v8i16(<vscale x 8 x i16> undef,
<8 x i16> <i16 1, i16 0, i16 1, i16 0, i16 1, i16 0, i16 1, i16 0>, i64 0)
%3 = tail call <vscale x 8 x i16> @llvm.aarch64.sve.dupq.lane.nxv8i16(<vscale x 8 x i16> %2 , i64 0)
%4 = tail call <vscale x 2 x i64> @llvm.aarch64.sve.dup.x.nxv2i64(i64 0)

@@ -125,7 +125,7 @@ define <vscale x 8 x i1> @dupq_h_h() #0 {
; CHECK: %1 = call <vscale x 8 x i1> @llvm.aarch64.sve.ptrue.nxv8i1(i32 31)
; CHECK-NEXT: ret <vscale x 8 x i1> %1
%1 = tail call <vscale x 8 x i1> @llvm.aarch64.sve.ptrue.nxv8i1(i32 31)
%2 = tail call <vscale x 8 x i16> @llvm.experimental.vector.insert.nxv8i16.v8i16(<vscale x 8 x i16> undef,
%2 = tail call <vscale x 8 x i16> @llvm.vector.insert.nxv8i16.v8i16(<vscale x 8 x i16> undef,
<8 x i16> <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>, i64 0)
%3 = tail call <vscale x 8 x i16> @llvm.aarch64.sve.dupq.lane.nxv8i16(<vscale x 8 x i16> %2 , i64 0)
%4 = tail call <vscale x 2 x i64> @llvm.aarch64.sve.dup.x.nxv2i64(i64 0)

@@ -139,7 +139,7 @@ define <vscale x 4 x i1> @dupq_w_0() #0 {
; CHECK-LABEL: @dupq_w_0(
; CHECK: ret <vscale x 4 x i1> zeroinitializer
%1 = tail call <vscale x 4 x i1> @llvm.aarch64.sve.ptrue.nxv4i1(i32 31)
%2 = tail call <vscale x 4 x i32> @llvm.experimental.vector.insert.nxv4i32.v4i32(<vscale x 4 x i32> undef,
%2 = tail call <vscale x 4 x i32> @llvm.vector.insert.nxv4i32.v4i32(<vscale x 4 x i32> undef,
<4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0)
%3 = tail call <vscale x 4 x i32> @llvm.aarch64.sve.dupq.lane.nxv4i32(<vscale x 4 x i32> %2 , i64 0)
%4 = tail call <vscale x 2 x i64> @llvm.aarch64.sve.dup.x.nxv2i64(i64 0)

@@ -154,7 +154,7 @@ define <vscale x 4 x i1> @dupq_w_d() #0 {
; CHECK-NEXT: %3 = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %2)
; CHECK-NEXT: ret <vscale x 4 x i1> %3
%1 = tail call <vscale x 4 x i1> @llvm.aarch64.sve.ptrue.nxv4i1(i32 31)
%2 = tail call <vscale x 4 x i32> @llvm.experimental.vector.insert.nxv4i32.v4i32(<vscale x 4 x i32> undef,
%2 = tail call <vscale x 4 x i32> @llvm.vector.insert.nxv4i32.v4i32(<vscale x 4 x i32> undef,
<4 x i32> <i32 1, i32 0, i32 1, i32 0>, i64 0)
%3 = tail call <vscale x 4 x i32> @llvm.aarch64.sve.dupq.lane.nxv4i32(<vscale x 4 x i32> %2 , i64 0)
%4 = tail call <vscale x 2 x i64> @llvm.aarch64.sve.dup.x.nxv2i64(i64 0)

@@ -167,7 +167,7 @@ define <vscale x 4 x i1> @dupq_w_w() #0 {
; CHECK: %1 = call <vscale x 4 x i1> @llvm.aarch64.sve.ptrue.nxv4i1(i32 31)
; CHECK-NEXT: ret <vscale x 4 x i1> %1
%1 = tail call <vscale x 4 x i1> @llvm.aarch64.sve.ptrue.nxv4i1(i32 31)
%2 = tail call <vscale x 4 x i32> @llvm.experimental.vector.insert.nxv4i32.v4i32(<vscale x 4 x i32> undef,
%2 = tail call <vscale x 4 x i32> @llvm.vector.insert.nxv4i32.v4i32(<vscale x 4 x i32> undef,
<4 x i32> <i32 1, i32 1, i32 1, i32 1>, i64 0)
%3 = tail call <vscale x 4 x i32> @llvm.aarch64.sve.dupq.lane.nxv4i32(<vscale x 4 x i32> %2 , i64 0)
%4 = tail call <vscale x 2 x i64> @llvm.aarch64.sve.dup.x.nxv2i64(i64 0)

@@ -181,7 +181,7 @@ define <vscale x 2 x i1> @dupq_d_0() #0 {
; CHECK-LABEL: @dupq_d_0(
; CHECK: ret <vscale x 2 x i1> zeroinitializer
%1 = tail call <vscale x 2 x i1> @llvm.aarch64.sve.ptrue.nxv2i1(i32 31)
%2 = tail call <vscale x 2 x i64> @llvm.experimental.vector.insert.nxv2i64.v2i64(<vscale x 2 x i64> undef,
%2 = tail call <vscale x 2 x i64> @llvm.vector.insert.nxv2i64.v2i64(<vscale x 2 x i64> undef,
<2 x i64> <i64 0, i64 0>, i64 0)
%3 = tail call <vscale x 2 x i64> @llvm.aarch64.sve.dupq.lane.nxv2i64(<vscale x 2 x i64> %2 , i64 0)
%4 = tail call <vscale x 2 x i64> @llvm.aarch64.sve.dup.x.nxv2i64(i64 0)

@@ -194,7 +194,7 @@ define <vscale x 2 x i1> @dupq_d_d() #0 {
; CHECK: %1 = call <vscale x 2 x i1> @llvm.aarch64.sve.ptrue.nxv2i1(i32 31)
; CHECK-NEXT: ret <vscale x 2 x i1> %1
%1 = tail call <vscale x 2 x i1> @llvm.aarch64.sve.ptrue.nxv2i1(i32 31)
%2 = tail call <vscale x 2 x i64> @llvm.experimental.vector.insert.nxv2i64.v2i64(<vscale x 2 x i64> undef,
%2 = tail call <vscale x 2 x i64> @llvm.vector.insert.nxv2i64.v2i64(<vscale x 2 x i64> undef,
<2 x i64> <i64 1, i64 1>, i64 0)
%3 = tail call <vscale x 2 x i64> @llvm.aarch64.sve.dupq.lane.nxv2i64(<vscale x 2 x i64> %2 , i64 0)
%4 = tail call <vscale x 2 x i64> @llvm.aarch64.sve.dup.x.nxv2i64(i64 0)

@@ -209,7 +209,7 @@ define <vscale x 2 x i1> @dupq_neg1() #0 {
; CHECK: cmpne
; CHECK-NEXT: ret
%1 = tail call <vscale x 2 x i1> @llvm.aarch64.sve.ptrue.nxv2i1(i32 31)
%2 = tail call <vscale x 2 x i64> @llvm.experimental.vector.insert.nxv2i64.v2i64(<vscale x 2 x i64> undef,
%2 = tail call <vscale x 2 x i64> @llvm.vector.insert.nxv2i64.v2i64(<vscale x 2 x i64> undef,
<2 x i64> <i64 1, i64 0>, i64 0)
%3 = tail call <vscale x 2 x i64> @llvm.aarch64.sve.dupq.lane.nxv2i64(<vscale x 2 x i64> %2 , i64 0)
%4 = tail call <vscale x 2 x i64> @llvm.aarch64.sve.dup.x.nxv2i64(i64 0)

@@ -222,7 +222,7 @@ define <vscale x 4 x i1> @dupq_neg2() #0 {
; CHECK: cmpne
; CHECK-NEXT: ret
%1 = tail call <vscale x 4 x i1> @llvm.aarch64.sve.ptrue.nxv4i1(i32 31)
%2 = tail call <vscale x 4 x i32> @llvm.experimental.vector.insert.nxv4i32.v4i32(<vscale x 4 x i32> undef,
%2 = tail call <vscale x 4 x i32> @llvm.vector.insert.nxv4i32.v4i32(<vscale x 4 x i32> undef,
<4 x i32> <i32 1, i32 0, i32 0, i32 1>, i64 0)
%3 = tail call <vscale x 4 x i32> @llvm.aarch64.sve.dupq.lane.nxv4i32(<vscale x 4 x i32> %2 , i64 0)
%4 = tail call <vscale x 2 x i64> @llvm.aarch64.sve.dup.x.nxv2i64(i64 0)

@@ -235,7 +235,7 @@ define <vscale x 4 x i1> @dupq_neg3() #0 {
; CHECK: cmpne
; CHECK-NEXT: ret
%1 = tail call <vscale x 4 x i1> @llvm.aarch64.sve.ptrue.nxv4i1(i32 31)
%2 = tail call <vscale x 4 x i32> @llvm.experimental.vector.insert.nxv4i32.v4i32(<vscale x 4 x i32> undef,
%2 = tail call <vscale x 4 x i32> @llvm.vector.insert.nxv4i32.v4i32(<vscale x 4 x i32> undef,
<4 x i32> <i32 0, i32 1, i32 0, i32 1>, i64 0)
%3 = tail call <vscale x 4 x i32> @llvm.aarch64.sve.dupq.lane.nxv4i32(<vscale x 4 x i32> %2 , i64 0)
%4 = tail call <vscale x 2 x i64> @llvm.aarch64.sve.dup.x.nxv2i64(i64 0)

@@ -248,7 +248,7 @@ define <vscale x 4 x i1> @dupq_neg4() #0 {
; CHECK: cmpne
; CHECK-NEXT: ret
%1 = tail call <vscale x 4 x i1> @llvm.aarch64.sve.ptrue.nxv4i1(i32 31)
%2 = tail call <vscale x 4 x i32> @llvm.experimental.vector.insert.nxv4i32.v4i32(<vscale x 4 x i32> undef,
%2 = tail call <vscale x 4 x i32> @llvm.vector.insert.nxv4i32.v4i32(<vscale x 4 x i32> undef,
<4 x i32> <i32 1, i32 1, i32 0, i32 0>, i64 0)
%3 = tail call <vscale x 4 x i32> @llvm.aarch64.sve.dupq.lane.nxv4i32(<vscale x 4 x i32> %2 , i64 0)
%4 = tail call <vscale x 2 x i64> @llvm.aarch64.sve.dup.x.nxv2i64(i64 0)

@@ -261,7 +261,7 @@ define <vscale x 4 x i1> @dupq_neg5() #0 {
; CHECK: cmpne
; CHECK-NEXT: ret
%1 = tail call <vscale x 4 x i1> @llvm.aarch64.sve.ptrue.nxv4i1(i32 31)
%2 = tail call <vscale x 4 x i32> @llvm.experimental.vector.insert.nxv4i32.v4i32(<vscale x 4 x i32> undef,
%2 = tail call <vscale x 4 x i32> @llvm.vector.insert.nxv4i32.v4i32(<vscale x 4 x i32> undef,
<4 x i32> <i32 0, i32 0, i32 0, i32 1>, i64 0)
%3 = tail call <vscale x 4 x i32> @llvm.aarch64.sve.dupq.lane.nxv4i32(<vscale x 4 x i32> %2 , i64 0)
%4 = tail call <vscale x 2 x i64> @llvm.aarch64.sve.dup.x.nxv2i64(i64 0)

@@ -276,7 +276,7 @@ define <vscale x 4 x i1> @dupq_neg6(i1 %a) #0 {
%1 = tail call <vscale x 4 x i1> @llvm.aarch64.sve.ptrue.nxv4i1(i32 31)
%2 = zext i1 %a to i32
%3 = insertelement <4 x i32> <i32 1, i32 1, i32 1, i32 poison>, i32 %2, i32 3
%4 = tail call <vscale x 4 x i32> @llvm.experimental.vector.insert.nxv4i32.v4i32(<vscale x 4 x i32> undef, <4 x i32> %3, i64 0)
%4 = tail call <vscale x 4 x i32> @llvm.vector.insert.nxv4i32.v4i32(<vscale x 4 x i32> undef, <4 x i32> %3, i64 0)
%5 = tail call <vscale x 4 x i32> @llvm.aarch64.sve.dupq.lane.nxv4i32(<vscale x 4 x i32> %4 , i64 0)
%6 = tail call <vscale x 2 x i64> @llvm.aarch64.sve.dup.x.nxv2i64(i64 0)
%7 = tail call <vscale x 4 x i1> @llvm.aarch64.sve.cmpne.wide.nxv4i32(<vscale x 4 x i1> %1, <vscale x 4 x i32> %5, <vscale x 2 x i64> %6)

@@ -288,7 +288,7 @@ define <vscale x 2 x i1> @dupq_neg7() #0 {
; CHECK: cmpne
; CHECK-NEXT: ret
%1 = tail call <vscale x 2 x i1> @llvm.aarch64.sve.ptrue.nxv2i1(i32 31)
%2 = tail call <vscale x 2 x i64> @llvm.experimental.vector.insert.nxv2i64.v2i64(<vscale x 2 x i64> undef,
%2 = tail call <vscale x 2 x i64> @llvm.vector.insert.nxv2i64.v2i64(<vscale x 2 x i64> undef,
<2 x i64> <i64 1, i64 1>, i64 2)
%3 = tail call <vscale x 2 x i64> @llvm.aarch64.sve.dupq.lane.nxv2i64(<vscale x 2 x i64> %2 , i64 0)
%4 = tail call <vscale x 2 x i64> @llvm.aarch64.sve.dup.x.nxv2i64(i64 0)

@@ -301,7 +301,7 @@ define <vscale x 2 x i1> @dupq_neg8() #0 {
; CHECK: cmpne
; CHECK-NEXT: ret
%1 = tail call <vscale x 2 x i1> @llvm.aarch64.sve.ptrue.nxv2i1(i32 31)
%2 = tail call <vscale x 2 x i64> @llvm.experimental.vector.insert.nxv2i64.v2i64(<vscale x 2 x i64> undef,
%2 = tail call <vscale x 2 x i64> @llvm.vector.insert.nxv2i64.v2i64(<vscale x 2 x i64> undef,
<2 x i64> <i64 1, i64 1>, i64 0)
%3 = tail call <vscale x 2 x i64> @llvm.aarch64.sve.dupq.lane.nxv2i64(<vscale x 2 x i64> %2 , i64 1)
%4 = tail call <vscale x 2 x i64> @llvm.aarch64.sve.dup.x.nxv2i64(i64 0)

@@ -314,7 +314,7 @@ define <vscale x 2 x i1> @dupq_neg9(<vscale x 2 x i64> %x) #0 {
; CHECK: cmpne
; CHECK-NEXT: ret
%1 = tail call <vscale x 2 x i1> @llvm.aarch64.sve.ptrue.nxv2i1(i32 31)
%2 = tail call <vscale x 2 x i64> @llvm.experimental.vector.insert.nxv2i64.v2i64(<vscale x 2 x i64> %x,
%2 = tail call <vscale x 2 x i64> @llvm.vector.insert.nxv2i64.v2i64(<vscale x 2 x i64> %x,
<2 x i64> <i64 1, i64 1>, i64 0)
%3 = tail call <vscale x 2 x i64> @llvm.aarch64.sve.dupq.lane.nxv2i64(<vscale x 2 x i64> %2 , i64 0)
%4 = tail call <vscale x 2 x i64> @llvm.aarch64.sve.dup.x.nxv2i64(i64 0)

@@ -327,7 +327,7 @@ define <vscale x 2 x i1> @dupq_neg10() #0 {
; CHECK: cmpne
; CHECK-NEXT: ret
%1 = tail call <vscale x 2 x i1> @llvm.aarch64.sve.ptrue.nxv2i1(i32 31)
%2 = tail call <vscale x 2 x i64> @llvm.experimental.vector.insert.nxv2i64.v2i64(<vscale x 2 x i64> undef,
%2 = tail call <vscale x 2 x i64> @llvm.vector.insert.nxv2i64.v2i64(<vscale x 2 x i64> undef,
<2 x i64> <i64 1, i64 1>, i64 0)
%3 = tail call <vscale x 2 x i64> @llvm.aarch64.sve.dupq.lane.nxv2i64(<vscale x 2 x i64> %2 , i64 0)
%4 = tail call <vscale x 2 x i64> @llvm.aarch64.sve.dup.x.nxv2i64(i64 1)

@@ -339,7 +339,7 @@ define <vscale x 2 x i1> @dupq_neg11(<vscale x 2 x i1> %pg) #0 {
; CHECK-LABEL: @dupq_neg11(
; CHECK: cmpne
; CHECK-NEXT: ret
%1 = tail call <vscale x 2 x i64> @llvm.experimental.vector.insert.nxv2i64.v2i64(<vscale x 2 x i64> undef,
%1 = tail call <vscale x 2 x i64> @llvm.vector.insert.nxv2i64.v2i64(<vscale x 2 x i64> undef,
<2 x i64> <i64 1, i64 1>, i64 0)
%2 = tail call <vscale x 2 x i64> @llvm.aarch64.sve.dupq.lane.nxv2i64(<vscale x 2 x i64> %1 , i64 0)
%3 = tail call <vscale x 2 x i64> @llvm.aarch64.sve.dup.x.nxv2i64(i64 0)

@@ -352,7 +352,7 @@ define <vscale x 2 x i1> @dupq_neg12() #0 {
; CHECK: cmpne
; CHECK-NEXT: ret
%1 = tail call <vscale x 2 x i1> @llvm.aarch64.sve.ptrue.nxv2i1(i32 15)
%2 = tail call <vscale x 2 x i64> @llvm.experimental.vector.insert.nxv2i64.v2i64(<vscale x 2 x i64> undef,
%2 = tail call <vscale x 2 x i64> @llvm.vector.insert.nxv2i64.v2i64(<vscale x 2 x i64> undef,
<2 x i64> <i64 1, i64 1>, i64 0)
%3 = tail call <vscale x 2 x i64> @llvm.aarch64.sve.dupq.lane.nxv2i64(<vscale x 2 x i64> %2 , i64 0)
%4 = tail call <vscale x 2 x i64> @llvm.aarch64.sve.dup.x.nxv2i64(i64 0)

@@ -365,7 +365,7 @@ define <vscale x 2 x i1> @dupq_neg13(<vscale x 2 x i64> %x) #0 {
; CHECK: cmpne
; CHECK-NEXT: ret
%1 = tail call <vscale x 2 x i1> @llvm.aarch64.sve.ptrue.nxv2i1(i32 31)
%2 = tail call <vscale x 2 x i64> @llvm.experimental.vector.insert.nxv2i64.v2i64(<vscale x 2 x i64> undef,
%2 = tail call <vscale x 2 x i64> @llvm.vector.insert.nxv2i64.v2i64(<vscale x 2 x i64> undef,
<2 x i64> <i64 1, i64 1>, i64 0)
%3 = tail call <vscale x 2 x i64> @llvm.aarch64.sve.dupq.lane.nxv2i64(<vscale x 2 x i64> %2 , i64 0)
%4 = tail call <vscale x 2 x i1> @llvm.aarch64.sve.cmpne.nxv2i64(<vscale x 2 x i1> %1, <vscale x 2 x i64> %3, <vscale x 2 x i64> %x)

@@ -377,10 +377,10 @@ declare <vscale x 8 x i1> @llvm.aarch64.sve.ptrue.nxv8i1(i32)
declare <vscale x 4 x i1> @llvm.aarch64.sve.ptrue.nxv4i1(i32)
declare <vscale x 2 x i1> @llvm.aarch64.sve.ptrue.nxv2i1(i32)

declare <vscale x 16 x i8> @llvm.experimental.vector.insert.nxv16i8.v16i8(<vscale x 16 x i8>, <16 x i8>, i64)
declare <vscale x 8 x i16> @llvm.experimental.vector.insert.nxv8i16.v8i16(<vscale x 8 x i16>, <8 x i16>, i64)
declare <vscale x 4 x i32> @llvm.experimental.vector.insert.nxv4i32.v4i32(<vscale x 4 x i32>, <4 x i32>, i64)
declare <vscale x 2 x i64> @llvm.experimental.vector.insert.nxv2i64.v2i64(<vscale x 2 x i64>, <2 x i64>, i64)
declare <vscale x 16 x i8> @llvm.vector.insert.nxv16i8.v16i8(<vscale x 16 x i8>, <16 x i8>, i64)
declare <vscale x 8 x i16> @llvm.vector.insert.nxv8i16.v8i16(<vscale x 8 x i16>, <8 x i16>, i64)
declare <vscale x 4 x i32> @llvm.vector.insert.nxv4i32.v4i32(<vscale x 4 x i32>, <4 x i32>, i64)
declare <vscale x 2 x i64> @llvm.vector.insert.nxv2i64.v2i64(<vscale x 2 x i64>, <2 x i64>, i64)

declare <vscale x 16 x i8> @llvm.aarch64.sve.dupq.lane.nxv16i8(<vscale x 16 x i8>, i64)
declare <vscale x 8 x i16> @llvm.aarch64.sve.dupq.lane.nxv8i16(<vscale x 8 x i16>, i64)
@@ -1,15 +1,15 @@
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt < %s -passes=instcombine -S | FileCheck %s

; llvm.experimental.vector.extract canonicalizes to shufflevector in the fixed case. In the
; llvm.vector.extract canonicalizes to shufflevector in the fixed case. In the
; scalable case, we lower to the EXTRACT_SUBVECTOR ISD node.

declare <10 x i32> @llvm.experimental.vector.extract.v10i32.v8i32(<8 x i32> %vec, i64 %idx)
declare <2 x i32> @llvm.experimental.vector.extract.v2i32.v4i32(<8 x i32> %vec, i64 %idx)
declare <3 x i32> @llvm.experimental.vector.extract.v3i32.v8i32(<8 x i32> %vec, i64 %idx)
declare <4 x i32> @llvm.experimental.vector.extract.v4i32.nxv4i32(<vscale x 4 x i32> %vec, i64 %idx)
declare <4 x i32> @llvm.experimental.vector.extract.v4i32.v8i32(<8 x i32> %vec, i64 %idx)
declare <8 x i32> @llvm.experimental.vector.extract.v8i32.v8i32(<8 x i32> %vec, i64 %idx)
declare <10 x i32> @llvm.vector.extract.v10i32.v8i32(<8 x i32> %vec, i64 %idx)
declare <2 x i32> @llvm.vector.extract.v2i32.v4i32(<8 x i32> %vec, i64 %idx)
declare <3 x i32> @llvm.vector.extract.v3i32.v8i32(<8 x i32> %vec, i64 %idx)
declare <4 x i32> @llvm.vector.extract.v4i32.nxv4i32(<vscale x 4 x i32> %vec, i64 %idx)
declare <4 x i32> @llvm.vector.extract.v4i32.v8i32(<8 x i32> %vec, i64 %idx)
declare <8 x i32> @llvm.vector.extract.v8i32.v8i32(<8 x i32> %vec, i64 %idx)

; ============================================================================ ;
; Trivial cases

@@ -20,7 +20,7 @@ define <8 x i32> @trivial_nop(<8 x i32> %vec) {
; CHECK-LABEL: @trivial_nop(
; CHECK-NEXT: ret <8 x i32> [[VEC:%.*]]
;
%1 = call <8 x i32> @llvm.experimental.vector.extract.v8i32.v8i32(<8 x i32> %vec, i64 0)
%1 = call <8 x i32> @llvm.vector.extract.v8i32.v8i32(<8 x i32> %vec, i64 0)
ret <8 x i32> %1
}

@@ -33,7 +33,7 @@ define <2 x i32> @valid_extraction_a(<8 x i32> %vec) {
; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <8 x i32> [[VEC:%.*]], <8 x i32> poison, <2 x i32> <i32 0, i32 1>
; CHECK-NEXT: ret <2 x i32> [[TMP1]]
;
%1 = call <2 x i32> @llvm.experimental.vector.extract.v2i32.v4i32(<8 x i32> %vec, i64 0)
%1 = call <2 x i32> @llvm.vector.extract.v2i32.v4i32(<8 x i32> %vec, i64 0)
ret <2 x i32> %1
}

@@ -42,7 +42,7 @@ define <2 x i32> @valid_extraction_b(<8 x i32> %vec) {
; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <8 x i32> [[VEC:%.*]], <8 x i32> poison, <2 x i32> <i32 2, i32 3>
; CHECK-NEXT: ret <2 x i32> [[TMP1]]
;
%1 = call <2 x i32> @llvm.experimental.vector.extract.v2i32.v4i32(<8 x i32> %vec, i64 2)
%1 = call <2 x i32> @llvm.vector.extract.v2i32.v4i32(<8 x i32> %vec, i64 2)
ret <2 x i32> %1
}

@@ -51,7 +51,7 @@ define <2 x i32> @valid_extraction_c(<8 x i32> %vec) {
; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <8 x i32> [[VEC:%.*]], <8 x i32> poison, <2 x i32> <i32 4, i32 5>
; CHECK-NEXT: ret <2 x i32> [[TMP1]]
;
%1 = call <2 x i32> @llvm.experimental.vector.extract.v2i32.v4i32(<8 x i32> %vec, i64 4)
%1 = call <2 x i32> @llvm.vector.extract.v2i32.v4i32(<8 x i32> %vec, i64 4)
ret <2 x i32> %1
}

@@ -60,7 +60,7 @@ define <2 x i32> @valid_extraction_d(<8 x i32> %vec) {
; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <8 x i32> [[VEC:%.*]], <8 x i32> poison, <2 x i32> <i32 6, i32 7>
; CHECK-NEXT: ret <2 x i32> [[TMP1]]
;
%1 = call <2 x i32> @llvm.experimental.vector.extract.v2i32.v4i32(<8 x i32> %vec, i64 6)
%1 = call <2 x i32> @llvm.vector.extract.v2i32.v4i32(<8 x i32> %vec, i64 6)
ret <2 x i32> %1
}

@@ -69,7 +69,7 @@ define <4 x i32> @valid_extraction_e(<8 x i32> %vec) {
; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <8 x i32> [[VEC:%.*]], <8 x i32> poison, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
; CHECK-NEXT: ret <4 x i32> [[TMP1]]
;
%1 = call <4 x i32> @llvm.experimental.vector.extract.v4i32.v8i32(<8 x i32> %vec, i64 0)
%1 = call <4 x i32> @llvm.vector.extract.v4i32.v8i32(<8 x i32> %vec, i64 0)
ret <4 x i32> %1
}

@@ -78,7 +78,7 @@ define <4 x i32> @valid_extraction_f(<8 x i32> %vec) {
; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <8 x i32> [[VEC:%.*]], <8 x i32> poison, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
; CHECK-NEXT: ret <4 x i32> [[TMP1]]
;
%1 = call <4 x i32> @llvm.experimental.vector.extract.v4i32.v8i32(<8 x i32> %vec, i64 4)
%1 = call <4 x i32> @llvm.vector.extract.v4i32.v8i32(<8 x i32> %vec, i64 4)
ret <4 x i32> %1
}

@@ -87,7 +87,7 @@ define <3 x i32> @valid_extraction_g(<8 x i32> %vec) {
; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <8 x i32> [[VEC:%.*]], <8 x i32> poison, <3 x i32> <i32 0, i32 1, i32 2>
; CHECK-NEXT: ret <3 x i32> [[TMP1]]
;
%1 = call <3 x i32> @llvm.experimental.vector.extract.v3i32.v8i32(<8 x i32> %vec, i64 0)
%1 = call <3 x i32> @llvm.vector.extract.v3i32.v8i32(<8 x i32> %vec, i64 0)
ret <3 x i32> %1
}

@@ -96,7 +96,7 @@ define <3 x i32> @valid_extraction_h(<8 x i32> %vec) {
; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <8 x i32> [[VEC:%.*]], <8 x i32> poison, <3 x i32> <i32 3, i32 4, i32 5>
; CHECK-NEXT: ret <3 x i32> [[TMP1]]
;
%1 = call <3 x i32> @llvm.experimental.vector.extract.v3i32.v8i32(<8 x i32> %vec, i64 3)
%1 = call <3 x i32> @llvm.vector.extract.v3i32.v8i32(<8 x i32> %vec, i64 3)
ret <3 x i32> %1
}

@@ -108,9 +108,9 @@ define <3 x i32> @valid_extraction_h(<8 x i32> %vec) {
; EXTRACT_SUBVECTOR ISD node later.
define <4 x i32> @scalable_extract(<vscale x 4 x i32> %vec) {
; CHECK-LABEL: @scalable_extract(
; CHECK-NEXT: [[TMP1:%.*]] = call <4 x i32> @llvm.experimental.vector.extract.v4i32.nxv4i32(<vscale x 4 x i32> [[VEC:%.*]], i64 0)
; CHECK-NEXT: [[TMP1:%.*]] = call <4 x i32> @llvm.vector.extract.v4i32.nxv4i32(<vscale x 4 x i32> [[VEC:%.*]], i64 0)
; CHECK-NEXT: ret <4 x i32> [[TMP1]]
;
%1 = call <4 x i32> @llvm.experimental.vector.extract.v4i32.nxv4i32(<vscale x 4 x i32> %vec, i64 0)
%1 = call <4 x i32> @llvm.vector.extract.v4i32.nxv4i32(<vscale x 4 x i32> %vec, i64 0)
ret <4 x i32> %1
}
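; For reference only, a hand-written sketch of the shufflevector form that the
; fixed-width extract tests above are canonicalized to. The function name is
; illustrative and this snippet is not part of any test file in the diff; it is
; assumed to behave like @llvm.vector.extract.v2i32.v8i32(%vec, i64 2).
define <2 x i32> @extract_as_shuffle_sketch(<8 x i32> %vec) {
  ; Select the two contiguous lanes starting at index 2 of %vec.
  %sub = shufflevector <8 x i32> %vec, <8 x i32> poison, <2 x i32> <i32 2, i32 3>
  ret <2 x i32> %sub
}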
@@ -1,14 +1,14 @@
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt < %s -passes=instcombine -S | FileCheck %s

; llvm.experimental.vector.insert canonicalizes to shufflevector in the fixed case. In the
; llvm.vector.insert canonicalizes to shufflevector in the fixed case. In the
; scalable case, we lower to the INSERT_SUBVECTOR ISD node.

declare <8 x i32> @llvm.experimental.vector.insert.v8i32.v2i32(<8 x i32> %vec, <2 x i32> %subvec, i64 %idx)
declare <8 x i32> @llvm.experimental.vector.insert.v8i32.v3i32(<8 x i32> %vec, <3 x i32> %subvec, i64 %idx)
declare <8 x i32> @llvm.experimental.vector.insert.v8i32.v4i32(<8 x i32> %vec, <4 x i32> %subvec, i64 %idx)
declare <8 x i32> @llvm.experimental.vector.insert.v8i32.v8i32(<8 x i32> %vec, <8 x i32> %subvec, i64 %idx)
declare <vscale x 4 x i32> @llvm.experimental.vector.insert.nxv4i32.v4i32(<vscale x 4 x i32> %vec, <4 x i32> %subvec, i64 %idx)
declare <8 x i32> @llvm.vector.insert.v8i32.v2i32(<8 x i32> %vec, <2 x i32> %subvec, i64 %idx)
declare <8 x i32> @llvm.vector.insert.v8i32.v3i32(<8 x i32> %vec, <3 x i32> %subvec, i64 %idx)
declare <8 x i32> @llvm.vector.insert.v8i32.v4i32(<8 x i32> %vec, <4 x i32> %subvec, i64 %idx)
declare <8 x i32> @llvm.vector.insert.v8i32.v8i32(<8 x i32> %vec, <8 x i32> %subvec, i64 %idx)
declare <vscale x 4 x i32> @llvm.vector.insert.nxv4i32.v4i32(<vscale x 4 x i32> %vec, <4 x i32> %subvec, i64 %idx)

; ============================================================================ ;
; Trivial cases

@@ -20,7 +20,7 @@ define <8 x i32> @trivial_nop(<8 x i32> %vec, <8 x i32> %subvec) {
; CHECK-LABEL: @trivial_nop(
; CHECK-NEXT: ret <8 x i32> [[SUBVEC:%.*]]
;
%1 = call <8 x i32> @llvm.experimental.vector.insert.v8i32.v8i32(<8 x i32> %vec, <8 x i32> %subvec, i64 0)
%1 = call <8 x i32> @llvm.vector.insert.v8i32.v8i32(<8 x i32> %vec, <8 x i32> %subvec, i64 0)
ret <8 x i32> %1
}

@@ -34,7 +34,7 @@ define <8 x i32> @valid_insertion_a(<8 x i32> %vec, <2 x i32> %subvec) {
; CHECK-NEXT: [[TMP2:%.*]] = shufflevector <8 x i32> [[TMP1]], <8 x i32> [[VEC:%.*]], <8 x i32> <i32 0, i32 1, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
; CHECK-NEXT: ret <8 x i32> [[TMP2]]
;
%1 = call <8 x i32> @llvm.experimental.vector.insert.v8i32.v2i32(<8 x i32> %vec, <2 x i32> %subvec, i64 0)
%1 = call <8 x i32> @llvm.vector.insert.v8i32.v2i32(<8 x i32> %vec, <2 x i32> %subvec, i64 0)
ret <8 x i32> %1
}

@@ -44,7 +44,7 @@ define <8 x i32> @valid_insertion_b(<8 x i32> %vec, <2 x i32> %subvec) {
; CHECK-NEXT: [[TMP2:%.*]] = shufflevector <8 x i32> [[VEC:%.*]], <8 x i32> [[TMP1]], <8 x i32> <i32 0, i32 1, i32 8, i32 9, i32 4, i32 5, i32 6, i32 7>
; CHECK-NEXT: ret <8 x i32> [[TMP2]]
;
%1 = call <8 x i32> @llvm.experimental.vector.insert.v8i32.v2i32(<8 x i32> %vec, <2 x i32> %subvec, i64 2)
%1 = call <8 x i32> @llvm.vector.insert.v8i32.v2i32(<8 x i32> %vec, <2 x i32> %subvec, i64 2)
ret <8 x i32> %1
}

@@ -54,7 +54,7 @@ define <8 x i32> @valid_insertion_c(<8 x i32> %vec, <2 x i32> %subvec) {
; CHECK-NEXT: [[TMP2:%.*]] = shufflevector <8 x i32> [[VEC:%.*]], <8 x i32> [[TMP1]], <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 8, i32 9, i32 6, i32 7>
; CHECK-NEXT: ret <8 x i32> [[TMP2]]
;
%1 = call <8 x i32> @llvm.experimental.vector.insert.v8i32.v2i32(<8 x i32> %vec, <2 x i32> %subvec, i64 4)
%1 = call <8 x i32> @llvm.vector.insert.v8i32.v2i32(<8 x i32> %vec, <2 x i32> %subvec, i64 4)
ret <8 x i32> %1
}

@@ -64,7 +64,7 @@ define <8 x i32> @valid_insertion_d(<8 x i32> %vec, <2 x i32> %subvec) {
; CHECK-NEXT: [[TMP2:%.*]] = shufflevector <8 x i32> [[VEC:%.*]], <8 x i32> [[TMP1]], <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 8, i32 9>
; CHECK-NEXT: ret <8 x i32> [[TMP2]]
;
%1 = call <8 x i32> @llvm.experimental.vector.insert.v8i32.v2i32(<8 x i32> %vec, <2 x i32> %subvec, i64 6)
%1 = call <8 x i32> @llvm.vector.insert.v8i32.v2i32(<8 x i32> %vec, <2 x i32> %subvec, i64 6)
ret <8 x i32> %1
}

@@ -74,7 +74,7 @@ define <8 x i32> @valid_insertion_e(<8 x i32> %vec, <4 x i32> %subvec) {
; CHECK-NEXT: [[TMP2:%.*]] = shufflevector <8 x i32> [[TMP1]], <8 x i32> [[VEC:%.*]], <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 12, i32 13, i32 14, i32 15>
; CHECK-NEXT: ret <8 x i32> [[TMP2]]
;
%1 = call <8 x i32> @llvm.experimental.vector.insert.v8i32.v4i32(<8 x i32> %vec, <4 x i32> %subvec, i64 0)
%1 = call <8 x i32> @llvm.vector.insert.v8i32.v4i32(<8 x i32> %vec, <4 x i32> %subvec, i64 0)
ret <8 x i32> %1
}

@@ -84,7 +84,7 @@ define <8 x i32> @valid_insertion_f(<8 x i32> %vec, <4 x i32> %subvec) {
; CHECK-NEXT: [[TMP2:%.*]] = shufflevector <8 x i32> [[VEC:%.*]], <8 x i32> [[TMP1]], <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 8, i32 9, i32 10, i32 11>
; CHECK-NEXT: ret <8 x i32> [[TMP2]]
;
%1 = call <8 x i32> @llvm.experimental.vector.insert.v8i32.v4i32(<8 x i32> %vec, <4 x i32> %subvec, i64 4)
%1 = call <8 x i32> @llvm.vector.insert.v8i32.v4i32(<8 x i32> %vec, <4 x i32> %subvec, i64 4)
ret <8 x i32> %1
}

@@ -94,7 +94,7 @@ define <8 x i32> @valid_insertion_g(<8 x i32> %vec, <3 x i32> %subvec) {
; CHECK-NEXT: [[TMP2:%.*]] = shufflevector <8 x i32> [[TMP1]], <8 x i32> [[VEC:%.*]], <8 x i32> <i32 0, i32 1, i32 2, i32 11, i32 12, i32 13, i32 14, i32 15>
; CHECK-NEXT: ret <8 x i32> [[TMP2]]
;
%1 = call <8 x i32> @llvm.experimental.vector.insert.v8i32.v3i32(<8 x i32> %vec, <3 x i32> %subvec, i64 0)
%1 = call <8 x i32> @llvm.vector.insert.v8i32.v3i32(<8 x i32> %vec, <3 x i32> %subvec, i64 0)
ret <8 x i32> %1
}

@@ -104,7 +104,7 @@ define <8 x i32> @valid_insertion_h(<8 x i32> %vec, <3 x i32> %subvec) {
; CHECK-NEXT: [[TMP2:%.*]] = shufflevector <8 x i32> [[VEC:%.*]], <8 x i32> [[TMP1]], <8 x i32> <i32 0, i32 1, i32 2, i32 8, i32 9, i32 10, i32 6, i32 7>
; CHECK-NEXT: ret <8 x i32> [[TMP2]]
;
%1 = call <8 x i32> @llvm.experimental.vector.insert.v8i32.v3i32(<8 x i32> %vec, <3 x i32> %subvec, i64 3)
%1 = call <8 x i32> @llvm.vector.insert.v8i32.v3i32(<8 x i32> %vec, <3 x i32> %subvec, i64 3)
ret <8 x i32> %1
}

@@ -116,9 +116,9 @@ define <8 x i32> @valid_insertion_h(<8 x i32> %vec, <3 x i32> %subvec) {
; INSERT_SUBVECTOR ISD node later.
define <vscale x 4 x i32> @scalable_insert(<vscale x 4 x i32> %vec, <4 x i32> %subvec) {
; CHECK-LABEL: @scalable_insert(
; CHECK-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vector.insert.nxv4i32.v4i32(<vscale x 4 x i32> [[VEC:%.*]], <4 x i32> [[SUBVEC:%.*]], i64 0)
; CHECK-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.vector.insert.nxv4i32.v4i32(<vscale x 4 x i32> [[VEC:%.*]], <4 x i32> [[SUBVEC:%.*]], i64 0)
; CHECK-NEXT: ret <vscale x 4 x i32> [[TMP1]]
;
%1 = call <vscale x 4 x i32> @llvm.experimental.vector.insert.nxv4i32.v4i32(<vscale x 4 x i32> %vec, <4 x i32> %subvec, i64 0)
%1 = call <vscale x 4 x i32> @llvm.vector.insert.nxv4i32.v4i32(<vscale x 4 x i32> %vec, <4 x i32> %subvec, i64 0)
ret <vscale x 4 x i32> %1
}
@ -5,22 +5,22 @@ define <16 x i8> @redundant_insert_extract_chain(<16 x i8> %x) {
; CHECK-LABEL: @redundant_insert_extract_chain(
; CHECK-NEXT: ret <16 x i8> [[X:%.*]]
;
%inserted = call <vscale x 32 x i8> @llvm.experimental.vector.insert.nxv32i8.v16i8(<vscale x 32 x i8> undef, <16 x i8> %x, i64 0)
%extracted = call <16 x i8> @llvm.experimental.vector.extract.v16i8.nxv32i8(<vscale x 32 x i8> %inserted, i64 0)
%inserted = call <vscale x 32 x i8> @llvm.vector.insert.nxv32i8.v16i8(<vscale x 32 x i8> undef, <16 x i8> %x, i64 0)
%extracted = call <16 x i8> @llvm.vector.extract.v16i8.nxv32i8(<vscale x 32 x i8> %inserted, i64 0)
ret <16 x i8> %extracted
}

define <8 x i8> @non_redundant_insert_extract_chain(<16 x i8> %x) {
; CHECK-LABEL: @non_redundant_insert_extract_chain(
; CHECK-NEXT: [[INSERTED:%.*]] = call <vscale x 32 x i8> @llvm.experimental.vector.insert.nxv32i8.v16i8(<vscale x 32 x i8> undef, <16 x i8> [[X:%.*]], i64 0)
; CHECK-NEXT: [[EXTRACTED:%.*]] = call <8 x i8> @llvm.experimental.vector.extract.v8i8.nxv32i8(<vscale x 32 x i8> [[INSERTED]], i64 0)
; CHECK-NEXT: [[INSERTED:%.*]] = call <vscale x 32 x i8> @llvm.vector.insert.nxv32i8.v16i8(<vscale x 32 x i8> undef, <16 x i8> [[X:%.*]], i64 0)
; CHECK-NEXT: [[EXTRACTED:%.*]] = call <8 x i8> @llvm.vector.extract.v8i8.nxv32i8(<vscale x 32 x i8> [[INSERTED]], i64 0)
; CHECK-NEXT: ret <8 x i8> [[EXTRACTED]]
;
%inserted = call <vscale x 32 x i8> @llvm.experimental.vector.insert.nxv32i8.v16i8(<vscale x 32 x i8> undef, <16 x i8> %x, i64 0)
%extracted = call <8 x i8> @llvm.experimental.vector.extract.v8i8.nxv32i8(<vscale x 32 x i8> %inserted, i64 0)
%inserted = call <vscale x 32 x i8> @llvm.vector.insert.nxv32i8.v16i8(<vscale x 32 x i8> undef, <16 x i8> %x, i64 0)
%extracted = call <8 x i8> @llvm.vector.extract.v8i8.nxv32i8(<vscale x 32 x i8> %inserted, i64 0)
ret <8 x i8> %extracted
}

declare <16 x i8> @llvm.experimental.vector.extract.v16i8.nxv32i8(<vscale x 32 x i8>, i64)
declare <8 x i8> @llvm.experimental.vector.extract.v8i8.nxv32i8(<vscale x 32 x i8>, i64)
declare <vscale x 32 x i8> @llvm.experimental.vector.insert.nxv32i8.v16i8(<vscale x 32 x i8>, <16 x i8>, i64)
declare <16 x i8> @llvm.vector.extract.v16i8.nxv32i8(<vscale x 32 x i8>, i64)
declare <8 x i8> @llvm.vector.extract.v8i8.nxv32i8(<vscale x 32 x i8>, i64)
declare <vscale x 32 x i8> @llvm.vector.insert.nxv32i8.v16i8(<vscale x 32 x i8>, <16 x i8>, i64)
@ -5,33 +5,33 @@ define <vscale x 16 x i8> @redundant_extract_insert_chain(<vscale x 16 x i8> %x)
; CHECK-LABEL: @redundant_extract_insert_chain(
; CHECK-NEXT: ret <vscale x 16 x i8> [[X:%.*]]
;
%extracted = call <32 x i8> @llvm.experimental.vector.extract.v32i8.nxv16i8(<vscale x 16 x i8> %x, i64 0)
%inserted = call <vscale x 16 x i8> @llvm.experimental.vector.insert.nxv16i8.v32i8(<vscale x 16 x i8> undef, <32 x i8> %extracted, i64 0)
%extracted = call <32 x i8> @llvm.vector.extract.v32i8.nxv16i8(<vscale x 16 x i8> %x, i64 0)
%inserted = call <vscale x 16 x i8> @llvm.vector.insert.nxv16i8.v32i8(<vscale x 16 x i8> undef, <32 x i8> %extracted, i64 0)
ret <vscale x 16 x i8> %inserted
}

define <vscale x 16 x i8> @non_redundant_extract_insert_chain_0(<vscale x 32 x i8> %x) {
; CHECK-LABEL: @non_redundant_extract_insert_chain_0(
; CHECK-NEXT: [[EXTRACTED:%.*]] = call <32 x i8> @llvm.experimental.vector.extract.v32i8.nxv32i8(<vscale x 32 x i8> [[X:%.*]], i64 0)
; CHECK-NEXT: [[INSERTED:%.*]] = call <vscale x 16 x i8> @llvm.experimental.vector.insert.nxv16i8.v32i8(<vscale x 16 x i8> undef, <32 x i8> [[EXTRACTED]], i64 0)
; CHECK-NEXT: [[EXTRACTED:%.*]] = call <32 x i8> @llvm.vector.extract.v32i8.nxv32i8(<vscale x 32 x i8> [[X:%.*]], i64 0)
; CHECK-NEXT: [[INSERTED:%.*]] = call <vscale x 16 x i8> @llvm.vector.insert.nxv16i8.v32i8(<vscale x 16 x i8> undef, <32 x i8> [[EXTRACTED]], i64 0)
; CHECK-NEXT: ret <vscale x 16 x i8> [[INSERTED]]
;
%extracted = call <32 x i8> @llvm.experimental.vector.extract.v32i8.nxv32i8(<vscale x 32 x i8> %x, i64 0)
%inserted = call <vscale x 16 x i8> @llvm.experimental.vector.insert.nxv16i8.v32i8(<vscale x 16 x i8> undef, <32 x i8> %extracted, i64 0)
%extracted = call <32 x i8> @llvm.vector.extract.v32i8.nxv32i8(<vscale x 32 x i8> %x, i64 0)
%inserted = call <vscale x 16 x i8> @llvm.vector.insert.nxv16i8.v32i8(<vscale x 16 x i8> undef, <32 x i8> %extracted, i64 0)
ret <vscale x 16 x i8> %inserted
}

define <vscale x 16 x i8> @non_redundant_extract_insert_chain_1(<vscale x 16 x i8> %x, <vscale x 16 x i8> %y) {
; CHECK-LABEL: @non_redundant_extract_insert_chain_1(
; CHECK-NEXT: [[EXTRACTED:%.*]] = call <32 x i8> @llvm.experimental.vector.extract.v32i8.nxv16i8(<vscale x 16 x i8> [[X:%.*]], i64 0)
; CHECK-NEXT: [[INSERTED:%.*]] = call <vscale x 16 x i8> @llvm.experimental.vector.insert.nxv16i8.v32i8(<vscale x 16 x i8> [[Y:%.*]], <32 x i8> [[EXTRACTED]], i64 0)
; CHECK-NEXT: [[EXTRACTED:%.*]] = call <32 x i8> @llvm.vector.extract.v32i8.nxv16i8(<vscale x 16 x i8> [[X:%.*]], i64 0)
; CHECK-NEXT: [[INSERTED:%.*]] = call <vscale x 16 x i8> @llvm.vector.insert.nxv16i8.v32i8(<vscale x 16 x i8> [[Y:%.*]], <32 x i8> [[EXTRACTED]], i64 0)
; CHECK-NEXT: ret <vscale x 16 x i8> [[INSERTED]]
;
%extracted = call <32 x i8> @llvm.experimental.vector.extract.v32i8.nxv16i8(<vscale x 16 x i8> %x, i64 0)
%inserted = call <vscale x 16 x i8> @llvm.experimental.vector.insert.nxv16i8.v32i8(<vscale x 16 x i8> %y, <32 x i8> %extracted, i64 0)
%extracted = call <32 x i8> @llvm.vector.extract.v32i8.nxv16i8(<vscale x 16 x i8> %x, i64 0)
%inserted = call <vscale x 16 x i8> @llvm.vector.insert.nxv16i8.v32i8(<vscale x 16 x i8> %y, <32 x i8> %extracted, i64 0)
ret <vscale x 16 x i8> %inserted
}

declare <32 x i8> @llvm.experimental.vector.extract.v32i8.nxv16i8(<vscale x 16 x i8>, i64)
declare <32 x i8> @llvm.experimental.vector.extract.v32i8.nxv32i8(<vscale x 32 x i8>, i64)
declare <vscale x 16 x i8> @llvm.experimental.vector.insert.nxv16i8.v32i8(<vscale x 16 x i8>, <32 x i8>, i64)
declare <32 x i8> @llvm.vector.extract.v32i8.nxv16i8(<vscale x 16 x i8>, i64)
declare <32 x i8> @llvm.vector.extract.v32i8.nxv32i8(<vscale x 32 x i8>, i64)
declare <vscale x 16 x i8> @llvm.vector.insert.nxv16i8.v32i8(<vscale x 16 x i8>, <32 x i8>, i64)
@ -8,9 +8,9 @@ define void @load_factor2(<32 x i16>* %ptr) #0 {
|
|||
; CHECK-NEXT: [[TMP1:%.*]] = bitcast <32 x i16>* %ptr to i16*
|
||||
; CHECK-NEXT: [[LDN:%.*]] = call { <vscale x 8 x i16>, <vscale x 8 x i16> } @llvm.aarch64.sve.ld2.sret.nxv8i16(<vscale x 8 x i1> [[PTRUE]], i16* [[TMP1]])
|
||||
; CHECK-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16> } [[LDN]], 1
|
||||
; CHECK-NEXT: [[EXT1:%.*]] = call <16 x i16> @llvm.experimental.vector.extract.v16i16.nxv8i16(<vscale x 8 x i16> [[TMP2]], i64 0)
|
||||
; CHECK-NEXT: [[EXT1:%.*]] = call <16 x i16> @llvm.vector.extract.v16i16.nxv8i16(<vscale x 8 x i16> [[TMP2]], i64 0)
|
||||
; CHECK-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16> } [[LDN]], 0
|
||||
; CHECK-NEXT: [[EXT2:%.*]] = call <16 x i16> @llvm.experimental.vector.extract.v16i16.nxv8i16(<vscale x 8 x i16> [[TMP3]], i64 0)
|
||||
; CHECK-NEXT: [[EXT2:%.*]] = call <16 x i16> @llvm.vector.extract.v16i16.nxv8i16(<vscale x 8 x i16> [[TMP3]], i64 0)
|
||||
; CHECK-NEXT: ret void
|
||||
%interleaved.vec = load <32 x i16>, <32 x i16>* %ptr, align 4
|
||||
%v0 = shufflevector <32 x i16> %interleaved.vec, <32 x i16> poison, <16 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14,
|
||||
|
@ -26,11 +26,11 @@ define void @load_factor3(<24 x i32>* %ptr) #0 {
|
|||
; CHECK-NEXT: [[TMP1:%.*]] = bitcast <24 x i32>* %ptr to i32*
|
||||
; CHECK-NEXT: [[LDN:%.*]] = call { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } @llvm.aarch64.sve.ld3.sret.nxv4i32(<vscale x 4 x i1> [[PTRUE]], i32* [[TMP1]])
|
||||
; CHECK-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } [[LDN]], 2
|
||||
; CHECK-NEXT: [[EXT1:%.*]] = call <8 x i32> @llvm.experimental.vector.extract.v8i32.nxv4i32(<vscale x 4 x i32> [[TMP2]], i64 0)
|
||||
; CHECK-NEXT: [[EXT1:%.*]] = call <8 x i32> @llvm.vector.extract.v8i32.nxv4i32(<vscale x 4 x i32> [[TMP2]], i64 0)
|
||||
; CHECK-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } [[LDN]], 1
|
||||
; CHECK-NEXT: [[EXT2:%.*]] = call <8 x i32> @llvm.experimental.vector.extract.v8i32.nxv4i32(<vscale x 4 x i32> [[TMP3]], i64 0)
|
||||
; CHECK-NEXT: [[EXT2:%.*]] = call <8 x i32> @llvm.vector.extract.v8i32.nxv4i32(<vscale x 4 x i32> [[TMP3]], i64 0)
|
||||
; CHECK-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } [[LDN]], 0
|
||||
; CHECK-NEXT: [[EXT3:%.*]] = call <8 x i32> @llvm.experimental.vector.extract.v8i32.nxv4i32(<vscale x 4 x i32> [[TMP4]], i64 0)
|
||||
; CHECK-NEXT: [[EXT3:%.*]] = call <8 x i32> @llvm.vector.extract.v8i32.nxv4i32(<vscale x 4 x i32> [[TMP4]], i64 0)
|
||||
; CHECK-NEXT: ret void
|
||||
%interleaved.vec = load <24 x i32>, <24 x i32>* %ptr, align 4
|
||||
%v0 = shufflevector <24 x i32> %interleaved.vec, <24 x i32> poison, <8 x i32> <i32 0, i32 3, i32 6, i32 9, i32 12, i32 15, i32 18, i32 21>
|
||||
|
@ -45,13 +45,13 @@ define void @load_factor4(<16 x i64>* %ptr) #0 {
|
|||
; CHECK-NEXT: [[TMP1:%.*]] = bitcast <16 x i64>* %ptr to i64*
|
||||
; CHECK-NEXT: [[LDN:%.*]] = call { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } @llvm.aarch64.sve.ld4.sret.nxv2i64(<vscale x 2 x i1> [[PTRUE]], i64* [[TMP1]])
|
||||
; CHECK-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[LDN]], 3
|
||||
; CHECK-NEXT: [[EXT1:%.*]] = call <4 x i64> @llvm.experimental.vector.extract.v4i64.nxv2i64(<vscale x 2 x i64> [[TMP2]], i64 0)
|
||||
; CHECK-NEXT: [[EXT1:%.*]] = call <4 x i64> @llvm.vector.extract.v4i64.nxv2i64(<vscale x 2 x i64> [[TMP2]], i64 0)
|
||||
; CHECK-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[LDN]], 2
|
||||
; CHECK-NEXT: [[EXT2:%.*]] = call <4 x i64> @llvm.experimental.vector.extract.v4i64.nxv2i64(<vscale x 2 x i64> [[TMP3]], i64 0)
|
||||
; CHECK-NEXT: [[EXT2:%.*]] = call <4 x i64> @llvm.vector.extract.v4i64.nxv2i64(<vscale x 2 x i64> [[TMP3]], i64 0)
|
||||
; CHECK-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[LDN]], 1
|
||||
; CHECK-NEXT: [[EXT3:%.*]] = call <4 x i64> @llvm.experimental.vector.extract.v4i64.nxv2i64(<vscale x 2 x i64> [[TMP4]], i64 0)
|
||||
; CHECK-NEXT: [[EXT3:%.*]] = call <4 x i64> @llvm.vector.extract.v4i64.nxv2i64(<vscale x 2 x i64> [[TMP4]], i64 0)
|
||||
; CHECK-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[LDN]], 0
|
||||
; CHECK-NEXT: [[EXT4:%.*]] = call <4 x i64> @llvm.experimental.vector.extract.v4i64.nxv2i64(<vscale x 2 x i64> [[TMP5]], i64 0)
|
||||
; CHECK-NEXT: [[EXT4:%.*]] = call <4 x i64> @llvm.vector.extract.v4i64.nxv2i64(<vscale x 2 x i64> [[TMP5]], i64 0)
|
||||
; CHECK-NEXT: ret void
|
||||
%interleaved.vec = load <16 x i64>, <16 x i64>* %ptr, align 4
|
||||
%v0 = shufflevector <16 x i64> %interleaved.vec, <16 x i64> poison, <4 x i32> <i32 0, i32 4, i32 8, i32 12>
|
||||
|
@ -65,9 +65,9 @@ define void @store_factor2(<32 x i16>* %ptr, <16 x i16> %v0, <16 x i16> %v1) #0
|
|||
; CHECK-LABEL: @store_factor2(
|
||||
; CHECK-NEXT: [[PTRUE:%.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.ptrue.nxv8i1(i32 31)
|
||||
; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <16 x i16> %v0, <16 x i16> %v1, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
|
||||
; CHECK-NEXT: [[INS1:%.*]] = call <vscale x 8 x i16> @llvm.experimental.vector.insert.nxv8i16.v16i16(<vscale x 8 x i16> undef, <16 x i16> [[TMP1]], i64 0)
|
||||
; CHECK-NEXT: [[INS1:%.*]] = call <vscale x 8 x i16> @llvm.vector.insert.nxv8i16.v16i16(<vscale x 8 x i16> undef, <16 x i16> [[TMP1]], i64 0)
|
||||
; CHECK-NEXT: [[TMP2:%.*]] = shufflevector <16 x i16> %v0, <16 x i16> %v1, <16 x i32> <i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
|
||||
; CHECK-NEXT: [[INS2:%.*]] = call <vscale x 8 x i16> @llvm.experimental.vector.insert.nxv8i16.v16i16(<vscale x 8 x i16> undef, <16 x i16> [[TMP2]], i64 0)
|
||||
; CHECK-NEXT: [[INS2:%.*]] = call <vscale x 8 x i16> @llvm.vector.insert.nxv8i16.v16i16(<vscale x 8 x i16> undef, <16 x i16> [[TMP2]], i64 0)
|
||||
; CHECK-NEXT: [[PTR:%.*]] = bitcast <32 x i16>* %ptr to i16*
|
||||
; CHECK-NEXT: call void @llvm.aarch64.sve.st2.nxv8i16(<vscale x 8 x i16> [[INS1]], <vscale x 8 x i16> [[INS2]], <vscale x 8 x i1> [[PTRUE]], i16* [[PTR]])
|
||||
; CHECK-NEXT: ret void
|
||||
|
@ -81,11 +81,11 @@ define void @store_factor3(<24 x i32>* %ptr, <8 x i32> %v0, <8 x i32> %v1, <8 x
|
|||
; CHECK-LABEL: @store_factor3(
|
||||
; CHECK: [[PTRUE:%.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.ptrue.nxv4i1(i32 31)
|
||||
; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <16 x i32> %s0, <16 x i32> %s1, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
|
||||
; CHECK-NEXT: [[INS1:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vector.insert.nxv4i32.v8i32(<vscale x 4 x i32> undef, <8 x i32> [[TMP1]], i64 0)
|
||||
; CHECK-NEXT: [[INS1:%.*]] = call <vscale x 4 x i32> @llvm.vector.insert.nxv4i32.v8i32(<vscale x 4 x i32> undef, <8 x i32> [[TMP1]], i64 0)
|
||||
; CHECK-NEXT: [[TMP2:%.*]] = shufflevector <16 x i32> %s0, <16 x i32> %s1, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
|
||||
; CHECK-NEXT: [[INS2:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vector.insert.nxv4i32.v8i32(<vscale x 4 x i32> undef, <8 x i32> [[TMP2]], i64 0)
|
||||
; CHECK-NEXT: [[INS2:%.*]] = call <vscale x 4 x i32> @llvm.vector.insert.nxv4i32.v8i32(<vscale x 4 x i32> undef, <8 x i32> [[TMP2]], i64 0)
|
||||
; CHECK-NEXT: [[TMP3:%.*]] = shufflevector <16 x i32> %s0, <16 x i32> %s1, <8 x i32> <i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23>
|
||||
; CHECK-NEXT: [[INS3:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vector.insert.nxv4i32.v8i32(<vscale x 4 x i32> undef, <8 x i32> [[TMP3]], i64 0)
|
||||
; CHECK-NEXT: [[INS3:%.*]] = call <vscale x 4 x i32> @llvm.vector.insert.nxv4i32.v8i32(<vscale x 4 x i32> undef, <8 x i32> [[TMP3]], i64 0)
|
||||
; CHECK-NEXT: [[PTR:%.*]] = bitcast <24 x i32>* %ptr to i32*
|
||||
; CHECK-NEXT: call void @llvm.aarch64.sve.st3.nxv4i32(<vscale x 4 x i32> [[INS1]], <vscale x 4 x i32> [[INS2]], <vscale x 4 x i32> [[INS3]], <vscale x 4 x i1> [[PTRUE]], i32* [[PTR]])
|
||||
; CHECK-NEXT: ret void
|
||||
|
@ -103,13 +103,13 @@ define void @store_factor4(<16 x i64>* %ptr, <4 x i64> %v0, <4 x i64> %v1, <4 x
|
|||
; CHECK-LABEL: @store_factor4(
|
||||
; CHECK: [[PTRUE:%.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.ptrue.nxv2i1(i32 31)
|
||||
; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <8 x i64> %s0, <8 x i64> %s1, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
|
||||
; CHECK-NEXT: [[INS1:%.*]] = call <vscale x 2 x i64> @llvm.experimental.vector.insert.nxv2i64.v4i64(<vscale x 2 x i64> undef, <4 x i64> [[TMP1]], i64 0)
|
||||
; CHECK-NEXT: [[INS1:%.*]] = call <vscale x 2 x i64> @llvm.vector.insert.nxv2i64.v4i64(<vscale x 2 x i64> undef, <4 x i64> [[TMP1]], i64 0)
|
||||
; CHECK-NEXT: [[TMP2:%.*]] = shufflevector <8 x i64> %s0, <8 x i64> %s1, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
|
||||
; CHECK-NEXT: [[INS2:%.*]] = call <vscale x 2 x i64> @llvm.experimental.vector.insert.nxv2i64.v4i64(<vscale x 2 x i64> undef, <4 x i64> [[TMP2]], i64 0)
|
||||
; CHECK-NEXT: [[INS2:%.*]] = call <vscale x 2 x i64> @llvm.vector.insert.nxv2i64.v4i64(<vscale x 2 x i64> undef, <4 x i64> [[TMP2]], i64 0)
|
||||
; CHECK-NEXT: [[TMP3:%.*]] = shufflevector <8 x i64> %s0, <8 x i64> %s1, <4 x i32> <i32 8, i32 9, i32 10, i32 11>
|
||||
; CHECK-NEXT: [[INS3:%.*]] = call <vscale x 2 x i64> @llvm.experimental.vector.insert.nxv2i64.v4i64(<vscale x 2 x i64> undef, <4 x i64> [[TMP3]], i64 0)
|
||||
; CHECK-NEXT: [[INS3:%.*]] = call <vscale x 2 x i64> @llvm.vector.insert.nxv2i64.v4i64(<vscale x 2 x i64> undef, <4 x i64> [[TMP3]], i64 0)
|
||||
; CHECK-NEXT: [[TMP4:%.*]] = shufflevector <8 x i64> %s0, <8 x i64> %s1, <4 x i32> <i32 12, i32 13, i32 14, i32 15>
|
||||
; CHECK-NEXT: [[INS4:%.*]] = call <vscale x 2 x i64> @llvm.experimental.vector.insert.nxv2i64.v4i64(<vscale x 2 x i64> undef, <4 x i64> [[TMP4]], i64 0)
|
||||
; CHECK-NEXT: [[INS4:%.*]] = call <vscale x 2 x i64> @llvm.vector.insert.nxv2i64.v4i64(<vscale x 2 x i64> undef, <4 x i64> [[TMP4]], i64 0)
|
||||
; CHECK-NEXT: [[PTR:%.*]] = bitcast <16 x i64>* %ptr to i64*
|
||||
; CHECK-NEXT: call void @llvm.aarch64.sve.st4.nxv2i64(<vscale x 2 x i64> [[INS1]], <vscale x 2 x i64> [[INS2]], <vscale x 2 x i64> [[INS3]], <vscale x 2 x i64> [[INS4]], <vscale x 2 x i1> [[PTRUE]], i64* [[PTR]])
|
||||
; CHECK-NEXT: ret void
|
||||
|
@ -126,10 +126,10 @@ define void @load_ptrvec_factor2(<8 x i32*>* %ptr) #0 {
|
|||
; CHECK-NEXT: [[TMP1:%.*]] = bitcast <8 x i32*>* %ptr to i64*
|
||||
; CHECK-NEXT: [[LDN:%.*]] = call { <vscale x 2 x i64>, <vscale x 2 x i64> } @llvm.aarch64.sve.ld2.sret.nxv2i64(<vscale x 2 x i1> [[PTRUE]], i64* [[TMP1]])
|
||||
; CHECK-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64> } [[LDN]], 1
|
||||
; CHECK-NEXT: [[EXT1:%.*]] = call <4 x i64> @llvm.experimental.vector.extract.v4i64.nxv2i64(<vscale x 2 x i64> [[TMP2]], i64 0)
|
||||
; CHECK-NEXT: [[EXT1:%.*]] = call <4 x i64> @llvm.vector.extract.v4i64.nxv2i64(<vscale x 2 x i64> [[TMP2]], i64 0)
|
||||
; CHECK-NEXT: [[TOP1:%.*]] = inttoptr <4 x i64> [[EXT1]] to <4 x i32*>
|
||||
; CHECK-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64> } [[LDN]], 0
|
||||
; CHECK-NEXT: [[EXT2:%.*]] = call <4 x i64> @llvm.experimental.vector.extract.v4i64.nxv2i64(<vscale x 2 x i64> [[TMP3]], i64 0)
|
||||
; CHECK-NEXT: [[EXT2:%.*]] = call <4 x i64> @llvm.vector.extract.v4i64.nxv2i64(<vscale x 2 x i64> [[TMP3]], i64 0)
|
||||
; CHECK-NEXT: [[TOP2:%.*]] = inttoptr <4 x i64> [[EXT2]] to <4 x i32*>
|
||||
; CHECK-NEXT: ret void
|
||||
%interleaved.vec = load <8 x i32*>, <8 x i32*>* %ptr, align 4
|
||||
|
@ -144,13 +144,13 @@ define void @load_ptrvec_factor3(<12 x i32*>* %ptr) #0 {
|
|||
; CHECK-NEXT: [[TMP1:%.*]] = bitcast <12 x i32*>* %ptr to i64*
|
||||
; CHECK-NEXT: [[LDN:%.*]] = call { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } @llvm.aarch64.sve.ld3.sret.nxv2i64(<vscale x 2 x i1> [[PTRUE]], i64* [[TMP1]])
|
||||
; CHECK-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[LDN]], 2
|
||||
; CHECK-NEXT: [[EXT1:%.*]] = call <4 x i64> @llvm.experimental.vector.extract.v4i64.nxv2i64(<vscale x 2 x i64> [[TMP2]], i64 0)
|
||||
; CHECK-NEXT: [[EXT1:%.*]] = call <4 x i64> @llvm.vector.extract.v4i64.nxv2i64(<vscale x 2 x i64> [[TMP2]], i64 0)
|
||||
; CHECK-NEXT: [[TOP1:%.*]] = inttoptr <4 x i64> [[EXT1]] to <4 x i32*>
|
||||
; CHECK-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[LDN]], 1
|
||||
; CHECK-NEXT: [[EXT2:%.*]] = call <4 x i64> @llvm.experimental.vector.extract.v4i64.nxv2i64(<vscale x 2 x i64> [[TMP3]], i64 0)
|
||||
; CHECK-NEXT: [[EXT2:%.*]] = call <4 x i64> @llvm.vector.extract.v4i64.nxv2i64(<vscale x 2 x i64> [[TMP3]], i64 0)
|
||||
; CHECK-NEXT: [[TOP2:%.*]] = inttoptr <4 x i64> [[EXT2]] to <4 x i32*>
|
||||
; CHECK-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[LDN]], 0
|
||||
; CHECK-NEXT: [[EXT3:%.*]] = call <4 x i64> @llvm.experimental.vector.extract.v4i64.nxv2i64(<vscale x 2 x i64> [[TMP4]], i64 0)
|
||||
; CHECK-NEXT: [[EXT3:%.*]] = call <4 x i64> @llvm.vector.extract.v4i64.nxv2i64(<vscale x 2 x i64> [[TMP4]], i64 0)
|
||||
; CHECK-NEXT: [[TOP3:%.*]] = inttoptr <4 x i64> [[EXT3]] to <4 x i32*>
|
||||
; CHECK-NEXT: ret void
|
||||
%interleaved.vec = load <12 x i32*>, <12 x i32*>* %ptr, align 4
|
||||
|
@ -166,16 +166,16 @@ define void @load_ptrvec_factor4(<16 x i32*>* %ptr) #0 {
|
|||
; CHECK-NEXT: [[TMP1:%.*]] = bitcast <16 x i32*>* %ptr to i64*
|
||||
; CHECK-NEXT: [[LDN:%.*]] = call { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } @llvm.aarch64.sve.ld4.sret.nxv2i64(<vscale x 2 x i1> [[PTRUE]], i64* [[TMP1]])
|
||||
; CHECK-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[LDN]], 3
|
||||
; CHECK-NEXT: [[EXT1:%.*]] = call <4 x i64> @llvm.experimental.vector.extract.v4i64.nxv2i64(<vscale x 2 x i64> [[TMP2]], i64 0)
|
||||
; CHECK-NEXT: [[EXT1:%.*]] = call <4 x i64> @llvm.vector.extract.v4i64.nxv2i64(<vscale x 2 x i64> [[TMP2]], i64 0)
|
||||
; CHECK-NEXT: [[TOP1:%.*]] = inttoptr <4 x i64> [[EXT1]] to <4 x i32*>
|
||||
; CHECK-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[LDN]], 2
|
||||
; CHECK-NEXT: [[EXT2:%.*]] = call <4 x i64> @llvm.experimental.vector.extract.v4i64.nxv2i64(<vscale x 2 x i64> [[TMP3]], i64 0)
|
||||
; CHECK-NEXT: [[EXT2:%.*]] = call <4 x i64> @llvm.vector.extract.v4i64.nxv2i64(<vscale x 2 x i64> [[TMP3]], i64 0)
|
||||
; CHECK-NEXT: [[TOP2:%.*]] = inttoptr <4 x i64> [[EXT2]] to <4 x i32*>
|
||||
; CHECK-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[LDN]], 1
|
||||
; CHECK-NEXT: [[EXT3:%.*]] = call <4 x i64> @llvm.experimental.vector.extract.v4i64.nxv2i64(<vscale x 2 x i64> [[TMP4]], i64 0)
|
||||
; CHECK-NEXT: [[EXT3:%.*]] = call <4 x i64> @llvm.vector.extract.v4i64.nxv2i64(<vscale x 2 x i64> [[TMP4]], i64 0)
|
||||
; CHECK-NEXT: [[TOP3:%.*]] = inttoptr <4 x i64> [[EXT3]] to <4 x i32*>
|
||||
; CHECK-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[LDN]], 0
|
||||
; CHECK-NEXT: [[EXT4:%.*]] = call <4 x i64> @llvm.experimental.vector.extract.v4i64.nxv2i64(<vscale x 2 x i64> [[TMP5]], i64 0)
|
||||
; CHECK-NEXT: [[EXT4:%.*]] = call <4 x i64> @llvm.vector.extract.v4i64.nxv2i64(<vscale x 2 x i64> [[TMP5]], i64 0)
|
||||
; CHECK-NEXT: [[TOP4:%.*]] = inttoptr <4 x i64> [[EXT4]] to <4 x i32*>
|
||||
; CHECK-NEXT: ret void
|
||||
%interleaved.vec = load <16 x i32*>, <16 x i32*>* %ptr, align 4
|
||||
|
@ -192,9 +192,9 @@ define void @store_ptrvec_factor2(<8 x i32*>* %ptr, <4 x i32*> %v0, <4 x i32*> %
|
|||
; CHECK-NEXT: [[TOI2:%.*]] = ptrtoint <4 x i32*> %v1 to <4 x i64>
|
||||
; CHECK-NEXT: [[PTRUE:%.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.ptrue.nxv2i1(i32 31)
|
||||
; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <4 x i64> [[TOI1]], <4 x i64> [[TOI2]], <4 x i32> <i32 0, i32 1, i32 2, i32 3>
|
||||
; CHECK-NEXT: [[INS1:%.*]] = call <vscale x 2 x i64> @llvm.experimental.vector.insert.nxv2i64.v4i64(<vscale x 2 x i64> undef, <4 x i64> [[TMP1]], i64 0)
|
||||
; CHECK-NEXT: [[INS1:%.*]] = call <vscale x 2 x i64> @llvm.vector.insert.nxv2i64.v4i64(<vscale x 2 x i64> undef, <4 x i64> [[TMP1]], i64 0)
|
||||
; CHECK-NEXT: [[TMP2:%.*]] = shufflevector <4 x i64> [[TOI1]], <4 x i64> [[TOI2]], <4 x i32> <i32 4, i32 5, i32 6, i32 7>
|
||||
; CHECK-NEXT: [[INS2:%.*]] = call <vscale x 2 x i64> @llvm.experimental.vector.insert.nxv2i64.v4i64(<vscale x 2 x i64> undef, <4 x i64> [[TMP2]], i64 0)
|
||||
; CHECK-NEXT: [[INS2:%.*]] = call <vscale x 2 x i64> @llvm.vector.insert.nxv2i64.v4i64(<vscale x 2 x i64> undef, <4 x i64> [[TMP2]], i64 0)
|
||||
; CHECK-NEXT: [[PTR:%.*]] = bitcast <8 x i32*>* %ptr to i64*
|
||||
; CHECK-NEXT: call void @llvm.aarch64.sve.st2.nxv2i64(<vscale x 2 x i64> [[INS1]], <vscale x 2 x i64> [[INS2]], <vscale x 2 x i1> [[PTRUE]], i64* [[PTR]])
|
||||
; CHECK-NEXT: ret void
|
||||
|
@ -209,11 +209,11 @@ define void @store_ptrvec_factor3(<12 x i32*>* %ptr, <4 x i32*> %v0, <4 x i32*>
|
|||
; CHECK-NEXT: [[TOI2:%.*]] = ptrtoint <8 x i32*> %s1 to <8 x i64>
|
||||
; CHECK-NEXT: [[PTRUE:%.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.ptrue.nxv2i1(i32 31)
|
||||
; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <8 x i64> [[TOI1]], <8 x i64> [[TOI2]], <4 x i32> <i32 0, i32 1, i32 2, i32 3>
|
||||
; CHECK-NEXT: [[INS1:%.*]] = call <vscale x 2 x i64> @llvm.experimental.vector.insert.nxv2i64.v4i64(<vscale x 2 x i64> undef, <4 x i64> [[TMP1]], i64 0)
|
||||
; CHECK-NEXT: [[INS1:%.*]] = call <vscale x 2 x i64> @llvm.vector.insert.nxv2i64.v4i64(<vscale x 2 x i64> undef, <4 x i64> [[TMP1]], i64 0)
|
||||
; CHECK-NEXT: [[TMP2:%.*]] = shufflevector <8 x i64> [[TOI1]], <8 x i64> [[TOI2]], <4 x i32> <i32 4, i32 5, i32 6, i32 7>
|
||||
; CHECK-NEXT: [[INS2:%.*]] = call <vscale x 2 x i64> @llvm.experimental.vector.insert.nxv2i64.v4i64(<vscale x 2 x i64> undef, <4 x i64> [[TMP2]], i64 0)
|
||||
; CHECK-NEXT: [[INS2:%.*]] = call <vscale x 2 x i64> @llvm.vector.insert.nxv2i64.v4i64(<vscale x 2 x i64> undef, <4 x i64> [[TMP2]], i64 0)
|
||||
; CHECK-NEXT: [[TMP3:%.*]] = shufflevector <8 x i64> [[TOI1]], <8 x i64> [[TOI2]], <4 x i32> <i32 8, i32 9, i32 10, i32 11>
|
||||
; CHECK-NEXT: [[INS3:%.*]] = call <vscale x 2 x i64> @llvm.experimental.vector.insert.nxv2i64.v4i64(<vscale x 2 x i64> undef, <4 x i64> [[TMP3]], i64 0)
|
||||
; CHECK-NEXT: [[INS3:%.*]] = call <vscale x 2 x i64> @llvm.vector.insert.nxv2i64.v4i64(<vscale x 2 x i64> undef, <4 x i64> [[TMP3]], i64 0)
|
||||
; CHECK-NEXT: [[PTR:%.*]] = bitcast <12 x i32*>* %ptr to i64*
|
||||
; CHECK-NEXT: call void @llvm.aarch64.sve.st3.nxv2i64(<vscale x 2 x i64> [[INS1]], <vscale x 2 x i64> [[INS2]], <vscale x 2 x i64> [[INS3]], <vscale x 2 x i1> [[PTRUE]], i64* [[PTR]])
|
||||
; CHECK-NEXT: ret void
|
||||
|
@ -230,13 +230,13 @@ define void @store_ptrvec_factor4(<16 x i32*>* %ptr, <4 x i32*> %v0, <4 x i32*>
|
|||
; CHECK-NEXT: [[TOI2:%.*]] = ptrtoint <8 x i32*> %s1 to <8 x i64>
|
||||
; CHECK-NEXT: [[PTRUE:%.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.ptrue.nxv2i1(i32 31)
|
||||
; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <8 x i64> [[TOI1]], <8 x i64> [[TOI2]], <4 x i32> <i32 0, i32 1, i32 2, i32 3>
|
||||
; CHECK-NEXT: [[INS1:%.*]] = call <vscale x 2 x i64> @llvm.experimental.vector.insert.nxv2i64.v4i64(<vscale x 2 x i64> undef, <4 x i64> [[TMP1]], i64 0)
|
||||
; CHECK-NEXT: [[INS1:%.*]] = call <vscale x 2 x i64> @llvm.vector.insert.nxv2i64.v4i64(<vscale x 2 x i64> undef, <4 x i64> [[TMP1]], i64 0)
|
||||
; CHECK-NEXT: [[TMP2:%.*]] = shufflevector <8 x i64> [[TOI1]], <8 x i64> [[TOI2]], <4 x i32> <i32 4, i32 5, i32 6, i32 7>
|
||||
; CHECK-NEXT: [[INS2:%.*]] = call <vscale x 2 x i64> @llvm.experimental.vector.insert.nxv2i64.v4i64(<vscale x 2 x i64> undef, <4 x i64> [[TMP2]], i64 0)
|
||||
; CHECK-NEXT: [[INS2:%.*]] = call <vscale x 2 x i64> @llvm.vector.insert.nxv2i64.v4i64(<vscale x 2 x i64> undef, <4 x i64> [[TMP2]], i64 0)
|
||||
; CHECK-NEXT: [[TMP3:%.*]] = shufflevector <8 x i64> [[TOI1]], <8 x i64> [[TOI2]], <4 x i32> <i32 8, i32 9, i32 10, i32 11>
|
||||
; CHECK-NEXT: [[INS3:%.*]] = call <vscale x 2 x i64> @llvm.experimental.vector.insert.nxv2i64.v4i64(<vscale x 2 x i64> undef, <4 x i64> [[TMP3]], i64 0)
|
||||
; CHECK-NEXT: [[INS3:%.*]] = call <vscale x 2 x i64> @llvm.vector.insert.nxv2i64.v4i64(<vscale x 2 x i64> undef, <4 x i64> [[TMP3]], i64 0)
|
||||
; CHECK-NEXT: [[TMP4:%.*]] = shufflevector <8 x i64> [[TOI1]], <8 x i64> [[TOI2]], <4 x i32> <i32 12, i32 13, i32 14, i32 15>
|
||||
; CHECK-NEXT: [[INS4:%.*]] = call <vscale x 2 x i64> @llvm.experimental.vector.insert.nxv2i64.v4i64(<vscale x 2 x i64> undef, <4 x i64> [[TMP4]], i64 0)
|
||||
; CHECK-NEXT: [[INS4:%.*]] = call <vscale x 2 x i64> @llvm.vector.insert.nxv2i64.v4i64(<vscale x 2 x i64> undef, <4 x i64> [[TMP4]], i64 0)
|
||||
; CHECK-NEXT: [[PTR:%.*]] = bitcast <16 x i32*>* %ptr to i64*
|
||||
; CHECK-NEXT: call void @llvm.aarch64.sve.st4.nxv2i64(<vscale x 2 x i64> [[INS1]], <vscale x 2 x i64> [[INS2]], <vscale x 2 x i64> [[INS3]], <vscale x 2 x i64> [[INS4]], <vscale x 2 x i1> [[PTRUE]], i64* [[PTR]])
|
||||
; CHECK-NEXT: ret void
|
||||
|
@ -254,15 +254,15 @@ define void @load_factor2_wide(<16 x i64>* %ptr) #0 {
|
|||
; CHECK-NEXT: [[PTRUE:%.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.ptrue.nxv2i1(i32 31)
|
||||
; CHECK-NEXT: [[LDN:%.*]] = call { <vscale x 2 x i64>, <vscale x 2 x i64> } @llvm.aarch64.sve.ld2.sret.nxv2i64(<vscale x 2 x i1> [[PTRUE]], i64* [[TMP1]])
|
||||
; CHECK-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64> } [[LDN]], 1
|
||||
; CHECK-NEXT: [[EXT1:%.*]] = call <4 x i64> @llvm.experimental.vector.extract.v4i64.nxv2i64(<vscale x 2 x i64> [[TMP2]], i64 0)
|
||||
; CHECK-NEXT: [[EXT1:%.*]] = call <4 x i64> @llvm.vector.extract.v4i64.nxv2i64(<vscale x 2 x i64> [[TMP2]], i64 0)
|
||||
; CHECK-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64> } [[LDN]], 0
|
||||
; CHECK-NEXT: [[EXT2:%.*]] = call <4 x i64> @llvm.experimental.vector.extract.v4i64.nxv2i64(<vscale x 2 x i64> [[TMP3]], i64 0)
|
||||
; CHECK-NEXT: [[EXT2:%.*]] = call <4 x i64> @llvm.vector.extract.v4i64.nxv2i64(<vscale x 2 x i64> [[TMP3]], i64 0)
|
||||
; CHECK-NEXT: [[TMP4:%.*]] = getelementptr i64, i64* [[TMP1]], i32 8
|
||||
; CHECK-NEXT: [[LDN:%.*]] = call { <vscale x 2 x i64>, <vscale x 2 x i64> } @llvm.aarch64.sve.ld2.sret.nxv2i64(<vscale x 2 x i1> [[PTRUE]], i64* [[TMP4]])
|
||||
; CHECK-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64> } [[LDN]], 1
|
||||
; CHECK-NEXT: [[EXT3:%.*]] = call <4 x i64> @llvm.experimental.vector.extract.v4i64.nxv2i64(<vscale x 2 x i64> [[TMP5]], i64 0)
|
||||
; CHECK-NEXT: [[EXT3:%.*]] = call <4 x i64> @llvm.vector.extract.v4i64.nxv2i64(<vscale x 2 x i64> [[TMP5]], i64 0)
|
||||
; CHECK-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64> } [[LDN]], 0
|
||||
; CHECK-NEXT: [[EXT4:%.*]] = call <4 x i64> @llvm.experimental.vector.extract.v4i64.nxv2i64(<vscale x 2 x i64> [[TMP6]], i64 0)
|
||||
; CHECK-NEXT: [[EXT4:%.*]] = call <4 x i64> @llvm.vector.extract.v4i64.nxv2i64(<vscale x 2 x i64> [[TMP6]], i64 0)
|
||||
; CHECK-NEXT: [[TMP7:%.*]] = shufflevector <4 x i64> [[EXT1]], <4 x i64> [[EXT3]], <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
|
||||
; CHECK-NEXT: [[TMP8:%.*]] = shufflevector <4 x i64> [[EXT2]], <4 x i64> [[EXT4]], <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
|
||||
; CHECK-NEXT: ret void
|
||||
|
@ -277,14 +277,14 @@ define void @store_factor2_wide(<16 x i64>* %ptr, <8 x i64> %v0, <8 x i64> %v1)
|
|||
; CHECK-NEXT: [[TMP1:%.*]] = bitcast <16 x i64>* %ptr to i64*
|
||||
; CHECK-NEXT: [[PTRUE:%.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.ptrue.nxv2i1(i32 31)
|
||||
; CHECK-NEXT: [[TMP2:%.*]] = shufflevector <8 x i64> %v0, <8 x i64> %v1, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
|
||||
; CHECK-NEXT: [[INS1:%.*]] = call <vscale x 2 x i64> @llvm.experimental.vector.insert.nxv2i64.v4i64(<vscale x 2 x i64> undef, <4 x i64> [[TMP2]], i64 0)
|
||||
; CHECK-NEXT: [[INS1:%.*]] = call <vscale x 2 x i64> @llvm.vector.insert.nxv2i64.v4i64(<vscale x 2 x i64> undef, <4 x i64> [[TMP2]], i64 0)
|
||||
; CHECK-NEXT: [[TMP3:%.*]] = shufflevector <8 x i64> %v0, <8 x i64> %v1, <4 x i32> <i32 8, i32 9, i32 10, i32 11>
|
||||
; CHECK-NEXT: [[INS2:%.*]] = call <vscale x 2 x i64> @llvm.experimental.vector.insert.nxv2i64.v4i64(<vscale x 2 x i64> undef, <4 x i64> [[TMP3]], i64 0)
|
||||
; CHECK-NEXT: [[INS2:%.*]] = call <vscale x 2 x i64> @llvm.vector.insert.nxv2i64.v4i64(<vscale x 2 x i64> undef, <4 x i64> [[TMP3]], i64 0)
|
||||
; CHECK-NEXT: call void @llvm.aarch64.sve.st2.nxv2i64(<vscale x 2 x i64> [[INS1]], <vscale x 2 x i64> [[INS2]], <vscale x 2 x i1> [[PTRUE]], i64* [[TMP1]])
|
||||
; CHECK-NEXT: [[TMP4:%.*]] = shufflevector <8 x i64> %v0, <8 x i64> %v1, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
|
||||
; CHECK-NEXT: [[INS3:%.*]] = call <vscale x 2 x i64> @llvm.experimental.vector.insert.nxv2i64.v4i64(<vscale x 2 x i64> undef, <4 x i64> [[TMP4]], i64 0)
|
||||
; CHECK-NEXT: [[INS3:%.*]] = call <vscale x 2 x i64> @llvm.vector.insert.nxv2i64.v4i64(<vscale x 2 x i64> undef, <4 x i64> [[TMP4]], i64 0)
|
||||
; CHECK-NEXT: [[TMP5:%.*]] = shufflevector <8 x i64> %v0, <8 x i64> %v1, <4 x i32> <i32 12, i32 13, i32 14, i32 15>
|
||||
; CHECK-NEXT: [[INS4:%.*]] = call <vscale x 2 x i64> @llvm.experimental.vector.insert.nxv2i64.v4i64(<vscale x 2 x i64> undef, <4 x i64> [[TMP5]], i64 0)
|
||||
; CHECK-NEXT: [[INS4:%.*]] = call <vscale x 2 x i64> @llvm.vector.insert.nxv2i64.v4i64(<vscale x 2 x i64> undef, <4 x i64> [[TMP5]], i64 0)
|
||||
; CHECK-NEXT: [[TMP6:%.*]] = getelementptr i64, i64* [[TMP1]], i32 8
|
||||
; CHECK-NEXT: call void @llvm.aarch64.sve.st2.nxv2i64(<vscale x 2 x i64> [[INS3]], <vscale x 2 x i64> [[INS4]], <vscale x 2 x i1> [[PTRUE]], i64* [[TMP6]])
|
||||
; CHECK-NEXT: ret void
|
||||
|
@ -357,13 +357,13 @@ define void @load_double_factor4(<16 x double>* %ptr) #0 {
|
|||
; CHECK-NEXT: [[TMP2:%.*]] = bitcast <16 x double>* [[PTR:%.*]] to double*
|
||||
; CHECK-NEXT: [[LDN:%.*]] = call { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } @llvm.aarch64.sve.ld4.sret.nxv2f64(<vscale x 2 x i1> [[TMP1]], double* [[TMP2]])
|
||||
; CHECK-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } [[LDN]], 3
|
||||
; CHECK-NEXT: [[TMP4:%.*]] = call <4 x double> @llvm.experimental.vector.extract.v4f64.nxv2f64(<vscale x 2 x double> [[TMP3]], i64 0)
|
||||
; CHECK-NEXT: [[TMP4:%.*]] = call <4 x double> @llvm.vector.extract.v4f64.nxv2f64(<vscale x 2 x double> [[TMP3]], i64 0)
|
||||
; CHECK-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } [[LDN]], 2
|
||||
; CHECK-NEXT: [[TMP6:%.*]] = call <4 x double> @llvm.experimental.vector.extract.v4f64.nxv2f64(<vscale x 2 x double> [[TMP5]], i64 0)
|
||||
; CHECK-NEXT: [[TMP6:%.*]] = call <4 x double> @llvm.vector.extract.v4f64.nxv2f64(<vscale x 2 x double> [[TMP5]], i64 0)
|
||||
; CHECK-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } [[LDN]], 1
|
||||
; CHECK-NEXT: [[TMP8:%.*]] = call <4 x double> @llvm.experimental.vector.extract.v4f64.nxv2f64(<vscale x 2 x double> [[TMP7]], i64 0)
|
||||
; CHECK-NEXT: [[TMP8:%.*]] = call <4 x double> @llvm.vector.extract.v4f64.nxv2f64(<vscale x 2 x double> [[TMP7]], i64 0)
|
||||
; CHECK-NEXT: [[TMP9:%.*]] = extractvalue { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } [[LDN]], 0
|
||||
; CHECK-NEXT: [[TMP10:%.*]] = call <4 x double> @llvm.experimental.vector.extract.v4f64.nxv2f64(<vscale x 2 x double> [[TMP9]], i64 0)
|
||||
; CHECK-NEXT: [[TMP10:%.*]] = call <4 x double> @llvm.vector.extract.v4f64.nxv2f64(<vscale x 2 x double> [[TMP9]], i64 0)
|
||||
; CHECK-NEXT: ret void
|
||||
;
|
||||
%interleaved.vec = load <16 x double>, <16 x double>* %ptr, align 4
|
||||
|
@ -380,11 +380,11 @@ define void @load_float_factor3(<24 x float>* %ptr) #0 {
|
|||
; CHECK-NEXT: [[TMP2:%.*]] = bitcast <24 x float>* [[PTR:%.*]] to float*
|
||||
; CHECK-NEXT: [[LDN:%.*]] = call { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } @llvm.aarch64.sve.ld3.sret.nxv4f32(<vscale x 4 x i1> [[TMP1]], float* [[TMP2]])
|
||||
; CHECK-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } [[LDN]], 2
|
||||
; CHECK-NEXT: [[TMP4:%.*]] = call <8 x float> @llvm.experimental.vector.extract.v8f32.nxv4f32(<vscale x 4 x float> [[TMP3]], i64 0)
|
||||
; CHECK-NEXT: [[TMP4:%.*]] = call <8 x float> @llvm.vector.extract.v8f32.nxv4f32(<vscale x 4 x float> [[TMP3]], i64 0)
|
||||
; CHECK-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } [[LDN]], 1
|
||||
; CHECK-NEXT: [[TMP6:%.*]] = call <8 x float> @llvm.experimental.vector.extract.v8f32.nxv4f32(<vscale x 4 x float> [[TMP5]], i64 0)
|
||||
; CHECK-NEXT: [[TMP6:%.*]] = call <8 x float> @llvm.vector.extract.v8f32.nxv4f32(<vscale x 4 x float> [[TMP5]], i64 0)
|
||||
; CHECK-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } [[LDN]], 0
|
||||
; CHECK-NEXT: [[TMP8:%.*]] = call <8 x float> @llvm.experimental.vector.extract.v8f32.nxv4f32(<vscale x 4 x float> [[TMP7]], i64 0)
|
||||
; CHECK-NEXT: [[TMP8:%.*]] = call <8 x float> @llvm.vector.extract.v8f32.nxv4f32(<vscale x 4 x float> [[TMP7]], i64 0)
|
||||
; CHECK-NEXT: ret void
|
||||
;
|
||||
%interleaved.vec = load <24 x float>, <24 x float>* %ptr, align 4
|
||||
|
@ -400,9 +400,9 @@ define void @load_half_factor2(<32 x half>* %ptr) #0 {
|
|||
; CHECK-NEXT: [[TMP2:%.*]] = bitcast <32 x half>* [[PTR:%.*]] to half*
|
||||
; CHECK-NEXT: [[LDN:%.*]] = call { <vscale x 8 x half>, <vscale x 8 x half> } @llvm.aarch64.sve.ld2.sret.nxv8f16(<vscale x 8 x i1> [[TMP1]], half* [[TMP2]])
|
||||
; CHECK-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half> } [[LDN]], 1
|
||||
; CHECK-NEXT: [[TMP4:%.*]] = call <16 x half> @llvm.experimental.vector.extract.v16f16.nxv8f16(<vscale x 8 x half> [[TMP3]], i64 0)
|
||||
; CHECK-NEXT: [[TMP4:%.*]] = call <16 x half> @llvm.vector.extract.v16f16.nxv8f16(<vscale x 8 x half> [[TMP3]], i64 0)
|
||||
; CHECK-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half> } [[LDN]], 0
|
||||
; CHECK-NEXT: [[TMP6:%.*]] = call <16 x half> @llvm.experimental.vector.extract.v16f16.nxv8f16(<vscale x 8 x half> [[TMP5]], i64 0)
|
||||
; CHECK-NEXT: [[TMP6:%.*]] = call <16 x half> @llvm.vector.extract.v16f16.nxv8f16(<vscale x 8 x half> [[TMP5]], i64 0)
|
||||
; CHECK-NEXT: ret void
|
||||
;
|
||||
%interleaved.vec = load <32 x half>, <32 x half>* %ptr, align 4
|
||||
|
@ -417,9 +417,9 @@ define void @load_bfloat_factor2(<32 x bfloat>* %ptr) #0 {
|
|||
; CHECK-NEXT: [[TMP2:%.*]] = bitcast <32 x bfloat>* [[PTR:%.*]] to bfloat*
|
||||
; CHECK-NEXT: [[LDN:%.*]] = call { <vscale x 8 x bfloat>, <vscale x 8 x bfloat> } @llvm.aarch64.sve.ld2.sret.nxv8bf16(<vscale x 8 x i1> [[TMP1]], bfloat* [[TMP2]])
|
||||
; CHECK-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 8 x bfloat>, <vscale x 8 x bfloat> } [[LDN]], 1
|
||||
; CHECK-NEXT: [[TMP4:%.*]] = call <16 x bfloat> @llvm.experimental.vector.extract.v16bf16.nxv8bf16(<vscale x 8 x bfloat> [[TMP3]], i64 0)
|
||||
; CHECK-NEXT: [[TMP4:%.*]] = call <16 x bfloat> @llvm.vector.extract.v16bf16.nxv8bf16(<vscale x 8 x bfloat> [[TMP3]], i64 0)
|
||||
; CHECK-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 8 x bfloat>, <vscale x 8 x bfloat> } [[LDN]], 0
|
||||
; CHECK-NEXT: [[TMP6:%.*]] = call <16 x bfloat> @llvm.experimental.vector.extract.v16bf16.nxv8bf16(<vscale x 8 x bfloat> [[TMP5]], i64 0)
|
||||
; CHECK-NEXT: [[TMP6:%.*]] = call <16 x bfloat> @llvm.vector.extract.v16bf16.nxv8bf16(<vscale x 8 x bfloat> [[TMP5]], i64 0)
|
||||
; CHECK-NEXT: ret void
|
||||
;
|
||||
%interleaved.vec = load <32 x bfloat>, <32 x bfloat>* %ptr, align 4
|
||||
|
@ -434,13 +434,13 @@ define void @store_double_factor4(<16 x double>* %ptr, <4 x double> %v0, <4 x do
|
|||
; CHECK-NEXT: [[S1:%.*]] = shufflevector <4 x double> [[V2:%.*]], <4 x double> [[V3:%.*]], <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
|
||||
; CHECK-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.ptrue.nxv2i1(i32 31)
|
||||
; CHECK-NEXT: [[TMP2:%.*]] = shufflevector <8 x double> [[S0]], <8 x double> [[S1]], <4 x i32> <i32 0, i32 1, i32 2, i32 3>
|
||||
; CHECK-NEXT: [[TMP3:%.*]] = call <vscale x 2 x double> @llvm.experimental.vector.insert.nxv2f64.v4f64(<vscale x 2 x double> undef, <4 x double> [[TMP2]], i64 0)
|
||||
; CHECK-NEXT: [[TMP3:%.*]] = call <vscale x 2 x double> @llvm.vector.insert.nxv2f64.v4f64(<vscale x 2 x double> undef, <4 x double> [[TMP2]], i64 0)
|
||||
; CHECK-NEXT: [[TMP4:%.*]] = shufflevector <8 x double> [[S0]], <8 x double> [[S1]], <4 x i32> <i32 4, i32 5, i32 6, i32 7>
|
||||
; CHECK-NEXT: [[TMP5:%.*]] = call <vscale x 2 x double> @llvm.experimental.vector.insert.nxv2f64.v4f64(<vscale x 2 x double> undef, <4 x double> [[TMP4]], i64 0)
|
||||
; CHECK-NEXT: [[TMP5:%.*]] = call <vscale x 2 x double> @llvm.vector.insert.nxv2f64.v4f64(<vscale x 2 x double> undef, <4 x double> [[TMP4]], i64 0)
|
||||
; CHECK-NEXT: [[TMP6:%.*]] = shufflevector <8 x double> [[S0]], <8 x double> [[S1]], <4 x i32> <i32 8, i32 9, i32 10, i32 11>
|
||||
; CHECK-NEXT: [[TMP7:%.*]] = call <vscale x 2 x double> @llvm.experimental.vector.insert.nxv2f64.v4f64(<vscale x 2 x double> undef, <4 x double> [[TMP6]], i64 0)
|
||||
; CHECK-NEXT: [[TMP7:%.*]] = call <vscale x 2 x double> @llvm.vector.insert.nxv2f64.v4f64(<vscale x 2 x double> undef, <4 x double> [[TMP6]], i64 0)
|
||||
; CHECK-NEXT: [[TMP8:%.*]] = shufflevector <8 x double> [[S0]], <8 x double> [[S1]], <4 x i32> <i32 12, i32 13, i32 14, i32 15>
|
||||
; CHECK-NEXT: [[TMP9:%.*]] = call <vscale x 2 x double> @llvm.experimental.vector.insert.nxv2f64.v4f64(<vscale x 2 x double> undef, <4 x double> [[TMP8]], i64 0)
|
||||
; CHECK-NEXT: [[TMP9:%.*]] = call <vscale x 2 x double> @llvm.vector.insert.nxv2f64.v4f64(<vscale x 2 x double> undef, <4 x double> [[TMP8]], i64 0)
|
||||
; CHECK-NEXT: [[TMP10:%.*]] = bitcast <16 x double>* [[PTR:%.*]] to double*
|
||||
; CHECK-NEXT: call void @llvm.aarch64.sve.st4.nxv2f64(<vscale x 2 x double> [[TMP3]], <vscale x 2 x double> [[TMP5]], <vscale x 2 x double> [[TMP7]], <vscale x 2 x double> [[TMP9]], <vscale x 2 x i1> [[TMP1]], double* [[TMP10]])
|
||||
; CHECK-NEXT: ret void
|
||||
|
@ -458,11 +458,11 @@ define void @store_float_factor3(<24 x float>* %ptr, <8 x float> %v0, <8 x float
|
|||
; CHECK-NEXT: [[S1:%.*]] = shufflevector <8 x float> [[V2:%.*]], <8 x float> poison, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
|
||||
; CHECK-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.ptrue.nxv4i1(i32 31)
|
||||
; CHECK-NEXT: [[TMP2:%.*]] = shufflevector <16 x float> [[S0]], <16 x float> [[S1]], <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
|
||||
; CHECK-NEXT: [[TMP3:%.*]] = call <vscale x 4 x float> @llvm.experimental.vector.insert.nxv4f32.v8f32(<vscale x 4 x float> undef, <8 x float> [[TMP2]], i64 0)
|
||||
; CHECK-NEXT: [[TMP3:%.*]] = call <vscale x 4 x float> @llvm.vector.insert.nxv4f32.v8f32(<vscale x 4 x float> undef, <8 x float> [[TMP2]], i64 0)
|
||||
; CHECK-NEXT: [[TMP4:%.*]] = shufflevector <16 x float> [[S0]], <16 x float> [[S1]], <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
|
||||
; CHECK-NEXT: [[TMP5:%.*]] = call <vscale x 4 x float> @llvm.experimental.vector.insert.nxv4f32.v8f32(<vscale x 4 x float> undef, <8 x float> [[TMP4]], i64 0)
|
||||
; CHECK-NEXT: [[TMP5:%.*]] = call <vscale x 4 x float> @llvm.vector.insert.nxv4f32.v8f32(<vscale x 4 x float> undef, <8 x float> [[TMP4]], i64 0)
|
||||
; CHECK-NEXT: [[TMP6:%.*]] = shufflevector <16 x float> [[S0]], <16 x float> [[S1]], <8 x i32> <i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23>
|
||||
; CHECK-NEXT: [[TMP7:%.*]] = call <vscale x 4 x float> @llvm.experimental.vector.insert.nxv4f32.v8f32(<vscale x 4 x float> undef, <8 x float> [[TMP6]], i64 0)
|
||||
; CHECK-NEXT: [[TMP7:%.*]] = call <vscale x 4 x float> @llvm.vector.insert.nxv4f32.v8f32(<vscale x 4 x float> undef, <8 x float> [[TMP6]], i64 0)
|
||||
; CHECK-NEXT: [[TMP8:%.*]] = bitcast <24 x float>* [[PTR:%.*]] to float*
|
||||
; CHECK-NEXT: call void @llvm.aarch64.sve.st3.nxv4f32(<vscale x 4 x float> [[TMP3]], <vscale x 4 x float> [[TMP5]], <vscale x 4 x float> [[TMP7]], <vscale x 4 x i1> [[TMP1]], float* [[TMP8]])
|
||||
; CHECK-NEXT: ret void
|
||||
|
@ -481,9 +481,9 @@ define void @store_half_factor2(<32 x half>* %ptr, <16 x half> %v0, <16 x half>
|
|||
; CHECK-LABEL: @store_half_factor2(
|
||||
; CHECK-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.ptrue.nxv8i1(i32 31)
|
||||
; CHECK-NEXT: [[TMP2:%.*]] = shufflevector <16 x half> [[V0:%.*]], <16 x half> [[V1:%.*]], <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
|
||||
; CHECK-NEXT: [[TMP3:%.*]] = call <vscale x 8 x half> @llvm.experimental.vector.insert.nxv8f16.v16f16(<vscale x 8 x half> undef, <16 x half> [[TMP2]], i64 0)
|
||||
; CHECK-NEXT: [[TMP3:%.*]] = call <vscale x 8 x half> @llvm.vector.insert.nxv8f16.v16f16(<vscale x 8 x half> undef, <16 x half> [[TMP2]], i64 0)
|
||||
; CHECK-NEXT: [[TMP4:%.*]] = shufflevector <16 x half> [[V0]], <16 x half> [[V1]], <16 x i32> <i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
|
||||
; CHECK-NEXT: [[TMP5:%.*]] = call <vscale x 8 x half> @llvm.experimental.vector.insert.nxv8f16.v16f16(<vscale x 8 x half> undef, <16 x half> [[TMP4]], i64 0)
|
||||
; CHECK-NEXT: [[TMP5:%.*]] = call <vscale x 8 x half> @llvm.vector.insert.nxv8f16.v16f16(<vscale x 8 x half> undef, <16 x half> [[TMP4]], i64 0)
|
||||
; CHECK-NEXT: [[TMP6:%.*]] = bitcast <32 x half>* [[PTR:%.*]] to half*
|
||||
; CHECK-NEXT: call void @llvm.aarch64.sve.st2.nxv8f16(<vscale x 8 x half> [[TMP3]], <vscale x 8 x half> [[TMP5]], <vscale x 8 x i1> [[TMP1]], half* [[TMP6]])
|
||||
; CHECK-NEXT: ret void
|
||||
|
@ -499,9 +499,9 @@ define void @store_bfloat_factor2(<32 x bfloat>* %ptr, <16 x bfloat> %v0, <16 x
|
|||
; CHECK-LABEL: @store_bfloat_factor2(
|
||||
; CHECK-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.ptrue.nxv8i1(i32 31)
|
||||
; CHECK-NEXT: [[TMP2:%.*]] = shufflevector <16 x bfloat> [[V0:%.*]], <16 x bfloat> [[V1:%.*]], <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
|
||||
; CHECK-NEXT: [[TMP3:%.*]] = call <vscale x 8 x bfloat> @llvm.experimental.vector.insert.nxv8bf16.v16bf16(<vscale x 8 x bfloat> undef, <16 x bfloat> [[TMP2]], i64 0)
|
||||
; CHECK-NEXT: [[TMP3:%.*]] = call <vscale x 8 x bfloat> @llvm.vector.insert.nxv8bf16.v16bf16(<vscale x 8 x bfloat> undef, <16 x bfloat> [[TMP2]], i64 0)
|
||||
; CHECK-NEXT: [[TMP4:%.*]] = shufflevector <16 x bfloat> [[V0]], <16 x bfloat> [[V1]], <16 x i32> <i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
|
||||
; CHECK-NEXT: [[TMP5:%.*]] = call <vscale x 8 x bfloat> @llvm.experimental.vector.insert.nxv8bf16.v16bf16(<vscale x 8 x bfloat> undef, <16 x bfloat> [[TMP4]], i64 0)
|
||||
; CHECK-NEXT: [[TMP5:%.*]] = call <vscale x 8 x bfloat> @llvm.vector.insert.nxv8bf16.v16bf16(<vscale x 8 x bfloat> undef, <16 x bfloat> [[TMP4]], i64 0)
|
||||
; CHECK-NEXT: [[TMP6:%.*]] = bitcast <32 x bfloat>* [[PTR:%.*]] to bfloat*
|
||||
; CHECK-NEXT: call void @llvm.aarch64.sve.st2.nxv8bf16(<vscale x 8 x bfloat> [[TMP3]], <vscale x 8 x bfloat> [[TMP5]], <vscale x 8 x i1> [[TMP1]], bfloat* [[TMP6]])
|
||||
; CHECK-NEXT: ret void
|
||||
|
|
|
@ -1,9 +1,9 @@
; RUN: not opt -verify -S < %s 2>&1 >/dev/null | FileCheck %s

; CHECK: experimental_vector_extract result must have the same element type as the input vector.
; CHECK: vector_extract result must have the same element type as the input vector.
define <16 x i16> @invalid_mismatched_element_types(<vscale x 16 x i8> %vec) nounwind {
%retval = call <16 x i16> @llvm.experimental.vector.extract.v16i16.nxv16i8(<vscale x 16 x i8> %vec, i64 0)
%retval = call <16 x i16> @llvm.vector.extract.v16i16.nxv16i8(<vscale x 16 x i8> %vec, i64 0)
ret <16 x i16> %retval
}

declare <16 x i16> @llvm.experimental.vector.extract.v16i16.nxv16i8(<vscale x 16 x i8>, i64)
declare <16 x i16> @llvm.vector.extract.v16i16.nxv16i8(<vscale x 16 x i8>, i64)
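
; Illustrative counterpart (not part of the original patch, added here for
; clarity): once the result element type matches the source element type, the
; same extract is accepted by the verifier. The i8 overload below is assumed
; for illustration only.
define <16 x i8> @valid_matching_element_types(<vscale x 16 x i8> %vec) nounwind {
  %retval = call <16 x i8> @llvm.vector.extract.v16i8.nxv16i8(<vscale x 16 x i8> %vec, i64 0)
  ret <16 x i8> %retval
}

declare <16 x i8> @llvm.vector.extract.v16i8.nxv16i8(<vscale x 16 x i8>, i64)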
@ -4,15 +4,15 @@
; Test that extractions/insertion indices are validated.
;

; CHECK: experimental_vector_extract index must be a constant multiple of the result type's known minimum vector length.
; CHECK: vector_extract index must be a constant multiple of the result type's known minimum vector length.
define <4 x i32> @extract_idx_not_constant_multiple(<8 x i32> %vec) {
%1 = call <4 x i32> @llvm.experimental.vector.extract.v4i32.v8i32(<8 x i32> %vec, i64 1)
%1 = call <4 x i32> @llvm.vector.extract.v4i32.v8i32(<8 x i32> %vec, i64 1)
ret <4 x i32> %1
}

; CHECK: experimental_vector_insert index must be a constant multiple of the subvector's known minimum vector length.
; CHECK: vector_insert index must be a constant multiple of the subvector's known minimum vector length.
define <8 x i32> @insert_idx_not_constant_multiple(<8 x i32> %vec, <4 x i32> %subvec) {
%1 = call <8 x i32> @llvm.experimental.vector.insert.v8i32.v4i32(<8 x i32> %vec, <4 x i32> %subvec, i64 2)
%1 = call <8 x i32> @llvm.vector.insert.v8i32.v4i32(<8 x i32> %vec, <4 x i32> %subvec, i64 2)
ret <8 x i32> %1
}
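
; Illustrative counterpart (not part of the original patch, added for clarity):
; with an index that is a constant multiple of the result type's minimum vector
; length (4 here), the same extract is well-formed and draws no verifier error.
; It reuses the @llvm.vector.extract.v4i32.v8i32 declaration from this file.
define <4 x i32> @extract_idx_constant_multiple(<8 x i32> %vec) {
  %1 = call <4 x i32> @llvm.vector.extract.v4i32.v8i32(<8 x i32> %vec, i64 4)
  ret <4 x i32> %1
}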

@ -20,53 +20,53 @@ define <8 x i32> @insert_idx_not_constant_multiple(<8 x i32> %vec, <4 x i32> %su
; Test that extractions/insertions which 'overrun' are captured.
;

; CHECK: experimental_vector_extract would overrun.
; CHECK: vector_extract would overrun.
define <3 x i32> @extract_overrun_fixed_fixed(<8 x i32> %vec) {
%1 = call <3 x i32> @llvm.experimental.vector.extract.v8i32.v3i32(<8 x i32> %vec, i64 6)
%1 = call <3 x i32> @llvm.vector.extract.v8i32.v3i32(<8 x i32> %vec, i64 6)
ret <3 x i32> %1
}

; CHECK: experimental_vector_extract would overrun.
; CHECK: vector_extract would overrun.
define <vscale x 3 x i32> @extract_overrun_scalable_scalable(<vscale x 8 x i32> %vec) {
%1 = call <vscale x 3 x i32> @llvm.experimental.vector.extract.nxv8i32.nxv3i32(<vscale x 8 x i32> %vec, i64 6)
%1 = call <vscale x 3 x i32> @llvm.vector.extract.nxv8i32.nxv3i32(<vscale x 8 x i32> %vec, i64 6)
ret <vscale x 3 x i32> %1
}

; We cannot statically check whether or not an extraction of a fixed vector
; from a scalable vector would overrun, because we can't compare the sizes of
; the two. Therefore, this function should not raise verifier errors.
; CHECK-NOT: experimental_vector_extract
; CHECK-NOT: vector_extract
define <3 x i32> @extract_overrun_scalable_fixed(<vscale x 8 x i32> %vec) {
%1 = call <3 x i32> @llvm.experimental.vector.extract.nxv8i32.v3i32(<vscale x 8 x i32> %vec, i64 6)
%1 = call <3 x i32> @llvm.vector.extract.nxv8i32.v3i32(<vscale x 8 x i32> %vec, i64 6)
ret <3 x i32> %1
}

; CHECK: subvector operand of experimental_vector_insert would overrun the vector being inserted into.
; CHECK: subvector operand of vector_insert would overrun the vector being inserted into.
define <8 x i32> @insert_overrun_fixed_fixed(<8 x i32> %vec, <3 x i32> %subvec) {
%1 = call <8 x i32> @llvm.experimental.vector.insert.v8i32.v3i32(<8 x i32> %vec, <3 x i32> %subvec, i64 6)
%1 = call <8 x i32> @llvm.vector.insert.v8i32.v3i32(<8 x i32> %vec, <3 x i32> %subvec, i64 6)
ret <8 x i32> %1
}

; CHECK: subvector operand of experimental_vector_insert would overrun the vector being inserted into.
; CHECK: subvector operand of vector_insert would overrun the vector being inserted into.
define <vscale x 8 x i32> @insert_overrun_scalable_scalable(<vscale x 8 x i32> %vec, <vscale x 3 x i32> %subvec) {
%1 = call <vscale x 8 x i32> @llvm.experimental.vector.insert.nxv8i32.nxv3i32(<vscale x 8 x i32> %vec, <vscale x 3 x i32> %subvec, i64 6)
%1 = call <vscale x 8 x i32> @llvm.vector.insert.nxv8i32.nxv3i32(<vscale x 8 x i32> %vec, <vscale x 3 x i32> %subvec, i64 6)
ret <vscale x 8 x i32> %1
}

; We cannot statically check whether or not an insertion of a fixed vector into
; a scalable vector would overrun, because we can't compare the sizes of the
; two. Therefore, this function should not raise verifier errors.
; CHECK-NOT: experimental_vector_insert
; CHECK-NOT: vector_insert
define <vscale x 8 x i32> @insert_overrun_scalable_fixed(<vscale x 8 x i32> %vec, <3 x i32> %subvec) {
%1 = call <vscale x 8 x i32> @llvm.experimental.vector.insert.nxv8i32.v3i32(<vscale x 8 x i32> %vec, <3 x i32> %subvec, i64 6)
%1 = call <vscale x 8 x i32> @llvm.vector.insert.nxv8i32.v3i32(<vscale x 8 x i32> %vec, <3 x i32> %subvec, i64 6)
ret <vscale x 8 x i32> %1
}

declare <vscale x 3 x i32> @llvm.experimental.vector.extract.nxv8i32.nxv3i32(<vscale x 8 x i32>, i64)
declare <vscale x 8 x i32> @llvm.experimental.vector.insert.nxv8i32.nxv3i32(<vscale x 8 x i32>, <vscale x 3 x i32>, i64)
declare <vscale x 8 x i32> @llvm.experimental.vector.insert.nxv8i32.v3i32(<vscale x 8 x i32>, <3 x i32>, i64)
declare <3 x i32> @llvm.experimental.vector.extract.nxv8i32.v3i32(<vscale x 8 x i32>, i64)
declare <3 x i32> @llvm.experimental.vector.extract.v8i32.v3i32(<8 x i32>, i64)
declare <4 x i32> @llvm.experimental.vector.extract.v4i32.v8i32(<8 x i32>, i64)
declare <8 x i32> @llvm.experimental.vector.insert.v8i32.v3i32(<8 x i32>, <3 x i32>, i64)
declare <8 x i32> @llvm.experimental.vector.insert.v8i32.v4i32(<8 x i32>, <4 x i32>, i64)
declare <vscale x 3 x i32> @llvm.vector.extract.nxv8i32.nxv3i32(<vscale x 8 x i32>, i64)
declare <vscale x 8 x i32> @llvm.vector.insert.nxv8i32.nxv3i32(<vscale x 8 x i32>, <vscale x 3 x i32>, i64)
declare <vscale x 8 x i32> @llvm.vector.insert.nxv8i32.v3i32(<vscale x 8 x i32>, <3 x i32>, i64)
declare <3 x i32> @llvm.vector.extract.nxv8i32.v3i32(<vscale x 8 x i32>, i64)
declare <3 x i32> @llvm.vector.extract.v8i32.v3i32(<8 x i32>, i64)
declare <4 x i32> @llvm.vector.extract.v4i32.v8i32(<8 x i32>, i64)
declare <8 x i32> @llvm.vector.insert.v8i32.v3i32(<8 x i32>, <3 x i32>, i64)
declare <8 x i32> @llvm.vector.insert.v8i32.v4i32(<8 x i32>, <4 x i32>, i64)
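
; Illustrative counterpart (not part of the original patch, added for clarity):
; an insert whose end stays within the destination vector (index 0, 0 + 3 <= 8)
; triggers no overrun diagnostic; it reuses the declarations above.
define <8 x i32> @insert_no_overrun_fixed_fixed(<8 x i32> %vec, <3 x i32> %subvec) {
  %1 = call <8 x i32> @llvm.vector.insert.v8i32.v3i32(<8 x i32> %vec, <3 x i32> %subvec, i64 0)
  ret <8 x i32> %1
}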
@ -1,9 +1,9 @@
; RUN: not opt -verify -S < %s 2>&1 >/dev/null | FileCheck %s

; CHECK: experimental_vector_insert parameters must have the same element type.
; CHECK: vector_insert parameters must have the same element type.
define <vscale x 16 x i8> @invalid_mismatched_element_types(<vscale x 16 x i8> %vec, <4 x i16> %subvec) nounwind {
%retval = call <vscale x 16 x i8> @llvm.experimental.vector.insert.nxv16i8.v4i16(<vscale x 16 x i8> %vec, <4 x i16> %subvec, i64 0)
%retval = call <vscale x 16 x i8> @llvm.vector.insert.nxv16i8.v4i16(<vscale x 16 x i8> %vec, <4 x i16> %subvec, i64 0)
ret <vscale x 16 x i8> %retval
}

declare <vscale x 16 x i8> @llvm.experimental.vector.insert.nxv16i8.v4i16(<vscale x 16 x i8>, <4 x i16>, i64)
declare <vscale x 16 x i8> @llvm.vector.insert.nxv16i8.v4i16(<vscale x 16 x i8>, <4 x i16>, i64)
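
; Illustrative counterpart (not part of the original patch, added for clarity):
; with matching i8 element types the same insert is well-formed. The <4 x i8>
; overload below is assumed for illustration only.
define <vscale x 16 x i8> @valid_matching_element_types(<vscale x 16 x i8> %vec, <4 x i8> %subvec) nounwind {
  %retval = call <vscale x 16 x i8> @llvm.vector.insert.nxv16i8.v4i8(<vscale x 16 x i8> %vec, <4 x i8> %subvec, i64 0)
  ret <vscale x 16 x i8> %retval
}

declare <vscale x 16 x i8> @llvm.vector.insert.nxv16i8.v4i8(<vscale x 16 x i8>, <4 x i8>, i64)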