diff --git a/clang/include/clang/Basic/IdentifierTable.h b/clang/include/clang/Basic/IdentifierTable.h
index e074a7d3254c..f2379c7ddfbd 100644
--- a/clang/include/clang/Basic/IdentifierTable.h
+++ b/clang/include/clang/Basic/IdentifierTable.h
@@ -56,7 +56,7 @@ using IdentifierLocPair = std::pair<IdentifierInfo *, SourceLocation>;
 /// of a pointer to one of these classes.
 enum { IdentifierInfoAlignment = 8 };
 
-static constexpr int ObjCOrBuiltinIDBits = 15;
+static constexpr int ObjCOrBuiltinIDBits = 16;
 
 /// One of these records is kept for each identifier that
 /// is lexed. This contains information about whether the token was \#define'd,
diff --git a/clang/include/clang/Basic/riscv_vector.td b/clang/include/clang/Basic/riscv_vector.td
index 7cdec1477962..495e5ef45218 100644
--- a/clang/include/clang/Basic/riscv_vector.td
+++ b/clang/include/clang/Basic/riscv_vector.td
@@ -203,6 +203,8 @@ class RVVBuiltin {
   }
 }
 
+defvar NFList = [2, 3, 4, 5, 6, 7, 8];
+
+class PVString<int nf, bit signed> {
+  string S =
+    !cond(!eq(nf, 2): !if(signed, "PvPv", "PUvPUv"),
+          !eq(nf, 3): !if(signed, "PvPvPv", "PUvPUvPUv"),
+          !eq(nf, 4): !if(signed, "PvPvPvPv", "PUvPUvPUvPUv"),
+          !eq(nf, 5): !if(signed, "PvPvPvPvPv", "PUvPUvPUvPUvPUv"),
+          !eq(nf, 6): !if(signed, "PvPvPvPvPvPv", "PUvPUvPUvPUvPUvPUv"),
+          !eq(nf, 7): !if(signed, "PvPvPvPvPvPvPv", "PUvPUvPUvPUvPUvPUvPUv"),
+          !eq(nf, 8): !if(signed, "PvPvPvPvPvPvPvPv", "PUvPUvPUvPUvPUvPUvPUvPUv"));
+}
+
+multiclass RVVUnitStridedSegLoad<string op> {
+  foreach type = TypeList in {
+    defvar eew = !cond(!eq(type, "c") : "8",
+                       !eq(type, "s") : "16",
+                       !eq(type, "i") : "32",
+                       !eq(type, "l") : "64",
+                       !eq(type, "h") : "16",
+                       !eq(type, "f") : "32",
+                       !eq(type, "d") : "64");
+    foreach nf = NFList in {
+      let Name = op # nf # "e" # eew # "_v",
+          IRName = op # nf,
+          IRNameMask = op # nf # "_mask",
+          NF = nf,
+          HasNoMaskedOverloaded = false,
+          ManualCodegen = [{
+    {
+      // builtin: (val0 address, val1 address, ..., ptr, vl)
+      IntrinsicTypes = {Ops[0]->getType()->getPointerElementType(),
+                        Ops[NF + 1]->getType()};
+      // intrinsic: (ptr, vl)
+      llvm::Value *Operands[] = {Ops[NF], Ops[NF + 1]};
+      llvm::Function *F = CGM.getIntrinsic(ID, IntrinsicTypes);
+      llvm::Value *LoadValue = Builder.CreateCall(F, Operands, "");
+      clang::CharUnits Align =
+          CGM.getNaturalTypeAlignment(getContext().getSizeType());
+      llvm::Value *V;
+      for (unsigned I = 0; I < NF; ++I) {
+        V = Builder.CreateStore(Builder.CreateExtractValue(LoadValue, {I}),
+                                Address(Ops[I], Align));
+      }
+      return V;
+    }
+          }],
+          ManualCodegenMask = [{
+    {
+      // builtin: (val0 address, ..., mask, maskedoff0, ..., ptr, vl)
+      // intrinsic: (maskedoff0, ..., ptr, mask, vl)
+      IntrinsicTypes = {Ops[0]->getType()->getPointerElementType(),
+                        Ops[2 * NF + 2]->getType()};
+      SmallVector<llvm::Value*, 12> Operands;
+      for (unsigned I = 0; I < NF; ++I)
+        Operands.push_back(Ops[NF + I + 1]);
+      Operands.push_back(Ops[2 * NF + 1]);
+      Operands.push_back(Ops[NF]);
+      Operands.push_back(Ops[2 * NF + 2]);
+      assert(Operands.size() == NF + 3);
+      llvm::Function *F = CGM.getIntrinsic(ID, IntrinsicTypes);
+      llvm::Value *LoadValue = Builder.CreateCall(F, Operands, "");
+      clang::CharUnits Align =
+          CGM.getNaturalTypeAlignment(getContext().getSizeType());
+      llvm::Value *V;
+      for (unsigned I = 0; I < NF; ++I) {
+        V = Builder.CreateStore(Builder.CreateExtractValue(LoadValue, {I}),
+                                Address(Ops[I], Align));
+      }
+      return V;
+    }
+          }] in {
+        defvar PV = PVString<nf, /*signed=*/true>.S;
+        defvar PUV = PVString<nf, /*signed=*/false>.S;
+        def : RVVBuiltin<"v", "0" # PV # "PCe", type>;
+        if !not(IsFloat<type>.val) then {
+          def : RVVBuiltin<"Uv", "0" # PUV # "PCUe", type>;
+        }
+      }
+    }
+  }
+}
+
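For context, a minimal sketch of how one of the non-masked builtins generated by RVVUnitStridedSegLoad above would be used from C. The name vlseg2e8_v_i8m1 is an assumption derived from the Name pattern (op # nf # "e" # eew # "_v") plus the usual type suffix; it is not spelled out in this diff.

    // Sketch only: deinterleave pairs from base[] into two vectors.
    #include <stddef.h>
    #include <stdint.h>
    #include <riscv_vector.h>

    void deinterleave2(const int8_t *base, size_t vl,
                       vint8m1_t *even, vint8m1_t *odd) {
      // Per the ManualCodegen comment above, the builtin takes
      // (val0 address, val1 address, ptr, vl), calls the
      // llvm.riscv.vlseg2 intrinsic with (ptr, vl), and stores each
      // extracted result through the val pointers.
      vlseg2e8_v_i8m1(even, odd, base, vl);
    }
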
+multiclass RVVUnitStridedSegLoadFF<string op> {
+  foreach type = TypeList in {
+    defvar eew = !cond(!eq(type, "c") : "8",
+                       !eq(type, "s") : "16",
+                       !eq(type, "i") : "32",
+                       !eq(type, "l") : "64",
+                       !eq(type, "h") : "16",
+                       !eq(type, "f") : "32",
+                       !eq(type, "d") : "64");
+    foreach nf = NFList in {
+      let Name = op # nf # "e" # eew # "ff_v",
+          IRName = op # nf # "ff",
+          IRNameMask = op # nf # "ff_mask",
+          NF = nf,
+          HasNoMaskedOverloaded = false,
+          ManualCodegen = [{
+    {
+      // builtin: (val0 address, val1 address, ..., ptr, new_vl, vl)
+      IntrinsicTypes = {Ops[0]->getType()->getPointerElementType(),
+                        Ops[NF + 2]->getType()};
+      // intrinsic: (ptr, vl)
+      llvm::Value *Operands[] = {Ops[NF], Ops[NF + 2]};
+      Value *NewVL = Ops[NF + 1];
+      llvm::Function *F = CGM.getIntrinsic(ID, IntrinsicTypes);
+      llvm::Value *LoadValue = Builder.CreateCall(F, Operands, "");
+      clang::CharUnits Align =
+          CGM.getNaturalTypeAlignment(getContext().getSizeType());
+      for (unsigned I = 0; I < NF; ++I) {
+        Builder.CreateStore(Builder.CreateExtractValue(LoadValue, {I}),
+                            Address(Ops[I], Align));
+      }
+      // Store new_vl.
+      return Builder.CreateStore(Builder.CreateExtractValue(LoadValue, {NF}),
+                                 Address(NewVL, Align));
+    }
+          }],
+          ManualCodegenMask = [{
+    {
+      // builtin: (val0 address, ..., mask, maskedoff0, ..., ptr, new_vl, vl)
+      // intrinsic: (maskedoff0, ..., ptr, mask, vl)
+      IntrinsicTypes = {Ops[0]->getType()->getPointerElementType(),
+                        Ops[2 * NF + 3]->getType()};
+      SmallVector<llvm::Value*, 12> Operands;
+      for (unsigned I = 0; I < NF; ++I)
+        Operands.push_back(Ops[NF + I + 1]);
+      Operands.push_back(Ops[2 * NF + 1]);
+      Operands.push_back(Ops[NF]);
+      Operands.push_back(Ops[2 * NF + 3]);
+      Value *NewVL = Ops[2 * NF + 2];
+      assert(Operands.size() == NF + 3);
+      llvm::Function *F = CGM.getIntrinsic(ID, IntrinsicTypes);
+      llvm::Value *LoadValue = Builder.CreateCall(F, Operands, "");
+      clang::CharUnits Align =
+          CGM.getNaturalTypeAlignment(getContext().getSizeType());
+      for (unsigned I = 0; I < NF; ++I) {
+        Builder.CreateStore(Builder.CreateExtractValue(LoadValue, {I}),
+                            Address(Ops[I], Align));
+      }
+      // Store new_vl.
+      return Builder.CreateStore(Builder.CreateExtractValue(LoadValue, {NF}),
+                                 Address(NewVL, Align));
+    }
+          }] in {
+        defvar PV = PVString<nf, /*signed=*/true>.S;
+        defvar PUV = PVString<nf, /*signed=*/false>.S;
+        def : RVVBuiltin<"v", "0" # PV # "PCe" # "Pz", type>;
+        if !not(IsFloat<type>.val) then {
+          def : RVVBuiltin<"Uv", "0" # PUV # "PCUe" # "Pz", type>;
+        }
+      }
+    }
+  }
+}
+
 multiclass RVVAMOBuiltinSet<bit has_signed = false, bit has_unsigned = false,
                             bit has_fp = false> {
   defvar type_list = !if(has_fp, ["i","l","f","d"], ["i","l"]);
@@ -1083,6 +1242,12 @@ defm vle16ff: RVVVLEFFBuiltin<["s"]>;
 defm vle32ff: RVVVLEFFBuiltin<["i", "f"]>;
 defm vle64ff: RVVVLEFFBuiltin<["l", "d"]>;
 
+// 7.8 Vector Load/Store Segment Instructions
+let RequiredExtension = "Zvlsseg" in {
+defm : RVVUnitStridedSegLoad<"vlseg">;
+defm : RVVUnitStridedSegLoadFF<"vlseg">;
+}
+
 // 8. Vector AMO Operations
 let RequiredExtension = "Zvamo" in {
 defm vamoswap : RVVAMOBuiltinSet< /* hasSigned */ true, /* hasUnsigned */ true, /* hasFP */ true>;
diff --git a/clang/lib/CodeGen/CGBuiltin.cpp b/clang/lib/CodeGen/CGBuiltin.cpp
index 0a1a3f319dc1..2c24b71d030a 100644
--- a/clang/lib/CodeGen/CGBuiltin.cpp
+++ b/clang/lib/CodeGen/CGBuiltin.cpp
@@ -18062,6 +18062,7 @@ Value *CodeGenFunction::EmitRISCVBuiltinExpr(unsigned BuiltinID,
     Ops.push_back(EmitScalarExpr(E->getArg(i)));
 
   Intrinsic::ID ID = Intrinsic::not_intrinsic;
+  unsigned NF = 1;
 
   // Required for overloaded intrinsics.
llvm::SmallVector IntrinsicTypes; diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vlseg.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vlseg.c new file mode 100644 index 000000000000..2c34788011af --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vlseg.c @@ -0,0 +1,6486 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv32 -target-feature +f -target-feature +d \ +// RUN: -target-feature +experimental-v -target-feature +experimental-zfh \ +// RUN: -disable-O0-optnone -fallow-half-arguments-and-returns -emit-llvm %s \ +// RUN: -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s +// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d \ +// RUN: -target-feature +experimental-v -target-feature +experimental-zfh \ +// RUN: -disable-O0-optnone -fallow-half-arguments-and-returns -emit-llvm %s \ +// RUN: -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s + +#include + +// CHECK-RV32-LABEL: @test_vlseg2e8_v_i8mf8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv1i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg2e8_v_i8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e8_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, const int8_t *base, size_t vl) { + return vlseg2e8(v0, v1, mask, maskedoff0, maskedoff1, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg3e8_v_i8mf8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv1i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg3e8_v_i8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * 
[[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e8_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, const int8_t *base, size_t vl) { + return vlseg3e8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg4e8_v_i8mf8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv1i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg4e8_v_i8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e8_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, const int8_t *base, size_t vl) { + return vlseg4e8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg5e8_v_i8mf8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv1i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg5e8_v_i8mf8_m( 
+// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg5e8_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, const int8_t *base, size_t vl) { + return vlseg5e8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg6e8_v_i8mf8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv1i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg6e8_v_i8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: 
[[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg6e8_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, const int8_t *base, size_t vl) { + return vlseg6e8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg7e8_v_i8mf8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv1i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg7e8_v_i8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg7e8_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t 
maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, const int8_t *base, size_t vl) { + return vlseg7e8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg8e8_v_i8mf8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv1i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg8e8_v_i8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg8e8_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t *v7, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t 
maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, vint8mf8_t maskedoff7, const int8_t *base, size_t vl) { + return vlseg8e8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg2e8_v_i8mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv2i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg2e8_v_i8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e8_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, const int8_t *base, size_t vl) { + return vlseg2e8(v0, v1, mask, maskedoff0, maskedoff1, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg3e8_v_i8mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv2i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg3e8_v_i8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e8_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, const int8_t *base, size_t vl) { + return vlseg3e8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg4e8_v_i8mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv2i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: 
[[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg4e8_v_i8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e8_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, const int8_t *base, size_t vl) { + return vlseg4e8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg5e8_v_i8mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv2i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg5e8_v_i8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * 
[[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg5e8_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, const int8_t *base, size_t vl) { + return vlseg5e8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg6e8_v_i8mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv2i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg6e8_v_i8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg6e8_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, const int8_t *base, size_t vl) { + return vlseg6e8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg7e8_v_i8mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv2i8.i32( 
[[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg7e8_v_i8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg7e8_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, const int8_t *base, size_t vl) { + return vlseg7e8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg8e8_v_i8mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv2i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// 
CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg8e8_v_i8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg8e8_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t *v7, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, vint8mf4_t maskedoff7, const int8_t *base, size_t vl) { + return vlseg8e8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg2e8_v_i8mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv4i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * 
[[V1:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg2e8_v_i8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e8_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, const int8_t *base, size_t vl) { + return vlseg2e8(v0, v1, mask, maskedoff0, maskedoff1, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg3e8_v_i8mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv4i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg3e8_v_i8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e8_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, const int8_t *base, size_t vl) { + return vlseg3e8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg4e8_v_i8mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv4i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg4e8_v_i8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], 
[[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e8_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, const int8_t *base, size_t vl) { + return vlseg4e8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg5e8_v_i8mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv4i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg5e8_v_i8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg5e8_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, const int8_t *base, size_t vl) { + return vlseg5e8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg6e8_v_i8mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv4i8.i32( 
[[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg6e8_v_i8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg6e8_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, const int8_t *base, size_t vl) { + return vlseg6e8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg7e8_v_i8mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv4i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: 
[[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg7e8_v_i8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg7e8_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, const int8_t *base, size_t vl) { + return vlseg7e8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg8e8_v_i8mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv4i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 
+// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg8e8_v_i8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg8e8_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t *v7, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, vint8mf2_t maskedoff7, const int8_t *base, size_t vl) { + return vlseg8e8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg2e8_v_i8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv8i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg2e8_v_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e8_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, const int8_t *base, size_t vl) { + return vlseg2e8(v0, v1, 
mask, maskedoff0, maskedoff1, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg3e8_v_i8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv8i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg3e8_v_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e8_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, const int8_t *base, size_t vl) { + return vlseg3e8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg4e8_v_i8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv8i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg4e8_v_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e8_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, 
vint8m1_t maskedoff3, const int8_t *base, size_t vl) { + return vlseg4e8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg5e8_v_i8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv8i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg5e8_v_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg5e8_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, const int8_t *base, size_t vl) { + return vlseg5e8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg6e8_v_i8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv8i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], 
align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg6e8_v_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg6e8_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, const int8_t *base, size_t vl) { + return vlseg6e8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg7e8_v_i8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv8i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg7e8_v_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } 
[[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg7e8_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, const int8_t *base, size_t vl) { + return vlseg7e8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg8e8_v_i8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv8i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg8e8_v_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store 
[[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg8e8_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vint8m1_t *v7, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, vint8m1_t maskedoff7, const int8_t *base, size_t vl) { + return vlseg8e8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg2e8_v_i8m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv16i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg2e8_v_i8m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv16i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e8_v_i8m2_m (vint8m2_t *v0, vint8m2_t *v1, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, const int8_t *base, size_t vl) { + return vlseg2e8(v0, v1, mask, maskedoff0, maskedoff1, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg3e8_v_i8m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv16i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg3e8_v_i8m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } 
@llvm.riscv.vlseg3.mask.nxv16i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e8_v_i8m2_m (vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, const int8_t *base, size_t vl) { + return vlseg3e8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg4e8_v_i8m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv16i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg4e8_v_i8m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv16i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e8_v_i8m2_m (vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vint8m2_t *v3, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, vint8m2_t maskedoff3, const int8_t *base, size_t vl) { + return vlseg4e8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg2e8_v_i8m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv32i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg2e8_v_i8m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv32i8.i64( 
[[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e8_v_i8m4_m (vint8m4_t *v0, vint8m4_t *v1, vbool2_t mask, vint8m4_t maskedoff0, vint8m4_t maskedoff1, const int8_t *base, size_t vl) { + return vlseg2e8(v0, v1, mask, maskedoff0, maskedoff1, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg2e16_v_i16mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv1i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg2e16_v_i16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e16_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, const int16_t *base, size_t vl) { + return vlseg2e16(v0, v1, mask, maskedoff0, maskedoff1, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg3e16_v_i16mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv1i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg3e16_v_i16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e16_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, const int16_t *base, size_t vl) { + return vlseg3e16(v0, v1, v2, mask, maskedoff0, 
maskedoff1, maskedoff2, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg4e16_v_i16mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv1i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg4e16_v_i16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e16_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, const int16_t *base, size_t vl) { + return vlseg4e16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg5e16_v_i16mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv1i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg5e16_v_i16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// 
CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg5e16_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, const int16_t *base, size_t vl) { + return vlseg5e16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg6e16_v_i16mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv1i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg6e16_v_i16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg6e16_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, 
vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, const int16_t *base, size_t vl) { + return vlseg6e16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg7e16_v_i16mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv1i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg7e16_v_i16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg7e16_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, const int16_t *base, size_t vl) { + return vlseg7e16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg8e16_v_i16mf4_m( +// CHECK-RV32-NEXT: entry: +// 
CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv1i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg8e16_v_i16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg8e16_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t *v7, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, vint16mf4_t maskedoff7, const int16_t *base, size_t vl) { + return vlseg8e16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); +} + +// 
CHECK-RV32-LABEL: @test_vlseg2e16_v_i16mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv2i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg2e16_v_i16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e16_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, const int16_t *base, size_t vl) { + return vlseg2e16(v0, v1, mask, maskedoff0, maskedoff1, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg3e16_v_i16mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv2i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg3e16_v_i16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e16_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, const int16_t *base, size_t vl) { + return vlseg3e16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg4e16_v_i16mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv2i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// 
CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg4e16_v_i16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e16_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, const int16_t *base, size_t vl) { + return vlseg4e16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg5e16_v_i16mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv2i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg5e16_v_i16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg5e16_v_i16mf2_m 
(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, const int16_t *base, size_t vl) { + return vlseg5e16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg6e16_v_i16mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv2i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg6e16_v_i16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg6e16_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, const int16_t *base, size_t vl) { + return vlseg6e16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg7e16_v_i16mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv2i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 
[[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg7e16_v_i16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg7e16_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, const int16_t *base, size_t vl) { + return vlseg7e16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg8e16_v_i16mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv2i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: 
[[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg8e16_v_i16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg8e16_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t *v7, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, vint16mf2_t maskedoff7, const int16_t *base, size_t vl) { + return vlseg8e16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg2e16_v_i16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv4i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg2e16_v_i16m1_m( +// CHECK-RV64-NEXT: entry: 
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e16_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, const int16_t *base, size_t vl) { + return vlseg2e16(v0, v1, mask, maskedoff0, maskedoff1, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg3e16_v_i16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv4i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg3e16_v_i16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e16_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, const int16_t *base, size_t vl) { + return vlseg3e16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg4e16_v_i16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv4i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg4e16_v_i16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , 
, , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e16_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, const int16_t *base, size_t vl) { + return vlseg4e16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg5e16_v_i16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv4i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg5e16_v_i16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg5e16_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, const int16_t *base, size_t vl) { + return vlseg5e16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg6e16_v_i16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv4i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], 
i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg6e16_v_i16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg6e16_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, const int16_t *base, size_t vl) { + return vlseg6e16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg7e16_v_i16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv4i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 
4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg7e16_v_i16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg7e16_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, const int16_t *base, size_t vl) { + return vlseg7e16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg8e16_v_i16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv4i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP8:%.*]] = 
extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg8e16_v_i16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg8e16_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vint16m1_t *v7, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, vint16m1_t maskedoff7, const int16_t *base, size_t vl) { + return vlseg8e16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg2e16_v_i16m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv8i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg2e16_v_i16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e16_v_i16m2_m (vint16m2_t *v0, vint16m2_t *v1, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, const int16_t *base, size_t vl) { + return vlseg2e16(v0, v1, mask, maskedoff0, maskedoff1, base, vl); +} + +// CHECK-RV32-LABEL: 
@test_vlseg3e16_v_i16m2_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } @llvm.riscv.vlseg3.mask.nxv8i16.i32(<vscale x 8 x i16> [[MASKEDOFF0:%.*]], <vscale x 8 x i16> [[MASKEDOFF1:%.*]], <vscale x 8 x i16> [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], 0
+// CHECK-RV32-NEXT:    store <vscale x 8 x i16> [[TMP1]], <vscale x 8 x i16>* [[V0:%.*]], align 4
+// CHECK-RV32-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], 1
+// CHECK-RV32-NEXT:    store <vscale x 8 x i16> [[TMP2]], <vscale x 8 x i16>* [[V1:%.*]], align 4
+// CHECK-RV32-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], 2
+// CHECK-RV32-NEXT:    store <vscale x 8 x i16> [[TMP3]], <vscale x 8 x i16>* [[V2:%.*]], align 4
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_vlseg3e16_v_i16m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } @llvm.riscv.vlseg3.mask.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF0:%.*]], <vscale x 8 x i16> [[MASKEDOFF1:%.*]], <vscale x 8 x i16> [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 8 x i16> [[TMP1]], <vscale x 8 x i16>* [[V0:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 8 x i16> [[TMP2]], <vscale x 8 x i16>* [[V1:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 8 x i16> [[TMP3]], <vscale x 8 x i16>* [[V2:%.*]], align 8
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg3e16_v_i16m2_m (vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, const int16_t *base, size_t vl) {
+  return vlseg3e16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlseg4e16_v_i16m2_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } @llvm.riscv.vlseg4.mask.nxv8i16.i32(<vscale x 8 x i16> [[MASKEDOFF0:%.*]], <vscale x 8 x i16> [[MASKEDOFF1:%.*]], <vscale x 8 x i16> [[MASKEDOFF2:%.*]], <vscale x 8 x i16> [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], 0
+// CHECK-RV32-NEXT:    store <vscale x 8 x i16> [[TMP1]], <vscale x 8 x i16>* [[V0:%.*]], align 4
+// CHECK-RV32-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], 1
+// CHECK-RV32-NEXT:    store <vscale x 8 x i16> [[TMP2]], <vscale x 8 x i16>* [[V1:%.*]], align 4
+// CHECK-RV32-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], 2
+// CHECK-RV32-NEXT:    store <vscale x 8 x i16> [[TMP3]], <vscale x 8 x i16>* [[V2:%.*]], align 4
+// CHECK-RV32-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], 3
+// CHECK-RV32-NEXT:    store <vscale x 8 x i16> [[TMP4]], <vscale x 8 x i16>* [[V3:%.*]], align 4
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_vlseg4e16_v_i16m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } @llvm.riscv.vlseg4.mask.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF0:%.*]], <vscale x 8 x i16> [[MASKEDOFF1:%.*]], <vscale x 8 x i16> [[MASKEDOFF2:%.*]], <vscale x 8 x i16> [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 8 x i16> [[TMP1]], <vscale x 8 x i16>* [[V0:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 8 x i16> [[TMP2]], <vscale x 8 x i16>* [[V1:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 8 x i16> [[TMP3]], <vscale x 8 x i16>* [[V2:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 8 x i16> [[TMP4]], <vscale x 8 x i16>* [[V3:%.*]], align 8
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg4e16_v_i16m2_m (vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vint16m2_t *v3, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, vint16m2_t maskedoff3, const
int16_t *base, size_t vl) { + return vlseg4e16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg2e16_v_i16m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv16i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg2e16_v_i16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv16i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e16_v_i16m4_m (vint16m4_t *v0, vint16m4_t *v1, vbool4_t mask, vint16m4_t maskedoff0, vint16m4_t maskedoff1, const int16_t *base, size_t vl) { + return vlseg2e16(v0, v1, mask, maskedoff0, maskedoff1, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg2e32_v_i32mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv1i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg2e32_v_i32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e32_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, const int32_t *base, size_t vl) { + return vlseg2e32(v0, v1, mask, maskedoff0, maskedoff1, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg3e32_v_i32mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv1i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg3e32_v_i32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] 
= call { , , } @llvm.riscv.vlseg3.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e32_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, const int32_t *base, size_t vl) { + return vlseg3e32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg4e32_v_i32mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv1i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg4e32_v_i32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e32_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, const int32_t *base, size_t vl) { + return vlseg4e32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg5e32_v_i32mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv1i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue 
{ , , , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg5e32_v_i32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg5e32_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, const int32_t *base, size_t vl) { + return vlseg5e32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg6e32_v_i32mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv1i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg6e32_v_i32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store 
[[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg6e32_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, const int32_t *base, size_t vl) { + return vlseg6e32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg7e32_v_i32mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv1i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg7e32_v_i32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: 
store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg7e32_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, const int32_t *base, size_t vl) { + return vlseg7e32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg8e32_v_i32mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv1i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg8e32_v_i32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// 
CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP7]], <vscale x 1 x i32>* [[V6:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 7
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP8]], <vscale x 1 x i32>* [[V7:%.*]], align 8
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg8e32_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t *v7, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, vint32mf2_t maskedoff7, const int32_t *base, size_t vl) {
+  return vlseg8e32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlseg2e32_v_i32m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i32>, <vscale x 2 x i32> } @llvm.riscv.vlseg2.mask.nxv2i32.i32(<vscale x 2 x i32> [[MASKEDOFF0:%.*]], <vscale x 2 x i32> [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 0
+// CHECK-RV32-NEXT:    store <vscale x 2 x i32> [[TMP1]], <vscale x 2 x i32>* [[V0:%.*]], align 4
+// CHECK-RV32-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 1
+// CHECK-RV32-NEXT:    store <vscale x 2 x i32> [[TMP2]], <vscale x 2 x i32>* [[V1:%.*]], align 4
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_vlseg2e32_v_i32m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i32>, <vscale x 2 x i32> } @llvm.riscv.vlseg2.mask.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF0:%.*]], <vscale x 2 x i32> [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x i32> [[TMP1]], <vscale x 2 x i32>* [[V0:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x i32> [[TMP2]], <vscale x 2 x i32>* [[V1:%.*]], align 8
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg2e32_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, const int32_t *base, size_t vl) {
+  return vlseg2e32(v0, v1, mask, maskedoff0, maskedoff1, base, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlseg3e32_v_i32m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } @llvm.riscv.vlseg3.mask.nxv2i32.i32(<vscale x 2 x i32> [[MASKEDOFF0:%.*]], <vscale x 2 x i32> [[MASKEDOFF1:%.*]], <vscale x 2 x i32> [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 0
+// CHECK-RV32-NEXT:    store <vscale x 2 x i32> [[TMP1]], <vscale x 2 x i32>* [[V0:%.*]], align 4
+// CHECK-RV32-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 1
+// CHECK-RV32-NEXT:    store <vscale x 2 x i32> [[TMP2]], <vscale x 2 x i32>* [[V1:%.*]], align 4
+// CHECK-RV32-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 2
+// CHECK-RV32-NEXT:    store <vscale x 2 x i32> [[TMP3]], <vscale x 2 x i32>* [[V2:%.*]], align 4
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_vlseg3e32_v_i32m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } @llvm.riscv.vlseg3.mask.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF0:%.*]], <vscale x 2 x i32> [[MASKEDOFF1:%.*]], <vscale x 2 x i32> [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x i32> [[TMP1]], <vscale x 2 x i32>* [[V0:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x i32> [[TMP2]], <vscale x 2 x i32>* [[V1:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x i32> [[TMP3]], <vscale x 2 x i32>* [[V2:%.*]], align 8
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg3e32_v_i32m1_m (vint32m1_t *v0, vint32m1_t
*v1, vint32m1_t *v2, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, const int32_t *base, size_t vl) { + return vlseg3e32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg4e32_v_i32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv2i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg4e32_v_i32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e32_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, const int32_t *base, size_t vl) { + return vlseg4e32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg5e32_v_i32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv2i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg5e32_v_i32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* 
[[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg5e32_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, const int32_t *base, size_t vl) { + return vlseg5e32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg6e32_v_i32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv2i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg6e32_v_i32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg6e32_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, 
vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, const int32_t *base, size_t vl) { + return vlseg6e32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg7e32_v_i32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv2i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg7e32_v_i32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg7e32_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, const int32_t *base, size_t vl) { + return vlseg7e32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, 
maskedoff5, maskedoff6, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg8e32_v_i32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv2i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg8e32_v_i32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg8e32_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vint32m1_t *v7, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, vint32m1_t maskedoff7, const int32_t *base, size_t vl) { + return vlseg8e32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, 
maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg2e32_v_i32m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv4i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg2e32_v_i32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e32_v_i32m2_m (vint32m2_t *v0, vint32m2_t *v1, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, const int32_t *base, size_t vl) { + return vlseg2e32(v0, v1, mask, maskedoff0, maskedoff1, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg3e32_v_i32m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv4i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg3e32_v_i32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e32_v_i32m2_m (vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, const int32_t *base, size_t vl) { + return vlseg3e32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg4e32_v_i32m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv4i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 
1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg4e32_v_i32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e32_v_i32m2_m (vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vint32m2_t *v3, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, vint32m2_t maskedoff3, const int32_t *base, size_t vl) { + return vlseg4e32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg2e32_v_i32m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv8i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg2e32_v_i32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e32_v_i32m4_m (vint32m4_t *v0, vint32m4_t *v1, vbool8_t mask, vint32m4_t maskedoff0, vint32m4_t maskedoff1, const int32_t *base, size_t vl) { + return vlseg2e32(v0, v1, mask, maskedoff0, maskedoff1, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg2e64_v_i64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv1i64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg2e64_v_i64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv1i64.i64( 
[[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e64_v_i64m1_m (vint64m1_t *v0, vint64m1_t *v1, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, const int64_t *base, size_t vl) { + return vlseg2e64(v0, v1, mask, maskedoff0, maskedoff1, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg3e64_v_i64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv1i64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg3e64_v_i64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e64_v_i64m1_m (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, const int64_t *base, size_t vl) { + return vlseg3e64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg4e64_v_i64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv1i64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg4e64_v_i64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// 
CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e64_v_i64m1_m (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, const int64_t *base, size_t vl) { + return vlseg4e64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg5e64_v_i64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv1i64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg5e64_v_i64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg5e64_v_i64m1_m (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, const int64_t *base, size_t vl) { + return vlseg5e64(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg6e64_v_i64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv1i64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = 
extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg6e64_v_i64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg6e64_v_i64m1_m (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, const int64_t *base, size_t vl) { + return vlseg6e64(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg7e64_v_i64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv1i64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// 
CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg7e64_v_i64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg7e64_v_i64m1_m (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, const int64_t *base, size_t vl) { + return vlseg7e64(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg8e64_v_i64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv1i64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV32-NEXT: store [[TMP8]], * 
[[V7:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg8e64_v_i64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg8e64_v_i64m1_m (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vint64m1_t *v7, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, vint64m1_t maskedoff7, const int64_t *base, size_t vl) { + return vlseg8e64(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg2e64_v_i64m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv2i64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg2e64_v_i64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e64_v_i64m2_m (vint64m2_t *v0, vint64m2_t *v1, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, const int64_t *base, size_t vl) { + return vlseg2e64(v0, v1, mask, maskedoff0, maskedoff1, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg3e64_v_i64m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = 
call { , , } @llvm.riscv.vlseg3.mask.nxv2i64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg3e64_v_i64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e64_v_i64m2_m (vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, const int64_t *base, size_t vl) { + return vlseg3e64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg4e64_v_i64m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv2i64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg4e64_v_i64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e64_v_i64m2_m (vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vint64m2_t *v3, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, vint64m2_t maskedoff3, const int64_t *base, size_t vl) { + return vlseg4e64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, 
maskedoff2, maskedoff3, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg2e64_v_i64m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv4i64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg2e64_v_i64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e64_v_i64m4_m (vint64m4_t *v0, vint64m4_t *v1, vbool16_t mask, vint64m4_t maskedoff0, vint64m4_t maskedoff1, const int64_t *base, size_t vl) { + return vlseg2e64(v0, v1, mask, maskedoff0, maskedoff1, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg2e8_v_u8mf8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv1i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg2e8_v_u8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e8_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, const uint8_t *base, size_t vl) { + return vlseg2e8(v0, v1, mask, maskedoff0, maskedoff1, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg3e8_v_u8mf8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv1i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg3e8_v_u8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], 
i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e8_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, const uint8_t *base, size_t vl) { + return vlseg3e8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg4e8_v_u8mf8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv1i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg4e8_v_u8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e8_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, const uint8_t *base, size_t vl) { + return vlseg4e8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg5e8_v_u8mf8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv1i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue 
{ , , , , } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg5e8_v_u8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg5e8_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, const uint8_t *base, size_t vl) { + return vlseg5e8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg6e8_v_u8mf8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv1i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg6e8_v_u8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * 
[[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg6e8_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, const uint8_t *base, size_t vl) { + return vlseg6e8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg7e8_v_u8mf8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv1i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg7e8_v_u8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * 
[[V6:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg7e8_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, const uint8_t *base, size_t vl) { + return vlseg7e8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg8e8_v_u8mf8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv1i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg8e8_v_u8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * 
[[V7:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg8e8_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, vuint8mf8_t maskedoff7, const uint8_t *base, size_t vl) { + return vlseg8e8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg2e8_v_u8mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv2i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg2e8_v_u8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e8_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, const uint8_t *base, size_t vl) { + return vlseg2e8(v0, v1, mask, maskedoff0, maskedoff1, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg3e8_v_u8mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv2i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg3e8_v_u8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e8_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, const uint8_t *base, size_t vl) { + return vlseg3e8(v0, v1, v2, mask, 
maskedoff0, maskedoff1, maskedoff2, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg4e8_v_u8mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv2i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg4e8_v_u8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e8_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, const uint8_t *base, size_t vl) { + return vlseg4e8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg5e8_v_u8mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv2i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg5e8_v_u8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// 
CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg5e8_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, const uint8_t *base, size_t vl) { + return vlseg5e8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg6e8_v_u8mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv2i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg6e8_v_u8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg6e8_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, 
vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, const uint8_t *base, size_t vl) { + return vlseg6e8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg7e8_v_u8mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv2i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg7e8_v_u8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg7e8_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, const uint8_t *base, size_t vl) { + return vlseg7e8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg8e8_v_u8mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , 
, } @llvm.riscv.vlseg8.mask.nxv2i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg8e8_v_u8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg8e8_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, vuint8mf4_t maskedoff7, const uint8_t *base, size_t vl) { + return vlseg8e8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg2e8_v_u8mf2_m( +// CHECK-RV32-NEXT: 
entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv4i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg2e8_v_u8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e8_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, const uint8_t *base, size_t vl) { + return vlseg2e8(v0, v1, mask, maskedoff0, maskedoff1, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg3e8_v_u8mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv4i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg3e8_v_u8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e8_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, const uint8_t *base, size_t vl) { + return vlseg3e8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg4e8_v_u8mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv4i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store 
[[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg4e8_v_u8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e8_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, const uint8_t *base, size_t vl) { + return vlseg4e8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg5e8_v_u8mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv4i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg5e8_v_u8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg5e8_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vbool16_t mask, vuint8mf2_t 
maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, const uint8_t *base, size_t vl) { + return vlseg5e8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg6e8_v_u8mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv4i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg6e8_v_u8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg6e8_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, const uint8_t *base, size_t vl) { + return vlseg6e8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg7e8_v_u8mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv4i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * 
[[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg7e8_v_u8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg7e8_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, const uint8_t *base, size_t vl) { + return vlseg7e8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg8e8_v_u8mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv4i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: 
[[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg8e8_v_u8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg8e8_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, vuint8mf2_t maskedoff7, const uint8_t *base, size_t vl) { + return vlseg8e8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg2e8_v_u8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv8i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg2e8_v_u8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], 
<vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP1]], <vscale x 8 x i8>* [[V0:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP2]], <vscale x 8 x i8>* [[V1:%.*]], align 8
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg2e8_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, const uint8_t *base, size_t vl) {
+  return vlseg2e8(v0, v1, mask, maskedoff0, maskedoff1, base, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlseg3e8_v_u8m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } @llvm.riscv.vlseg3.mask.nxv8i8.i32(<vscale x 8 x i8> [[MASKEDOFF0:%.*]], <vscale x 8 x i8> [[MASKEDOFF1:%.*]], <vscale x 8 x i8> [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 0
+// CHECK-RV32-NEXT:    store <vscale x 8 x i8> [[TMP1]], <vscale x 8 x i8>* [[V0:%.*]], align 4
+// CHECK-RV32-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 1
+// CHECK-RV32-NEXT:    store <vscale x 8 x i8> [[TMP2]], <vscale x 8 x i8>* [[V1:%.*]], align 4
+// CHECK-RV32-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 2
+// CHECK-RV32-NEXT:    store <vscale x 8 x i8> [[TMP3]], <vscale x 8 x i8>* [[V2:%.*]], align 4
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_vlseg3e8_v_u8m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } @llvm.riscv.vlseg3.mask.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF0:%.*]], <vscale x 8 x i8> [[MASKEDOFF1:%.*]], <vscale x 8 x i8> [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP1]], <vscale x 8 x i8>* [[V0:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP2]], <vscale x 8 x i8>* [[V1:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP3]], <vscale x 8 x i8>* [[V2:%.*]], align 8
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg3e8_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, const uint8_t *base, size_t vl) {
+  return vlseg3e8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlseg4e8_v_u8m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } @llvm.riscv.vlseg4.mask.nxv8i8.i32(<vscale x 8 x i8> [[MASKEDOFF0:%.*]], <vscale x 8 x i8> [[MASKEDOFF1:%.*]], <vscale x 8 x i8> [[MASKEDOFF2:%.*]], <vscale x 8 x i8> [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 0
+// CHECK-RV32-NEXT:    store <vscale x 8 x i8> [[TMP1]], <vscale x 8 x i8>* [[V0:%.*]], align 4
+// CHECK-RV32-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 1
+// CHECK-RV32-NEXT:    store <vscale x 8 x i8> [[TMP2]], <vscale x 8 x i8>* [[V1:%.*]], align 4
+// CHECK-RV32-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 2
+// CHECK-RV32-NEXT:    store <vscale x 8 x i8> [[TMP3]], <vscale x 8 x i8>* [[V2:%.*]], align 4
+// CHECK-RV32-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 3
+// CHECK-RV32-NEXT:    store <vscale x 8 x i8> [[TMP4]], <vscale x 8 x i8>* [[V3:%.*]], align 4
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_vlseg4e8_v_u8m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } @llvm.riscv.vlseg4.mask.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF0:%.*]], <vscale x 8 x i8> [[MASKEDOFF1:%.*]], <vscale x 8 x i8> [[MASKEDOFF2:%.*]], <vscale x 8 x i8> [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP1]], <vscale x 8 x i8>* [[V0:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 1
+//
CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e8_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, const uint8_t *base, size_t vl) { + return vlseg4e8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg5e8_v_u8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv8i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg5e8_v_u8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg5e8_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, const uint8_t *base, size_t vl) { + return vlseg5e8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg6e8_v_u8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv8i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], 
align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg6e8_v_u8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg6e8_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, const uint8_t *base, size_t vl) { + return vlseg6e8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg7e8_v_u8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv8i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue 
{ , , , , , , } [[TMP0]], 6 +// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg7e8_v_u8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg7e8_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, const uint8_t *base, size_t vl) { + return vlseg7e8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg8e8_v_u8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv8i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg8e8_v_u8m1_m( +// CHECK-RV64-NEXT: 
entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg8e8_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vuint8m1_t *v7, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, vuint8m1_t maskedoff7, const uint8_t *base, size_t vl) { + return vlseg8e8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg2e8_v_u8m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv16i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg2e8_v_u8m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv16i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e8_v_u8m2_m (vuint8m2_t *v0, vuint8m2_t *v1, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, const uint8_t *base, size_t vl) { + return vlseg2e8(v0, v1, mask, maskedoff0, maskedoff1, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg3e8_v_u8m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv16i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], 
i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg3e8_v_u8m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv16i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e8_v_u8m2_m (vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, const uint8_t *base, size_t vl) { + return vlseg3e8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg4e8_v_u8m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv16i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg4e8_v_u8m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv16i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e8_v_u8m2_m (vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vuint8m2_t *v3, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, vuint8m2_t maskedoff3, const uint8_t *base, size_t vl) { + return vlseg4e8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg2e8_v_u8m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , 
} @llvm.riscv.vlseg2.mask.nxv32i8.i32(<vscale x 32 x i8> [[MASKEDOFF0:%.*]], <vscale x 32 x i8> [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 32 x i8>, <vscale x 32 x i8> } [[TMP0]], 0
+// CHECK-RV32-NEXT:    store <vscale x 32 x i8> [[TMP1]], <vscale x 32 x i8>* [[V0:%.*]], align 4
+// CHECK-RV32-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 32 x i8>, <vscale x 32 x i8> } [[TMP0]], 1
+// CHECK-RV32-NEXT:    store <vscale x 32 x i8> [[TMP2]], <vscale x 32 x i8>* [[V1:%.*]], align 4
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_vlseg2e8_v_u8m4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 32 x i8>, <vscale x 32 x i8> } @llvm.riscv.vlseg2.mask.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF0:%.*]], <vscale x 32 x i8> [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 32 x i8>, <vscale x 32 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 32 x i8> [[TMP1]], <vscale x 32 x i8>* [[V0:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 32 x i8>, <vscale x 32 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 32 x i8> [[TMP2]], <vscale x 32 x i8>* [[V1:%.*]], align 8
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg2e8_v_u8m4_m (vuint8m4_t *v0, vuint8m4_t *v1, vbool2_t mask, vuint8m4_t maskedoff0, vuint8m4_t maskedoff1, const uint8_t *base, size_t vl) {
+  return vlseg2e8(v0, v1, mask, maskedoff0, maskedoff1, base, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlseg2e16_v_u16mf4_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i16>, <vscale x 1 x i16> } @llvm.riscv.vlseg2.mask.nxv1i16.i32(<vscale x 1 x i16> [[MASKEDOFF0:%.*]], <vscale x 1 x i16> [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 0
+// CHECK-RV32-NEXT:    store <vscale x 1 x i16> [[TMP1]], <vscale x 1 x i16>* [[V0:%.*]], align 4
+// CHECK-RV32-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 1
+// CHECK-RV32-NEXT:    store <vscale x 1 x i16> [[TMP2]], <vscale x 1 x i16>* [[V1:%.*]], align 4
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_vlseg2e16_v_u16mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i16>, <vscale x 1 x i16> } @llvm.riscv.vlseg2.mask.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF0:%.*]], <vscale x 1 x i16> [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP1]], <vscale x 1 x i16>* [[V0:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP2]], <vscale x 1 x i16>* [[V1:%.*]], align 8
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg2e16_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, const uint16_t *base, size_t vl) {
+  return vlseg2e16(v0, v1, mask, maskedoff0, maskedoff1, base, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlseg3e16_v_u16mf4_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } @llvm.riscv.vlseg3.mask.nxv1i16.i32(<vscale x 1 x i16> [[MASKEDOFF0:%.*]], <vscale x 1 x i16> [[MASKEDOFF1:%.*]], <vscale x 1 x i16> [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 0
+// CHECK-RV32-NEXT:    store <vscale x 1 x i16> [[TMP1]], <vscale x 1 x i16>* [[V0:%.*]], align 4
+// CHECK-RV32-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 1
+// CHECK-RV32-NEXT:    store <vscale x 1 x i16> [[TMP2]], <vscale x 1 x i16>* [[V1:%.*]], align 4
+// CHECK-RV32-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 2
+// CHECK-RV32-NEXT:    store <vscale x 1 x i16> [[TMP3]], <vscale x 1 x i16>* [[V2:%.*]], align 4
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_vlseg3e16_v_u16mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } @llvm.riscv.vlseg3.mask.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF0:%.*]], <vscale x 1 x i16> [[MASKEDOFF1:%.*]], <vscale x 1 x i16> [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store
[[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e16_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, const uint16_t *base, size_t vl) { + return vlseg3e16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg4e16_v_u16mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv1i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg4e16_v_u16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e16_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, const uint16_t *base, size_t vl) { + return vlseg4e16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg5e16_v_u16mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv1i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: 
[[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg5e16_v_u16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg5e16_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, const uint16_t *base, size_t vl) { + return vlseg5e16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg6e16_v_u16mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv1i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg6e16_v_u16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , 
} [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg6e16_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, const uint16_t *base, size_t vl) { + return vlseg6e16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg7e16_v_u16mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv1i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg7e16_v_u16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void 
test_vlseg7e16_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, const uint16_t *base, size_t vl) { + return vlseg7e16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg8e16_v_u16mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv1i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg8e16_v_u16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 +// 
CHECK-RV64-NEXT: ret void +// +void test_vlseg8e16_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, vuint16mf4_t maskedoff7, const uint16_t *base, size_t vl) { + return vlseg8e16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg2e16_v_u16mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv2i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg2e16_v_u16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e16_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, const uint16_t *base, size_t vl) { + return vlseg2e16(v0, v1, mask, maskedoff0, maskedoff1, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg3e16_v_u16mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv2i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg3e16_v_u16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e16_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, const uint16_t *base, size_t vl) { + return 
vlseg3e16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg4e16_v_u16mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv2i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg4e16_v_u16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e16_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, const uint16_t *base, size_t vl) { + return vlseg4e16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg5e16_v_u16mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv2i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg5e16_v_u16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// 
CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg5e16_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, const uint16_t *base, size_t vl) { + return vlseg5e16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg6e16_v_u16mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv2i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg6e16_v_u16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg6e16_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vbool32_t mask, 
vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, const uint16_t *base, size_t vl) { + return vlseg6e16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg7e16_v_u16mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv2i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg7e16_v_u16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg7e16_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, const uint16_t *base, size_t vl) { + return vlseg7e16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); +} + 
+// CHECK-RV32-LABEL: @test_vlseg8e16_v_u16mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv2i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg8e16_v_u16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg8e16_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, vuint16mf2_t maskedoff7, const uint16_t *base, size_t vl) { + return vlseg8e16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, 
maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg2e16_v_u16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv4i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg2e16_v_u16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e16_v_u16m1_m (vuint16m1_t *v0, vuint16m1_t *v1, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, const uint16_t *base, size_t vl) { + return vlseg2e16(v0, v1, mask, maskedoff0, maskedoff1, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg3e16_v_u16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv4i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg3e16_v_u16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e16_v_u16m1_m (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, const uint16_t *base, size_t vl) { + return vlseg3e16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg4e16_v_u16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv4i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , 
} [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg4e16_v_u16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e16_v_u16m1_m (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, const uint16_t *base, size_t vl) { + return vlseg4e16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg5e16_v_u16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv4i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg5e16_v_u16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 
+// CHECK-RV64-NEXT: ret void +// +void test_vlseg5e16_v_u16m1_m (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, const uint16_t *base, size_t vl) { + return vlseg5e16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg6e16_v_u16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv4i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg6e16_v_u16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg6e16_v_u16m1_m (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, const uint16_t *base, size_t vl) { + return vlseg6e16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg7e16_v_u16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv4i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], 
[[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg7e16_v_u16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg7e16_v_u16m1_m (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, const uint16_t *base, size_t vl) { + return vlseg7e16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg8e16_v_u16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv4i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store 
[[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg8e16_v_u16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg8e16_v_u16m1_m (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vuint16m1_t *v7, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, vuint16m1_t maskedoff7, const uint16_t *base, size_t vl) { + return vlseg8e16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg2e16_v_u16m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv8i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: 
@test_vlseg2e16_v_u16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e16_v_u16m2_m (vuint16m2_t *v0, vuint16m2_t *v1, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, const uint16_t *base, size_t vl) { + return vlseg2e16(v0, v1, mask, maskedoff0, maskedoff1, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg3e16_v_u16m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv8i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg3e16_v_u16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e16_v_u16m2_m (vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, const uint16_t *base, size_t vl) { + return vlseg3e16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg4e16_v_u16m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv8i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg4e16_v_u16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e16_v_u16m2_m (vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vuint16m2_t *v3, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, vuint16m2_t maskedoff3, const uint16_t *base, size_t vl) { + return vlseg4e16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg2e16_v_u16m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv16i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg2e16_v_u16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv16i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e16_v_u16m4_m (vuint16m4_t *v0, vuint16m4_t *v1, vbool4_t mask, vuint16m4_t maskedoff0, vuint16m4_t maskedoff1, const uint16_t *base, size_t vl) { + return vlseg2e16(v0, v1, mask, maskedoff0, maskedoff1, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg2e32_v_u32mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv1i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg2e32_v_u32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e32_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, const uint32_t *base, size_t vl) { + return vlseg2e32(v0, v1, mask, maskedoff0, maskedoff1, base, vl); +} + +// 
CHECK-RV32-LABEL: @test_vlseg3e32_v_u32mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv1i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg3e32_v_u32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e32_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, const uint32_t *base, size_t vl) { + return vlseg3e32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg4e32_v_u32mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv1i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg4e32_v_u32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e32_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, 
vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, const uint32_t *base, size_t vl) { + return vlseg4e32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg5e32_v_u32mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv1i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg5e32_v_u32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg5e32_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, const uint32_t *base, size_t vl) { + return vlseg5e32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg6e32_v_u32mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv1i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = 
extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg6e32_v_u32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg6e32_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, const uint32_t *base, size_t vl) { + return vlseg6e32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg7e32_v_u32mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv1i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg7e32_v_u32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], 
[[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg7e32_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, const uint32_t *base, size_t vl) { + return vlseg7e32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg8e32_v_u32mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv1i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg8e32_v_u32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = 
extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg8e32_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, vuint32mf2_t maskedoff7, const uint32_t *base, size_t vl) { + return vlseg8e32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg2e32_v_u32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv2i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg2e32_v_u32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e32_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, const uint32_t *base, size_t vl) { + return vlseg2e32(v0, v1, mask, maskedoff0, maskedoff1, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg3e32_v_u32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv2i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// 
CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg3e32_v_u32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e32_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, const uint32_t *base, size_t vl) { + return vlseg3e32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg4e32_v_u32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv2i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg4e32_v_u32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e32_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, const uint32_t *base, size_t vl) { + return vlseg4e32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg5e32_v_u32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv2i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = 
extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg5e32_v_u32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg5e32_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, const uint32_t *base, size_t vl) { + return vlseg5e32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg6e32_v_u32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv2i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg6e32_v_u32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* 
[[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg6e32_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, const uint32_t *base, size_t vl) { + return vlseg6e32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg7e32_v_u32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv2i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg7e32_v_u32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// 
CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg7e32_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, const uint32_t *base, size_t vl) { + return vlseg7e32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg8e32_v_u32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv2i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg8e32_v_u32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg8e32_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vuint32m1_t *v7, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, vuint32m1_t maskedoff7, const uint32_t *base, size_t vl) { + return vlseg8e32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg2e32_v_u32m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv4i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg2e32_v_u32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e32_v_u32m2_m (vuint32m2_t *v0, vuint32m2_t *v1, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, const uint32_t *base, size_t vl) { + return vlseg2e32(v0, v1, mask, maskedoff0, maskedoff1, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg3e32_v_u32m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv4i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg3e32_v_u32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 
1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e32_v_u32m2_m (vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, const uint32_t *base, size_t vl) { + return vlseg3e32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg4e32_v_u32m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv4i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg4e32_v_u32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e32_v_u32m2_m (vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vuint32m2_t *v3, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, vuint32m2_t maskedoff3, const uint32_t *base, size_t vl) { + return vlseg4e32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg2e32_v_u32m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv8i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg2e32_v_u32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// 
CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e32_v_u32m4_m (vuint32m4_t *v0, vuint32m4_t *v1, vbool8_t mask, vuint32m4_t maskedoff0, vuint32m4_t maskedoff1, const uint32_t *base, size_t vl) { + return vlseg2e32(v0, v1, mask, maskedoff0, maskedoff1, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg2e64_v_u64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv1i64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg2e64_v_u64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e64_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, const uint64_t *base, size_t vl) { + return vlseg2e64(v0, v1, mask, maskedoff0, maskedoff1, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg3e64_v_u64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv1i64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg3e64_v_u64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e64_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, const uint64_t *base, size_t vl) { + return vlseg3e64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg4e64_v_u64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv1i64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], 
[[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg4e64_v_u64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e64_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, const uint64_t *base, size_t vl) { + return vlseg4e64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg5e64_v_u64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv1i64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg5e64_v_u64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// 
CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg5e64_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, const uint64_t *base, size_t vl) { + return vlseg5e64(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg6e64_v_u64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv1i64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg6e64_v_u64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg6e64_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, const uint64_t *base, size_t vl) { + return vlseg6e64(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); +} + +// CHECK-RV32-LABEL: 
@test_vlseg7e64_v_u64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv1i64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg7e64_v_u64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg7e64_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, const uint64_t *base, size_t vl) { + return vlseg7e64(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg8e64_v_u64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv1i64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i64* [[BASE:%.*]], 
[[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg8e64_v_u64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg8e64_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vuint64m1_t *v7, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, vuint64m1_t maskedoff7, const uint64_t *base, size_t vl) { + return vlseg8e64(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg2e64_v_u64m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv2i64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = 
extractvalue { , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg2e64_v_u64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e64_v_u64m2_m (vuint64m2_t *v0, vuint64m2_t *v1, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, const uint64_t *base, size_t vl) { + return vlseg2e64(v0, v1, mask, maskedoff0, maskedoff1, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg3e64_v_u64m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv2i64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg3e64_v_u64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e64_v_u64m2_m (vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, const uint64_t *base, size_t vl) { + return vlseg3e64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg4e64_v_u64m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv2i64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// 
CHECK-RV64-LABEL: @test_vlseg4e64_v_u64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e64_v_u64m2_m (vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vuint64m2_t *v3, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, vuint64m2_t maskedoff3, const uint64_t *base, size_t vl) { + return vlseg4e64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg2e64_v_u64m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv4i64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg2e64_v_u64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e64_v_u64m4_m (vuint64m4_t *v0, vuint64m4_t *v1, vbool16_t mask, vuint64m4_t maskedoff0, vuint64m4_t maskedoff1, const uint64_t *base, size_t vl) { + return vlseg2e64(v0, v1, mask, maskedoff0, maskedoff1, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg2e32_v_f32mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv1f32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg2e32_v_f32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv1f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// 
CHECK-RV64-NEXT: ret void +// +void test_vlseg2e32_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, const float *base, size_t vl) { + return vlseg2e32(v0, v1, mask, maskedoff0, maskedoff1, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg3e32_v_f32mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv1f32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg3e32_v_f32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv1f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e32_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, const float *base, size_t vl) { + return vlseg3e32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg4e32_v_f32mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv1f32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg4e32_v_f32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv1f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { 
, , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e32_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, const float *base, size_t vl) { + return vlseg4e32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg5e32_v_f32mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv1f32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg5e32_v_f32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv1f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg5e32_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, const float *base, size_t vl) { + return vlseg5e32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg6e32_v_f32mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv1f32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// 
CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg6e32_v_f32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv1f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg6e32_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, const float *base, size_t vl) { + return vlseg6e32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg7e32_v_f32mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv1f32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// 
CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg7e32_v_f32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv1f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg7e32_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, const float *base, size_t vl) { + return vlseg7e32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg8e32_v_f32mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv1f32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg8e32_v_f32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv1f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg8e32_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, vfloat32mf2_t maskedoff7, const float *base, size_t vl) { + return vlseg8e32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg2e32_v_f32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv2f32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg2e32_v_f32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv2f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e32_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, const float *base, size_t vl) { + return vlseg2e32(v0, v1, mask, maskedoff0, maskedoff1, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg3e32_v_f32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv2f32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], 
[[MASKEDOFF2:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg3e32_v_f32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv2f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e32_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, const float *base, size_t vl) { + return vlseg3e32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg4e32_v_f32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv2f32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg4e32_v_f32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv2f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e32_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, const float *base, size_t vl) { + return vlseg4e32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); +} + +// 
CHECK-RV32-LABEL: @test_vlseg5e32_v_f32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv2f32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg5e32_v_f32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv2f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg5e32_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, const float *base, size_t vl) { + return vlseg5e32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg6e32_v_f32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv2f32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV32-NEXT: store 
[[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg6e32_v_f32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv2f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg6e32_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, const float *base, size_t vl) { + return vlseg6e32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg7e32_v_f32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv2f32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg7e32_v_f32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv2f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * 
[[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg7e32_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, const float *base, size_t vl) { + return vlseg7e32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg8e32_v_f32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv2f32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg8e32_v_f32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv2f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store 
[[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg8e32_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t *v7, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, vfloat32m1_t maskedoff7, const float *base, size_t vl) { + return vlseg8e32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg2e32_v_f32m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv4f32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg2e32_v_f32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv4f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e32_v_f32m2_m (vfloat32m2_t *v0, vfloat32m2_t *v1, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, const float *base, size_t vl) { + return vlseg2e32(v0, v1, mask, maskedoff0, maskedoff1, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg3e32_v_f32m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv4f32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg3e32_v_f32m2_m( +// 
CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv4f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e32_v_f32m2_m (vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, const float *base, size_t vl) { + return vlseg3e32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg4e32_v_f32m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv4f32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg4e32_v_f32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv4f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e32_v_f32m2_m (vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t *v3, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, vfloat32m2_t maskedoff3, const float *base, size_t vl) { + return vlseg4e32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg2e32_v_f32m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv8f32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: 
@test_vlseg2e32_v_f32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv8f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e32_v_f32m4_m (vfloat32m4_t *v0, vfloat32m4_t *v1, vbool8_t mask, vfloat32m4_t maskedoff0, vfloat32m4_t maskedoff1, const float *base, size_t vl) { + return vlseg2e32(v0, v1, mask, maskedoff0, maskedoff1, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg2e64_v_f64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv1f64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg2e64_v_f64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv1f64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e64_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, const double *base, size_t vl) { + return vlseg2e64(v0, v1, mask, maskedoff0, maskedoff1, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg3e64_v_f64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv1f64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg3e64_v_f64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv1f64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e64_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vbool64_t 
mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, const double *base, size_t vl) { + return vlseg3e64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg4e64_v_f64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv1f64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg4e64_v_f64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv1f64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e64_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, const double *base, size_t vl) { + return vlseg4e64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg5e64_v_f64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv1f64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg5e64_v_f64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv1f64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], double* 
[[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg5e64_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, const double *base, size_t vl) { + return vlseg5e64(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg6e64_v_f64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv1f64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg6e64_v_f64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv1f64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg6e64_v_f64m1_m 
(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, const double *base, size_t vl) { + return vlseg6e64(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg7e64_v_f64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv1f64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg7e64_v_f64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv1f64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg7e64_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, const double *base, size_t vl) { + return vlseg7e64(v0, v1, 
v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg8e64_v_f64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv1f64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg8e64_v_f64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv1f64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg8e64_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t *v7, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, vfloat64m1_t 
maskedoff7, const double *base, size_t vl) { + return vlseg8e64(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg2e64_v_f64m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv2f64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg2e64_v_f64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv2f64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e64_v_f64m2_m (vfloat64m2_t *v0, vfloat64m2_t *v1, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, const double *base, size_t vl) { + return vlseg2e64(v0, v1, mask, maskedoff0, maskedoff1, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg3e64_v_f64m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv2f64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg3e64_v_f64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv2f64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e64_v_f64m2_m (vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, const double *base, size_t vl) { + return vlseg3e64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg4e64_v_f64m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv2f64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = 
extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg4e64_v_f64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv2f64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e64_v_f64m2_m (vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t *v3, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, vfloat64m2_t maskedoff3, const double *base, size_t vl) { + return vlseg4e64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg2e64_v_f64m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv4f64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg2e64_v_f64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv4f64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e64_v_f64m4_m (vfloat64m4_t *v0, vfloat64m4_t *v1, vbool16_t mask, vfloat64m4_t maskedoff0, vfloat64m4_t maskedoff1, const double *base, size_t vl) { + return vlseg2e64(v0, v1, mask, maskedoff0, maskedoff1, base, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vlsegff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vlsegff.c new file mode 100644 index 000000000000..72a716702301 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vlsegff.c @@ -0,0 +1,7290 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv32 -target-feature +f -target-feature +d \ +// RUN: -target-feature 
+experimental-v -target-feature +experimental-zfh \
+// RUN: -disable-O0-optnone -fallow-half-arguments-and-returns -emit-llvm %s \
+// RUN: -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d \
+// RUN: -target-feature +experimental-v -target-feature +experimental-zfh \
+// RUN: -disable-O0-optnone -fallow-half-arguments-and-returns -emit-llvm %s \
+// RUN: -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV32-LABEL: @test_vlseg2e8ff_v_i8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, i32 } @llvm.riscv.vlseg2ff.mask.nxv1i8.i32(<vscale x 1 x i8> [[MASKEDOFF0:%.*]], <vscale x 1 x i8> [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, i32 } [[TMP0]], 0
+// CHECK-RV32-NEXT: store <vscale x 1 x i8> [[TMP1]], <vscale x 1 x i8>* [[V0:%.*]], align 4
+// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, i32 } [[TMP0]], 1
+// CHECK-RV32-NEXT: store <vscale x 1 x i8> [[TMP2]], <vscale x 1 x i8>* [[V1:%.*]], align 4
+// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, i32 } [[TMP0]], 2
+// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 4
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vlseg2e8ff_v_i8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, i64 } @llvm.riscv.vlseg2ff.mask.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF0:%.*]], <vscale x 1 x i8> [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP1]], <vscale x 1 x i8>* [[V0:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP2]], <vscale x 1 x i8>* [[V1:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
+// CHECK-RV64-NEXT: ret void
+//
+void test_vlseg2e8ff_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, const int8_t *base, size_t *new_vl, size_t vl) {
+ return vlseg2e8ff(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlseg3e8ff_v_i8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, i32 } @llvm.riscv.vlseg3ff.mask.nxv1i8.i32(<vscale x 1 x i8> [[MASKEDOFF0:%.*]], <vscale x 1 x i8> [[MASKEDOFF1:%.*]], <vscale x 1 x i8> [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, i32 } [[TMP0]], 0
+// CHECK-RV32-NEXT: store <vscale x 1 x i8> [[TMP1]], <vscale x 1 x i8>* [[V0:%.*]], align 4
+// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, i32 } [[TMP0]], 1
+// CHECK-RV32-NEXT: store <vscale x 1 x i8> [[TMP2]], <vscale x 1 x i8>* [[V1:%.*]], align 4
+// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, i32 } [[TMP0]], 2
+// CHECK-RV32-NEXT: store <vscale x 1 x i8> [[TMP3]], <vscale x 1 x i8>* [[V2:%.*]], align 4
+// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, i32 } [[TMP0]], 3
+// CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL:%.*]], align 4
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vlseg3e8ff_v_i8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, i64 } @llvm.riscv.vlseg3ff.mask.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF0:%.*]], <vscale x 1 x i8> [[MASKEDOFF1:%.*]], <vscale x 1 x i8> [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP1]], <vscale x 1 x i8>* [[V0:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT: store
[[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e8ff_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, const int8_t *base, size_t *new_vl, size_t vl) { + return vlseg3e8ff(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg4e8ff_v_i8mf8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , i32 } @llvm.riscv.vlseg4ff.mask.nxv1i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store i32 [[TMP5]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg4e8ff_v_i8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e8ff_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, const int8_t *base, size_t *new_vl, size_t vl) { + return vlseg4e8ff(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg5e8ff_v_i8mf8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , i32 } @llvm.riscv.vlseg5ff.mask.nxv1i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// 
CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 5 +// CHECK-RV32-NEXT: store i32 [[TMP6]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg5e8ff_v_i8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg5e8ff_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, const int8_t *base, size_t *new_vl, size_t vl) { + return vlseg5e8ff(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg6e8ff_v_i8mf8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , i32 } @llvm.riscv.vlseg6ff.mask.nxv1i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// 
CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 6 +// CHECK-RV32-NEXT: store i32 [[TMP7]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg6e8ff_v_i8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg6e8ff_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, const int8_t *base, size_t *new_vl, size_t vl) { + return vlseg6e8ff(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg7e8ff_v_i8mf8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , i32 } @llvm.riscv.vlseg7ff.mask.nxv1i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 6 +// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 7 +// CHECK-RV32-NEXT: store i32 [[TMP8]], i32* [[NEW_VL:%.*]], align 4 +// 
CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg7e8ff_v_i8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg7e8ff_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, const int8_t *base, size_t *new_vl, size_t vl) { + return vlseg7e8ff(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg8e8ff_v_i8mf8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i32 } @llvm.riscv.vlseg8ff.mask.nxv1i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 6 +// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP8:%.*]] = 
extractvalue { , , , , , , , , i32 } [[TMP0]], 7 +// CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 8 +// CHECK-RV32-NEXT: store i32 [[TMP9]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg8e8ff_v_i8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 8 +// CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg8e8ff_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t *v7, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, vint8mf8_t maskedoff7, const int8_t *base, size_t *new_vl, size_t vl) { + return vlseg8e8ff(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg2e8ff_v_i8mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.mask.nxv2i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg2e8ff_v_i8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], 
i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP1]], <vscale x 2 x i8>* [[V0:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP2]], <vscale x 2 x i8>* [[V1:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
+// CHECK-RV64-NEXT: ret void
+//
+void test_vlseg2e8ff_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, const int8_t *base, size_t *new_vl, size_t vl) {
+ return vlseg2e8ff(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlseg3e8ff_v_i8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, i32 } @llvm.riscv.vlseg3ff.mask.nxv2i8.i32(<vscale x 2 x i8> [[MASKEDOFF0:%.*]], <vscale x 2 x i8> [[MASKEDOFF1:%.*]], <vscale x 2 x i8> [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, i32 } [[TMP0]], 0
+// CHECK-RV32-NEXT: store <vscale x 2 x i8> [[TMP1]], <vscale x 2 x i8>* [[V0:%.*]], align 4
+// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, i32 } [[TMP0]], 1
+// CHECK-RV32-NEXT: store <vscale x 2 x i8> [[TMP2]], <vscale x 2 x i8>* [[V1:%.*]], align 4
+// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, i32 } [[TMP0]], 2
+// CHECK-RV32-NEXT: store <vscale x 2 x i8> [[TMP3]], <vscale x 2 x i8>* [[V2:%.*]], align 4
+// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, i32 } [[TMP0]], 3
+// CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL:%.*]], align 4
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vlseg3e8ff_v_i8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, i64 } @llvm.riscv.vlseg3ff.mask.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF0:%.*]], <vscale x 2 x i8> [[MASKEDOFF1:%.*]], <vscale x 2 x i8> [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP1]], <vscale x 2 x i8>* [[V0:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP2]], <vscale x 2 x i8>* [[V1:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP3]], <vscale x 2 x i8>* [[V2:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, i64 } [[TMP0]], 3
+// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 8
+// CHECK-RV64-NEXT: ret void
+//
+void test_vlseg3e8ff_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, const int8_t *base, size_t *new_vl, size_t vl) {
+ return vlseg3e8ff(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlseg4e8ff_v_i8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, i32 } @llvm.riscv.vlseg4ff.mask.nxv2i8.i32(<vscale x 2 x i8> [[MASKEDOFF0:%.*]], <vscale x 2 x i8> [[MASKEDOFF1:%.*]], <vscale x 2 x i8> [[MASKEDOFF2:%.*]], <vscale x 2 x i8> [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, i32 } [[TMP0]], 0
+// CHECK-RV32-NEXT: store <vscale x 2 x i8> [[TMP1]], <vscale x 2 x i8>* [[V0:%.*]], align 4
+// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, i32 } [[TMP0]], 1
+// CHECK-RV32-NEXT: store <vscale x 2 x i8> [[TMP2]], <vscale x 2 x i8>* [[V1:%.*]], align 4
+// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, i32 } [[TMP0]], 2
+// CHECK-RV32-NEXT: store <vscale x 2 x i8> [[TMP3]], <vscale x 2 x i8>* [[V2:%.*]], align 4
+// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, i32 } [[TMP0]], 3
+// CHECK-RV32-NEXT: store <vscale x 2 x i8> [[TMP4]], <vscale x 2 x i8>* [[V3:%.*]], align 4
+// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, i32 } [[TMP0]], 4
+// CHECK-RV32-NEXT: store i32 [[TMP5]], i32* [[NEW_VL:%.*]], align 4
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vlseg4e8ff_v_i8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, i64 } @llvm.riscv.vlseg4ff.mask.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF0:%.*]], <vscale x 2 x i8> [[MASKEDOFF1:%.*]], <vscale x 2 x i8> [[MASKEDOFF2:%.*]], <vscale x 2 x i8> [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP1]], <vscale x 2 x i8>* [[V0:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP2]], <vscale x 2 x i8>* [[V1:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP3]], <vscale x 2 x i8>* [[V2:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, i64 } [[TMP0]], 3
+// CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP4]], <vscale x 2 x i8>* [[V3:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, i64 } [[TMP0]], 4
+// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 8
+// CHECK-RV64-NEXT: ret void
+//
+void test_vlseg4e8ff_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, const int8_t *base, size_t *new_vl, size_t vl) {
+ return vlseg4e8ff(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlseg5e8ff_v_i8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, i32 } @llvm.riscv.vlseg5ff.mask.nxv2i8.i32(<vscale x 2 x i8> [[MASKEDOFF0:%.*]], <vscale x 2 x i8> [[MASKEDOFF1:%.*]], <vscale x 2 x i8> [[MASKEDOFF2:%.*]], <vscale x 2 x i8> [[MASKEDOFF3:%.*]], <vscale x 2 x i8> [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, i32 } [[TMP0]], 0
+// CHECK-RV32-NEXT: store <vscale x 2 x i8> [[TMP1]], <vscale x 2 x i8>* [[V0:%.*]], align 4
+// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, i32 } [[TMP0]], 1
+// CHECK-RV32-NEXT: store <vscale x 2 x i8> [[TMP2]], <vscale x 2 x i8>* [[V1:%.*]], align 4
+// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, i32 } [[TMP0]], 2
+// CHECK-RV32-NEXT: store <vscale x 2 x i8> [[TMP3]], <vscale x 2 x i8>* [[V2:%.*]], align 4
+// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, i32 } [[TMP0]], 3
+// CHECK-RV32-NEXT: store <vscale x 2 x i8> [[TMP4]], <vscale x 2 x i8>* [[V3:%.*]], align 4
+// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, i32 } [[TMP0]], 4
+// CHECK-RV32-NEXT: store <vscale x 2 x i8> [[TMP5]], <vscale x 2 x i8>* [[V4:%.*]], align 4
+// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, i32 } [[TMP0]], 5
+// CHECK-RV32-NEXT: store i32 [[TMP6]], i32* [[NEW_VL:%.*]], align 4
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vlseg5e8ff_v_i8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, i64 } @llvm.riscv.vlseg5ff.mask.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF0:%.*]], <vscale x 2 x i8> [[MASKEDOFF1:%.*]], <vscale x 2 x i8> [[MASKEDOFF2:%.*]], <vscale x 2 x i8> [[MASKEDOFF3:%.*]], <vscale x 2 x i8> [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP1]], <vscale x 2 x i8>* [[V0:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP2]], <vscale x 2 x i8>* [[V1:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP3]], <vscale x 2 x i8>* [[V2:%.*]], align 8
+//
CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg5e8ff_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, const int8_t *base, size_t *new_vl, size_t vl) { + return vlseg5e8ff(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg6e8ff_v_i8mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , i32 } @llvm.riscv.vlseg6ff.mask.nxv2i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 6 +// CHECK-RV32-NEXT: store i32 [[TMP7]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg6e8ff_v_i8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store i64 [[TMP7]], 
i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg6e8ff_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, const int8_t *base, size_t *new_vl, size_t vl) { + return vlseg6e8ff(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg7e8ff_v_i8mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , i32 } @llvm.riscv.vlseg7ff.mask.nxv2i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 6 +// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 7 +// CHECK-RV32-NEXT: store i32 [[TMP8]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg7e8ff_v_i8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: 
store i64 [[TMP8]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg7e8ff_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, const int8_t *base, size_t *new_vl, size_t vl) { + return vlseg7e8ff(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg8e8ff_v_i8mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i32 } @llvm.riscv.vlseg8ff.mask.nxv2i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 6 +// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 7 +// CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 8 +// CHECK-RV32-NEXT: store i32 [[TMP9]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg8e8ff_v_i8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 5 +// 
CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 8 +// CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg8e8ff_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t *v7, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, vint8mf4_t maskedoff7, const int8_t *base, size_t *new_vl, size_t vl) { + return vlseg8e8ff(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg2e8ff_v_i8mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.mask.nxv4i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg2e8ff_v_i8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e8ff_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, const int8_t *base, size_t *new_vl, size_t vl) { + return vlseg2e8ff(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg3e8ff_v_i8mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , i32 } @llvm.riscv.vlseg3ff.mask.nxv4i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: 
store i32 [[TMP4]], i32* [[NEW_VL:%.*]], align 4
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vlseg3e8ff_v_i8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, i64 } @llvm.riscv.vlseg3ff.mask.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF0:%.*]], <vscale x 4 x i8> [[MASKEDOFF1:%.*]], <vscale x 4 x i8> [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 4 x i8> [[TMP1]], <vscale x 4 x i8>* [[V0:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 4 x i8> [[TMP2]], <vscale x 4 x i8>* [[V1:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 4 x i8> [[TMP3]], <vscale x 4 x i8>* [[V2:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, i64 } [[TMP0]], 3
+// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 8
+// CHECK-RV64-NEXT: ret void
+//
+void test_vlseg3e8ff_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, const int8_t *base, size_t *new_vl, size_t vl) {
+ return vlseg3e8ff(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlseg4e8ff_v_i8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, i32 } @llvm.riscv.vlseg4ff.mask.nxv4i8.i32(<vscale x 4 x i8> [[MASKEDOFF0:%.*]], <vscale x 4 x i8> [[MASKEDOFF1:%.*]], <vscale x 4 x i8> [[MASKEDOFF2:%.*]], <vscale x 4 x i8> [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, i32 } [[TMP0]], 0
+// CHECK-RV32-NEXT: store <vscale x 4 x i8> [[TMP1]], <vscale x 4 x i8>* [[V0:%.*]], align 4
+// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, i32 } [[TMP0]], 1
+// CHECK-RV32-NEXT: store <vscale x 4 x i8> [[TMP2]], <vscale x 4 x i8>* [[V1:%.*]], align 4
+// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, i32 } [[TMP0]], 2
+// CHECK-RV32-NEXT: store <vscale x 4 x i8> [[TMP3]], <vscale x 4 x i8>* [[V2:%.*]], align 4
+// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, i32 } [[TMP0]], 3
+// CHECK-RV32-NEXT: store <vscale x 4 x i8> [[TMP4]], <vscale x 4 x i8>* [[V3:%.*]], align 4
+// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, i32 } [[TMP0]], 4
+// CHECK-RV32-NEXT: store i32 [[TMP5]], i32* [[NEW_VL:%.*]], align 4
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vlseg4e8ff_v_i8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, i64 } @llvm.riscv.vlseg4ff.mask.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF0:%.*]], <vscale x 4 x i8> [[MASKEDOFF1:%.*]], <vscale x 4 x i8> [[MASKEDOFF2:%.*]], <vscale x 4 x i8> [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 4 x i8> [[TMP1]], <vscale x 4 x i8>* [[V0:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 4 x i8> [[TMP2]], <vscale x 4 x i8>* [[V1:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 4 x i8> [[TMP3]], <vscale x 4 x i8>* [[V2:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, i64 } [[TMP0]], 3
+// CHECK-RV64-NEXT: store <vscale x 4 x i8> [[TMP4]], <vscale x 4 x i8>* [[V3:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, i64 } [[TMP0]], 4
+// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 8
+// CHECK-RV64-NEXT: ret void
+//
+void test_vlseg4e8ff_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, const int8_t *base, size_t *new_vl,
size_t vl) { + return vlseg4e8ff(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg5e8ff_v_i8mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , i32 } @llvm.riscv.vlseg5ff.mask.nxv4i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 5 +// CHECK-RV32-NEXT: store i32 [[TMP6]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg5e8ff_v_i8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg5e8ff_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, const int8_t *base, size_t *new_vl, size_t vl) { + return vlseg5e8ff(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg6e8ff_v_i8mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , i32 } @llvm.riscv.vlseg6ff.mask.nxv4i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * 
[[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 6 +// CHECK-RV32-NEXT: store i32 [[TMP7]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg6e8ff_v_i8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg6e8ff_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, const int8_t *base, size_t *new_vl, size_t vl) { + return vlseg6e8ff(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg7e8ff_v_i8mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , i32 } @llvm.riscv.vlseg7ff.mask.nxv4i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// 
CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 6 +// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 7 +// CHECK-RV32-NEXT: store i32 [[TMP8]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg7e8ff_v_i8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg7e8ff_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, const int8_t *base, size_t *new_vl, size_t vl) { + return vlseg7e8ff(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg8e8ff_v_i8mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i32 } @llvm.riscv.vlseg8ff.mask.nxv4i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = 
extractvalue { , , , , , , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 6 +// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 7 +// CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 8 +// CHECK-RV32-NEXT: store i32 [[TMP9]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg8e8ff_v_i8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 8 +// CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg8e8ff_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t *v7, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, vint8mf2_t maskedoff7, const int8_t *base, size_t *new_vl, size_t vl) { + return vlseg8e8ff(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg2e8ff_v_i8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.mask.nxv8i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store 
[[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg2e8ff_v_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e8ff_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, const int8_t *base, size_t *new_vl, size_t vl) { + return vlseg2e8ff(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg3e8ff_v_i8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , i32 } @llvm.riscv.vlseg3ff.mask.nxv8i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg3e8ff_v_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e8ff_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, const int8_t *base, size_t *new_vl, size_t vl) { + return vlseg3e8ff(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg4e8ff_v_i8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , i32 } @llvm.riscv.vlseg4ff.mask.nxv8i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], 
[[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store i32 [[TMP5]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg4e8ff_v_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e8ff_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, const int8_t *base, size_t *new_vl, size_t vl) { + return vlseg4e8ff(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg5e8ff_v_i8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , i32 } @llvm.riscv.vlseg5ff.mask.nxv8i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 5 +// CHECK-RV32-NEXT: store i32 [[TMP6]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg5e8ff_v_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv8i8.i64( 
[[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg5e8ff_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, const int8_t *base, size_t *new_vl, size_t vl) { + return vlseg5e8ff(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg6e8ff_v_i8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , i32 } @llvm.riscv.vlseg6ff.mask.nxv8i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 6 +// CHECK-RV32-NEXT: store i32 [[TMP7]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg6e8ff_v_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * 
[[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg6e8ff_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, const int8_t *base, size_t *new_vl, size_t vl) { + return vlseg6e8ff(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg7e8ff_v_i8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , i32 } @llvm.riscv.vlseg7ff.mask.nxv8i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 6 +// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 7 +// CHECK-RV32-NEXT: store i32 [[TMP8]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg7e8ff_v_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * 
[[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg7e8ff_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, const int8_t *base, size_t *new_vl, size_t vl) { + return vlseg7e8ff(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg8e8ff_v_i8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i32 } @llvm.riscv.vlseg8ff.mask.nxv8i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 6 +// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 7 +// CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 8 +// CHECK-RV32-NEXT: store i32 [[TMP9]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg8e8ff_v_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 8 +// CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg8e8ff_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vint8m1_t *v7, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, vint8m1_t maskedoff7, const int8_t *base, size_t *new_vl, size_t vl) { + return vlseg8e8ff(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg2e8ff_v_i8m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.mask.nxv16i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg2e8ff_v_i8m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv16i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e8ff_v_i8m2_m (vint8m2_t *v0, vint8m2_t *v1, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, const int8_t *base, size_t *new_vl, size_t vl) { + return vlseg2e8ff(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg3e8ff_v_i8m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , i32 } @llvm.riscv.vlseg3ff.mask.nxv16i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// 
CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg3e8ff_v_i8m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv16i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e8ff_v_i8m2_m (vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, const int8_t *base, size_t *new_vl, size_t vl) { + return vlseg3e8ff(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg4e8ff_v_i8m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , i32 } @llvm.riscv.vlseg4ff.mask.nxv16i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store i32 [[TMP5]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg4e8ff_v_i8m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv16i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3 
+// CHECK-RV64-NEXT: store <vscale x 16 x i8> [[TMP4]], <vscale x 16 x i8>* [[V3:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, i64 } [[TMP0]], 4
+// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 8
+// CHECK-RV64-NEXT: ret void
+//
+void test_vlseg4e8ff_v_i8m2_m (vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vint8m2_t *v3, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, vint8m2_t maskedoff3, const int8_t *base, size_t *new_vl, size_t vl) {
+ return vlseg4e8ff(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlseg2e8ff_v_i8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { <vscale x 32 x i8>, <vscale x 32 x i8>, i32 } @llvm.riscv.vlseg2ff.mask.nxv32i8.i32(<vscale x 32 x i8> [[MASKEDOFF0:%.*]], <vscale x 32 x i8> [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 32 x i8>, <vscale x 32 x i8>, i32 } [[TMP0]], 0
+// CHECK-RV32-NEXT: store <vscale x 32 x i8> [[TMP1]], <vscale x 32 x i8>* [[V0:%.*]], align 4
+// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 32 x i8>, <vscale x 32 x i8>, i32 } [[TMP0]], 1
+// CHECK-RV32-NEXT: store <vscale x 32 x i8> [[TMP2]], <vscale x 32 x i8>* [[V1:%.*]], align 4
+// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 32 x i8>, <vscale x 32 x i8>, i32 } [[TMP0]], 2
+// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 4
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vlseg2e8ff_v_i8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 32 x i8>, <vscale x 32 x i8>, i64 } @llvm.riscv.vlseg2ff.mask.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF0:%.*]], <vscale x 32 x i8> [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 32 x i8>, <vscale x 32 x i8>, i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 32 x i8> [[TMP1]], <vscale x 32 x i8>* [[V0:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 32 x i8>, <vscale x 32 x i8>, i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 32 x i8> [[TMP2]], <vscale x 32 x i8>* [[V1:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 32 x i8>, <vscale x 32 x i8>, i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
+// CHECK-RV64-NEXT: ret void
+//
+void test_vlseg2e8ff_v_i8m4_m (vint8m4_t *v0, vint8m4_t *v1, vbool2_t mask, vint8m4_t maskedoff0, vint8m4_t maskedoff1, const int8_t *base, size_t *new_vl, size_t vl) {
+ return vlseg2e8ff(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlseg2e16ff_v_i16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i16>, <vscale x 1 x i16>, i32 } @llvm.riscv.vlseg2ff.mask.nxv1i16.i32(<vscale x 1 x i16> [[MASKEDOFF0:%.*]], <vscale x 1 x i16> [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, i32 } [[TMP0]], 0
+// CHECK-RV32-NEXT: store <vscale x 1 x i16> [[TMP1]], <vscale x 1 x i16>* [[V0:%.*]], align 4
+// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, i32 } [[TMP0]], 1
+// CHECK-RV32-NEXT: store <vscale x 1 x i16> [[TMP2]], <vscale x 1 x i16>* [[V1:%.*]], align 4
+// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, i32 } [[TMP0]], 2
+// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 4
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vlseg2e16ff_v_i16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i16>, <vscale x 1 x i16>, i64 } @llvm.riscv.vlseg2ff.mask.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF0:%.*]], <vscale x 1 x i16> [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 1 x i16> [[TMP1]], <vscale x 1 x i16>* [[V0:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 1 x i16> [[TMP2]], <vscale x 1 x i16>* [[V1:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
+// CHECK-RV64-NEXT: ret void
+//
+void test_vlseg2e16ff_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, const int16_t *base, size_t *new_vl, size_t vl) {
+ return vlseg2e16ff(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlseg3e16ff_v_i16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, i32 } @llvm.riscv.vlseg3ff.mask.nxv1i16.i32(<vscale x 1 x i16> [[MASKEDOFF0:%.*]], <vscale x 1 x i16> [[MASKEDOFF1:%.*]], <vscale x 1 x i16> [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, i32 } [[TMP0]], 0
+// CHECK-RV32-NEXT: store <vscale x 1 x i16> [[TMP1]], <vscale x 1 x i16>* [[V0:%.*]], align 4
+// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, i32 } [[TMP0]], 1
+// CHECK-RV32-NEXT: store <vscale x 1 x i16> [[TMP2]], <vscale x 1 x i16>* [[V1:%.*]], align 4
+// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, i32 } [[TMP0]], 2
+// CHECK-RV32-NEXT: store <vscale x 1 x i16> [[TMP3]], <vscale x 1 x i16>* [[V2:%.*]], align 4
+// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, i32 } [[TMP0]], 3
+// CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL:%.*]], align 4
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vlseg3e16ff_v_i16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, i64 } @llvm.riscv.vlseg3ff.mask.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF0:%.*]], <vscale x 1 x i16> [[MASKEDOFF1:%.*]], <vscale x 1 x i16> [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 1 x i16> [[TMP1]], <vscale x 1 x i16>* [[V0:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 1 x i16> [[TMP2]], <vscale x 1 x i16>* [[V1:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 1 x i16> [[TMP3]], <vscale x 1 x i16>* [[V2:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, i64 } [[TMP0]], 3
+// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 8
+// CHECK-RV64-NEXT: ret void
+//
+void test_vlseg3e16ff_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, const int16_t *base, size_t *new_vl, size_t vl) {
+ return vlseg3e16ff(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlseg4e16ff_v_i16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, i32 } @llvm.riscv.vlseg4ff.mask.nxv1i16.i32(<vscale x 1 x i16> [[MASKEDOFF0:%.*]], <vscale x 1 x i16> [[MASKEDOFF1:%.*]], <vscale x 1 x i16> [[MASKEDOFF2:%.*]], <vscale x 1 x i16> [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, i32 } [[TMP0]], 0
+// CHECK-RV32-NEXT: store <vscale x 1 x i16> [[TMP1]], <vscale x 1 x i16>* [[V0:%.*]], align 4
+// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, i32 } [[TMP0]], 1
+// CHECK-RV32-NEXT: store <vscale x 1 x i16> [[TMP2]], <vscale x 1 x i16>* [[V1:%.*]], align 4
+// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, i32 } [[TMP0]], 2
+// CHECK-RV32-NEXT: store <vscale x 1 x i16> [[TMP3]], <vscale x 1 x i16>* [[V2:%.*]], align 4
+// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, i32 } [[TMP0]], 3
+// CHECK-RV32-NEXT: store <vscale x 1 x i16> [[TMP4]], <vscale x 1 x i16>* [[V3:%.*]], align 4
+// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, i32 } [[TMP0]], 4
+// CHECK-RV32-NEXT: store i32 [[TMP5]], i32* [[NEW_VL:%.*]], align 4
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vlseg4e16ff_v_i16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT:
[[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e16ff_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, const int16_t *base, size_t *new_vl, size_t vl) { + return vlseg4e16ff(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg5e16ff_v_i16mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , i32 } @llvm.riscv.vlseg5ff.mask.nxv1i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 5 +// CHECK-RV32-NEXT: store i32 [[TMP6]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg5e16ff_v_i16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: 
[[TMP6:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg5e16ff_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, const int16_t *base, size_t *new_vl, size_t vl) { + return vlseg5e16ff(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg6e16ff_v_i16mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , i32 } @llvm.riscv.vlseg6ff.mask.nxv1i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 6 +// CHECK-RV32-NEXT: store i32 [[TMP7]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg6e16ff_v_i16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg6e16ff_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, 
vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, const int16_t *base, size_t *new_vl, size_t vl) { + return vlseg6e16ff(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg7e16ff_v_i16mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , i32 } @llvm.riscv.vlseg7ff.mask.nxv1i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 6 +// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 7 +// CHECK-RV32-NEXT: store i32 [[TMP8]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg7e16ff_v_i16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg7e16ff_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, 
vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, const int16_t *base, size_t *new_vl, size_t vl) { + return vlseg7e16ff(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg8e16ff_v_i16mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i32 } @llvm.riscv.vlseg8ff.mask.nxv1i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 6 +// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 7 +// CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 8 +// CHECK-RV32-NEXT: store i32 [[TMP9]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg8e16ff_v_i16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: 
[[TMP8:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 8 +// CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg8e16ff_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t *v7, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, vint16mf4_t maskedoff7, const int16_t *base, size_t *new_vl, size_t vl) { + return vlseg8e16ff(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg2e16ff_v_i16mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.mask.nxv2i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg2e16ff_v_i16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e16ff_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, const int16_t *base, size_t *new_vl, size_t vl) { + return vlseg2e16ff(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg3e16ff_v_i16mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , i32 } @llvm.riscv.vlseg3ff.mask.nxv2i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg3e16ff_v_i16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e16ff_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, const int16_t *base, size_t *new_vl, size_t vl) { + return vlseg3e16ff(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg4e16ff_v_i16mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , i32 } @llvm.riscv.vlseg4ff.mask.nxv2i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store i32 [[TMP5]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg4e16ff_v_i16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e16ff_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, const int16_t *base, size_t *new_vl, size_t vl) { + return vlseg4e16ff(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: 
@test_vlseg5e16ff_v_i16mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , i32 } @llvm.riscv.vlseg5ff.mask.nxv2i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 5 +// CHECK-RV32-NEXT: store i32 [[TMP6]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg5e16ff_v_i16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg5e16ff_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, const int16_t *base, size_t *new_vl, size_t vl) { + return vlseg5e16ff(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg6e16ff_v_i16mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , i32 } @llvm.riscv.vlseg6ff.mask.nxv2i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store 
[[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 6 +// CHECK-RV32-NEXT: store i32 [[TMP7]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg6e16ff_v_i16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg6e16ff_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, const int16_t *base, size_t *new_vl, size_t vl) { + return vlseg6e16ff(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg7e16ff_v_i16mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , i32 } @llvm.riscv.vlseg7ff.mask.nxv2i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store 
[[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 6 +// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 7 +// CHECK-RV32-NEXT: store i32 [[TMP8]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg7e16ff_v_i16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg7e16ff_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, const int16_t *base, size_t *new_vl, size_t vl) { + return vlseg7e16ff(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg8e16ff_v_i16mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i32 } @llvm.riscv.vlseg8ff.mask.nxv2i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * 
[[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 6 +// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 7 +// CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 8 +// CHECK-RV32-NEXT: store i32 [[TMP9]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg8e16ff_v_i16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 8 +// CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg8e16ff_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t *v7, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, vint16mf2_t maskedoff7, const int16_t *base, size_t *new_vl, size_t vl) { + return vlseg8e16ff(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg2e16ff_v_i16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.mask.nxv4i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = 
extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, i32 } [[TMP0]], 1
+// CHECK-RV32-NEXT: store <vscale x 4 x i16> [[TMP2]], <vscale x 4 x i16>* [[V1:%.*]], align 4
+// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, i32 } [[TMP0]], 2
+// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 4
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vlseg2e16ff_v_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x i16>, <vscale x 4 x i16>, i64 } @llvm.riscv.vlseg2ff.mask.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF0:%.*]], <vscale x 4 x i16> [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 4 x i16> [[TMP1]], <vscale x 4 x i16>* [[V0:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 4 x i16> [[TMP2]], <vscale x 4 x i16>* [[V1:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
+// CHECK-RV64-NEXT: ret void
+//
+void test_vlseg2e16ff_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, const int16_t *base, size_t *new_vl, size_t vl) {
+ return vlseg2e16ff(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlseg3e16ff_v_i16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, i32 } @llvm.riscv.vlseg3ff.mask.nxv4i16.i32(<vscale x 4 x i16> [[MASKEDOFF0:%.*]], <vscale x 4 x i16> [[MASKEDOFF1:%.*]], <vscale x 4 x i16> [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, i32 } [[TMP0]], 0
+// CHECK-RV32-NEXT: store <vscale x 4 x i16> [[TMP1]], <vscale x 4 x i16>* [[V0:%.*]], align 4
+// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, i32 } [[TMP0]], 1
+// CHECK-RV32-NEXT: store <vscale x 4 x i16> [[TMP2]], <vscale x 4 x i16>* [[V1:%.*]], align 4
+// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, i32 } [[TMP0]], 2
+// CHECK-RV32-NEXT: store <vscale x 4 x i16> [[TMP3]], <vscale x 4 x i16>* [[V2:%.*]], align 4
+// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, i32 } [[TMP0]], 3
+// CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL:%.*]], align 4
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vlseg3e16ff_v_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, i64 } @llvm.riscv.vlseg3ff.mask.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF0:%.*]], <vscale x 4 x i16> [[MASKEDOFF1:%.*]], <vscale x 4 x i16> [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 4 x i16> [[TMP1]], <vscale x 4 x i16>* [[V0:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 4 x i16> [[TMP2]], <vscale x 4 x i16>* [[V1:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 4 x i16> [[TMP3]], <vscale x 4 x i16>* [[V2:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, i64 } [[TMP0]], 3
+// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 8
+// CHECK-RV64-NEXT: ret void
+//
+void test_vlseg3e16ff_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, const int16_t *base, size_t *new_vl, size_t vl) {
+ return vlseg3e16ff(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlseg4e16ff_v_i16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, i32 } @llvm.riscv.vlseg4ff.mask.nxv4i16.i32(<vscale x 4 x i16> [[MASKEDOFF0:%.*]], <vscale x 4 x i16> [[MASKEDOFF1:%.*]], <vscale x 4 x i16> [[MASKEDOFF2:%.*]], <vscale x 4 x i16> [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]],
[[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store i32 [[TMP5]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg4e16ff_v_i16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e16ff_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, const int16_t *base, size_t *new_vl, size_t vl) { + return vlseg4e16ff(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg5e16ff_v_i16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , i32 } @llvm.riscv.vlseg5ff.mask.nxv4i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 5 +// CHECK-RV32-NEXT: store i32 [[TMP6]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg5e16ff_v_i16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], 
[[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg5e16ff_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, const int16_t *base, size_t *new_vl, size_t vl) { + return vlseg5e16ff(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg6e16ff_v_i16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , i32 } @llvm.riscv.vlseg6ff.mask.nxv4i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 6 +// CHECK-RV32-NEXT: store i32 [[TMP7]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg6e16ff_v_i16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * 
[[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg6e16ff_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, const int16_t *base, size_t *new_vl, size_t vl) { + return vlseg6e16ff(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg7e16ff_v_i16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , i32 } @llvm.riscv.vlseg7ff.mask.nxv4i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 6 +// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 7 +// CHECK-RV32-NEXT: store i32 [[TMP8]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg7e16ff_v_i16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 3 +// 
CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg7e16ff_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, const int16_t *base, size_t *new_vl, size_t vl) { + return vlseg7e16ff(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg8e16ff_v_i16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i32 } @llvm.riscv.vlseg8ff.mask.nxv4i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 6 +// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 7 +// CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 8 +// CHECK-RV32-NEXT: store i32 [[TMP9]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg8e16ff_v_i16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } 
[[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 8 +// CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg8e16ff_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vint16m1_t *v7, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, vint16m1_t maskedoff7, const int16_t *base, size_t *new_vl, size_t vl) { + return vlseg8e16ff(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg2e16ff_v_i16m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.mask.nxv8i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg2e16ff_v_i16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e16ff_v_i16m2_m (vint16m2_t *v0, vint16m2_t *v1, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, const int16_t *base, size_t *new_vl, size_t vl) { + return vlseg2e16ff(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg3e16ff_v_i16m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , i32 } @llvm.riscv.vlseg3ff.mask.nxv8i16.i32( 
[[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg3e16ff_v_i16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e16ff_v_i16m2_m (vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, const int16_t *base, size_t *new_vl, size_t vl) { + return vlseg3e16ff(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg4e16ff_v_i16m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , i32 } @llvm.riscv.vlseg4ff.mask.nxv8i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store i32 [[TMP5]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg4e16ff_v_i16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 2 +// 
CHECK-RV64-NEXT: store <vscale x 8 x i16> [[TMP3]], <vscale x 8 x i16>* [[V2:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, i64 } [[TMP0]], 3
+// CHECK-RV64-NEXT: store <vscale x 8 x i16> [[TMP4]], <vscale x 8 x i16>* [[V3:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, i64 } [[TMP0]], 4
+// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 8
+// CHECK-RV64-NEXT: ret void
+//
+void test_vlseg4e16ff_v_i16m2_m (vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vint16m2_t *v3, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, vint16m2_t maskedoff3, const int16_t *base, size_t *new_vl, size_t vl) {
+  return vlseg4e16ff(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlseg2e16ff_v_i16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { <vscale x 16 x i16>, <vscale x 16 x i16>, i32 } @llvm.riscv.vlseg2ff.mask.nxv16i16.i32(<vscale x 16 x i16> [[MASKEDOFF0:%.*]], <vscale x 16 x i16> [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 16 x i16>, <vscale x 16 x i16>, i32 } [[TMP0]], 0
+// CHECK-RV32-NEXT: store <vscale x 16 x i16> [[TMP1]], <vscale x 16 x i16>* [[V0:%.*]], align 4
+// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 16 x i16>, <vscale x 16 x i16>, i32 } [[TMP0]], 1
+// CHECK-RV32-NEXT: store <vscale x 16 x i16> [[TMP2]], <vscale x 16 x i16>* [[V1:%.*]], align 4
+// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 16 x i16>, <vscale x 16 x i16>, i32 } [[TMP0]], 2
+// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 4
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vlseg2e16ff_v_i16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 16 x i16>, <vscale x 16 x i16>, i64 } @llvm.riscv.vlseg2ff.mask.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF0:%.*]], <vscale x 16 x i16> [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 16 x i16>, <vscale x 16 x i16>, i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 16 x i16> [[TMP1]], <vscale x 16 x i16>* [[V0:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 16 x i16>, <vscale x 16 x i16>, i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 16 x i16> [[TMP2]], <vscale x 16 x i16>* [[V1:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 16 x i16>, <vscale x 16 x i16>, i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
+// CHECK-RV64-NEXT: ret void
+//
+void test_vlseg2e16ff_v_i16m4_m (vint16m4_t *v0, vint16m4_t *v1, vbool4_t mask, vint16m4_t maskedoff0, vint16m4_t maskedoff1, const int16_t *base, size_t *new_vl, size_t vl) {
+  return vlseg2e16ff(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlseg2e32ff_v_i32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32>, i32 } @llvm.riscv.vlseg2ff.mask.nxv1i32.i32(<vscale x 1 x i32> [[MASKEDOFF0:%.*]], <vscale x 1 x i32> [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, i32 } [[TMP0]], 0
+// CHECK-RV32-NEXT: store <vscale x 1 x i32> [[TMP1]], <vscale x 1 x i32>* [[V0:%.*]], align 4
+// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, i32 } [[TMP0]], 1
+// CHECK-RV32-NEXT: store <vscale x 1 x i32> [[TMP2]], <vscale x 1 x i32>* [[V1:%.*]], align 4
+// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, i32 } [[TMP0]], 2
+// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 4
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vlseg2e32ff_v_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32>, i64 } @llvm.riscv.vlseg2ff.mask.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF0:%.*]], <vscale x 1 x i32> [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP1]], <vscale x 1 x i32>* [[V0:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>
, i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e32ff_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, const int32_t *base, size_t *new_vl, size_t vl) { + return vlseg2e32ff(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg3e32ff_v_i32mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , i32 } @llvm.riscv.vlseg3ff.mask.nxv1i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg3e32ff_v_i32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e32ff_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, const int32_t *base, size_t *new_vl, size_t vl) { + return vlseg3e32ff(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg4e32ff_v_i32mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , i32 } @llvm.riscv.vlseg4ff.mask.nxv1i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store i32 [[TMP5]], i32* 
[[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg4e32ff_v_i32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e32ff_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, const int32_t *base, size_t *new_vl, size_t vl) { + return vlseg4e32ff(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg5e32ff_v_i32mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , i32 } @llvm.riscv.vlseg5ff.mask.nxv1i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 5 +// CHECK-RV32-NEXT: store i32 [[TMP6]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg5e32ff_v_i32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// 
CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg5e32ff_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, const int32_t *base, size_t *new_vl, size_t vl) { + return vlseg5e32ff(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg6e32ff_v_i32mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , i32 } @llvm.riscv.vlseg6ff.mask.nxv1i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 6 +// CHECK-RV32-NEXT: store i32 [[TMP7]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg6e32ff_v_i32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg6e32ff_v_i32mf2_m (vint32mf2_t *v0, 
vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, const int32_t *base, size_t *new_vl, size_t vl) { + return vlseg6e32ff(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg7e32ff_v_i32mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , i32 } @llvm.riscv.vlseg7ff.mask.nxv1i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 6 +// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 7 +// CHECK-RV32-NEXT: store i32 [[TMP8]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg7e32ff_v_i32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void 
test_vlseg7e32ff_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, const int32_t *base, size_t *new_vl, size_t vl) { + return vlseg7e32ff(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg8e32ff_v_i32mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i32 } @llvm.riscv.vlseg8ff.mask.nxv1i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 6 +// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 7 +// CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 8 +// CHECK-RV32-NEXT: store i32 [[TMP9]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg8e32ff_v_i32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// 
CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 8 +// CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg8e32ff_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t *v7, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, vint32mf2_t maskedoff7, const int32_t *base, size_t *new_vl, size_t vl) { + return vlseg8e32ff(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg2e32ff_v_i32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.mask.nxv2i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg2e32ff_v_i32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e32ff_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, const int32_t *base, size_t *new_vl, size_t vl) { + return vlseg2e32ff(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg3e32ff_v_i32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , i32 } @llvm.riscv.vlseg3ff.mask.nxv2i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store i32 [[TMP4]], i32* 
[[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg3e32ff_v_i32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e32ff_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, const int32_t *base, size_t *new_vl, size_t vl) { + return vlseg3e32ff(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg4e32ff_v_i32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , i32 } @llvm.riscv.vlseg4ff.mask.nxv2i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store i32 [[TMP5]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg4e32ff_v_i32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e32ff_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, const int32_t *base, size_t *new_vl, size_t vl) { + 
return vlseg4e32ff(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg5e32ff_v_i32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , i32 } @llvm.riscv.vlseg5ff.mask.nxv2i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 5 +// CHECK-RV32-NEXT: store i32 [[TMP6]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg5e32ff_v_i32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg5e32ff_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, const int32_t *base, size_t *new_vl, size_t vl) { + return vlseg5e32ff(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg6e32ff_v_i32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , i32 } @llvm.riscv.vlseg6ff.mask.nxv2i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * 
[[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 6 +// CHECK-RV32-NEXT: store i32 [[TMP7]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg6e32ff_v_i32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg6e32ff_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, const int32_t *base, size_t *new_vl, size_t vl) { + return vlseg6e32ff(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg7e32ff_v_i32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , i32 } @llvm.riscv.vlseg7ff.mask.nxv2i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 
4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 6 +// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 7 +// CHECK-RV32-NEXT: store i32 [[TMP8]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg7e32ff_v_i32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg7e32ff_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, const int32_t *base, size_t *new_vl, size_t vl) { + return vlseg7e32ff(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg8e32ff_v_i32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i32 } @llvm.riscv.vlseg8ff.mask.nxv2i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: 
[[TMP4:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 6 +// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 7 +// CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 8 +// CHECK-RV32-NEXT: store i32 [[TMP9]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg8e32ff_v_i32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 8 +// CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg8e32ff_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vint32m1_t *v7, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, vint32m1_t maskedoff7, const int32_t *base, size_t *new_vl, size_t vl) { + return vlseg8e32ff(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg2e32ff_v_i32m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.mask.nxv4i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , i32 } [[TMP0]], 0 +// 
CHECK-RV32-NEXT: store <vscale x 4 x i32> [[TMP1]], <vscale x 4 x i32>* [[V0:%.*]], align 4
+// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, i32 } [[TMP0]], 1
+// CHECK-RV32-NEXT: store <vscale x 4 x i32> [[TMP2]], <vscale x 4 x i32>* [[V1:%.*]], align 4
+// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, i32 } [[TMP0]], 2
+// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 4
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vlseg2e32ff_v_i32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x i32>, <vscale x 4 x i32>, i64 } @llvm.riscv.vlseg2ff.mask.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF0:%.*]], <vscale x 4 x i32> [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 4 x i32> [[TMP1]], <vscale x 4 x i32>* [[V0:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 4 x i32> [[TMP2]], <vscale x 4 x i32>* [[V1:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
+// CHECK-RV64-NEXT: ret void
+//
+void test_vlseg2e32ff_v_i32m2_m (vint32m2_t *v0, vint32m2_t *v1, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, const int32_t *base, size_t *new_vl, size_t vl) {
+  return vlseg2e32ff(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlseg3e32ff_v_i32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, i32 } @llvm.riscv.vlseg3ff.mask.nxv4i32.i32(<vscale x 4 x i32> [[MASKEDOFF0:%.*]], <vscale x 4 x i32> [[MASKEDOFF1:%.*]], <vscale x 4 x i32> [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, i32 } [[TMP0]], 0
+// CHECK-RV32-NEXT: store <vscale x 4 x i32> [[TMP1]], <vscale x 4 x i32>* [[V0:%.*]], align 4
+// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, i32 } [[TMP0]], 1
+// CHECK-RV32-NEXT: store <vscale x 4 x i32> [[TMP2]], <vscale x 4 x i32>* [[V1:%.*]], align 4
+// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, i32 } [[TMP0]], 2
+// CHECK-RV32-NEXT: store <vscale x 4 x i32> [[TMP3]], <vscale x 4 x i32>* [[V2:%.*]], align 4
+// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, i32 } [[TMP0]], 3
+// CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL:%.*]], align 4
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vlseg3e32ff_v_i32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, i64 } @llvm.riscv.vlseg3ff.mask.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF0:%.*]], <vscale x 4 x i32> [[MASKEDOFF1:%.*]], <vscale x 4 x i32> [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 4 x i32> [[TMP1]], <vscale x 4 x i32>* [[V0:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 4 x i32> [[TMP2]], <vscale x 4 x i32>* [[V1:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 4 x i32> [[TMP3]], <vscale x 4 x i32>* [[V2:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, i64 } [[TMP0]], 3
+// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 8
+// CHECK-RV64-NEXT: ret void
+//
+void test_vlseg3e32ff_v_i32m2_m (vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, const int32_t *base, size_t *new_vl, size_t vl) {
+  return vlseg3e32ff(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlseg4e32ff_v_i32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, i32 } @llvm.riscv.vlseg4ff.mask.nxv4i32.i32(<vscale x 4 x i32> 
[[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store i32 [[TMP5]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg4e32ff_v_i32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e32ff_v_i32m2_m (vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vint32m2_t *v3, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, vint32m2_t maskedoff3, const int32_t *base, size_t *new_vl, size_t vl) { + return vlseg4e32ff(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg2e32ff_v_i32m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.mask.nxv8i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg2e32ff_v_i32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 
+// CHECK-RV64-NEXT:    store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg2e32ff_v_i32m4_m (vint32m4_t *v0, vint32m4_t *v1, vbool8_t mask, vint32m4_t maskedoff0, vint32m4_t maskedoff1, const int32_t *base, size_t *new_vl, size_t vl) {
+  return vlseg2e32ff(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlseg2e64ff_v_i64m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i64>, <vscale x 1 x i64>, i32 } @llvm.riscv.vlseg2ff.mask.nxv1i64.i32(<vscale x 1 x i64> [[MASKEDOFF0:%.*]], <vscale x 1 x i64> [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, i32 } [[TMP0]], 0
+// CHECK-RV32-NEXT:    store <vscale x 1 x i64> [[TMP1]], <vscale x 1 x i64>* [[V0:%.*]], align 4
+// CHECK-RV32-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, i32 } [[TMP0]], 1
+// CHECK-RV32-NEXT:    store <vscale x 1 x i64> [[TMP2]], <vscale x 1 x i64>* [[V1:%.*]], align 4
+// CHECK-RV32-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, i32 } [[TMP0]], 2
+// CHECK-RV32-NEXT:    store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 4
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_vlseg2e64ff_v_i64m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i64>, <vscale x 1 x i64>, i64 } @llvm.riscv.vlseg2ff.mask.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF0:%.*]], <vscale x 1 x i64> [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x i64> [[TMP1]], <vscale x 1 x i64>* [[V0:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x i64> [[TMP2]], <vscale x 1 x i64>* [[V1:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg2e64ff_v_i64m1_m (vint64m1_t *v0, vint64m1_t *v1, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, const int64_t *base, size_t *new_vl, size_t vl) {
+  return vlseg2e64ff(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlseg3e64ff_v_i64m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, i32 } @llvm.riscv.vlseg3ff.mask.nxv1i64.i32(<vscale x 1 x i64> [[MASKEDOFF0:%.*]], <vscale x 1 x i64> [[MASKEDOFF1:%.*]], <vscale x 1 x i64> [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, i32 } [[TMP0]], 0
+// CHECK-RV32-NEXT:    store <vscale x 1 x i64> [[TMP1]], <vscale x 1 x i64>* [[V0:%.*]], align 4
+// CHECK-RV32-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, i32 } [[TMP0]], 1
+// CHECK-RV32-NEXT:    store <vscale x 1 x i64> [[TMP2]], <vscale x 1 x i64>* [[V1:%.*]], align 4
+// CHECK-RV32-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, i32 } [[TMP0]], 2
+// CHECK-RV32-NEXT:    store <vscale x 1 x i64> [[TMP3]], <vscale x 1 x i64>* [[V2:%.*]], align 4
+// CHECK-RV32-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, i32 } [[TMP0]], 3
+// CHECK-RV32-NEXT:    store i32 [[TMP4]], i32* [[NEW_VL:%.*]], align 4
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_vlseg3e64ff_v_i64m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, i64 } @llvm.riscv.vlseg3ff.mask.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF0:%.*]], <vscale x 1 x i64> [[MASKEDOFF1:%.*]], <vscale x 1 x i64> [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x i64> [[TMP1]], <vscale x 1 x i64>* [[V0:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x i64> [[TMP2]], <vscale x 1 x i64>* [[V1:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x i64> [[TMP3]], <vscale x 1 x i64>*
[[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e64ff_v_i64m1_m (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, const int64_t *base, size_t *new_vl, size_t vl) { + return vlseg3e64ff(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg4e64ff_v_i64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , i32 } @llvm.riscv.vlseg4ff.mask.nxv1i64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store i32 [[TMP5]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg4e64ff_v_i64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e64ff_v_i64m1_m (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, const int64_t *base, size_t *new_vl, size_t vl) { + return vlseg4e64ff(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg5e64ff_v_i64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , i32 } @llvm.riscv.vlseg5ff.mask.nxv1i64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// 
CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 5 +// CHECK-RV32-NEXT: store i32 [[TMP6]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg5e64ff_v_i64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg5e64ff_v_i64m1_m (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, const int64_t *base, size_t *new_vl, size_t vl) { + return vlseg5e64ff(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg6e64ff_v_i64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , i32 } @llvm.riscv.vlseg6ff.mask.nxv1i64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 6 +// CHECK-RV32-NEXT: store i32 [[TMP7]], i32* [[NEW_VL:%.*]], align 
4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg6e64ff_v_i64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg6e64ff_v_i64m1_m (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, const int64_t *base, size_t *new_vl, size_t vl) { + return vlseg6e64ff(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg7e64ff_v_i64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , i32 } @llvm.riscv.vlseg7ff.mask.nxv1i64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 6 +// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 7 +// CHECK-RV32-NEXT: store i32 [[TMP8]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg7e64ff_v_i64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] 
= call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg7e64ff_v_i64m1_m (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, const int64_t *base, size_t *new_vl, size_t vl) { + return vlseg7e64ff(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg8e64ff_v_i64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i32 } @llvm.riscv.vlseg8ff.mask.nxv1i64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 6 +// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 7 +// CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP9:%.*]] = 
extractvalue { , , , , , , , , i32 } [[TMP0]], 8 +// CHECK-RV32-NEXT: store i32 [[TMP9]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg8e64ff_v_i64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 8 +// CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg8e64ff_v_i64m1_m (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vint64m1_t *v7, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, vint64m1_t maskedoff7, const int64_t *base, size_t *new_vl, size_t vl) { + return vlseg8e64ff(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg2e64ff_v_i64m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.mask.nxv2i64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg2e64ff_v_i64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * 
[[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e64ff_v_i64m2_m (vint64m2_t *v0, vint64m2_t *v1, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, const int64_t *base, size_t *new_vl, size_t vl) { + return vlseg2e64ff(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg3e64ff_v_i64m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , i32 } @llvm.riscv.vlseg3ff.mask.nxv2i64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg3e64ff_v_i64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e64ff_v_i64m2_m (vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, const int64_t *base, size_t *new_vl, size_t vl) { + return vlseg3e64ff(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg4e64ff_v_i64m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , i32 } @llvm.riscv.vlseg4ff.mask.nxv2i64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 
4 +// CHECK-RV32-NEXT: store i32 [[TMP5]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg4e64ff_v_i64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e64ff_v_i64m2_m (vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vint64m2_t *v3, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, vint64m2_t maskedoff3, const int64_t *base, size_t *new_vl, size_t vl) { + return vlseg4e64ff(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg2e64ff_v_i64m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.mask.nxv4i64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg2e64ff_v_i64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e64ff_v_i64m4_m (vint64m4_t *v0, vint64m4_t *v1, vbool16_t mask, vint64m4_t maskedoff0, vint64m4_t maskedoff1, const int64_t *base, size_t *new_vl, size_t vl) { + return vlseg2e64ff(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg2e8ff_v_u8mf8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.mask.nxv1i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], 
align 4
+// CHECK-RV32-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, i32 } [[TMP0]], 1
+// CHECK-RV32-NEXT:    store <vscale x 1 x i8> [[TMP2]], <vscale x 1 x i8>* [[V1:%.*]], align 4
+// CHECK-RV32-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, i32 } [[TMP0]], 2
+// CHECK-RV32-NEXT:    store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 4
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_vlseg2e8ff_v_u8mf8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, i64 } @llvm.riscv.vlseg2ff.mask.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF0:%.*]], <vscale x 1 x i8> [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP1]], <vscale x 1 x i8>* [[V0:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP2]], <vscale x 1 x i8>* [[V1:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg2e8ff_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, const uint8_t *base, size_t *new_vl, size_t vl) {
+  return vlseg2e8ff(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlseg3e8ff_v_u8mf8_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, i32 } @llvm.riscv.vlseg3ff.mask.nxv1i8.i32(<vscale x 1 x i8> [[MASKEDOFF0:%.*]], <vscale x 1 x i8> [[MASKEDOFF1:%.*]], <vscale x 1 x i8> [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, i32 } [[TMP0]], 0
+// CHECK-RV32-NEXT:    store <vscale x 1 x i8> [[TMP1]], <vscale x 1 x i8>* [[V0:%.*]], align 4
+// CHECK-RV32-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, i32 } [[TMP0]], 1
+// CHECK-RV32-NEXT:    store <vscale x 1 x i8> [[TMP2]], <vscale x 1 x i8>* [[V1:%.*]], align 4
+// CHECK-RV32-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, i32 } [[TMP0]], 2
+// CHECK-RV32-NEXT:    store <vscale x 1 x i8> [[TMP3]], <vscale x 1 x i8>* [[V2:%.*]], align 4
+// CHECK-RV32-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, i32 } [[TMP0]], 3
+// CHECK-RV32-NEXT:    store i32 [[TMP4]], i32* [[NEW_VL:%.*]], align 4
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_vlseg3e8ff_v_u8mf8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, i64 } @llvm.riscv.vlseg3ff.mask.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF0:%.*]], <vscale x 1 x i8> [[MASKEDOFF1:%.*]], <vscale x 1 x i8> [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP1]], <vscale x 1 x i8>* [[V0:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP2]], <vscale x 1 x i8>* [[V1:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP3]], <vscale x 1 x i8>* [[V2:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, i64 } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 8
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg3e8ff_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, const uint8_t *base, size_t *new_vl, size_t vl) {
+  return vlseg3e8ff(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlseg4e8ff_v_u8mf8_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, i32 } @llvm.riscv.vlseg4ff.mask.nxv1i8.i32(<vscale x 1 x i8> [[MASKEDOFF0:%.*]], <vscale x 1 x i8> [[MASKEDOFF1:%.*]], <vscale x 1 x i8> [[MASKEDOFF2:%.*]],
[[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store i32 [[TMP5]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg4e8ff_v_u8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e8ff_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, const uint8_t *base, size_t *new_vl, size_t vl) { + return vlseg4e8ff(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg5e8ff_v_u8mf8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , i32 } @llvm.riscv.vlseg5ff.mask.nxv1i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 5 +// CHECK-RV32-NEXT: store i32 [[TMP6]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg5e8ff_v_u8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } 
@llvm.riscv.vlseg5ff.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg5e8ff_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, const uint8_t *base, size_t *new_vl, size_t vl) { + return vlseg5e8ff(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg6e8ff_v_u8mf8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , i32 } @llvm.riscv.vlseg6ff.mask.nxv1i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 6 +// CHECK-RV32-NEXT: store i32 [[TMP7]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg6e8ff_v_u8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i64 
} [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg6e8ff_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, const uint8_t *base, size_t *new_vl, size_t vl) { + return vlseg6e8ff(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg7e8ff_v_u8mf8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , i32 } @llvm.riscv.vlseg7ff.mask.nxv1i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 6 +// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 7 +// CHECK-RV32-NEXT: store i32 [[TMP8]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg7e8ff_v_u8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = 
extractvalue { , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg7e8ff_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, const uint8_t *base, size_t *new_vl, size_t vl) { + return vlseg7e8ff(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg8e8ff_v_u8mf8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i32 } @llvm.riscv.vlseg8ff.mask.nxv1i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 6 +// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 7 +// CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 8 +// CHECK-RV32-NEXT: store i32 [[TMP9]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg8e8ff_v_u8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: 
[[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 8 +// CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg8e8ff_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, vuint8mf8_t maskedoff7, const uint8_t *base, size_t *new_vl, size_t vl) { + return vlseg8e8ff(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg2e8ff_v_u8mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.mask.nxv2i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg2e8ff_v_u8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e8ff_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, const uint8_t *base, size_t *new_vl, size_t vl) { + return vlseg2e8ff(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg3e8ff_v_u8mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = 
call { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, i32 } @llvm.riscv.vlseg3ff.mask.nxv2i8.i32(<vscale x 2 x i8> [[MASKEDOFF0:%.*]], <vscale x 2 x i8> [[MASKEDOFF1:%.*]], <vscale x 2 x i8> [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, i32 } [[TMP0]], 0
+// CHECK-RV32-NEXT:    store <vscale x 2 x i8> [[TMP1]], <vscale x 2 x i8>* [[V0:%.*]], align 4
+// CHECK-RV32-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, i32 } [[TMP0]], 1
+// CHECK-RV32-NEXT:    store <vscale x 2 x i8> [[TMP2]], <vscale x 2 x i8>* [[V1:%.*]], align 4
+// CHECK-RV32-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, i32 } [[TMP0]], 2
+// CHECK-RV32-NEXT:    store <vscale x 2 x i8> [[TMP3]], <vscale x 2 x i8>* [[V2:%.*]], align 4
+// CHECK-RV32-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, i32 } [[TMP0]], 3
+// CHECK-RV32-NEXT:    store i32 [[TMP4]], i32* [[NEW_VL:%.*]], align 4
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_vlseg3e8ff_v_u8mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, i64 } @llvm.riscv.vlseg3ff.mask.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF0:%.*]], <vscale x 2 x i8> [[MASKEDOFF1:%.*]], <vscale x 2 x i8> [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP1]], <vscale x 2 x i8>* [[V0:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP2]], <vscale x 2 x i8>* [[V1:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP3]], <vscale x 2 x i8>* [[V2:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, i64 } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 8
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg3e8ff_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, const uint8_t *base, size_t *new_vl, size_t vl) {
+  return vlseg3e8ff(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlseg4e8ff_v_u8mf4_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, i32 } @llvm.riscv.vlseg4ff.mask.nxv2i8.i32(<vscale x 2 x i8> [[MASKEDOFF0:%.*]], <vscale x 2 x i8> [[MASKEDOFF1:%.*]], <vscale x 2 x i8> [[MASKEDOFF2:%.*]], <vscale x 2 x i8> [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, i32 } [[TMP0]], 0
+// CHECK-RV32-NEXT:    store <vscale x 2 x i8> [[TMP1]], <vscale x 2 x i8>* [[V0:%.*]], align 4
+// CHECK-RV32-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, i32 } [[TMP0]], 1
+// CHECK-RV32-NEXT:    store <vscale x 2 x i8> [[TMP2]], <vscale x 2 x i8>* [[V1:%.*]], align 4
+// CHECK-RV32-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, i32 } [[TMP0]], 2
+// CHECK-RV32-NEXT:    store <vscale x 2 x i8> [[TMP3]], <vscale x 2 x i8>* [[V2:%.*]], align 4
+// CHECK-RV32-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, i32 } [[TMP0]], 3
+// CHECK-RV32-NEXT:    store <vscale x 2 x i8> [[TMP4]], <vscale x 2 x i8>* [[V3:%.*]], align 4
+// CHECK-RV32-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, i32 } [[TMP0]], 4
+// CHECK-RV32-NEXT:    store i32 [[TMP5]], i32* [[NEW_VL:%.*]], align 4
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_vlseg4e8ff_v_u8mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, i64 } @llvm.riscv.vlseg4ff.mask.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF0:%.*]], <vscale x 2 x i8> [[MASKEDOFF1:%.*]], <vscale x 2 x i8> [[MASKEDOFF2:%.*]], <vscale x 2 x i8> [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP1]], <vscale x 2 x i8>* [[V0:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP2]], <vscale x 2 x i8>* [[V1:%.*]], align 8
+// CHECK-RV64-NEXT:
[[TMP3:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e8ff_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, const uint8_t *base, size_t *new_vl, size_t vl) { + return vlseg4e8ff(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg5e8ff_v_u8mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , i32 } @llvm.riscv.vlseg5ff.mask.nxv2i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 5 +// CHECK-RV32-NEXT: store i32 [[TMP6]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg5e8ff_v_u8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg5e8ff_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, const uint8_t *base, size_t *new_vl, size_t vl) { + return vlseg5e8ff(v0, v1, v2, v3, v4, mask, 
maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg6e8ff_v_u8mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , i32 } @llvm.riscv.vlseg6ff.mask.nxv2i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 6 +// CHECK-RV32-NEXT: store i32 [[TMP7]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg6e8ff_v_u8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg6e8ff_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, const uint8_t *base, size_t *new_vl, size_t vl) { + return vlseg6e8ff(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg7e8ff_v_u8mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , i32 } @llvm.riscv.vlseg7ff.mask.nxv2i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], 
[[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 6 +// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 7 +// CHECK-RV32-NEXT: store i32 [[TMP8]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg7e8ff_v_u8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg7e8ff_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, const uint8_t *base, size_t *new_vl, size_t vl) { + return vlseg7e8ff(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg8e8ff_v_u8mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i32 } 
@llvm.riscv.vlseg8ff.mask.nxv2i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 6 +// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 7 +// CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 8 +// CHECK-RV32-NEXT: store i32 [[TMP9]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg8e8ff_v_u8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 8 +// CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg8e8ff_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, 
vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, vuint8mf4_t maskedoff7, const uint8_t *base, size_t *new_vl, size_t vl) { + return vlseg8e8ff(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg2e8ff_v_u8mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.mask.nxv4i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg2e8ff_v_u8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e8ff_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, const uint8_t *base, size_t *new_vl, size_t vl) { + return vlseg2e8ff(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg3e8ff_v_u8mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , i32 } @llvm.riscv.vlseg3ff.mask.nxv4i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg3e8ff_v_u8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * 
[[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e8ff_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, const uint8_t *base, size_t *new_vl, size_t vl) { + return vlseg3e8ff(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg4e8ff_v_u8mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , i32 } @llvm.riscv.vlseg4ff.mask.nxv4i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store i32 [[TMP5]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg4e8ff_v_u8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e8ff_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, const uint8_t *base, size_t *new_vl, size_t vl) { + return vlseg4e8ff(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg5e8ff_v_u8mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , i32 } @llvm.riscv.vlseg5ff.mask.nxv4i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// 
CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 5 +// CHECK-RV32-NEXT: store i32 [[TMP6]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg5e8ff_v_u8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg5e8ff_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, const uint8_t *base, size_t *new_vl, size_t vl) { + return vlseg5e8ff(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg6e8ff_v_u8mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , i32 } @llvm.riscv.vlseg6ff.mask.nxv4i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 6 +// CHECK-RV32-NEXT: store i32 [[TMP7]], i32* [[NEW_VL:%.*]], 
align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg6e8ff_v_u8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg6e8ff_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, const uint8_t *base, size_t *new_vl, size_t vl) { + return vlseg6e8ff(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg7e8ff_v_u8mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , i32 } @llvm.riscv.vlseg7ff.mask.nxv4i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 6 +// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 7 +// CHECK-RV32-NEXT: store i32 [[TMP8]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg7e8ff_v_u8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg7e8ff_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, const uint8_t *base, size_t *new_vl, size_t vl) { + return vlseg7e8ff(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg8e8ff_v_u8mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i32 } @llvm.riscv.vlseg8ff.mask.nxv4i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 6 +// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 7 +// CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// 
CHECK-RV32-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 8 +// CHECK-RV32-NEXT: store i32 [[TMP9]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg8e8ff_v_u8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 8 +// CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg8e8ff_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, vuint8mf2_t maskedoff7, const uint8_t *base, size_t *new_vl, size_t vl) { + return vlseg8e8ff(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg2e8ff_v_u8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.mask.nxv8i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg2e8ff_v_u8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 +// 
CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e8ff_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, const uint8_t *base, size_t *new_vl, size_t vl) { + return vlseg2e8ff(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg3e8ff_v_u8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , i32 } @llvm.riscv.vlseg3ff.mask.nxv8i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg3e8ff_v_u8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e8ff_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, const uint8_t *base, size_t *new_vl, size_t vl) { + return vlseg3e8ff(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg4e8ff_v_u8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , i32 } @llvm.riscv.vlseg4ff.mask.nxv8i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , 
i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store i32 [[TMP5]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg4e8ff_v_u8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e8ff_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, const uint8_t *base, size_t *new_vl, size_t vl) { + return vlseg4e8ff(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg5e8ff_v_u8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , i32 } @llvm.riscv.vlseg5ff.mask.nxv8i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 5 +// CHECK-RV32-NEXT: store i32 [[TMP6]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg5e8ff_v_u8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store 
[[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg5e8ff_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, const uint8_t *base, size_t *new_vl, size_t vl) { + return vlseg5e8ff(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg6e8ff_v_u8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , i32 } @llvm.riscv.vlseg6ff.mask.nxv8i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 6 +// CHECK-RV32-NEXT: store i32 [[TMP7]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg6e8ff_v_u8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg6e8ff_v_u8m1_m 
(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, const uint8_t *base, size_t *new_vl, size_t vl) { + return vlseg6e8ff(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg7e8ff_v_u8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , i32 } @llvm.riscv.vlseg7ff.mask.nxv8i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 6 +// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 7 +// CHECK-RV32-NEXT: store i32 [[TMP8]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg7e8ff_v_u8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void 
test_vlseg7e8ff_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, const uint8_t *base, size_t *new_vl, size_t vl) { + return vlseg7e8ff(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg8e8ff_v_u8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i32 } @llvm.riscv.vlseg8ff.mask.nxv8i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 6 +// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 7 +// CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 8 +// CHECK-RV32-NEXT: store i32 [[TMP9]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg8e8ff_v_u8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = 
extractvalue { , , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 8 +// CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg8e8ff_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vuint8m1_t *v7, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, vuint8m1_t maskedoff7, const uint8_t *base, size_t *new_vl, size_t vl) { + return vlseg8e8ff(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg2e8ff_v_u8m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.mask.nxv16i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg2e8ff_v_u8m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv16i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e8ff_v_u8m2_m (vuint8m2_t *v0, vuint8m2_t *v1, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, const uint8_t *base, size_t *new_vl, size_t vl) { + return vlseg2e8ff(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg3e8ff_v_u8m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , i32 } @llvm.riscv.vlseg3ff.mask.nxv16i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// 
CHECK-RV64-LABEL: @test_vlseg3e8ff_v_u8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, i64 } @llvm.riscv.vlseg3ff.mask.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF0:%.*]], <vscale x 16 x i8> [[MASKEDOFF1:%.*]], <vscale x 16 x i8> [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 16 x i8> [[TMP1]], <vscale x 16 x i8>* [[V0:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 16 x i8> [[TMP2]], <vscale x 16 x i8>* [[V1:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 16 x i8> [[TMP3]], <vscale x 16 x i8>* [[V2:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, i64 } [[TMP0]], 3
+// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 8
+// CHECK-RV64-NEXT: ret void
+//
+void test_vlseg3e8ff_v_u8m2_m (vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, const uint8_t *base, size_t *new_vl, size_t vl) {
+ return vlseg3e8ff(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlseg4e8ff_v_u8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, i32 } @llvm.riscv.vlseg4ff.mask.nxv16i8.i32(<vscale x 16 x i8> [[MASKEDOFF0:%.*]], <vscale x 16 x i8> [[MASKEDOFF1:%.*]], <vscale x 16 x i8> [[MASKEDOFF2:%.*]], <vscale x 16 x i8> [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, i32 } [[TMP0]], 0
+// CHECK-RV32-NEXT: store <vscale x 16 x i8> [[TMP1]], <vscale x 16 x i8>* [[V0:%.*]], align 4
+// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, i32 } [[TMP0]], 1
+// CHECK-RV32-NEXT: store <vscale x 16 x i8> [[TMP2]], <vscale x 16 x i8>* [[V1:%.*]], align 4
+// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, i32 } [[TMP0]], 2
+// CHECK-RV32-NEXT: store <vscale x 16 x i8> [[TMP3]], <vscale x 16 x i8>* [[V2:%.*]], align 4
+// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, i32 } [[TMP0]], 3
+// CHECK-RV32-NEXT: store <vscale x 16 x i8> [[TMP4]], <vscale x 16 x i8>* [[V3:%.*]], align 4
+// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, i32 } [[TMP0]], 4
+// CHECK-RV32-NEXT: store i32 [[TMP5]], i32* [[NEW_VL:%.*]], align 4
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vlseg4e8ff_v_u8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, i64 } @llvm.riscv.vlseg4ff.mask.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF0:%.*]], <vscale x 16 x i8> [[MASKEDOFF1:%.*]], <vscale x 16 x i8> [[MASKEDOFF2:%.*]], <vscale x 16 x i8> [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 16 x i8> [[TMP1]], <vscale x 16 x i8>* [[V0:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 16 x i8> [[TMP2]], <vscale x 16 x i8>* [[V1:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 16 x i8> [[TMP3]], <vscale x 16 x i8>* [[V2:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, i64 } [[TMP0]], 3
+// CHECK-RV64-NEXT: store <vscale x 16 x i8> [[TMP4]], <vscale x 16 x i8>* [[V3:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, i64 } [[TMP0]], 4
+// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 8
+// CHECK-RV64-NEXT: ret void
+//
+void test_vlseg4e8ff_v_u8m2_m (vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vuint8m2_t *v3, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, vuint8m2_t maskedoff3, const uint8_t *base, size_t *new_vl, size_t vl) {
+ return vlseg4e8ff(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlseg2e8ff_v_u8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { <vscale x 32 x i8>, <vscale x 32 x i8>, i32 } @llvm.riscv.vlseg2ff.mask.nxv32i8.i32(<vscale x 32 x i8> [[MASKEDOFF0:%.*]], <vscale x 32 x i8> [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 32 x i8>, <vscale x 32 x i8>, i32 } [[TMP0]], 0
+// CHECK-RV32-NEXT: store <vscale x 32 x i8> [[TMP1]], <vscale x 32 x i8>* [[V0:%.*]], align 4
+// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 32 x i8>, <vscale x 32 x i8>, i32 } [[TMP0]], 1
+// CHECK-RV32-NEXT: store <vscale x 32 x i8> [[TMP2]], <vscale x 32 x i8>* [[V1:%.*]], align 4
+// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 32 x i8>, <vscale x 32 x i8>, i32 } [[TMP0]], 2
+// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 4
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vlseg2e8ff_v_u8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 32 x i8>, <vscale x 32 x i8>, i64 } @llvm.riscv.vlseg2ff.mask.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF0:%.*]], <vscale x 32 x i8> [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 32 x i8>, <vscale x 32 x i8>, i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 32 x i8> [[TMP1]], <vscale x 32 x i8>* [[V0:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 32 x i8>, <vscale x 32 x i8>, i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 32 x i8> [[TMP2]], <vscale x 32 x i8>* [[V1:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 32 x i8>, <vscale x 32 x i8>, i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
+// CHECK-RV64-NEXT: ret void
+//
+void test_vlseg2e8ff_v_u8m4_m (vuint8m4_t *v0, vuint8m4_t *v1, vbool2_t mask, vuint8m4_t maskedoff0, vuint8m4_t maskedoff1, const uint8_t *base, size_t *new_vl, size_t vl) {
+ return vlseg2e8ff(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlseg2e16ff_v_u16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i16>, <vscale x 1 x i16>, i32 } @llvm.riscv.vlseg2ff.mask.nxv1i16.i32(<vscale x 1 x i16> [[MASKEDOFF0:%.*]], <vscale x 1 x i16> [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, i32 } [[TMP0]], 0
+// CHECK-RV32-NEXT: store <vscale x 1 x i16> [[TMP1]], <vscale x 1 x i16>* [[V0:%.*]], align 4
+// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, i32 } [[TMP0]], 1
+// CHECK-RV32-NEXT: store <vscale x 1 x i16> [[TMP2]], <vscale x 1 x i16>* [[V1:%.*]], align 4
+// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, i32 } [[TMP0]], 2
+// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 4
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vlseg2e16ff_v_u16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i16>, <vscale x 1 x i16>, i64 } @llvm.riscv.vlseg2ff.mask.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF0:%.*]], <vscale x 1 x i16> [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 1 x i16> [[TMP1]], <vscale x 1 x i16>* [[V0:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 1 x i16> [[TMP2]], <vscale x 1 x i16>* [[V1:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
+// CHECK-RV64-NEXT: ret void
+//
+void test_vlseg2e16ff_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, const uint16_t *base, size_t *new_vl, size_t vl) {
+ return vlseg2e16ff(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlseg3e16ff_v_u16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, i32 } @llvm.riscv.vlseg3ff.mask.nxv1i16.i32(
[[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg3e16ff_v_u16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e16ff_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, const uint16_t *base, size_t *new_vl, size_t vl) { + return vlseg3e16ff(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg4e16ff_v_u16mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , i32 } @llvm.riscv.vlseg4ff.mask.nxv1i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store i32 [[TMP5]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg4e16ff_v_u16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i64 
} [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e16ff_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, const uint16_t *base, size_t *new_vl, size_t vl) { + return vlseg4e16ff(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg5e16ff_v_u16mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , i32 } @llvm.riscv.vlseg5ff.mask.nxv1i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 5 +// CHECK-RV32-NEXT: store i32 [[TMP6]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg5e16ff_v_u16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg5e16ff_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, const uint16_t *base, size_t *new_vl, size_t vl) { + return vlseg5e16ff(v0, v1, v2, v3, v4, mask, maskedoff0, 
maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg6e16ff_v_u16mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , i32 } @llvm.riscv.vlseg6ff.mask.nxv1i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 6 +// CHECK-RV32-NEXT: store i32 [[TMP7]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg6e16ff_v_u16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg6e16ff_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, const uint16_t *base, size_t *new_vl, size_t vl) { + return vlseg6e16ff(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg7e16ff_v_u16mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , i32 } @llvm.riscv.vlseg7ff.mask.nxv1i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], 
[[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 6 +// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 7 +// CHECK-RV32-NEXT: store i32 [[TMP8]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg7e16ff_v_u16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg7e16ff_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, const uint16_t *base, size_t *new_vl, size_t vl) { + return vlseg7e16ff(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg8e16ff_v_u16mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: 
[[TMP0:%.*]] = call { , , , , , , , , i32 } @llvm.riscv.vlseg8ff.mask.nxv1i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 6 +// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 7 +// CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 8 +// CHECK-RV32-NEXT: store i32 [[TMP9]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg8e16ff_v_u16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 8 +// CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg8e16ff_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, 
vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, vuint16mf4_t maskedoff7, const uint16_t *base, size_t *new_vl, size_t vl) { + return vlseg8e16ff(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg2e16ff_v_u16mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.mask.nxv2i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg2e16ff_v_u16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e16ff_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, const uint16_t *base, size_t *new_vl, size_t vl) { + return vlseg2e16ff(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg3e16ff_v_u16mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , i32 } @llvm.riscv.vlseg3ff.mask.nxv2i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg3e16ff_v_u16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// 
CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e16ff_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, const uint16_t *base, size_t *new_vl, size_t vl) { + return vlseg3e16ff(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg4e16ff_v_u16mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , i32 } @llvm.riscv.vlseg4ff.mask.nxv2i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store i32 [[TMP5]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg4e16ff_v_u16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e16ff_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, const uint16_t *base, size_t *new_vl, size_t vl) { + return vlseg4e16ff(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg5e16ff_v_u16mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , i32 } @llvm.riscv.vlseg5ff.mask.nxv2i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 
+// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 5 +// CHECK-RV32-NEXT: store i32 [[TMP6]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg5e16ff_v_u16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg5e16ff_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, const uint16_t *base, size_t *new_vl, size_t vl) { + return vlseg5e16ff(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg6e16ff_v_u16mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , i32 } @llvm.riscv.vlseg6ff.mask.nxv2i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * 
[[V5:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 6 +// CHECK-RV32-NEXT: store i32 [[TMP7]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg6e16ff_v_u16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg6e16ff_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, const uint16_t *base, size_t *new_vl, size_t vl) { + return vlseg6e16ff(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg7e16ff_v_u16mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , i32 } @llvm.riscv.vlseg7ff.mask.nxv2i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 6 +// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 7 +// CHECK-RV32-NEXT: 
store i32 [[TMP8]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg7e16ff_v_u16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg7e16ff_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, const uint16_t *base, size_t *new_vl, size_t vl) { + return vlseg7e16ff(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg8e16ff_v_u16mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i32 } @llvm.riscv.vlseg8ff.mask.nxv2i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 6 +// 
CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 7 +// CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 8 +// CHECK-RV32-NEXT: store i32 [[TMP9]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg8e16ff_v_u16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 8 +// CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg8e16ff_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, vuint16mf2_t maskedoff7, const uint16_t *base, size_t *new_vl, size_t vl) { + return vlseg8e16ff(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg2e16ff_v_u16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.mask.nxv4i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg2e16ff_v_u16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e16ff_v_u16m1_m (vuint16m1_t *v0, vuint16m1_t *v1, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, const uint16_t *base, size_t *new_vl, size_t vl) { + return vlseg2e16ff(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg3e16ff_v_u16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , i32 } @llvm.riscv.vlseg3ff.mask.nxv4i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg3e16ff_v_u16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e16ff_v_u16m1_m (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, const uint16_t *base, size_t *new_vl, size_t vl) { + return vlseg3e16ff(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg4e16ff_v_u16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , i32 } @llvm.riscv.vlseg4ff.mask.nxv4i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i32 
} [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store i32 [[TMP5]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg4e16ff_v_u16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e16ff_v_u16m1_m (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, const uint16_t *base, size_t *new_vl, size_t vl) { + return vlseg4e16ff(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg5e16ff_v_u16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , i32 } @llvm.riscv.vlseg5ff.mask.nxv4i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 5 +// CHECK-RV32-NEXT: store i32 [[TMP6]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg5e16ff_v_u16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1 +// 
CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg5e16ff_v_u16m1_m (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, const uint16_t *base, size_t *new_vl, size_t vl) { + return vlseg5e16ff(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg6e16ff_v_u16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , i32 } @llvm.riscv.vlseg6ff.mask.nxv4i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 6 +// CHECK-RV32-NEXT: store i32 [[TMP7]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg6e16ff_v_u16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = 
extractvalue { , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg6e16ff_v_u16m1_m (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, const uint16_t *base, size_t *new_vl, size_t vl) { + return vlseg6e16ff(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg7e16ff_v_u16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , i32 } @llvm.riscv.vlseg7ff.mask.nxv4i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 6 +// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 7 +// CHECK-RV32-NEXT: store i32 [[TMP8]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg7e16ff_v_u16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 
8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg7e16ff_v_u16m1_m (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, const uint16_t *base, size_t *new_vl, size_t vl) { + return vlseg7e16ff(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg8e16ff_v_u16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i32 } @llvm.riscv.vlseg8ff.mask.nxv4i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 6 +// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 7 +// CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 8 +// CHECK-RV32-NEXT: store i32 [[TMP9]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg8e16ff_v_u16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store 
[[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 8 +// CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg8e16ff_v_u16m1_m (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vuint16m1_t *v7, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, vuint16m1_t maskedoff7, const uint16_t *base, size_t *new_vl, size_t vl) { + return vlseg8e16ff(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg2e16ff_v_u16m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.mask.nxv8i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg2e16ff_v_u16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e16ff_v_u16m2_m (vuint16m2_t *v0, vuint16m2_t *v1, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, const uint16_t *base, size_t *new_vl, size_t vl) { + return vlseg2e16ff(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg3e16ff_v_u16m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , i32 } @llvm.riscv.vlseg3ff.mask.nxv8i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , i32 } 
[[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg3e16ff_v_u16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e16ff_v_u16m2_m (vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, const uint16_t *base, size_t *new_vl, size_t vl) { + return vlseg3e16ff(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg4e16ff_v_u16m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , i32 } @llvm.riscv.vlseg4ff.mask.nxv8i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store i32 [[TMP5]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg4e16ff_v_u16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store i64 
[[TMP5]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e16ff_v_u16m2_m (vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vuint16m2_t *v3, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, vuint16m2_t maskedoff3, const uint16_t *base, size_t *new_vl, size_t vl) { + return vlseg4e16ff(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg2e16ff_v_u16m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.mask.nxv16i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg2e16ff_v_u16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv16i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e16ff_v_u16m4_m (vuint16m4_t *v0, vuint16m4_t *v1, vbool4_t mask, vuint16m4_t maskedoff0, vuint16m4_t maskedoff1, const uint16_t *base, size_t *new_vl, size_t vl) { + return vlseg2e16ff(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg2e32ff_v_u32mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.mask.nxv1i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg2e32ff_v_u32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void 
test_vlseg2e32ff_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, const uint32_t *base, size_t *new_vl, size_t vl) { + return vlseg2e32ff(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg3e32ff_v_u32mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , i32 } @llvm.riscv.vlseg3ff.mask.nxv1i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg3e32ff_v_u32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e32ff_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, const uint32_t *base, size_t *new_vl, size_t vl) { + return vlseg3e32ff(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg4e32ff_v_u32mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , i32 } @llvm.riscv.vlseg4ff.mask.nxv1i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store i32 [[TMP5]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg4e32ff_v_u32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], 
[[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e32ff_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, const uint32_t *base, size_t *new_vl, size_t vl) { + return vlseg4e32ff(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg5e32ff_v_u32mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , i32 } @llvm.riscv.vlseg5ff.mask.nxv1i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 5 +// CHECK-RV32-NEXT: store i32 [[TMP6]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg5e32ff_v_u32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store i64 
[[TMP6]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg5e32ff_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, const uint32_t *base, size_t *new_vl, size_t vl) { + return vlseg5e32ff(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg6e32ff_v_u32mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , i32 } @llvm.riscv.vlseg6ff.mask.nxv1i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 6 +// CHECK-RV32-NEXT: store i32 [[TMP7]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg6e32ff_v_u32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg6e32ff_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, 
vuint32mf2_t maskedoff5, const uint32_t *base, size_t *new_vl, size_t vl) { + return vlseg6e32ff(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg7e32ff_v_u32mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , i32 } @llvm.riscv.vlseg7ff.mask.nxv1i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 6 +// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 7 +// CHECK-RV32-NEXT: store i32 [[TMP8]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg7e32ff_v_u32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg7e32ff_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, 
vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, const uint32_t *base, size_t *new_vl, size_t vl) { + return vlseg7e32ff(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg8e32ff_v_u32mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i32 } @llvm.riscv.vlseg8ff.mask.nxv1i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 6 +// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 7 +// CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 8 +// CHECK-RV32-NEXT: store i32 [[TMP9]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg8e32ff_v_u32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i64 } 
[[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 8 +// CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg8e32ff_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, vuint32mf2_t maskedoff7, const uint32_t *base, size_t *new_vl, size_t vl) { + return vlseg8e32ff(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg2e32ff_v_u32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.mask.nxv2i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg2e32ff_v_u32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e32ff_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, const uint32_t *base, size_t *new_vl, size_t vl) { + return vlseg2e32ff(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg3e32ff_v_u32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , i32 } @llvm.riscv.vlseg3ff.mask.nxv2i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg3e32ff_v_u32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } 
@llvm.riscv.vlseg3ff.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e32ff_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, const uint32_t *base, size_t *new_vl, size_t vl) { + return vlseg3e32ff(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg4e32ff_v_u32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , i32 } @llvm.riscv.vlseg4ff.mask.nxv2i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store i32 [[TMP5]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg4e32ff_v_u32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e32ff_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, const uint32_t *base, size_t *new_vl, size_t vl) { + return vlseg4e32ff(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg5e32ff_v_u32m1_m( +// 
CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , i32 } @llvm.riscv.vlseg5ff.mask.nxv2i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 5 +// CHECK-RV32-NEXT: store i32 [[TMP6]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg5e32ff_v_u32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg5e32ff_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, const uint32_t *base, size_t *new_vl, size_t vl) { + return vlseg5e32ff(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg6e32ff_v_u32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , i32 } @llvm.riscv.vlseg6ff.mask.nxv2i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// 
CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 6 +// CHECK-RV32-NEXT: store i32 [[TMP7]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg6e32ff_v_u32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg6e32ff_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, const uint32_t *base, size_t *new_vl, size_t vl) { + return vlseg6e32ff(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg7e32ff_v_u32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , i32 } @llvm.riscv.vlseg7ff.mask.nxv2i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// 
CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 6 +// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 7 +// CHECK-RV32-NEXT: store i32 [[TMP8]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg7e32ff_v_u32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg7e32ff_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, const uint32_t *base, size_t *new_vl, size_t vl) { + return vlseg7e32ff(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg8e32ff_v_u32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i32 } @llvm.riscv.vlseg8ff.mask.nxv2i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// 
CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 6 +// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 7 +// CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 8 +// CHECK-RV32-NEXT: store i32 [[TMP9]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg8e32ff_v_u32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 8 +// CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg8e32ff_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vuint32m1_t *v7, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, vuint32m1_t maskedoff7, const uint32_t *base, size_t *new_vl, size_t vl) { + return vlseg8e32ff(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg2e32ff_v_u32m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.mask.nxv4i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } 
[[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg2e32ff_v_u32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e32ff_v_u32m2_m (vuint32m2_t *v0, vuint32m2_t *v1, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, const uint32_t *base, size_t *new_vl, size_t vl) { + return vlseg2e32ff(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg3e32ff_v_u32m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , i32 } @llvm.riscv.vlseg3ff.mask.nxv4i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg3e32ff_v_u32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e32ff_v_u32m2_m (vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, const uint32_t *base, size_t *new_vl, size_t vl) { + return vlseg3e32ff(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg4e32ff_v_u32m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , i32 } @llvm.riscv.vlseg4ff.mask.nxv4i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], 
i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, i32 } [[TMP0]], 0
+// CHECK-RV32-NEXT:    store <vscale x 4 x i32> [[TMP1]], <vscale x 4 x i32>* [[V0:%.*]], align 4
+// CHECK-RV32-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, i32 } [[TMP0]], 1
+// CHECK-RV32-NEXT:    store <vscale x 4 x i32> [[TMP2]], <vscale x 4 x i32>* [[V1:%.*]], align 4
+// CHECK-RV32-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, i32 } [[TMP0]], 2
+// CHECK-RV32-NEXT:    store <vscale x 4 x i32> [[TMP3]], <vscale x 4 x i32>* [[V2:%.*]], align 4
+// CHECK-RV32-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, i32 } [[TMP0]], 3
+// CHECK-RV32-NEXT:    store <vscale x 4 x i32> [[TMP4]], <vscale x 4 x i32>* [[V3:%.*]], align 4
+// CHECK-RV32-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, i32 } [[TMP0]], 4
+// CHECK-RV32-NEXT:    store i32 [[TMP5]], i32* [[NEW_VL:%.*]], align 4
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_vlseg4e32ff_v_u32m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, i64 } @llvm.riscv.vlseg4ff.mask.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF0:%.*]], <vscale x 4 x i32> [[MASKEDOFF1:%.*]], <vscale x 4 x i32> [[MASKEDOFF2:%.*]], <vscale x 4 x i32> [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x i32> [[TMP1]], <vscale x 4 x i32>* [[V0:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x i32> [[TMP2]], <vscale x 4 x i32>* [[V1:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 4 x i32> [[TMP3]], <vscale x 4 x i32>* [[V2:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, i64 } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 4 x i32> [[TMP4]], <vscale x 4 x i32>* [[V3:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, i64 } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 8
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg4e32ff_v_u32m2_m (vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vuint32m2_t *v3, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, vuint32m2_t maskedoff3, const uint32_t *base, size_t *new_vl, size_t vl) {
+  return vlseg4e32ff(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlseg2e32ff_v_u32m4_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x i32>, <vscale x 8 x i32>, i32 } @llvm.riscv.vlseg2ff.mask.nxv8i32.i32(<vscale x 8 x i32> [[MASKEDOFF0:%.*]], <vscale x 8 x i32> [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x i32>, <vscale x 8 x i32>, i32 } [[TMP0]], 0
+// CHECK-RV32-NEXT:    store <vscale x 8 x i32> [[TMP1]], <vscale x 8 x i32>* [[V0:%.*]], align 4
+// CHECK-RV32-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x i32>, <vscale x 8 x i32>, i32 } [[TMP0]], 1
+// CHECK-RV32-NEXT:    store <vscale x 8 x i32> [[TMP2]], <vscale x 8 x i32>* [[V1:%.*]], align 4
+// CHECK-RV32-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 8 x i32>, <vscale x 8 x i32>, i32 } [[TMP0]], 2
+// CHECK-RV32-NEXT:    store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 4
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_vlseg2e32ff_v_u32m4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x i32>, <vscale x 8 x i32>, i64 } @llvm.riscv.vlseg2ff.mask.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF0:%.*]], <vscale x 8 x i32> [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x i32>, <vscale x 8 x i32>, i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 8 x i32> [[TMP1]], <vscale x 8 x i32>* [[V0:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x i32>, <vscale x 8 x i32>, i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 8 x i32> [[TMP2]], <vscale x 8 x i32>* [[V1:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 8 x i32>, <vscale x 8 x i32>, i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg2e32ff_v_u32m4_m (vuint32m4_t *v0, vuint32m4_t *v1, vbool8_t mask, vuint32m4_t maskedoff0, vuint32m4_t maskedoff1, const uint32_t *base, size_t *new_vl, size_t vl) {
+  return vlseg2e32ff(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlseg2e64ff_v_u64m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i64>, <vscale x 1 x i64>, i32 } @llvm.riscv.vlseg2ff.mask.nxv1i64.i32(<vscale x 1 x i64> [[MASKEDOFF0:%.*]], <vscale x 1 x i64> [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, i32 } [[TMP0]], 0
+// CHECK-RV32-NEXT:    store <vscale x 1 x i64> [[TMP1]], <vscale x 1 x i64>* [[V0:%.*]], align 4
+// CHECK-RV32-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, i32 } [[TMP0]], 1
+// CHECK-RV32-NEXT:    store <vscale x 1 x i64> [[TMP2]], <vscale x 1 x i64>* [[V1:%.*]], align 4
+// CHECK-RV32-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, i32 } [[TMP0]], 2
+// CHECK-RV32-NEXT:    store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 4
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_vlseg2e64ff_v_u64m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i64>, <vscale x 1 x i64>, i64 } @llvm.riscv.vlseg2ff.mask.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF0:%.*]], <vscale x 1 x i64> [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x i64> [[TMP1]], <vscale x 1 x i64>* [[V0:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x i64> [[TMP2]], <vscale x 1 x i64>* [[V1:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg2e64ff_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, const uint64_t *base, size_t *new_vl, size_t vl) {
+  return vlseg2e64ff(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlseg3e64ff_v_u64m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, i32 } @llvm.riscv.vlseg3ff.mask.nxv1i64.i32(<vscale x 1 x i64> [[MASKEDOFF0:%.*]], <vscale x 1 x i64> [[MASKEDOFF1:%.*]], <vscale x 1 x i64> [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, i32 } [[TMP0]], 0
+// CHECK-RV32-NEXT:    store <vscale x 1 x i64> [[TMP1]], <vscale x 1 x i64>* [[V0:%.*]], align 4
+// CHECK-RV32-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, i32 } [[TMP0]], 1
+// CHECK-RV32-NEXT:    store <vscale x 1 x i64> [[TMP2]], <vscale x 1 x i64>* [[V1:%.*]], align 4
+// CHECK-RV32-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, i32 } [[TMP0]], 2
+// CHECK-RV32-NEXT:    store <vscale x 1 x i64> [[TMP3]], <vscale x 1 x i64>* [[V2:%.*]], align 4
+// CHECK-RV32-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, i32 } [[TMP0]], 3
+// CHECK-RV32-NEXT:    store i32 [[TMP4]], i32* [[NEW_VL:%.*]], align 4
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_vlseg3e64ff_v_u64m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, i64 } @llvm.riscv.vlseg3ff.mask.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF0:%.*]], <vscale x 1 x i64> [[MASKEDOFF1:%.*]], <vscale x 1 x i64> [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x i64> [[TMP1]], <vscale x 1 x i64>* [[V0:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x i64> [[TMP2]], <vscale x 1 x i64>* [[V1:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x i64> [[TMP3]], <vscale x 1 x i64>* [[V2:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, i64 } [[TMP0]], 3
+// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e64ff_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, const uint64_t *base, size_t *new_vl, size_t vl) { + return vlseg3e64ff(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg4e64ff_v_u64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , i32 } @llvm.riscv.vlseg4ff.mask.nxv1i64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store i32 [[TMP5]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg4e64ff_v_u64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e64ff_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, const uint64_t *base, size_t *new_vl, size_t vl) { + return vlseg4e64ff(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg5e64ff_v_u64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , i32 } @llvm.riscv.vlseg5ff.mask.nxv1i64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 2 +// 
CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 5 +// CHECK-RV32-NEXT: store i32 [[TMP6]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg5e64ff_v_u64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg5e64ff_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, const uint64_t *base, size_t *new_vl, size_t vl) { + return vlseg5e64ff(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg6e64ff_v_u64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , i32 } @llvm.riscv.vlseg6ff.mask.nxv1i64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 6 +// CHECK-RV32-NEXT: store i32 [[TMP7]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: 
@test_vlseg6e64ff_v_u64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg6e64ff_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, const uint64_t *base, size_t *new_vl, size_t vl) { + return vlseg6e64ff(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg7e64ff_v_u64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , i32 } @llvm.riscv.vlseg7ff.mask.nxv1i64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 6 +// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 7 +// CHECK-RV32-NEXT: store i32 [[TMP8]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg7e64ff_v_u64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } 
@llvm.riscv.vlseg7ff.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg7e64ff_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, const uint64_t *base, size_t *new_vl, size_t vl) { + return vlseg7e64ff(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg8e64ff_v_u64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i32 } @llvm.riscv.vlseg8ff.mask.nxv1i64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 6 +// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 7 +// CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP9:%.*]] = extractvalue { 
, , , , , , , , i32 } [[TMP0]], 8 +// CHECK-RV32-NEXT: store i32 [[TMP9]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg8e64ff_v_u64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 8 +// CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg8e64ff_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vuint64m1_t *v7, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, vuint64m1_t maskedoff7, const uint64_t *base, size_t *new_vl, size_t vl) { + return vlseg8e64ff(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg2e64ff_v_u64m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.mask.nxv2i64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg2e64ff_v_u64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * 
[[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e64ff_v_u64m2_m (vuint64m2_t *v0, vuint64m2_t *v1, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, const uint64_t *base, size_t *new_vl, size_t vl) { + return vlseg2e64ff(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg3e64ff_v_u64m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , i32 } @llvm.riscv.vlseg3ff.mask.nxv2i64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg3e64ff_v_u64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e64ff_v_u64m2_m (vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, const uint64_t *base, size_t *new_vl, size_t vl) { + return vlseg3e64ff(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg4e64ff_v_u64m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , i32 } @llvm.riscv.vlseg4ff.mask.nxv2i64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i32 
} [[TMP0]], 4 +// CHECK-RV32-NEXT: store i32 [[TMP5]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg4e64ff_v_u64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e64ff_v_u64m2_m (vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vuint64m2_t *v3, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, vuint64m2_t maskedoff3, const uint64_t *base, size_t *new_vl, size_t vl) { + return vlseg4e64ff(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg2e64ff_v_u64m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.mask.nxv4i64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg2e64ff_v_u64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e64ff_v_u64m4_m (vuint64m4_t *v0, vuint64m4_t *v1, vbool16_t mask, vuint64m4_t maskedoff0, vuint64m4_t maskedoff1, const uint64_t *base, size_t *new_vl, size_t vl) { + return vlseg2e64ff(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg2e32ff_v_f32mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.mask.nxv1f32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: 
store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg2e32ff_v_f32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv1f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e32ff_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, const float *base, size_t *new_vl, size_t vl) { + return vlseg2e32ff(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg3e32ff_v_f32mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , i32 } @llvm.riscv.vlseg3ff.mask.nxv1f32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg3e32ff_v_f32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv1f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e32ff_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, const float *base, size_t *new_vl, size_t vl) { + return vlseg3e32ff(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg4e32ff_v_f32mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , i32 } 
@llvm.riscv.vlseg4ff.mask.nxv1f32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store i32 [[TMP5]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg4e32ff_v_f32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv1f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e32ff_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, const float *base, size_t *new_vl, size_t vl) { + return vlseg4e32ff(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg5e32ff_v_f32mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , i32 } @llvm.riscv.vlseg5ff.mask.nxv1f32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 5 +// CHECK-RV32-NEXT: store i32 [[TMP6]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: 
@test_vlseg5e32ff_v_f32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv1f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg5e32ff_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, const float *base, size_t *new_vl, size_t vl) { + return vlseg5e32ff(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg6e32ff_v_f32mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , i32 } @llvm.riscv.vlseg6ff.mask.nxv1f32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 6 +// CHECK-RV32-NEXT: store i32 [[TMP7]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg6e32ff_v_f32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv1f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , 
, , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg6e32ff_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, const float *base, size_t *new_vl, size_t vl) { + return vlseg6e32ff(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg7e32ff_v_f32mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , i32 } @llvm.riscv.vlseg7ff.mask.nxv1f32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 6 +// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 7 +// CHECK-RV32-NEXT: store i32 [[TMP8]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg7e32ff_v_f32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv1f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], 
align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg7e32ff_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, const float *base, size_t *new_vl, size_t vl) { + return vlseg7e32ff(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg8e32ff_v_f32mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i32 } @llvm.riscv.vlseg8ff.mask.nxv1f32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 6 +// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 7 +// CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 8 +// CHECK-RV32-NEXT: store i32 [[TMP9]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg8e32ff_v_f32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv1f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], 
[[MASKEDOFF7:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 8 +// CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg8e32ff_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, vfloat32mf2_t maskedoff7, const float *base, size_t *new_vl, size_t vl) { + return vlseg8e32ff(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg2e32ff_v_f32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.mask.nxv2f32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg2e32ff_v_f32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv2f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e32ff_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vbool32_t mask, vfloat32m1_t 
maskedoff0, vfloat32m1_t maskedoff1, const float *base, size_t *new_vl, size_t vl) { + return vlseg2e32ff(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg3e32ff_v_f32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , i32 } @llvm.riscv.vlseg3ff.mask.nxv2f32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg3e32ff_v_f32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv2f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e32ff_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, const float *base, size_t *new_vl, size_t vl) { + return vlseg3e32ff(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg4e32ff_v_f32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , i32 } @llvm.riscv.vlseg4ff.mask.nxv2f32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store i32 [[TMP5]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg4e32ff_v_f32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv2f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e32ff_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, const float *base, size_t *new_vl, size_t vl) { + return vlseg4e32ff(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg5e32ff_v_f32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , i32 } @llvm.riscv.vlseg5ff.mask.nxv2f32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 5 +// CHECK-RV32-NEXT: store i32 [[TMP6]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg5e32ff_v_f32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv2f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void 
test_vlseg5e32ff_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, const float *base, size_t *new_vl, size_t vl) { + return vlseg5e32ff(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg6e32ff_v_f32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , i32 } @llvm.riscv.vlseg6ff.mask.nxv2f32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 6 +// CHECK-RV32-NEXT: store i32 [[TMP7]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg6e32ff_v_f32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv2f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg6e32ff_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, const float *base, size_t *new_vl, size_t vl) { + return 
vlseg6e32ff(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg7e32ff_v_f32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , i32 } @llvm.riscv.vlseg7ff.mask.nxv2f32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 6 +// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 7 +// CHECK-RV32-NEXT: store i32 [[TMP8]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg7e32ff_v_f32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv2f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg7e32ff_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, 
vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, const float *base, size_t *new_vl, size_t vl) { + return vlseg7e32ff(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg8e32ff_v_f32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i32 } @llvm.riscv.vlseg8ff.mask.nxv2f32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 6 +// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 7 +// CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 8 +// CHECK-RV32-NEXT: store i32 [[TMP9]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg8e32ff_v_f32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv2f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 +// 
CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 8 +// CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg8e32ff_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t *v7, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, vfloat32m1_t maskedoff7, const float *base, size_t *new_vl, size_t vl) { + return vlseg8e32ff(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg2e32ff_v_f32m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.mask.nxv4f32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg2e32ff_v_f32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv4f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e32ff_v_f32m2_m (vfloat32m2_t *v0, vfloat32m2_t *v1, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, const float *base, size_t *new_vl, size_t vl) { + return vlseg2e32ff(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg3e32ff_v_f32m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , i32 } @llvm.riscv.vlseg3ff.mask.nxv4f32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg3e32ff_v_f32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv4f32.i64( [[MASKEDOFF0:%.*]], 
[[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e32ff_v_f32m2_m (vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, const float *base, size_t *new_vl, size_t vl) { + return vlseg3e32ff(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg4e32ff_v_f32m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , i32 } @llvm.riscv.vlseg4ff.mask.nxv4f32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store i32 [[TMP5]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg4e32ff_v_f32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv4f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e32ff_v_f32m2_m (vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t *v3, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, vfloat32m2_t maskedoff3, const float *base, size_t *new_vl, size_t vl) { + return vlseg4e32ff(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg2e32ff_v_f32m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: 
[[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.mask.nxv8f32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg2e32ff_v_f32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv8f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e32ff_v_f32m4_m (vfloat32m4_t *v0, vfloat32m4_t *v1, vbool8_t mask, vfloat32m4_t maskedoff0, vfloat32m4_t maskedoff1, const float *base, size_t *new_vl, size_t vl) { + return vlseg2e32ff(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg2e64ff_v_f64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.mask.nxv1f64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg2e64ff_v_f64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv1f64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e64ff_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, const double *base, size_t *new_vl, size_t vl) { + return vlseg2e64ff(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg3e64ff_v_f64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , i32 } @llvm.riscv.vlseg3ff.mask.nxv1f64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: 
[[TMP1:%.*]] = extractvalue { , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg3e64ff_v_f64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv1f64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e64ff_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, const double *base, size_t *new_vl, size_t vl) { + return vlseg3e64ff(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg4e64ff_v_f64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , i32 } @llvm.riscv.vlseg4ff.mask.nxv1f64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store i32 [[TMP5]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg4e64ff_v_f64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv1f64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , 
, , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e64ff_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, const double *base, size_t *new_vl, size_t vl) { + return vlseg4e64ff(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg5e64ff_v_f64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , i32 } @llvm.riscv.vlseg5ff.mask.nxv1f64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 5 +// CHECK-RV32-NEXT: store i32 [[TMP6]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg5e64ff_v_f64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv1f64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg5e64ff_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, const double *base, size_t *new_vl, size_t vl) { + return vlseg5e64ff(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg6e64ff_v_f64m1_m( +// 
CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , i32 } @llvm.riscv.vlseg6ff.mask.nxv1f64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 6 +// CHECK-RV32-NEXT: store i32 [[TMP7]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg6e64ff_v_f64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv1f64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg6e64ff_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, const double *base, size_t *new_vl, size_t vl) { + return vlseg6e64ff(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg7e64ff_v_f64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , i32 } @llvm.riscv.vlseg7ff.mask.nxv1f64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], double* [[BASE:%.*]], 
[[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 6 +// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 7 +// CHECK-RV32-NEXT: store i32 [[TMP8]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg7e64ff_v_f64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv1f64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg7e64ff_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, const double *base, size_t *new_vl, size_t vl) { + return vlseg7e64ff(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg8e64ff_v_f64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i32 } @llvm.riscv.vlseg8ff.mask.nxv1f64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], 
[[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 6 +// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 7 +// CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 8 +// CHECK-RV32-NEXT: store i32 [[TMP9]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg8e64ff_v_f64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv1f64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 8 +// CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg8e64ff_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t *v7, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, 
vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, vfloat64m1_t maskedoff7, const double *base, size_t *new_vl, size_t vl) { + return vlseg8e64ff(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg2e64ff_v_f64m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.mask.nxv2f64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg2e64ff_v_f64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv2f64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e64ff_v_f64m2_m (vfloat64m2_t *v0, vfloat64m2_t *v1, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, const double *base, size_t *new_vl, size_t vl) { + return vlseg2e64ff(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg3e64ff_v_f64m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , i32 } @llvm.riscv.vlseg3ff.mask.nxv2f64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg3e64ff_v_f64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv2f64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 
8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e64ff_v_f64m2_m (vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, const double *base, size_t *new_vl, size_t vl) { + return vlseg3e64ff(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg4e64ff_v_f64m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , i32 } @llvm.riscv.vlseg4ff.mask.nxv2f64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store i32 [[TMP5]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg4e64ff_v_f64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv2f64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e64ff_v_f64m2_m (vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t *v3, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, vfloat64m2_t maskedoff3, const double *base, size_t *new_vl, size_t vl) { + return vlseg4e64ff(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg2e64ff_v_f64m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.mask.nxv4f64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , i32 } 
[[TMP0]], 2 +// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg2e64ff_v_f64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x double>, <vscale x 4 x double>, i64 } @llvm.riscv.vlseg2ff.mask.nxv4f64.i64(<vscale x 4 x double> [[MASKEDOFF0:%.*]], <vscale x 4 x double> [[MASKEDOFF1:%.*]], double* [[BASE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 4 x double>, <vscale x 4 x double>, i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store <vscale x 4 x double> [[TMP1]], <vscale x 4 x double>* [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x double>, <vscale x 4 x double>, i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store <vscale x 4 x double> [[TMP2]], <vscale x 4 x double>* [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 4 x double>, <vscale x 4 x double>, i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e64ff_v_f64m4_m (vfloat64m4_t *v0, vfloat64m4_t *v1, vbool16_t mask, vfloat64m4_t maskedoff0, vfloat64m4_t maskedoff1, const double *base, size_t *new_vl, size_t vl) { + return vlseg2e64ff(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vlseg.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vlseg.c new file mode 100644 index 000000000000..37684ef3cdec --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vlseg.c @@ -0,0 +1,12960 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv32 -target-feature +f -target-feature +d \ +// RUN: -target-feature +experimental-v -target-feature +experimental-zfh \ +// RUN: -disable-O0-optnone -fallow-half-arguments-and-returns -emit-llvm %s \ +// RUN: -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s +// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d \ +// RUN: -target-feature +experimental-v -target-feature +experimental-zfh \ +// RUN: -disable-O0-optnone -fallow-half-arguments-and-returns -emit-llvm %s \ +// RUN: -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s + +#include <riscv_vector.h> + +// CHECK-RV32-LABEL: @test_vlseg2e8_v_i8mf8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vlseg2.nxv1i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 0 +// CHECK-RV32-NEXT: store <vscale x 1 x i8> [[TMP1]], <vscale x 1 x i8>* [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 1 +// CHECK-RV32-NEXT: store <vscale x 1 x i8> [[TMP2]], <vscale x 1 x i8>* [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg2e8_v_i8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vlseg2.nxv1i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 0 +// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP1]], <vscale x 1 x i8>* [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 1 +// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP2]], <vscale x 1 x i8>* [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e8_v_i8mf8 (vint8mf8_t *v0, vint8mf8_t *v1, const int8_t *base, size_t vl) { + return vlseg2e8_v_i8mf8(v0, v1, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg3e8_v_i8mf8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vlseg3.nxv1i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 0 +// CHECK-RV32-NEXT: store <vscale x 1 x i8> [[TMP1]], <vscale x 1 x i8>* [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 1
+// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg3e8_v_i8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.nxv1i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e8_v_i8mf8 (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, const int8_t *base, size_t vl) { + return vlseg3e8_v_i8mf8(v0, v1, v2, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg4e8_v_i8mf8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.nxv1i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg4e8_v_i8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.nxv1i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e8_v_i8mf8 (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, const int8_t *base, size_t vl) { + return vlseg4e8_v_i8mf8(v0, v1, v2, v3, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg5e8_v_i8mf8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.nxv1i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// 
CHECK-RV64-LABEL: @test_vlseg5e8_v_i8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.nxv1i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg5e8_v_i8mf8 (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, const int8_t *base, size_t vl) { + return vlseg5e8_v_i8mf8(v0, v1, v2, v3, v4, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg6e8_v_i8mf8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.nxv1i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg6e8_v_i8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.nxv1i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg6e8_v_i8mf8 (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, const int8_t *base, size_t vl) { + return vlseg6e8_v_i8mf8(v0, v1, v2, v3, v4, v5, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg7e8_v_i8mf8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.nxv1i8.i32(i8* [[BASE:%.*]], i32 
[[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg7e8_v_i8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.nxv1i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg7e8_v_i8mf8 (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, const int8_t *base, size_t vl) { + return vlseg7e8_v_i8mf8(v0, v1, v2, v3, v4, v5, v6, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg8e8_v_i8mf8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.nxv1i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV32-NEXT: store 
[[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg8e8_v_i8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.nxv1i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg8e8_v_i8mf8 (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t *v7, const int8_t *base, size_t vl) { + return vlseg8e8_v_i8mf8(v0, v1, v2, v3, v4, v5, v6, v7, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg2e8_v_i8mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv2i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg2e8_v_i8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv2i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e8_v_i8mf4 (vint8mf4_t *v0, vint8mf4_t *v1, const int8_t *base, size_t vl) { + return vlseg2e8_v_i8mf4(v0, v1, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg3e8_v_i8mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.nxv2i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg3e8_v_i8mf4( +// 
CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.nxv2i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e8_v_i8mf4 (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, const int8_t *base, size_t vl) { + return vlseg3e8_v_i8mf4(v0, v1, v2, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg4e8_v_i8mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.nxv2i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg4e8_v_i8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.nxv2i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e8_v_i8mf4 (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, const int8_t *base, size_t vl) { + return vlseg4e8_v_i8mf4(v0, v1, v2, v3, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg5e8_v_i8mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.nxv2i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg5e8_v_i8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.nxv2i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// 
CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg5e8_v_i8mf4 (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, const int8_t *base, size_t vl) { + return vlseg5e8_v_i8mf4(v0, v1, v2, v3, v4, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg6e8_v_i8mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.nxv2i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg6e8_v_i8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.nxv2i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg6e8_v_i8mf4 (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, const int8_t *base, size_t vl) { + return vlseg6e8_v_i8mf4(v0, v1, v2, v3, v4, v5, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg7e8_v_i8mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.nxv2i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * 
[[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg7e8_v_i8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.nxv2i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg7e8_v_i8mf4 (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, const int8_t *base, size_t vl) { + return vlseg7e8_v_i8mf4(v0, v1, v2, v3, v4, v5, v6, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg8e8_v_i8mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.nxv2i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg8e8_v_i8mf4( +// 
CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.nxv2i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg8e8_v_i8mf4 (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t *v7, const int8_t *base, size_t vl) { + return vlseg8e8_v_i8mf4(v0, v1, v2, v3, v4, v5, v6, v7, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg2e8_v_i8mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv4i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg2e8_v_i8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv4i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e8_v_i8mf2 (vint8mf2_t *v0, vint8mf2_t *v1, const int8_t *base, size_t vl) { + return vlseg2e8_v_i8mf2(v0, v1, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg3e8_v_i8mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.nxv4i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg3e8_v_i8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.nxv4i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e8_v_i8mf2 (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, const int8_t *base, size_t vl) { + return vlseg3e8_v_i8mf2(v0, v1, v2, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg4e8_v_i8mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.nxv4i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg4e8_v_i8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.nxv4i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e8_v_i8mf2 (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, const int8_t *base, size_t vl) { + return vlseg4e8_v_i8mf2(v0, v1, v2, v3, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg5e8_v_i8mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.nxv4i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg5e8_v_i8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.nxv4i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// 
CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg5e8_v_i8mf2 (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, const int8_t *base, size_t vl) { + return vlseg5e8_v_i8mf2(v0, v1, v2, v3, v4, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg6e8_v_i8mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.nxv4i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg6e8_v_i8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.nxv4i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg6e8_v_i8mf2 (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, const int8_t *base, size_t vl) { + return vlseg6e8_v_i8mf2(v0, v1, v2, v3, v4, v5, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg7e8_v_i8mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.nxv4i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV32-NEXT: store 
[[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg7e8_v_i8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.nxv4i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg7e8_v_i8mf2 (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, const int8_t *base, size_t vl) { + return vlseg7e8_v_i8mf2(v0, v1, v2, v3, v4, v5, v6, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg8e8_v_i8mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.nxv4i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg8e8_v_i8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.nxv4i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * 
[[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg8e8_v_i8mf2 (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t *v7, const int8_t *base, size_t vl) { + return vlseg8e8_v_i8mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg2e8_v_i8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv8i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg2e8_v_i8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv8i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e8_v_i8m1 (vint8m1_t *v0, vint8m1_t *v1, const int8_t *base, size_t vl) { + return vlseg2e8_v_i8m1(v0, v1, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg3e8_v_i8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.nxv8i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg3e8_v_i8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.nxv8i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// 
CHECK-RV64-NEXT: ret void +// +void test_vlseg3e8_v_i8m1 (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, const int8_t *base, size_t vl) { + return vlseg3e8_v_i8m1(v0, v1, v2, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg4e8_v_i8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.nxv8i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg4e8_v_i8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.nxv8i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e8_v_i8m1 (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, const int8_t *base, size_t vl) { + return vlseg4e8_v_i8m1(v0, v1, v2, v3, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg5e8_v_i8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.nxv8i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg5e8_v_i8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.nxv8i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store 
[[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg5e8_v_i8m1 (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, const int8_t *base, size_t vl) { + return vlseg5e8_v_i8m1(v0, v1, v2, v3, v4, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg6e8_v_i8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.nxv8i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg6e8_v_i8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.nxv8i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg6e8_v_i8m1 (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, const int8_t *base, size_t vl) { + return vlseg6e8_v_i8m1(v0, v1, v2, v3, v4, v5, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg7e8_v_i8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.nxv8i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// 
CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg7e8_v_i8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.nxv8i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg7e8_v_i8m1 (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, const int8_t *base, size_t vl) { + return vlseg7e8_v_i8m1(v0, v1, v2, v3, v4, v5, v6, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg8e8_v_i8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.nxv8i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg8e8_v_i8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.nxv8i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: 
[[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg8e8_v_i8m1 (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vint8m1_t *v7, const int8_t *base, size_t vl) { + return vlseg8e8_v_i8m1(v0, v1, v2, v3, v4, v5, v6, v7, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg2e8_v_i8m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv16i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg2e8_v_i8m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv16i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e8_v_i8m2 (vint8m2_t *v0, vint8m2_t *v1, const int8_t *base, size_t vl) { + return vlseg2e8_v_i8m2(v0, v1, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg3e8_v_i8m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.nxv16i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg3e8_v_i8m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.nxv16i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e8_v_i8m2 (vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, const int8_t *base, size_t vl) { + return vlseg3e8_v_i8m2(v0, v1, v2, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg4e8_v_i8m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } 
@llvm.riscv.vlseg4.nxv16i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg4e8_v_i8m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.nxv16i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e8_v_i8m2 (vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vint8m2_t *v3, const int8_t *base, size_t vl) { + return vlseg4e8_v_i8m2(v0, v1, v2, v3, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg2e8_v_i8m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv32i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg2e8_v_i8m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv32i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e8_v_i8m4 (vint8m4_t *v0, vint8m4_t *v1, const int8_t *base, size_t vl) { + return vlseg2e8_v_i8m4(v0, v1, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg2e16_v_i16mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv1i16.i32(i16* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg2e16_v_i16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv1i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// 
CHECK-RV64-NEXT: ret void +// +void test_vlseg2e16_v_i16mf4 (vint16mf4_t *v0, vint16mf4_t *v1, const int16_t *base, size_t vl) { + return vlseg2e16_v_i16mf4(v0, v1, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg3e16_v_i16mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.nxv1i16.i32(i16* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg3e16_v_i16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.nxv1i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e16_v_i16mf4 (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, const int16_t *base, size_t vl) { + return vlseg3e16_v_i16mf4(v0, v1, v2, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg4e16_v_i16mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.nxv1i16.i32(i16* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg4e16_v_i16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.nxv1i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e16_v_i16mf4 (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, const int16_t *base, size_t vl) { + return vlseg4e16_v_i16mf4(v0, v1, v2, v3, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg5e16_v_i16mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.nxv1i16.i32(i16* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store 
[[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg5e16_v_i16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.nxv1i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg5e16_v_i16mf4 (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, const int16_t *base, size_t vl) { + return vlseg5e16_v_i16mf4(v0, v1, v2, v3, v4, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg6e16_v_i16mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.nxv1i16.i32(i16* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg6e16_v_i16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.nxv1i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// 
CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg6e16_v_i16mf4 (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, const int16_t *base, size_t vl) { + return vlseg6e16_v_i16mf4(v0, v1, v2, v3, v4, v5, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg7e16_v_i16mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.nxv1i16.i32(i16* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg7e16_v_i16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.nxv1i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg7e16_v_i16mf4 (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, const int16_t *base, size_t vl) { + return vlseg7e16_v_i16mf4(v0, v1, v2, v3, v4, v5, v6, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg8e16_v_i16mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.nxv1i16.i32(i16* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], 
align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg8e16_v_i16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.nxv1i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg8e16_v_i16mf4 (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t *v7, const int16_t *base, size_t vl) { + return vlseg8e16_v_i16mf4(v0, v1, v2, v3, v4, v5, v6, v7, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg2e16_v_i16mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv2i16.i32(i16* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg2e16_v_i16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv2i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e16_v_i16mf2 (vint16mf2_t *v0, vint16mf2_t *v1, const int16_t *base, size_t vl) { + return 
vlseg2e16_v_i16mf2(v0, v1, base, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlseg3e16_v_i16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } @llvm.riscv.vlseg3.nxv2i16.i32(i16* [[BASE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 0
+// CHECK-RV32-NEXT: store <vscale x 2 x i16> [[TMP1]], <vscale x 2 x i16>* [[V0:%.*]], align 4
+// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 1
+// CHECK-RV32-NEXT: store <vscale x 2 x i16> [[TMP2]], <vscale x 2 x i16>* [[V1:%.*]], align 4
+// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 2
+// CHECK-RV32-NEXT: store <vscale x 2 x i16> [[TMP3]], <vscale x 2 x i16>* [[V2:%.*]], align 4
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vlseg3e16_v_i16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } @llvm.riscv.vlseg3.nxv2i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP1]], <vscale x 2 x i16>* [[V0:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP2]], <vscale x 2 x i16>* [[V1:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP3]], <vscale x 2 x i16>* [[V2:%.*]], align 8
+// CHECK-RV64-NEXT: ret void
+//
+void test_vlseg3e16_v_i16mf2 (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, const int16_t *base, size_t vl) {
+  return vlseg3e16_v_i16mf2(v0, v1, v2, base, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlseg4e16_v_i16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } @llvm.riscv.vlseg4.nxv2i16.i32(i16* [[BASE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 0
+// CHECK-RV32-NEXT: store <vscale x 2 x i16> [[TMP1]], <vscale x 2 x i16>* [[V0:%.*]], align 4
+// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 1
+// CHECK-RV32-NEXT: store <vscale x 2 x i16> [[TMP2]], <vscale x 2 x i16>* [[V1:%.*]], align 4
+// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 2
+// CHECK-RV32-NEXT: store <vscale x 2 x i16> [[TMP3]], <vscale x 2 x i16>* [[V2:%.*]], align 4
+// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 3
+// CHECK-RV32-NEXT: store <vscale x 2 x i16> [[TMP4]], <vscale x 2 x i16>* [[V3:%.*]], align 4
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vlseg4e16_v_i16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } @llvm.riscv.vlseg4.nxv2i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP1]], <vscale x 2 x i16>* [[V0:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP2]], <vscale x 2 x i16>* [[V1:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP3]], <vscale x 2 x i16>* [[V2:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 3
+// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP4]], <vscale x 2 x i16>* [[V3:%.*]], align 8
+// CHECK-RV64-NEXT: ret void
+//
+void test_vlseg4e16_v_i16mf2 (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, const int16_t *base, size_t vl) {
+  return vlseg4e16_v_i16mf2(v0, v1, v2, v3, base, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlseg5e16_v_i16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } @llvm.riscv.vlseg5.nxv2i16.i32(i16* [[BASE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 0
+// CHECK-RV32-NEXT: store <vscale x 2 x i16> [[TMP1]], <vscale x 2 x i16>* [[V0:%.*]], align 4
+// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 1
+// CHECK-RV32-NEXT: store
[[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg5e16_v_i16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.nxv2i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg5e16_v_i16mf2 (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, const int16_t *base, size_t vl) { + return vlseg5e16_v_i16mf2(v0, v1, v2, v3, v4, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg6e16_v_i16mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.nxv2i16.i32(i16* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg6e16_v_i16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.nxv2i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// 
CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg6e16_v_i16mf2 (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, const int16_t *base, size_t vl) { + return vlseg6e16_v_i16mf2(v0, v1, v2, v3, v4, v5, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg7e16_v_i16mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.nxv2i16.i32(i16* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg7e16_v_i16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.nxv2i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg7e16_v_i16mf2 (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, const int16_t *base, size_t vl) { + return vlseg7e16_v_i16mf2(v0, v1, v2, v3, v4, v5, v6, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg8e16_v_i16mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.nxv2i16.i32(i16* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * 
[[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg8e16_v_i16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.nxv2i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg8e16_v_i16mf2 (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t *v7, const int16_t *base, size_t vl) { + return vlseg8e16_v_i16mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg2e16_v_i16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv4i16.i32(i16* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg2e16_v_i16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv4i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e16_v_i16m1 (vint16m1_t *v0, vint16m1_t *v1, const int16_t *base, size_t vl) { + return vlseg2e16_v_i16m1(v0, v1, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg3e16_v_i16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: 
[[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.nxv4i16.i32(i16* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg3e16_v_i16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.nxv4i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e16_v_i16m1 (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, const int16_t *base, size_t vl) { + return vlseg3e16_v_i16m1(v0, v1, v2, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg4e16_v_i16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.nxv4i16.i32(i16* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg4e16_v_i16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.nxv4i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e16_v_i16m1 (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, const int16_t *base, size_t vl) { + return vlseg4e16_v_i16m1(v0, v1, v2, v3, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg5e16_v_i16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.nxv4i16.i32(i16* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 
4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg5e16_v_i16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.nxv4i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg5e16_v_i16m1 (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, const int16_t *base, size_t vl) { + return vlseg5e16_v_i16m1(v0, v1, v2, v3, v4, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg6e16_v_i16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.nxv4i16.i32(i16* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg6e16_v_i16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.nxv4i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg6e16_v_i16m1 (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t 
*v3, vint16m1_t *v4, vint16m1_t *v5, const int16_t *base, size_t vl) { + return vlseg6e16_v_i16m1(v0, v1, v2, v3, v4, v5, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg7e16_v_i16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.nxv4i16.i32(i16* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg7e16_v_i16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.nxv4i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg7e16_v_i16m1 (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, const int16_t *base, size_t vl) { + return vlseg7e16_v_i16m1(v0, v1, v2, v3, v4, v5, v6, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg8e16_v_i16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.nxv4i16.i32(i16* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue 
{ , , , , , , , } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg8e16_v_i16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.nxv4i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg8e16_v_i16m1 (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vint16m1_t *v7, const int16_t *base, size_t vl) { + return vlseg8e16_v_i16m1(v0, v1, v2, v3, v4, v5, v6, v7, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg2e16_v_i16m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv8i16.i32(i16* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg2e16_v_i16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv8i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e16_v_i16m2 (vint16m2_t *v0, vint16m2_t *v1, const int16_t *base, size_t vl) { + return vlseg2e16_v_i16m2(v0, v1, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg3e16_v_i16m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.nxv8i16.i32(i16* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 
+// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg3e16_v_i16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.nxv8i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e16_v_i16m2 (vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, const int16_t *base, size_t vl) { + return vlseg3e16_v_i16m2(v0, v1, v2, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg4e16_v_i16m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.nxv8i16.i32(i16* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg4e16_v_i16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.nxv8i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e16_v_i16m2 (vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vint16m2_t *v3, const int16_t *base, size_t vl) { + return vlseg4e16_v_i16m2(v0, v1, v2, v3, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg2e16_v_i16m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv16i16.i32(i16* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg2e16_v_i16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv16i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: 
[[TMP2:%.*]] = extractvalue { <vscale x 16 x i16>, <vscale x 16 x i16> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 16 x i16> [[TMP2]], <vscale x 16 x i16>* [[V1:%.*]], align 8
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg2e16_v_i16m4 (vint16m4_t *v0, vint16m4_t *v1, const int16_t *base, size_t vl) {
+  return vlseg2e16_v_i16m4(v0, v1, base, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlseg2e32_v_i32mf2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vlseg2.nxv1i32.i32(i32* [[BASE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 0
+// CHECK-RV32-NEXT:    store <vscale x 1 x i32> [[TMP1]], <vscale x 1 x i32>* [[V0:%.*]], align 4
+// CHECK-RV32-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 1
+// CHECK-RV32-NEXT:    store <vscale x 1 x i32> [[TMP2]], <vscale x 1 x i32>* [[V1:%.*]], align 4
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_vlseg2e32_v_i32mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vlseg2.nxv1i32.i64(i32* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP1]], <vscale x 1 x i32>* [[V0:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP2]], <vscale x 1 x i32>* [[V1:%.*]], align 8
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg2e32_v_i32mf2 (vint32mf2_t *v0, vint32mf2_t *v1, const int32_t *base, size_t vl) {
+  return vlseg2e32_v_i32mf2(v0, v1, base, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlseg3e32_v_i32mf2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vlseg3.nxv1i32.i32(i32* [[BASE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 0
+// CHECK-RV32-NEXT:    store <vscale x 1 x i32> [[TMP1]], <vscale x 1 x i32>* [[V0:%.*]], align 4
+// CHECK-RV32-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 1
+// CHECK-RV32-NEXT:    store <vscale x 1 x i32> [[TMP2]], <vscale x 1 x i32>* [[V1:%.*]], align 4
+// CHECK-RV32-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 2
+// CHECK-RV32-NEXT:    store <vscale x 1 x i32> [[TMP3]], <vscale x 1 x i32>* [[V2:%.*]], align 4
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_vlseg3e32_v_i32mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vlseg3.nxv1i32.i64(i32* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP1]], <vscale x 1 x i32>* [[V0:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP2]], <vscale x 1 x i32>* [[V1:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP3]], <vscale x 1 x i32>* [[V2:%.*]], align 8
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg3e32_v_i32mf2 (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, const int32_t *base, size_t vl) {
+  return vlseg3e32_v_i32mf2(v0, v1, v2, base, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlseg4e32_v_i32mf2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vlseg4.nxv1i32.i32(i32* [[BASE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 0
+// CHECK-RV32-NEXT:    store <vscale x 1 x i32> [[TMP1]], <vscale x 1 x i32>* [[V0:%.*]], align 4
+// CHECK-RV32-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 1
+// CHECK-RV32-NEXT:    store <vscale x 1 x i32> [[TMP2]], <vscale x 1 x i32>* [[V1:%.*]], align 4
+// CHECK-RV32-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 2
+// CHECK-RV32-NEXT:    store <vscale x 1 x i32> [[TMP3]], <vscale x 1 x i32>* [[V2:%.*]], align 4
+// CHECK-RV32-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 3
+// CHECK-RV32-NEXT:    store <vscale x 1 x i32> [[TMP4]], <vscale x 1 x i32>* [[V3:%.*]], align 4
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: 
@test_vlseg4e32_v_i32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.nxv1i32.i64(i32* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e32_v_i32mf2 (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, const int32_t *base, size_t vl) { + return vlseg4e32_v_i32mf2(v0, v1, v2, v3, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg5e32_v_i32mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.nxv1i32.i32(i32* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg5e32_v_i32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.nxv1i32.i64(i32* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg5e32_v_i32mf2 (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, const int32_t *base, size_t vl) { + return vlseg5e32_v_i32mf2(v0, v1, v2, v3, v4, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg6e32_v_i32mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.nxv1i32.i32(i32* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = 
extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg6e32_v_i32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.nxv1i32.i64(i32* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg6e32_v_i32mf2 (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, const int32_t *base, size_t vl) { + return vlseg6e32_v_i32mf2(v0, v1, v2, v3, v4, v5, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg7e32_v_i32mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.nxv1i32.i32(i32* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg7e32_v_i32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.nxv1i32.i64(i32* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// 
CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg7e32_v_i32mf2 (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, const int32_t *base, size_t vl) { + return vlseg7e32_v_i32mf2(v0, v1, v2, v3, v4, v5, v6, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg8e32_v_i32mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.nxv1i32.i32(i32* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg8e32_v_i32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.nxv1i32.i64(i32* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg8e32_v_i32mf2 (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, 
vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t *v7, const int32_t *base, size_t vl) { + return vlseg8e32_v_i32mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg2e32_v_i32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv2i32.i32(i32* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg2e32_v_i32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv2i32.i64(i32* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e32_v_i32m1 (vint32m1_t *v0, vint32m1_t *v1, const int32_t *base, size_t vl) { + return vlseg2e32_v_i32m1(v0, v1, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg3e32_v_i32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.nxv2i32.i32(i32* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg3e32_v_i32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.nxv2i32.i64(i32* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e32_v_i32m1 (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, const int32_t *base, size_t vl) { + return vlseg3e32_v_i32m1(v0, v1, v2, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg4e32_v_i32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.nxv2i32.i32(i32* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg4e32_v_i32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.nxv2i32.i64(i32* 
[[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e32_v_i32m1 (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, const int32_t *base, size_t vl) { + return vlseg4e32_v_i32m1(v0, v1, v2, v3, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg5e32_v_i32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.nxv2i32.i32(i32* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg5e32_v_i32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.nxv2i32.i64(i32* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg5e32_v_i32m1 (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, const int32_t *base, size_t vl) { + return vlseg5e32_v_i32m1(v0, v1, v2, v3, v4, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg6e32_v_i32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.nxv2i32.i32(i32* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } 
[[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg6e32_v_i32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.nxv2i32.i64(i32* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg6e32_v_i32m1 (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, const int32_t *base, size_t vl) { + return vlseg6e32_v_i32m1(v0, v1, v2, v3, v4, v5, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg7e32_v_i32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.nxv2i32.i32(i32* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg7e32_v_i32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.nxv2i32.i64(i32* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], 
align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg7e32_v_i32m1 (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, const int32_t *base, size_t vl) { + return vlseg7e32_v_i32m1(v0, v1, v2, v3, v4, v5, v6, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg8e32_v_i32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.nxv2i32.i32(i32* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg8e32_v_i32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.nxv2i32.i64(i32* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg8e32_v_i32m1 (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vint32m1_t *v7, const int32_t *base, size_t vl) { + return vlseg8e32_v_i32m1(v0, v1, v2, v3, v4, v5, v6, v7, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg2e32_v_i32m2( +// 
CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv4i32.i32(i32* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg2e32_v_i32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv4i32.i64(i32* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e32_v_i32m2 (vint32m2_t *v0, vint32m2_t *v1, const int32_t *base, size_t vl) { + return vlseg2e32_v_i32m2(v0, v1, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg3e32_v_i32m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.nxv4i32.i32(i32* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg3e32_v_i32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.nxv4i32.i64(i32* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e32_v_i32m2 (vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, const int32_t *base, size_t vl) { + return vlseg3e32_v_i32m2(v0, v1, v2, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg4e32_v_i32m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.nxv4i32.i32(i32* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg4e32_v_i32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.nxv4i32.i64(i32* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } 
[[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x i32> [[TMP2]], <vscale x 4 x i32>* [[V1:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 4 x i32> [[TMP3]], <vscale x 4 x i32>* [[V2:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 4 x i32> [[TMP4]], <vscale x 4 x i32>* [[V3:%.*]], align 8
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg4e32_v_i32m2 (vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vint32m2_t *v3, const int32_t *base, size_t vl) {
+  return vlseg4e32_v_i32m2(v0, v1, v2, v3, base, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlseg2e32_v_i32m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x i32>, <vscale x 8 x i32> } @llvm.riscv.vlseg2.nxv8i32.i32(i32* [[BASE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x i32>, <vscale x 8 x i32> } [[TMP0]], 0
+// CHECK-RV32-NEXT:    store <vscale x 8 x i32> [[TMP1]], <vscale x 8 x i32>* [[V0:%.*]], align 4
+// CHECK-RV32-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x i32>, <vscale x 8 x i32> } [[TMP0]], 1
+// CHECK-RV32-NEXT:    store <vscale x 8 x i32> [[TMP2]], <vscale x 8 x i32>* [[V1:%.*]], align 4
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_vlseg2e32_v_i32m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x i32>, <vscale x 8 x i32> } @llvm.riscv.vlseg2.nxv8i32.i64(i32* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x i32>, <vscale x 8 x i32> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 8 x i32> [[TMP1]], <vscale x 8 x i32>* [[V0:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x i32>, <vscale x 8 x i32> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 8 x i32> [[TMP2]], <vscale x 8 x i32>* [[V1:%.*]], align 8
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg2e32_v_i32m4 (vint32m4_t *v0, vint32m4_t *v1, const int32_t *base, size_t vl) {
+  return vlseg2e32_v_i32m4(v0, v1, base, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlseg2e64_v_i64m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i64>, <vscale x 1 x i64> } @llvm.riscv.vlseg2.nxv1i64.i32(i64* [[BASE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 0
+// CHECK-RV32-NEXT:    store <vscale x 1 x i64> [[TMP1]], <vscale x 1 x i64>* [[V0:%.*]], align 4
+// CHECK-RV32-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 1
+// CHECK-RV32-NEXT:    store <vscale x 1 x i64> [[TMP2]], <vscale x 1 x i64>* [[V1:%.*]], align 4
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_vlseg2e64_v_i64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i64>, <vscale x 1 x i64> } @llvm.riscv.vlseg2.nxv1i64.i64(i64* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x i64> [[TMP1]], <vscale x 1 x i64>* [[V0:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x i64> [[TMP2]], <vscale x 1 x i64>* [[V1:%.*]], align 8
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg2e64_v_i64m1 (vint64m1_t *v0, vint64m1_t *v1, const int64_t *base, size_t vl) {
+  return vlseg2e64_v_i64m1(v0, v1, base, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlseg3e64_v_i64m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } @llvm.riscv.vlseg3.nxv1i64.i32(i64* [[BASE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 0
+// CHECK-RV32-NEXT:    store <vscale x 1 x i64> [[TMP1]], <vscale x 1 x i64>* [[V0:%.*]], align 4
+// CHECK-RV32-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 1
+// CHECK-RV32-NEXT:    store <vscale x 1 x i64> [[TMP2]], <vscale x 1 x i64>* [[V1:%.*]], align 4
+// CHECK-RV32-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 2
+// CHECK-RV32-NEXT:    store <vscale x 1 x i64> [[TMP3]], <vscale x 1 x i64>* [[V2:%.*]], align 4
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_vlseg3e64_v_i64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } @llvm.riscv.vlseg3.nxv1i64.i64(i64* [[BASE:%.*]], i64 [[VL:%.*]])
+// 
CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e64_v_i64m1 (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, const int64_t *base, size_t vl) { + return vlseg3e64_v_i64m1(v0, v1, v2, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg4e64_v_i64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.nxv1i64.i32(i64* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg4e64_v_i64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.nxv1i64.i64(i64* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e64_v_i64m1 (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, const int64_t *base, size_t vl) { + return vlseg4e64_v_i64m1(v0, v1, v2, v3, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg5e64_v_i64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.nxv1i64.i32(i64* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg5e64_v_i64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.nxv1i64.i64(i64* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// 
CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg5e64_v_i64m1 (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, const int64_t *base, size_t vl) { + return vlseg5e64_v_i64m1(v0, v1, v2, v3, v4, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg6e64_v_i64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.nxv1i64.i32(i64* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg6e64_v_i64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.nxv1i64.i64(i64* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg6e64_v_i64m1 (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, const int64_t *base, size_t vl) { + return vlseg6e64_v_i64m1(v0, v1, v2, v3, v4, v5, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg7e64_v_i64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.nxv1i64.i32(i64* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: 
store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg7e64_v_i64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.nxv1i64.i64(i64* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg7e64_v_i64m1 (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, const int64_t *base, size_t vl) { + return vlseg7e64_v_i64m1(v0, v1, v2, v3, v4, v5, v6, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg8e64_v_i64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.nxv1i64.i32(i64* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg8e64_v_i64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.nxv1i64.i64(i64* 
[[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg8e64_v_i64m1 (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vint64m1_t *v7, const int64_t *base, size_t vl) { + return vlseg8e64_v_i64m1(v0, v1, v2, v3, v4, v5, v6, v7, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg2e64_v_i64m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv2i64.i32(i64* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg2e64_v_i64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv2i64.i64(i64* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e64_v_i64m2 (vint64m2_t *v0, vint64m2_t *v1, const int64_t *base, size_t vl) { + return vlseg2e64_v_i64m2(v0, v1, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg3e64_v_i64m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.nxv2i64.i32(i64* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg3e64_v_i64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.nxv2i64.i64(i64* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store 
[[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e64_v_i64m2 (vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, const int64_t *base, size_t vl) { + return vlseg3e64_v_i64m2(v0, v1, v2, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg4e64_v_i64m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.nxv2i64.i32(i64* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg4e64_v_i64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.nxv2i64.i64(i64* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e64_v_i64m2 (vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vint64m2_t *v3, const int64_t *base, size_t vl) { + return vlseg4e64_v_i64m2(v0, v1, v2, v3, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg2e64_v_i64m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv4i64.i32(i64* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg2e64_v_i64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv4i64.i64(i64* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e64_v_i64m4 (vint64m4_t *v0, vint64m4_t *v1, const int64_t *base, size_t vl) { + return vlseg2e64_v_i64m4(v0, v1, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg2e8_v_u8mf8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv1i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * 
[[V1:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg2e8_v_u8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv1i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e8_v_u8mf8 (vuint8mf8_t *v0, vuint8mf8_t *v1, const uint8_t *base, size_t vl) { + return vlseg2e8_v_u8mf8(v0, v1, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg3e8_v_u8mf8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.nxv1i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg3e8_v_u8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.nxv1i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e8_v_u8mf8 (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, const uint8_t *base, size_t vl) { + return vlseg3e8_v_u8mf8(v0, v1, v2, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg4e8_v_u8mf8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.nxv1i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg4e8_v_u8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.nxv1i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void 
test_vlseg4e8_v_u8mf8 (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, const uint8_t *base, size_t vl) { + return vlseg4e8_v_u8mf8(v0, v1, v2, v3, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg5e8_v_u8mf8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.nxv1i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg5e8_v_u8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.nxv1i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg5e8_v_u8mf8 (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, const uint8_t *base, size_t vl) { + return vlseg5e8_v_u8mf8(v0, v1, v2, v3, v4, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg6e8_v_u8mf8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.nxv1i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg6e8_v_u8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.nxv1i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] 
= extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg6e8_v_u8mf8 (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, const uint8_t *base, size_t vl) { + return vlseg6e8_v_u8mf8(v0, v1, v2, v3, v4, v5, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg7e8_v_u8mf8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.nxv1i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg7e8_v_u8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.nxv1i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg7e8_v_u8mf8 (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, const uint8_t *base, size_t vl) { + return vlseg7e8_v_u8mf8(v0, v1, v2, v3, v4, v5, v6, base, vl); +} + +// CHECK-RV32-LABEL: 
@test_vlseg8e8_v_u8mf8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.nxv1i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg8e8_v_u8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.nxv1i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg8e8_v_u8mf8 (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, const uint8_t *base, size_t vl) { + return vlseg8e8_v_u8mf8(v0, v1, v2, v3, v4, v5, v6, v7, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg2e8_v_u8mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv2i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg2e8_v_u8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv2i8.i64(i8* 
[[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e8_v_u8mf4 (vuint8mf4_t *v0, vuint8mf4_t *v1, const uint8_t *base, size_t vl) { + return vlseg2e8_v_u8mf4(v0, v1, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg3e8_v_u8mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.nxv2i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg3e8_v_u8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.nxv2i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e8_v_u8mf4 (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, const uint8_t *base, size_t vl) { + return vlseg3e8_v_u8mf4(v0, v1, v2, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg4e8_v_u8mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.nxv2i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg4e8_v_u8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.nxv2i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e8_v_u8mf4 (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, const uint8_t *base, size_t vl) { + return vlseg4e8_v_u8mf4(v0, v1, v2, v3, base, vl); +} + +// CHECK-RV32-LABEL: 
@test_vlseg5e8_v_u8mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.nxv2i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg5e8_v_u8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.nxv2i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg5e8_v_u8mf4 (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, const uint8_t *base, size_t vl) { + return vlseg5e8_v_u8mf4(v0, v1, v2, v3, v4, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg6e8_v_u8mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.nxv2i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg6e8_v_u8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.nxv2i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store 
[[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg6e8_v_u8mf4 (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, const uint8_t *base, size_t vl) { + return vlseg6e8_v_u8mf4(v0, v1, v2, v3, v4, v5, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg7e8_v_u8mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.nxv2i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg7e8_v_u8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.nxv2i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg7e8_v_u8mf4 (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, const uint8_t *base, size_t vl) { + return vlseg7e8_v_u8mf4(v0, v1, v2, v3, v4, v5, v6, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg8e8_v_u8mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.nxv2i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = 
extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg8e8_v_u8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.nxv2i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg8e8_v_u8mf4 (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, const uint8_t *base, size_t vl) { + return vlseg8e8_v_u8mf4(v0, v1, v2, v3, v4, v5, v6, v7, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg2e8_v_u8mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv4i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg2e8_v_u8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv4i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } 
[[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e8_v_u8mf2 (vuint8mf2_t *v0, vuint8mf2_t *v1, const uint8_t *base, size_t vl) { + return vlseg2e8_v_u8mf2(v0, v1, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg3e8_v_u8mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.nxv4i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg3e8_v_u8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.nxv4i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e8_v_u8mf2 (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, const uint8_t *base, size_t vl) { + return vlseg3e8_v_u8mf2(v0, v1, v2, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg4e8_v_u8mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.nxv4i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg4e8_v_u8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.nxv4i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e8_v_u8mf2 (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, const uint8_t *base, size_t vl) { + return vlseg4e8_v_u8mf2(v0, v1, v2, v3, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg5e8_v_u8mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.nxv4i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , 
} [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg5e8_v_u8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.nxv4i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg5e8_v_u8mf2 (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, const uint8_t *base, size_t vl) { + return vlseg5e8_v_u8mf2(v0, v1, v2, v3, v4, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg6e8_v_u8mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.nxv4i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg6e8_v_u8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.nxv4i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } 
[[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg6e8_v_u8mf2 (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, const uint8_t *base, size_t vl) { + return vlseg6e8_v_u8mf2(v0, v1, v2, v3, v4, v5, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg7e8_v_u8mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.nxv4i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg7e8_v_u8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.nxv4i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg7e8_v_u8mf2 (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, const uint8_t *base, size_t vl) { + return vlseg7e8_v_u8mf2(v0, v1, v2, v3, v4, v5, v6, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg8e8_v_u8mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.nxv4i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 
4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg8e8_v_u8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.nxv4i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg8e8_v_u8mf2 (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, const uint8_t *base, size_t vl) { + return vlseg8e8_v_u8mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg2e8_v_u8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv8i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg2e8_v_u8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv8i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e8_v_u8m1 (vuint8m1_t *v0, vuint8m1_t *v1, const uint8_t *base, size_t vl) { + return vlseg2e8_v_u8m1(v0, v1, base, vl); +} + 
+// CHECK-RV32-LABEL: @test_vlseg3e8_v_u8m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } @llvm.riscv.vlseg3.nxv8i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 0
+// CHECK-RV32-NEXT:    store <vscale x 8 x i8> [[TMP1]], <vscale x 8 x i8>* [[V0:%.*]], align 4
+// CHECK-RV32-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 1
+// CHECK-RV32-NEXT:    store <vscale x 8 x i8> [[TMP2]], <vscale x 8 x i8>* [[V1:%.*]], align 4
+// CHECK-RV32-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 2
+// CHECK-RV32-NEXT:    store <vscale x 8 x i8> [[TMP3]], <vscale x 8 x i8>* [[V2:%.*]], align 4
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_vlseg3e8_v_u8m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } @llvm.riscv.vlseg3.nxv8i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP1]], <vscale x 8 x i8>* [[V0:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP2]], <vscale x 8 x i8>* [[V1:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP3]], <vscale x 8 x i8>* [[V2:%.*]], align 8
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg3e8_v_u8m1 (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, const uint8_t *base, size_t vl) {
+  return vlseg3e8_v_u8m1(v0, v1, v2, base, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlseg4e8_v_u8m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } @llvm.riscv.vlseg4.nxv8i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 0
+// CHECK-RV32-NEXT:    store <vscale x 8 x i8> [[TMP1]], <vscale x 8 x i8>* [[V0:%.*]], align 4
+// CHECK-RV32-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 1
+// CHECK-RV32-NEXT:    store <vscale x 8 x i8> [[TMP2]], <vscale x 8 x i8>* [[V1:%.*]], align 4
+// CHECK-RV32-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 2
+// CHECK-RV32-NEXT:    store <vscale x 8 x i8> [[TMP3]], <vscale x 8 x i8>* [[V2:%.*]], align 4
+// CHECK-RV32-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 3
+// CHECK-RV32-NEXT:    store <vscale x 8 x i8> [[TMP4]], <vscale x 8 x i8>* [[V3:%.*]], align 4
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_vlseg4e8_v_u8m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } @llvm.riscv.vlseg4.nxv8i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP1]], <vscale x 8 x i8>* [[V0:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP2]], <vscale x 8 x i8>* [[V1:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP3]], <vscale x 8 x i8>* [[V2:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP4]], <vscale x 8 x i8>* [[V3:%.*]], align 8
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg4e8_v_u8m1 (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, const uint8_t *base, size_t vl) {
+  return vlseg4e8_v_u8m1(v0, v1, v2, v3, base, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlseg5e8_v_u8m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } @llvm.riscv.vlseg5.nxv8i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 0
+// CHECK-RV32-NEXT:    store <vscale x 8 x i8> [[TMP1]], <vscale x 8 x i8>* [[V0:%.*]], align 4
+// CHECK-RV32-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 1
+// CHECK-RV32-NEXT:    store <vscale x 8 x i8> [[TMP2]], <vscale x 8 x i8>* [[V1:%.*]], align 4
+// CHECK-RV32-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } 
[[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg5e8_v_u8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.nxv8i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg5e8_v_u8m1 (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, const uint8_t *base, size_t vl) { + return vlseg5e8_v_u8m1(v0, v1, v2, v3, v4, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg6e8_v_u8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.nxv8i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg6e8_v_u8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.nxv8i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg6e8_v_u8m1 (vuint8m1_t 
*v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, const uint8_t *base, size_t vl) { + return vlseg6e8_v_u8m1(v0, v1, v2, v3, v4, v5, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg7e8_v_u8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.nxv8i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg7e8_v_u8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.nxv8i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg7e8_v_u8m1 (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, const uint8_t *base, size_t vl) { + return vlseg7e8_v_u8m1(v0, v1, v2, v3, v4, v5, v6, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg8e8_v_u8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.nxv8i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// 
CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg8e8_v_u8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.nxv8i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg8e8_v_u8m1 (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vuint8m1_t *v7, const uint8_t *base, size_t vl) { + return vlseg8e8_v_u8m1(v0, v1, v2, v3, v4, v5, v6, v7, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg2e8_v_u8m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv16i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg2e8_v_u8m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv16i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e8_v_u8m2 (vuint8m2_t *v0, vuint8m2_t *v1, const uint8_t *base, size_t vl) { + return vlseg2e8_v_u8m2(v0, v1, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg3e8_v_u8m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.nxv16i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store 
[[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg3e8_v_u8m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.nxv16i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e8_v_u8m2 (vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, const uint8_t *base, size_t vl) { + return vlseg3e8_v_u8m2(v0, v1, v2, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg4e8_v_u8m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.nxv16i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg4e8_v_u8m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.nxv16i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e8_v_u8m2 (vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vuint8m2_t *v3, const uint8_t *base, size_t vl) { + return vlseg4e8_v_u8m2(v0, v1, v2, v3, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg2e8_v_u8m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv32i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg2e8_v_u8m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv32i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// 
CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e8_v_u8m4 (vuint8m4_t *v0, vuint8m4_t *v1, const uint8_t *base, size_t vl) { + return vlseg2e8_v_u8m4(v0, v1, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg2e16_v_u16mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv1i16.i32(i16* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg2e16_v_u16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv1i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e16_v_u16mf4 (vuint16mf4_t *v0, vuint16mf4_t *v1, const uint16_t *base, size_t vl) { + return vlseg2e16_v_u16mf4(v0, v1, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg3e16_v_u16mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.nxv1i16.i32(i16* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg3e16_v_u16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.nxv1i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e16_v_u16mf4 (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, const uint16_t *base, size_t vl) { + return vlseg3e16_v_u16mf4(v0, v1, v2, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg4e16_v_u16mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.nxv1i16.i32(i16* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// 
CHECK-RV64-LABEL: @test_vlseg4e16_v_u16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.nxv1i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e16_v_u16mf4 (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, const uint16_t *base, size_t vl) { + return vlseg4e16_v_u16mf4(v0, v1, v2, v3, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg5e16_v_u16mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.nxv1i16.i32(i16* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg5e16_v_u16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.nxv1i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg5e16_v_u16mf4 (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, const uint16_t *base, size_t vl) { + return vlseg5e16_v_u16mf4(v0, v1, v2, v3, v4, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg6e16_v_u16mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.nxv1i16.i32(i16* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// 
CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg6e16_v_u16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.nxv1i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg6e16_v_u16mf4 (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, const uint16_t *base, size_t vl) { + return vlseg6e16_v_u16mf4(v0, v1, v2, v3, v4, v5, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg7e16_v_u16mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.nxv1i16.i32(i16* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg7e16_v_u16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.nxv1i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , 
, , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg7e16_v_u16mf4 (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, const uint16_t *base, size_t vl) { + return vlseg7e16_v_u16mf4(v0, v1, v2, v3, v4, v5, v6, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg8e16_v_u16mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.nxv1i16.i32(i16* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg8e16_v_u16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.nxv1i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg8e16_v_u16mf4 (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, 
vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, const uint16_t *base, size_t vl) { + return vlseg8e16_v_u16mf4(v0, v1, v2, v3, v4, v5, v6, v7, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg2e16_v_u16mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv2i16.i32(i16* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg2e16_v_u16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv2i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e16_v_u16mf2 (vuint16mf2_t *v0, vuint16mf2_t *v1, const uint16_t *base, size_t vl) { + return vlseg2e16_v_u16mf2(v0, v1, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg3e16_v_u16mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.nxv2i16.i32(i16* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg3e16_v_u16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.nxv2i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e16_v_u16mf2 (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, const uint16_t *base, size_t vl) { + return vlseg3e16_v_u16mf2(v0, v1, v2, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg4e16_v_u16mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.nxv2i16.i32(i16* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg4e16_v_u16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.nxv2i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e16_v_u16mf2 (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, const uint16_t *base, size_t vl) { + return vlseg4e16_v_u16mf2(v0, v1, v2, v3, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg5e16_v_u16mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.nxv2i16.i32(i16* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg5e16_v_u16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.nxv2i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg5e16_v_u16mf2 (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, const uint16_t *base, size_t vl) { + return vlseg5e16_v_u16mf2(v0, v1, v2, v3, v4, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg6e16_v_u16mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.nxv2i16.i32(i16* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV32-NEXT: store 
[[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg6e16_v_u16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.nxv2i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg6e16_v_u16mf2 (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, const uint16_t *base, size_t vl) { + return vlseg6e16_v_u16mf2(v0, v1, v2, v3, v4, v5, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg7e16_v_u16mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.nxv2i16.i32(i16* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg7e16_v_u16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.nxv2i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// 
CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg7e16_v_u16mf2 (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, const uint16_t *base, size_t vl) { + return vlseg7e16_v_u16mf2(v0, v1, v2, v3, v4, v5, v6, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg8e16_v_u16mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.nxv2i16.i32(i16* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg8e16_v_u16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.nxv2i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg8e16_v_u16mf2 (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, 
const uint16_t *base, size_t vl) { + return vlseg8e16_v_u16mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg2e16_v_u16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv4i16.i32(i16* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg2e16_v_u16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv4i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e16_v_u16m1 (vuint16m1_t *v0, vuint16m1_t *v1, const uint16_t *base, size_t vl) { + return vlseg2e16_v_u16m1(v0, v1, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg3e16_v_u16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.nxv4i16.i32(i16* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg3e16_v_u16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.nxv4i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e16_v_u16m1 (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, const uint16_t *base, size_t vl) { + return vlseg3e16_v_u16m1(v0, v1, v2, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg4e16_v_u16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.nxv4i16.i32(i16* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg4e16_v_u16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.nxv4i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: 
[[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e16_v_u16m1 (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, const uint16_t *base, size_t vl) { + return vlseg4e16_v_u16m1(v0, v1, v2, v3, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg5e16_v_u16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.nxv4i16.i32(i16* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg5e16_v_u16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.nxv4i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg5e16_v_u16m1 (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, const uint16_t *base, size_t vl) { + return vlseg5e16_v_u16m1(v0, v1, v2, v3, v4, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg6e16_v_u16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.nxv4i16.i32(i16* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV32-NEXT: store 
[[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg6e16_v_u16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.nxv4i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg6e16_v_u16m1 (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, const uint16_t *base, size_t vl) { + return vlseg6e16_v_u16m1(v0, v1, v2, v3, v4, v5, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg7e16_v_u16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.nxv4i16.i32(i16* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg7e16_v_u16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.nxv4i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: 
[[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg7e16_v_u16m1 (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, const uint16_t *base, size_t vl) { + return vlseg7e16_v_u16m1(v0, v1, v2, v3, v4, v5, v6, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg8e16_v_u16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.nxv4i16.i32(i16* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg8e16_v_u16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.nxv4i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg8e16_v_u16m1 (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vuint16m1_t *v7, const uint16_t *base, size_t vl) { + return vlseg8e16_v_u16m1(v0, v1, v2, v3, v4, v5, v6, v7, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg2e16_v_u16m2( +// 
CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x i16>, <vscale x 8 x i16> } @llvm.riscv.vlseg2.nxv8i16.i32(i16* [[BASE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], 0
+// CHECK-RV32-NEXT:    store <vscale x 8 x i16> [[TMP1]], <vscale x 8 x i16>* [[V0:%.*]], align 4
+// CHECK-RV32-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], 1
+// CHECK-RV32-NEXT:    store <vscale x 8 x i16> [[TMP2]], <vscale x 8 x i16>* [[V1:%.*]], align 4
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_vlseg2e16_v_u16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x i16>, <vscale x 8 x i16> } @llvm.riscv.vlseg2.nxv8i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 8 x i16> [[TMP1]], <vscale x 8 x i16>* [[V0:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 8 x i16> [[TMP2]], <vscale x 8 x i16>* [[V1:%.*]], align 8
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg2e16_v_u16m2 (vuint16m2_t *v0, vuint16m2_t *v1, const uint16_t *base, size_t vl) {
+  return vlseg2e16_v_u16m2(v0, v1, base, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlseg3e16_v_u16m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } @llvm.riscv.vlseg3.nxv8i16.i32(i16* [[BASE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], 0
+// CHECK-RV32-NEXT:    store <vscale x 8 x i16> [[TMP1]], <vscale x 8 x i16>* [[V0:%.*]], align 4
+// CHECK-RV32-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], 1
+// CHECK-RV32-NEXT:    store <vscale x 8 x i16> [[TMP2]], <vscale x 8 x i16>* [[V1:%.*]], align 4
+// CHECK-RV32-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], 2
+// CHECK-RV32-NEXT:    store <vscale x 8 x i16> [[TMP3]], <vscale x 8 x i16>* [[V2:%.*]], align 4
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_vlseg3e16_v_u16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } @llvm.riscv.vlseg3.nxv8i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 8 x i16> [[TMP1]], <vscale x 8 x i16>* [[V0:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 8 x i16> [[TMP2]], <vscale x 8 x i16>* [[V1:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 8 x i16> [[TMP3]], <vscale x 8 x i16>* [[V2:%.*]], align 8
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg3e16_v_u16m2 (vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, const uint16_t *base, size_t vl) {
+  return vlseg3e16_v_u16m2(v0, v1, v2, base, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlseg4e16_v_u16m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } @llvm.riscv.vlseg4.nxv8i16.i32(i16* [[BASE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], 0
+// CHECK-RV32-NEXT:    store <vscale x 8 x i16> [[TMP1]], <vscale x 8 x i16>* [[V0:%.*]], align 4
+// CHECK-RV32-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], 1
+// CHECK-RV32-NEXT:    store <vscale x 8 x i16> [[TMP2]], <vscale x 8 x i16>* [[V1:%.*]], align 4
+// CHECK-RV32-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], 2
+// CHECK-RV32-NEXT:    store <vscale x 8 x i16> [[TMP3]], <vscale x 8 x i16>* [[V2:%.*]], align 4
+// CHECK-RV32-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], 3
+// CHECK-RV32-NEXT:    store <vscale x 8 x i16> [[TMP4]], <vscale x 8 x i16>* [[V3:%.*]], align 4
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_vlseg4e16_v_u16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } @llvm.riscv.vlseg4.nxv8i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 8 x i16> [[TMP1]], <vscale x 8 x i16>* [[V0:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x i16>, 
, , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e16_v_u16m2 (vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vuint16m2_t *v3, const uint16_t *base, size_t vl) { + return vlseg4e16_v_u16m2(v0, v1, v2, v3, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg2e16_v_u16m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv16i16.i32(i16* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg2e16_v_u16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv16i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e16_v_u16m4 (vuint16m4_t *v0, vuint16m4_t *v1, const uint16_t *base, size_t vl) { + return vlseg2e16_v_u16m4(v0, v1, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg2e32_v_u32mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv1i32.i32(i32* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg2e32_v_u32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv1i32.i64(i32* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e32_v_u32mf2 (vuint32mf2_t *v0, vuint32mf2_t *v1, const uint32_t *base, size_t vl) { + return vlseg2e32_v_u32mf2(v0, v1, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg3e32_v_u32mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.nxv1i32.i32(i32* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg3e32_v_u32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.nxv1i32.i64(i32* [[BASE:%.*]], i64 
[[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e32_v_u32mf2 (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, const uint32_t *base, size_t vl) { + return vlseg3e32_v_u32mf2(v0, v1, v2, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg4e32_v_u32mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.nxv1i32.i32(i32* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg4e32_v_u32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.nxv1i32.i64(i32* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e32_v_u32mf2 (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, const uint32_t *base, size_t vl) { + return vlseg4e32_v_u32mf2(v0, v1, v2, v3, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg5e32_v_u32mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.nxv1i32.i32(i32* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg5e32_v_u32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.nxv1i32.i64(i32* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = 
extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg5e32_v_u32mf2 (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, const uint32_t *base, size_t vl) { + return vlseg5e32_v_u32mf2(v0, v1, v2, v3, v4, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg6e32_v_u32mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.nxv1i32.i32(i32* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg6e32_v_u32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.nxv1i32.i64(i32* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg6e32_v_u32mf2 (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, const uint32_t *base, size_t vl) { + return vlseg6e32_v_u32mf2(v0, v1, v2, v3, v4, v5, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg7e32_v_u32mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.nxv1i32.i32(i32* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: 
[[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg7e32_v_u32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.nxv1i32.i64(i32* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg7e32_v_u32mf2 (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, const uint32_t *base, size_t vl) { + return vlseg7e32_v_u32mf2(v0, v1, v2, v3, v4, v5, v6, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg8e32_v_u32mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.nxv1i32.i32(i32* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg8e32_v_u32mf2( +// CHECK-RV64-NEXT: 
entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.nxv1i32.i64(i32* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg8e32_v_u32mf2 (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, const uint32_t *base, size_t vl) { + return vlseg8e32_v_u32mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg2e32_v_u32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv2i32.i32(i32* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg2e32_v_u32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv2i32.i64(i32* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e32_v_u32m1 (vuint32m1_t *v0, vuint32m1_t *v1, const uint32_t *base, size_t vl) { + return vlseg2e32_v_u32m1(v0, v1, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg3e32_v_u32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.nxv2i32.i32(i32* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg3e32_v_u32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.nxv2i32.i64(i32* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store 
[[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e32_v_u32m1 (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, const uint32_t *base, size_t vl) { + return vlseg3e32_v_u32m1(v0, v1, v2, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg4e32_v_u32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.nxv2i32.i32(i32* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg4e32_v_u32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.nxv2i32.i64(i32* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e32_v_u32m1 (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, const uint32_t *base, size_t vl) { + return vlseg4e32_v_u32m1(v0, v1, v2, v3, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg5e32_v_u32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.nxv2i32.i32(i32* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg5e32_v_u32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.nxv2i32.i64(i32* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: 
[[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg5e32_v_u32m1 (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, const uint32_t *base, size_t vl) { + return vlseg5e32_v_u32m1(v0, v1, v2, v3, v4, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg6e32_v_u32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.nxv2i32.i32(i32* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg6e32_v_u32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.nxv2i32.i64(i32* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg6e32_v_u32m1 (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, const uint32_t *base, size_t vl) { + return vlseg6e32_v_u32m1(v0, v1, v2, v3, v4, v5, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg7e32_v_u32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.nxv2i32.i32(i32* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: 
[[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg7e32_v_u32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.nxv2i32.i64(i32* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg7e32_v_u32m1 (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, const uint32_t *base, size_t vl) { + return vlseg7e32_v_u32m1(v0, v1, v2, v3, v4, v5, v6, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg8e32_v_u32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.nxv2i32.i32(i32* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg8e32_v_u32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.nxv2i32.i64(i32* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: 
[[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg8e32_v_u32m1 (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vuint32m1_t *v7, const uint32_t *base, size_t vl) { + return vlseg8e32_v_u32m1(v0, v1, v2, v3, v4, v5, v6, v7, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg2e32_v_u32m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv4i32.i32(i32* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg2e32_v_u32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv4i32.i64(i32* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e32_v_u32m2 (vuint32m2_t *v0, vuint32m2_t *v1, const uint32_t *base, size_t vl) { + return vlseg2e32_v_u32m2(v0, v1, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg3e32_v_u32m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.nxv4i32.i32(i32* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg3e32_v_u32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.nxv4i32.i64(i32* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// 
CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e32_v_u32m2 (vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, const uint32_t *base, size_t vl) { + return vlseg3e32_v_u32m2(v0, v1, v2, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg4e32_v_u32m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.nxv4i32.i32(i32* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg4e32_v_u32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.nxv4i32.i64(i32* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e32_v_u32m2 (vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vuint32m2_t *v3, const uint32_t *base, size_t vl) { + return vlseg4e32_v_u32m2(v0, v1, v2, v3, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg2e32_v_u32m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv8i32.i32(i32* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg2e32_v_u32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv8i32.i64(i32* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e32_v_u32m4 (vuint32m4_t *v0, vuint32m4_t *v1, const uint32_t *base, size_t vl) { + return vlseg2e32_v_u32m4(v0, v1, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg2e64_v_u64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv1i64.i32(i64* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// 
CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg2e64_v_u64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv1i64.i64(i64* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e64_v_u64m1 (vuint64m1_t *v0, vuint64m1_t *v1, const uint64_t *base, size_t vl) { + return vlseg2e64_v_u64m1(v0, v1, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg3e64_v_u64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.nxv1i64.i32(i64* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg3e64_v_u64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.nxv1i64.i64(i64* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e64_v_u64m1 (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, const uint64_t *base, size_t vl) { + return vlseg3e64_v_u64m1(v0, v1, v2, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg4e64_v_u64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.nxv1i64.i32(i64* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg4e64_v_u64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.nxv1i64.i64(i64* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void 
test_vlseg4e64_v_u64m1 (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, const uint64_t *base, size_t vl) { + return vlseg4e64_v_u64m1(v0, v1, v2, v3, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg5e64_v_u64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.nxv1i64.i32(i64* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg5e64_v_u64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.nxv1i64.i64(i64* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg5e64_v_u64m1 (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, const uint64_t *base, size_t vl) { + return vlseg5e64_v_u64m1(v0, v1, v2, v3, v4, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg6e64_v_u64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.nxv1i64.i32(i64* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg6e64_v_u64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.nxv1i64.i64(i64* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// 
CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg6e64_v_u64m1 (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, const uint64_t *base, size_t vl) { + return vlseg6e64_v_u64m1(v0, v1, v2, v3, v4, v5, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg7e64_v_u64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.nxv1i64.i32(i64* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg7e64_v_u64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.nxv1i64.i64(i64* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg7e64_v_u64m1 (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, const uint64_t *base, size_t vl) { + return vlseg7e64_v_u64m1(v0, v1, v2, v3, v4, v5, v6, base, vl); +} 
+ +// CHECK-RV32-LABEL: @test_vlseg8e64_v_u64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.nxv1i64.i32(i64* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg8e64_v_u64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.nxv1i64.i64(i64* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg8e64_v_u64m1 (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vuint64m1_t *v7, const uint64_t *base, size_t vl) { + return vlseg8e64_v_u64m1(v0, v1, v2, v3, v4, v5, v6, v7, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg2e64_v_u64m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv2i64.i32(i64* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg2e64_v_u64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } 
@llvm.riscv.vlseg2.nxv2i64.i64(i64* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x i64> [[TMP1]], <vscale x 2 x i64>* [[V0:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x i64> [[TMP2]], <vscale x 2 x i64>* [[V1:%.*]], align 8
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg2e64_v_u64m2 (vuint64m2_t *v0, vuint64m2_t *v1, const uint64_t *base, size_t vl) {
+  return vlseg2e64_v_u64m2(v0, v1, base, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlseg3e64_v_u64m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } @llvm.riscv.vlseg3.nxv2i64.i32(i64* [[BASE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP0]], 0
+// CHECK-RV32-NEXT:    store <vscale x 2 x i64> [[TMP1]], <vscale x 2 x i64>* [[V0:%.*]], align 4
+// CHECK-RV32-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP0]], 1
+// CHECK-RV32-NEXT:    store <vscale x 2 x i64> [[TMP2]], <vscale x 2 x i64>* [[V1:%.*]], align 4
+// CHECK-RV32-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP0]], 2
+// CHECK-RV32-NEXT:    store <vscale x 2 x i64> [[TMP3]], <vscale x 2 x i64>* [[V2:%.*]], align 4
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_vlseg3e64_v_u64m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } @llvm.riscv.vlseg3.nxv2i64.i64(i64* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x i64> [[TMP1]], <vscale x 2 x i64>* [[V0:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x i64> [[TMP2]], <vscale x 2 x i64>* [[V1:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x i64> [[TMP3]], <vscale x 2 x i64>* [[V2:%.*]], align 8
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg3e64_v_u64m2 (vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, const uint64_t *base, size_t vl) {
+  return vlseg3e64_v_u64m2(v0, v1, v2, base, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlseg4e64_v_u64m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } @llvm.riscv.vlseg4.nxv2i64.i32(i64* [[BASE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP0]], 0
+// CHECK-RV32-NEXT:    store <vscale x 2 x i64> [[TMP1]], <vscale x 2 x i64>* [[V0:%.*]], align 4
+// CHECK-RV32-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP0]], 1
+// CHECK-RV32-NEXT:    store <vscale x 2 x i64> [[TMP2]], <vscale x 2 x i64>* [[V1:%.*]], align 4
+// CHECK-RV32-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP0]], 2
+// CHECK-RV32-NEXT:    store <vscale x 2 x i64> [[TMP3]], <vscale x 2 x i64>* [[V2:%.*]], align 4
+// CHECK-RV32-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP0]], 3
+// CHECK-RV32-NEXT:    store <vscale x 2 x i64> [[TMP4]], <vscale x 2 x i64>* [[V3:%.*]], align 4
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_vlseg4e64_v_u64m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } @llvm.riscv.vlseg4.nxv2i64.i64(i64* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x i64> [[TMP1]], <vscale x 2 x i64>* [[V0:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x i64> [[TMP2]], <vscale x 2 x i64>* [[V1:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x i64> [[TMP3]], <vscale x 2 x i64>* [[V2:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 2 x i64> [[TMP4]], <vscale x 2 x i64>* [[V3:%.*]], align 8
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg4e64_v_u64m2 (vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vuint64m2_t *v3, const uint64_t *base, size_t vl) {
+  return vlseg4e64_v_u64m2(v0, v1, v2, v3, base, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlseg2e64_v_u64m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i64>, <vscale x 4 x i64> } @llvm.riscv.vlseg2.nxv4i64.i32(i64* [[BASE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i64>, <vscale x 4 x i64> } [[TMP0]], 0
+// CHECK-RV32-NEXT:    store <vscale x 4 x i64> [[TMP1]], <vscale x 4 x i64>* [[V0:%.*]], align 4
+// CHECK-RV32-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i64>, <vscale x 4 x i64> } [[TMP0]], 1
+// CHECK-RV32-NEXT:    store <vscale x 4 x i64> [[TMP2]], <vscale x 4 x i64>* [[V1:%.*]], align 4
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_vlseg2e64_v_u64m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i64>, <vscale x 4 x i64> } @llvm.riscv.vlseg2.nxv4i64.i64(i64* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i64>, <vscale x 4 x i64> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x i64> [[TMP1]], <vscale x 4 x i64>* [[V0:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i64>, <vscale x 4 x i64> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x i64> [[TMP2]], <vscale x 4 x i64>* [[V1:%.*]], align 8
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg2e64_v_u64m4 (vuint64m4_t *v0, vuint64m4_t *v1, const uint64_t *base, size_t vl) {
+  return vlseg2e64_v_u64m4(v0, v1, base, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlseg2e32_v_f32mf2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x float>, <vscale x 1 x float> } @llvm.riscv.vlseg2.nxv1f32.i32(float* [[BASE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 0
+// CHECK-RV32-NEXT:    store <vscale x 1 x float> [[TMP1]], <vscale x 1 x float>* [[V0:%.*]], align 4
+// CHECK-RV32-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 1
+// CHECK-RV32-NEXT:    store <vscale x 1 x float> [[TMP2]], <vscale x 1 x float>* [[V1:%.*]], align 4
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_vlseg2e32_v_f32mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x float>, <vscale x 1 x float> } @llvm.riscv.vlseg2.nxv1f32.i64(float* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP1]], <vscale x 1 x float>* [[V0:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP2]], <vscale x 1 x float>* [[V1:%.*]], align 8
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg2e32_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, const float *base, size_t vl) {
+  return vlseg2e32_v_f32mf2(v0, v1, base, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlseg3e32_v_f32mf2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } @llvm.riscv.vlseg3.nxv1f32.i32(float* [[BASE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 0
+// CHECK-RV32-NEXT:    store <vscale x 1 x float> [[TMP1]], <vscale x 1 x float>* [[V0:%.*]], align 4
+// CHECK-RV32-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 1
+// CHECK-RV32-NEXT:    store <vscale x 1 x float> [[TMP2]], <vscale x 1 x float>* [[V1:%.*]], align 4
+// CHECK-RV32-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 2
+// CHECK-RV32-NEXT:    store <vscale x 1 x float> [[TMP3]], <vscale x 1 x float>* [[V2:%.*]], align 4
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_vlseg3e32_v_f32mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } @llvm.riscv.vlseg3.nxv1f32.i64(float* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP1]], <vscale x 1 x float>* [[V0:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP2]], <vscale x 1 x float>* [[V1:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP3]], <vscale x 1 x float>* [[V2:%.*]], align 8
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg3e32_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, const float *base, 
size_t vl) { + return vlseg3e32_v_f32mf2(v0, v1, v2, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg4e32_v_f32mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.nxv1f32.i32(float* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg4e32_v_f32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.nxv1f32.i64(float* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e32_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, const float *base, size_t vl) { + return vlseg4e32_v_f32mf2(v0, v1, v2, v3, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg5e32_v_f32mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.nxv1f32.i32(float* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg5e32_v_f32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.nxv1f32.i64(float* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void 
test_vlseg5e32_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, const float *base, size_t vl) { + return vlseg5e32_v_f32mf2(v0, v1, v2, v3, v4, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg6e32_v_f32mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.nxv1f32.i32(float* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg6e32_v_f32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.nxv1f32.i64(float* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg6e32_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, const float *base, size_t vl) { + return vlseg6e32_v_f32mf2(v0, v1, v2, v3, v4, v5, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg7e32_v_f32mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.nxv1f32.i32(float* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 
+// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg7e32_v_f32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.nxv1f32.i64(float* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg7e32_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, const float *base, size_t vl) { + return vlseg7e32_v_f32mf2(v0, v1, v2, v3, v4, v5, v6, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg8e32_v_f32mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.nxv1f32.i32(float* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg8e32_v_f32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.nxv1f32.i64(float* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store 
[[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg8e32_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, const float *base, size_t vl) { + return vlseg8e32_v_f32mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg2e32_v_f32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv2f32.i32(float* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg2e32_v_f32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv2f32.i64(float* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e32_v_f32m1 (vfloat32m1_t *v0, vfloat32m1_t *v1, const float *base, size_t vl) { + return vlseg2e32_v_f32m1(v0, v1, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg3e32_v_f32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.nxv2f32.i32(float* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg3e32_v_f32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.nxv2f32.i64(float* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e32_v_f32m1 (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, const float *base, size_t vl) { + return vlseg3e32_v_f32m1(v0, v1, v2, base, vl); +} + +// 
CHECK-RV32-LABEL: @test_vlseg4e32_v_f32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.nxv2f32.i32(float* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg4e32_v_f32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.nxv2f32.i64(float* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e32_v_f32m1 (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, const float *base, size_t vl) { + return vlseg4e32_v_f32m1(v0, v1, v2, v3, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg5e32_v_f32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.nxv2f32.i32(float* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg5e32_v_f32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.nxv2f32.i64(float* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg5e32_v_f32m1 (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, 
vfloat32m1_t *v3, vfloat32m1_t *v4, const float *base, size_t vl) { + return vlseg5e32_v_f32m1(v0, v1, v2, v3, v4, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg6e32_v_f32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.nxv2f32.i32(float* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg6e32_v_f32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.nxv2f32.i64(float* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg6e32_v_f32m1 (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, const float *base, size_t vl) { + return vlseg6e32_v_f32m1(v0, v1, v2, v3, v4, v5, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg7e32_v_f32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.nxv2f32.i32(float* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV32-NEXT: 
store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg7e32_v_f32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.nxv2f32.i64(float* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg7e32_v_f32m1 (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, const float *base, size_t vl) { + return vlseg7e32_v_f32m1(v0, v1, v2, v3, v4, v5, v6, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg8e32_v_f32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.nxv2f32.i32(float* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg8e32_v_f32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.nxv2f32.i64(float* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg8e32_v_f32m1 (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t *v7, const float *base, size_t vl) { + return vlseg8e32_v_f32m1(v0, v1, v2, v3, v4, v5, v6, v7, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg2e32_v_f32m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv4f32.i32(float* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg2e32_v_f32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv4f32.i64(float* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e32_v_f32m2 (vfloat32m2_t *v0, vfloat32m2_t *v1, const float *base, size_t vl) { + return vlseg2e32_v_f32m2(v0, v1, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg3e32_v_f32m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.nxv4f32.i32(float* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg3e32_v_f32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.nxv4f32.i64(float* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e32_v_f32m2 (vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, const float *base, size_t vl) { + return vlseg3e32_v_f32m2(v0, v1, v2, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg4e32_v_f32m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } 
@llvm.riscv.vlseg4.nxv4f32.i32(float* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg4e32_v_f32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.nxv4f32.i64(float* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e32_v_f32m2 (vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t *v3, const float *base, size_t vl) { + return vlseg4e32_v_f32m2(v0, v1, v2, v3, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg2e32_v_f32m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv8f32.i32(float* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg2e32_v_f32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv8f32.i64(float* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e32_v_f32m4 (vfloat32m4_t *v0, vfloat32m4_t *v1, const float *base, size_t vl) { + return vlseg2e32_v_f32m4(v0, v1, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg2e64_v_f64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv1f64.i32(double* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg2e64_v_f64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv1f64.i64(double* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store 
[[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e64_v_f64m1 (vfloat64m1_t *v0, vfloat64m1_t *v1, const double *base, size_t vl) { + return vlseg2e64_v_f64m1(v0, v1, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg3e64_v_f64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.nxv1f64.i32(double* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg3e64_v_f64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.nxv1f64.i64(double* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e64_v_f64m1 (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, const double *base, size_t vl) { + return vlseg3e64_v_f64m1(v0, v1, v2, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg4e64_v_f64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.nxv1f64.i32(double* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg4e64_v_f64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.nxv1f64.i64(double* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e64_v_f64m1 (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, const double *base, size_t vl) { + return vlseg4e64_v_f64m1(v0, v1, v2, v3, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg5e64_v_f64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.nxv1f64.i32(double* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , 
, , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg5e64_v_f64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.nxv1f64.i64(double* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg5e64_v_f64m1 (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, const double *base, size_t vl) { + return vlseg5e64_v_f64m1(v0, v1, v2, v3, v4, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg6e64_v_f64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.nxv1f64.i32(double* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg6e64_v_f64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.nxv1f64.i64(double* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = 
extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg6e64_v_f64m1 (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, const double *base, size_t vl) { + return vlseg6e64_v_f64m1(v0, v1, v2, v3, v4, v5, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg7e64_v_f64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.nxv1f64.i32(double* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg7e64_v_f64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.nxv1f64.i64(double* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg7e64_v_f64m1 (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, const double *base, size_t vl) { + return vlseg7e64_v_f64m1(v0, v1, v2, v3, v4, v5, v6, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg8e64_v_f64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.nxv1f64.i32(double* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } 
[[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg8e64_v_f64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.nxv1f64.i64(double* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg8e64_v_f64m1 (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t *v7, const double *base, size_t vl) { + return vlseg8e64_v_f64m1(v0, v1, v2, v3, v4, v5, v6, v7, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg2e64_v_f64m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv2f64.i32(double* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg2e64_v_f64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv2f64.i64(double* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e64_v_f64m2 (vfloat64m2_t 
*v0, vfloat64m2_t *v1, const double *base, size_t vl) { + return vlseg2e64_v_f64m2(v0, v1, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg3e64_v_f64m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.nxv2f64.i32(double* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg3e64_v_f64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.nxv2f64.i64(double* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e64_v_f64m2 (vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, const double *base, size_t vl) { + return vlseg3e64_v_f64m2(v0, v1, v2, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg4e64_v_f64m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.nxv2f64.i32(double* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg4e64_v_f64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.nxv2f64.i64(double* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e64_v_f64m2 (vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t *v3, const double *base, size_t vl) { + return vlseg4e64_v_f64m2(v0, v1, v2, v3, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg2e64_v_f64m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv4f64.i32(double* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = 
extractvalue { <vscale x 4 x double>, <vscale x 4 x double> } [[TMP0]], 1
+// CHECK-RV32-NEXT: store <vscale x 4 x double> [[TMP2]], <vscale x 4 x double>* [[V1:%.*]], align 4
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vlseg2e64_v_f64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x double>, <vscale x 4 x double> } @llvm.riscv.vlseg2.nxv4f64.i64(double* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 4 x double>, <vscale x 4 x double> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 4 x double> [[TMP1]], <vscale x 4 x double>* [[V0:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x double>, <vscale x 4 x double> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 4 x double> [[TMP2]], <vscale x 4 x double>* [[V1:%.*]], align 8
+// CHECK-RV64-NEXT: ret void
+//
+void test_vlseg2e64_v_f64m4 (vfloat64m4_t *v0, vfloat64m4_t *v1, const double *base, size_t vl) {
+  return vlseg2e64_v_f64m4(v0, v1, base, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlseg2e8_v_i8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vlseg2.mask.nxv1i8.i32(<vscale x 1 x i8> [[MASKEDOFF0:%.*]], <vscale x 1 x i8> [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 0
+// CHECK-RV32-NEXT: store <vscale x 1 x i8> [[TMP1]], <vscale x 1 x i8>* [[V0:%.*]], align 4
+// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 1
+// CHECK-RV32-NEXT: store <vscale x 1 x i8> [[TMP2]], <vscale x 1 x i8>* [[V1:%.*]], align 4
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vlseg2e8_v_i8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vlseg2.mask.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF0:%.*]], <vscale x 1 x i8> [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP1]], <vscale x 1 x i8>* [[V0:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP2]], <vscale x 1 x i8>* [[V1:%.*]], align 8
+// CHECK-RV64-NEXT: ret void
+//
+void test_vlseg2e8_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, const int8_t *base, size_t vl) {
+  return vlseg2e8_v_i8mf8_m(v0, v1, mask, maskedoff0, maskedoff1, base, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlseg3e8_v_i8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vlseg3.mask.nxv1i8.i32(<vscale x 1 x i8> [[MASKEDOFF0:%.*]], <vscale x 1 x i8> [[MASKEDOFF1:%.*]], <vscale x 1 x i8> [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 0
+// CHECK-RV32-NEXT: store <vscale x 1 x i8> [[TMP1]], <vscale x 1 x i8>* [[V0:%.*]], align 4
+// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 1
+// CHECK-RV32-NEXT: store <vscale x 1 x i8> [[TMP2]], <vscale x 1 x i8>* [[V1:%.*]], align 4
+// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 2
+// CHECK-RV32-NEXT: store <vscale x 1 x i8> [[TMP3]], <vscale x 1 x i8>* [[V2:%.*]], align 4
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vlseg3e8_v_i8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vlseg3.mask.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF0:%.*]], <vscale x 1 x i8> [[MASKEDOFF1:%.*]], <vscale x 1 x i8> [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP1]], <vscale x 1 x i8>* [[V0:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP2]], <vscale x 1 x i8>* [[V1:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP3]], <vscale x 1 x i8>* [[V2:%.*]], align 8
+// CHECK-RV64-NEXT: ret void
+//
+void test_vlseg3e8_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vbool64_t mask, vint8mf8_t maskedoff0,
vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, const int8_t *base, size_t vl) { + return vlseg3e8_v_i8mf8_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg4e8_v_i8mf8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv1i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg4e8_v_i8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e8_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, const int8_t *base, size_t vl) { + return vlseg4e8_v_i8mf8_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg5e8_v_i8mf8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv1i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg5e8_v_i8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// 
CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg5e8_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, const int8_t *base, size_t vl) { + return vlseg5e8_v_i8mf8_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg6e8_v_i8mf8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv1i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg6e8_v_i8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg6e8_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, 
vint8mf8_t *v5, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, const int8_t *base, size_t vl) { + return vlseg6e8_v_i8mf8_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg7e8_v_i8mf8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv1i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg7e8_v_i8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg7e8_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, const int8_t *base, size_t vl) { + return vlseg7e8_v_i8mf8_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); +} + +// 
CHECK-RV32-LABEL: @test_vlseg8e8_v_i8mf8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv1i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg8e8_v_i8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg8e8_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t *v7, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, vint8mf8_t maskedoff7, const int8_t *base, size_t vl) { + return vlseg8e8_v_i8mf8_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, 
maskedoff5, maskedoff6, maskedoff7, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg2e8_v_i8mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv2i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg2e8_v_i8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e8_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, const int8_t *base, size_t vl) { + return vlseg2e8_v_i8mf4_m(v0, v1, mask, maskedoff0, maskedoff1, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg3e8_v_i8mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv2i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg3e8_v_i8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e8_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, const int8_t *base, size_t vl) { + return vlseg3e8_v_i8mf4_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg4e8_v_i8mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv2i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store 
[[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg4e8_v_i8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e8_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, const int8_t *base, size_t vl) { + return vlseg4e8_v_i8mf4_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg5e8_v_i8mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv2i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg5e8_v_i8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void 
test_vlseg5e8_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, const int8_t *base, size_t vl) { + return vlseg5e8_v_i8mf4_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg6e8_v_i8mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv2i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg6e8_v_i8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg6e8_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, const int8_t *base, size_t vl) { + return vlseg6e8_v_i8mf4_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg7e8_v_i8mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv2i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 
[[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg7e8_v_i8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg7e8_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, const int8_t *base, size_t vl) { + return vlseg7e8_v_i8mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg8e8_v_i8mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv2i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = 
extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg8e8_v_i8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg8e8_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t *v7, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, vint8mf4_t maskedoff7, const int8_t *base, size_t vl) { + return vlseg8e8_v_i8mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg2e8_v_i8mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv4i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg2e8_v_i8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e8_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, const int8_t *base, size_t vl) { + return vlseg2e8_v_i8mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg3e8_v_i8mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv4i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg3e8_v_i8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e8_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, const int8_t *base, size_t vl) { + return vlseg3e8_v_i8mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg4e8_v_i8mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv4i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg4e8_v_i8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e8_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, const int8_t *base, size_t vl) { + return vlseg4e8_v_i8mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg5e8_v_i8mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv4i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg5e8_v_i8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg5e8_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, const int8_t *base, size_t vl) { + return vlseg5e8_v_i8mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg6e8_v_i8mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv4i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* 
[[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg6e8_v_i8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg6e8_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, const int8_t *base, size_t vl) { + return vlseg6e8_v_i8mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg7e8_v_i8mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv4i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 
+// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg7e8_v_i8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg7e8_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, const int8_t *base, size_t vl) { + return vlseg7e8_v_i8mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg8e8_v_i8mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv4i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP8:%.*]] = 
extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg8e8_v_i8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg8e8_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t *v7, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, vint8mf2_t maskedoff7, const int8_t *base, size_t vl) { + return vlseg8e8_v_i8mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg2e8_v_i8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv8i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg2e8_v_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e8_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, const int8_t *base, size_t vl) { + return vlseg2e8_v_i8m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, vl); +} + +// CHECK-RV32-LABEL: 
@test_vlseg3e8_v_i8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv8i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg3e8_v_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e8_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, const int8_t *base, size_t vl) { + return vlseg3e8_v_i8m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg4e8_v_i8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv8i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg4e8_v_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e8_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, const int8_t *base, size_t vl) { + 
return vlseg4e8_v_i8m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg5e8_v_i8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv8i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg5e8_v_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg5e8_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, const int8_t *base, size_t vl) { + return vlseg5e8_v_i8m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg6e8_v_i8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv8i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = 
extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg6e8_v_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg6e8_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, const int8_t *base, size_t vl) { + return vlseg6e8_v_i8m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg7e8_v_i8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv8i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg7e8_v_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store 
[[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg7e8_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, const int8_t *base, size_t vl) { + return vlseg7e8_v_i8m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg8e8_v_i8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv8i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg8e8_v_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// 
CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg8e8_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vint8m1_t *v7, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, vint8m1_t maskedoff7, const int8_t *base, size_t vl) { + return vlseg8e8_v_i8m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg2e8_v_i8m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv16i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg2e8_v_i8m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv16i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e8_v_i8m2_m (vint8m2_t *v0, vint8m2_t *v1, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, const int8_t *base, size_t vl) { + return vlseg2e8_v_i8m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg3e8_v_i8m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv16i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg3e8_v_i8m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } 
@llvm.riscv.vlseg3.mask.nxv16i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e8_v_i8m2_m (vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, const int8_t *base, size_t vl) { + return vlseg3e8_v_i8m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg4e8_v_i8m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv16i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg4e8_v_i8m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv16i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e8_v_i8m2_m (vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vint8m2_t *v3, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, vint8m2_t maskedoff3, const int8_t *base, size_t vl) { + return vlseg4e8_v_i8m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg2e8_v_i8m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv32i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg2e8_v_i8m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } 
@llvm.riscv.vlseg2.mask.nxv32i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e8_v_i8m4_m (vint8m4_t *v0, vint8m4_t *v1, vbool2_t mask, vint8m4_t maskedoff0, vint8m4_t maskedoff1, const int8_t *base, size_t vl) { + return vlseg2e8_v_i8m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg2e16_v_i16mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv1i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg2e16_v_i16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e16_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, const int16_t *base, size_t vl) { + return vlseg2e16_v_i16mf4_m(v0, v1, mask, maskedoff0, maskedoff1, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg3e16_v_i16mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv1i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg3e16_v_i16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e16_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, const int16_t *base, size_t vl) { 
+ return vlseg3e16_v_i16mf4_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg4e16_v_i16mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv1i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg4e16_v_i16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e16_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, const int16_t *base, size_t vl) { + return vlseg4e16_v_i16mf4_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg5e16_v_i16mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv1i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg5e16_v_i16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } 
[[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg5e16_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, const int16_t *base, size_t vl) { + return vlseg5e16_v_i16mf4_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg6e16_v_i16mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv1i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg6e16_v_i16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg6e16_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vbool64_t 
mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, const int16_t *base, size_t vl) { + return vlseg6e16_v_i16mf4_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg7e16_v_i16mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv1i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg7e16_v_i16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg7e16_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, const int16_t *base, size_t vl) { + return vlseg7e16_v_i16mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); 
+} + +// CHECK-RV32-LABEL: @test_vlseg8e16_v_i16mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv1i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg8e16_v_i16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg8e16_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t *v7, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, vint16mf4_t maskedoff7, const int16_t *base, size_t vl) { + return vlseg8e16_v_i16mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, 
maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg2e16_v_i16mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv2i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg2e16_v_i16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e16_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, const int16_t *base, size_t vl) { + return vlseg2e16_v_i16mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg3e16_v_i16mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv2i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg3e16_v_i16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e16_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, const int16_t *base, size_t vl) { + return vlseg3e16_v_i16mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg4e16_v_i16mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv2i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: 
[[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg4e16_v_i16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e16_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, const int16_t *base, size_t vl) { + return vlseg4e16_v_i16mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg5e16_v_i16mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv2i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg5e16_v_i16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// 
CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg5e16_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, const int16_t *base, size_t vl) { + return vlseg5e16_v_i16mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg6e16_v_i16mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv2i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg6e16_v_i16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg6e16_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, const int16_t *base, size_t vl) { + return vlseg6e16_v_i16mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg7e16_v_i16mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv2i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], 
[[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg7e16_v_i16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg7e16_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, const int16_t *base, size_t vl) { + return vlseg7e16_v_i16mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg8e16_v_i16mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv2i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// 
CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg8e16_v_i16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg8e16_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t *v7, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, vint16mf2_t maskedoff7, const int16_t *base, size_t vl) { + return vlseg8e16_v_i16mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg2e16_v_i16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv4i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// 
CHECK-RV32-NEXT:    store <vscale x 4 x i16> [[TMP2]], <vscale x 4 x i16>* [[V1:%.*]], align 4
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_vlseg2e16_v_i16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i16>, <vscale x 4 x i16> } @llvm.riscv.vlseg2.mask.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF0:%.*]], <vscale x 4 x i16> [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP1]], <vscale x 4 x i16>* [[V0:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP2]], <vscale x 4 x i16>* [[V1:%.*]], align 8
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg2e16_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, const int16_t *base, size_t vl) {
+  return vlseg2e16_v_i16m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlseg3e16_v_i16m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } @llvm.riscv.vlseg3.mask.nxv4i16.i32(<vscale x 4 x i16> [[MASKEDOFF0:%.*]], <vscale x 4 x i16> [[MASKEDOFF1:%.*]], <vscale x 4 x i16> [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 0
+// CHECK-RV32-NEXT:    store <vscale x 4 x i16> [[TMP1]], <vscale x 4 x i16>* [[V0:%.*]], align 4
+// CHECK-RV32-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 1
+// CHECK-RV32-NEXT:    store <vscale x 4 x i16> [[TMP2]], <vscale x 4 x i16>* [[V1:%.*]], align 4
+// CHECK-RV32-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 2
+// CHECK-RV32-NEXT:    store <vscale x 4 x i16> [[TMP3]], <vscale x 4 x i16>* [[V2:%.*]], align 4
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_vlseg3e16_v_i16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } @llvm.riscv.vlseg3.mask.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF0:%.*]], <vscale x 4 x i16> [[MASKEDOFF1:%.*]], <vscale x 4 x i16> [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP1]], <vscale x 4 x i16>* [[V0:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP2]], <vscale x 4 x i16>* [[V1:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP3]], <vscale x 4 x i16>* [[V2:%.*]], align 8
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg3e16_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, const int16_t *base, size_t vl) {
+  return vlseg3e16_v_i16m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlseg4e16_v_i16m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } @llvm.riscv.vlseg4.mask.nxv4i16.i32(<vscale x 4 x i16> [[MASKEDOFF0:%.*]], <vscale x 4 x i16> [[MASKEDOFF1:%.*]], <vscale x 4 x i16> [[MASKEDOFF2:%.*]], <vscale x 4 x i16> [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 0
+// CHECK-RV32-NEXT:    store <vscale x 4 x i16> [[TMP1]], <vscale x 4 x i16>* [[V0:%.*]], align 4
+// CHECK-RV32-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 1
+// CHECK-RV32-NEXT:    store <vscale x 4 x i16> [[TMP2]], <vscale x 4 x i16>* [[V1:%.*]], align 4
+// CHECK-RV32-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 2
+// CHECK-RV32-NEXT:    store <vscale x 4 x i16> [[TMP3]], <vscale x 4 x i16>* [[V2:%.*]], align 4
+// CHECK-RV32-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 3
+// CHECK-RV32-NEXT:    store <vscale x 4 x i16> [[TMP4]], <vscale x 4 x i16>* [[V3:%.*]], align 4
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_vlseg4e16_v_i16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> }
@llvm.riscv.vlseg4.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e16_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, const int16_t *base, size_t vl) { + return vlseg4e16_v_i16m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg5e16_v_i16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv4i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg5e16_v_i16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg5e16_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, const int16_t *base, size_t vl) { + return vlseg5e16_v_i16m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg6e16_v_i16m1_m( +// 
CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv4i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg6e16_v_i16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg6e16_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, const int16_t *base, size_t vl) { + return vlseg6e16_v_i16m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg7e16_v_i16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv4i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: 
[[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg7e16_v_i16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg7e16_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, const int16_t *base, size_t vl) { + return vlseg7e16_v_i16m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg8e16_v_i16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv4i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , 
, , } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg8e16_v_i16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg8e16_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vint16m1_t *v7, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, vint16m1_t maskedoff7, const int16_t *base, size_t vl) { + return vlseg8e16_v_i16m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg2e16_v_i16m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv8i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg2e16_v_i16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// 
+void test_vlseg2e16_v_i16m2_m (vint16m2_t *v0, vint16m2_t *v1, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, const int16_t *base, size_t vl) {
+  return vlseg2e16_v_i16m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlseg3e16_v_i16m2_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } @llvm.riscv.vlseg3.mask.nxv8i16.i32(<vscale x 8 x i16> [[MASKEDOFF0:%.*]], <vscale x 8 x i16> [[MASKEDOFF1:%.*]], <vscale x 8 x i16> [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], 0
+// CHECK-RV32-NEXT:    store <vscale x 8 x i16> [[TMP1]], <vscale x 8 x i16>* [[V0:%.*]], align 4
+// CHECK-RV32-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], 1
+// CHECK-RV32-NEXT:    store <vscale x 8 x i16> [[TMP2]], <vscale x 8 x i16>* [[V1:%.*]], align 4
+// CHECK-RV32-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], 2
+// CHECK-RV32-NEXT:    store <vscale x 8 x i16> [[TMP3]], <vscale x 8 x i16>* [[V2:%.*]], align 4
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_vlseg3e16_v_i16m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } @llvm.riscv.vlseg3.mask.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF0:%.*]], <vscale x 8 x i16> [[MASKEDOFF1:%.*]], <vscale x 8 x i16> [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 8 x i16> [[TMP1]], <vscale x 8 x i16>* [[V0:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 8 x i16> [[TMP2]], <vscale x 8 x i16>* [[V1:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 8 x i16> [[TMP3]], <vscale x 8 x i16>* [[V2:%.*]], align 8
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg3e16_v_i16m2_m (vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, const int16_t *base, size_t vl) {
+  return vlseg3e16_v_i16m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlseg4e16_v_i16m2_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } @llvm.riscv.vlseg4.mask.nxv8i16.i32(<vscale x 8 x i16> [[MASKEDOFF0:%.*]], <vscale x 8 x i16> [[MASKEDOFF1:%.*]], <vscale x 8 x i16> [[MASKEDOFF2:%.*]], <vscale x 8 x i16> [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], 0
+// CHECK-RV32-NEXT:    store <vscale x 8 x i16> [[TMP1]], <vscale x 8 x i16>* [[V0:%.*]], align 4
+// CHECK-RV32-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], 1
+// CHECK-RV32-NEXT:    store <vscale x 8 x i16> [[TMP2]], <vscale x 8 x i16>* [[V1:%.*]], align 4
+// CHECK-RV32-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], 2
+// CHECK-RV32-NEXT:    store <vscale x 8 x i16> [[TMP3]], <vscale x 8 x i16>* [[V2:%.*]], align 4
+// CHECK-RV32-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], 3
+// CHECK-RV32-NEXT:    store <vscale x 8 x i16> [[TMP4]], <vscale x 8 x i16>* [[V3:%.*]], align 4
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_vlseg4e16_v_i16m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } @llvm.riscv.vlseg4.mask.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF0:%.*]], <vscale x 8 x i16> [[MASKEDOFF1:%.*]], <vscale x 8 x i16> [[MASKEDOFF2:%.*]], <vscale x 8 x i16> [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 8 x i16> [[TMP1]], <vscale x 8 x i16>* [[V0:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 8 x i16> [[TMP2]], <vscale x 8 x i16>* [[V1:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 8 x i16> [[TMP3]], <vscale x 8 x i16>* [[V2:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 8 x i16> [[TMP4]], <vscale x 8 x i16>* [[V3:%.*]], align 8
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg4e16_v_i16m2_m (vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vint16m2_t *v3, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, vint16m2_t maskedoff3, const int16_t *base, size_t vl) {
+  return vlseg4e16_v_i16m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlseg2e16_v_i16m4_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { <vscale x 16 x i16>, <vscale x 16 x i16> } @llvm.riscv.vlseg2.mask.nxv16i16.i32(<vscale x 16 x i16> [[MASKEDOFF0:%.*]], <vscale x 16 x i16> [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 16 x i16>, <vscale x 16 x i16> } [[TMP0]], 0
+// CHECK-RV32-NEXT:    store <vscale x 16 x i16> [[TMP1]], <vscale x 16 x i16>* [[V0:%.*]], align 4
+// CHECK-RV32-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 16 x i16>, <vscale x 16 x i16> } [[TMP0]], 1
+// CHECK-RV32-NEXT:    store <vscale x 16 x i16> [[TMP2]], <vscale x 16 x i16>* [[V1:%.*]], align 4
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_vlseg2e16_v_i16m4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 16 x i16>, <vscale x 16 x i16> } @llvm.riscv.vlseg2.mask.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF0:%.*]], <vscale x 16 x i16> [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 16 x i16>, <vscale x 16 x i16> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 16 x i16> [[TMP1]], <vscale x 16 x i16>* [[V0:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 16 x i16>, <vscale x 16 x i16> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 16 x i16> [[TMP2]], <vscale x 16 x i16>* [[V1:%.*]], align 8
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg2e16_v_i16m4_m (vint16m4_t *v0, vint16m4_t *v1, vbool4_t mask, vint16m4_t maskedoff0, vint16m4_t maskedoff1, const int16_t *base, size_t vl) {
+  return vlseg2e16_v_i16m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlseg2e32_v_i32mf2_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vlseg2.mask.nxv1i32.i32(<vscale x 1 x i32> [[MASKEDOFF0:%.*]], <vscale x 1 x i32> [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 0
+// CHECK-RV32-NEXT:    store <vscale x 1 x i32> [[TMP1]], <vscale x 1 x i32>* [[V0:%.*]], align 4
+// CHECK-RV32-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 1
+// CHECK-RV32-NEXT:    store <vscale x 1 x i32> [[TMP2]], <vscale x 1 x i32>* [[V1:%.*]], align 4
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_vlseg2e32_v_i32mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vlseg2.mask.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF0:%.*]], <vscale x 1 x i32> [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP1]], <vscale x 1 x i32>* [[V0:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP2]], <vscale x 1 x i32>* [[V1:%.*]], align 8
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg2e32_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, const int32_t *base, size_t vl) {
+  return vlseg2e32_v_i32mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlseg3e32_v_i32mf2_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vlseg3.mask.nxv1i32.i32(<vscale x 1 x i32> [[MASKEDOFF0:%.*]], <vscale x 1 x i32> [[MASKEDOFF1:%.*]], <vscale x 1 x i32> [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 0
+// CHECK-RV32-NEXT:    store <vscale x 1 x i32> [[TMP1]], <vscale x 1 x i32>* [[V0:%.*]], align 4
+// CHECK-RV32-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 1
+// CHECK-RV32-NEXT:    store
[[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg3e32_v_i32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e32_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, const int32_t *base, size_t vl) { + return vlseg3e32_v_i32mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg4e32_v_i32mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv1i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg4e32_v_i32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e32_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, const int32_t *base, size_t vl) { + return vlseg4e32_v_i32mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg5e32_v_i32mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv1i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], 
i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg5e32_v_i32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg5e32_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, const int32_t *base, size_t vl) { + return vlseg5e32_v_i32mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg6e32_v_i32mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv1i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg6e32_v_i32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], 
[[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg6e32_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, const int32_t *base, size_t vl) { + return vlseg6e32_v_i32mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg7e32_v_i32mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv1i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg7e32_v_i32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// 
CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg7e32_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, const int32_t *base, size_t vl) { + return vlseg7e32_v_i32mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg8e32_v_i32mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv1i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg8e32_v_i32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * 
[[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg8e32_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t *v7, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, vint32mf2_t maskedoff7, const int32_t *base, size_t vl) { + return vlseg8e32_v_i32mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg2e32_v_i32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv2i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg2e32_v_i32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e32_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, const int32_t *base, size_t vl) { + return vlseg2e32_v_i32m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg3e32_v_i32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv2i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg3e32_v_i32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// 
CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e32_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, const int32_t *base, size_t vl) { + return vlseg3e32_v_i32m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg4e32_v_i32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv2i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg4e32_v_i32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e32_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, const int32_t *base, size_t vl) { + return vlseg4e32_v_i32m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg5e32_v_i32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv2i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// 
CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg5e32_v_i32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg5e32_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, const int32_t *base, size_t vl) { + return vlseg5e32_v_i32m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg6e32_v_i32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv2i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg6e32_v_i32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , 
, } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg6e32_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, const int32_t *base, size_t vl) { + return vlseg6e32_v_i32m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg7e32_v_i32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv2i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg7e32_v_i32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void 
test_vlseg7e32_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, const int32_t *base, size_t vl) { + return vlseg7e32_v_i32m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg8e32_v_i32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv2i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg8e32_v_i32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// 
+void test_vlseg8e32_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vint32m1_t *v7, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, vint32m1_t maskedoff7, const int32_t *base, size_t vl) {
+  return vlseg8e32_v_i32m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlseg2e32_v_i32m2_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i32>, <vscale x 4 x i32> } @llvm.riscv.vlseg2.mask.nxv4i32.i32(<vscale x 4 x i32> [[MASKEDOFF0:%.*]], <vscale x 4 x i32> [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP0]], 0
+// CHECK-RV32-NEXT:    store <vscale x 4 x i32> [[TMP1]], <vscale x 4 x i32>* [[V0:%.*]], align 4
+// CHECK-RV32-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP0]], 1
+// CHECK-RV32-NEXT:    store <vscale x 4 x i32> [[TMP2]], <vscale x 4 x i32>* [[V1:%.*]], align 4
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_vlseg2e32_v_i32m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i32>, <vscale x 4 x i32> } @llvm.riscv.vlseg2.mask.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF0:%.*]], <vscale x 4 x i32> [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x i32> [[TMP1]], <vscale x 4 x i32>* [[V0:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x i32> [[TMP2]], <vscale x 4 x i32>* [[V1:%.*]], align 8
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg2e32_v_i32m2_m (vint32m2_t *v0, vint32m2_t *v1, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, const int32_t *base, size_t vl) {
+  return vlseg2e32_v_i32m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlseg3e32_v_i32m2_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } @llvm.riscv.vlseg3.mask.nxv4i32.i32(<vscale x 4 x i32> [[MASKEDOFF0:%.*]], <vscale x 4 x i32> [[MASKEDOFF1:%.*]], <vscale x 4 x i32> [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP0]], 0
+// CHECK-RV32-NEXT:    store <vscale x 4 x i32> [[TMP1]], <vscale x 4 x i32>* [[V0:%.*]], align 4
+// CHECK-RV32-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP0]], 1
+// CHECK-RV32-NEXT:    store <vscale x 4 x i32> [[TMP2]], <vscale x 4 x i32>* [[V1:%.*]], align 4
+// CHECK-RV32-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP0]], 2
+// CHECK-RV32-NEXT:    store <vscale x 4 x i32> [[TMP3]], <vscale x 4 x i32>* [[V2:%.*]], align 4
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_vlseg3e32_v_i32m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } @llvm.riscv.vlseg3.mask.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF0:%.*]], <vscale x 4 x i32> [[MASKEDOFF1:%.*]], <vscale x 4 x i32> [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x i32> [[TMP1]], <vscale x 4 x i32>* [[V0:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x i32> [[TMP2]], <vscale x 4 x i32>* [[V1:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 4 x i32> [[TMP3]], <vscale x 4 x i32>* [[V2:%.*]], align 8
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg3e32_v_i32m2_m (vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, const int32_t *base, size_t vl) {
+  return vlseg3e32_v_i32m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2,
base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg4e32_v_i32m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv4i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg4e32_v_i32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e32_v_i32m2_m (vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vint32m2_t *v3, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, vint32m2_t maskedoff3, const int32_t *base, size_t vl) { + return vlseg4e32_v_i32m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg2e32_v_i32m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv8i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg2e32_v_i32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e32_v_i32m4_m (vint32m4_t *v0, vint32m4_t *v1, vbool8_t mask, vint32m4_t maskedoff0, vint32m4_t maskedoff1, const int32_t *base, size_t vl) { + return vlseg2e32_v_i32m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg2e64_v_i64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv1i64.i32( 
[[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg2e64_v_i64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e64_v_i64m1_m (vint64m1_t *v0, vint64m1_t *v1, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, const int64_t *base, size_t vl) { + return vlseg2e64_v_i64m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg3e64_v_i64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv1i64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg3e64_v_i64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e64_v_i64m1_m (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, const int64_t *base, size_t vl) { + return vlseg3e64_v_i64m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg4e64_v_i64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv1i64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = 
extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg4e64_v_i64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e64_v_i64m1_m (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, const int64_t *base, size_t vl) { + return vlseg4e64_v_i64m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg5e64_v_i64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv1i64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg5e64_v_i64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg5e64_v_i64m1_m (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, 
vint64m1_t maskedoff3, vint64m1_t maskedoff4, const int64_t *base, size_t vl) { + return vlseg5e64_v_i64m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg6e64_v_i64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv1i64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg6e64_v_i64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg6e64_v_i64m1_m (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, const int64_t *base, size_t vl) { + return vlseg6e64_v_i64m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg7e64_v_i64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv1i64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: 
[[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg7e64_v_i64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg7e64_v_i64m1_m (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, const int64_t *base, size_t vl) { + return vlseg7e64_v_i64m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg8e64_v_i64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv1i64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , 
} [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg8e64_v_i64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg8e64_v_i64m1_m (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vint64m1_t *v7, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, vint64m1_t maskedoff7, const int64_t *base, size_t vl) { + return vlseg8e64_v_i64m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg2e64_v_i64m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv2i64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg2e64_v_i64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// 
CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x i64> [[TMP1]], <vscale x 2 x i64>* [[V0:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x i64> [[TMP2]], <vscale x 2 x i64>* [[V1:%.*]], align 8
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg2e64_v_i64m2_m (vint64m2_t *v0, vint64m2_t *v1, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, const int64_t *base, size_t vl) {
+  return vlseg2e64_v_i64m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlseg3e64_v_i64m2_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } @llvm.riscv.vlseg3.mask.nxv2i64.i32(<vscale x 2 x i64> [[MASKEDOFF0:%.*]], <vscale x 2 x i64> [[MASKEDOFF1:%.*]], <vscale x 2 x i64> [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP0]], 0
+// CHECK-RV32-NEXT:    store <vscale x 2 x i64> [[TMP1]], <vscale x 2 x i64>* [[V0:%.*]], align 4
+// CHECK-RV32-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP0]], 1
+// CHECK-RV32-NEXT:    store <vscale x 2 x i64> [[TMP2]], <vscale x 2 x i64>* [[V1:%.*]], align 4
+// CHECK-RV32-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP0]], 2
+// CHECK-RV32-NEXT:    store <vscale x 2 x i64> [[TMP3]], <vscale x 2 x i64>* [[V2:%.*]], align 4
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_vlseg3e64_v_i64m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } @llvm.riscv.vlseg3.mask.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF0:%.*]], <vscale x 2 x i64> [[MASKEDOFF1:%.*]], <vscale x 2 x i64> [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x i64> [[TMP1]], <vscale x 2 x i64>* [[V0:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x i64> [[TMP2]], <vscale x 2 x i64>* [[V1:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x i64> [[TMP3]], <vscale x 2 x i64>* [[V2:%.*]], align 8
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg3e64_v_i64m2_m (vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, const int64_t *base, size_t vl) {
+  return vlseg3e64_v_i64m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlseg4e64_v_i64m2_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } @llvm.riscv.vlseg4.mask.nxv2i64.i32(<vscale x 2 x i64> [[MASKEDOFF0:%.*]], <vscale x 2 x i64> [[MASKEDOFF1:%.*]], <vscale x 2 x i64> [[MASKEDOFF2:%.*]], <vscale x 2 x i64> [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP0]], 0
+// CHECK-RV32-NEXT:    store <vscale x 2 x i64> [[TMP1]], <vscale x 2 x i64>* [[V0:%.*]], align 4
+// CHECK-RV32-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP0]], 1
+// CHECK-RV32-NEXT:    store <vscale x 2 x i64> [[TMP2]], <vscale x 2 x i64>* [[V1:%.*]], align 4
+// CHECK-RV32-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP0]], 2
+// CHECK-RV32-NEXT:    store <vscale x 2 x i64> [[TMP3]], <vscale x 2 x i64>* [[V2:%.*]], align 4
+// CHECK-RV32-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP0]], 3
+// CHECK-RV32-NEXT:    store <vscale x 2 x i64> [[TMP4]], <vscale x 2 x i64>* [[V3:%.*]], align 4
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_vlseg4e64_v_i64m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } @llvm.riscv.vlseg4.mask.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF0:%.*]], <vscale x 2 x i64> [[MASKEDOFF1:%.*]], <vscale x 2 x i64> [[MASKEDOFF2:%.*]], <vscale x 2 x i64> [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x i64> [[TMP1]], <vscale x 2 x i64>* [[V0:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP0]], 1
+//
CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e64_v_i64m2_m (vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vint64m2_t *v3, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, vint64m2_t maskedoff3, const int64_t *base, size_t vl) { + return vlseg4e64_v_i64m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg2e64_v_i64m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv4i64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg2e64_v_i64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e64_v_i64m4_m (vint64m4_t *v0, vint64m4_t *v1, vbool16_t mask, vint64m4_t maskedoff0, vint64m4_t maskedoff1, const int64_t *base, size_t vl) { + return vlseg2e64_v_i64m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg2e8_v_u8mf8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv1i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg2e8_v_u8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e8_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, const uint8_t *base, size_t vl) { + return vlseg2e8_v_u8mf8_m(v0, v1, mask, maskedoff0, maskedoff1, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg3e8_v_u8mf8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv1i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* 
[[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg3e8_v_u8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e8_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, const uint8_t *base, size_t vl) { + return vlseg3e8_v_u8mf8_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg4e8_v_u8mf8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv1i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg4e8_v_u8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e8_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, const uint8_t *base, size_t vl) { + return vlseg4e8_v_u8mf8_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg5e8_v_u8mf8_m( +// 
CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv1i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg5e8_v_u8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg5e8_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, const uint8_t *base, size_t vl) { + return vlseg5e8_v_u8mf8_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg6e8_v_u8mf8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv1i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// 
CHECK-RV64-LABEL: @test_vlseg6e8_v_u8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg6e8_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, const uint8_t *base, size_t vl) { + return vlseg6e8_v_u8mf8_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg7e8_v_u8mf8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv1i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg7e8_v_u8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg7e8_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, const uint8_t *base, size_t vl) { + return vlseg7e8_v_u8mf8_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg8e8_v_u8mf8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv1i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg8e8_v_u8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg8e8_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, vuint8mf8_t maskedoff7, const uint8_t *base, size_t vl) { + return vlseg8e8_v_u8mf8_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg2e8_v_u8mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv2i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg2e8_v_u8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e8_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, const uint8_t *base, size_t vl) { + return vlseg2e8_v_u8mf4_m(v0, v1, mask, maskedoff0, maskedoff1, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg3e8_v_u8mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv2i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg3e8_v_u8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], 
[[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e8_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, const uint8_t *base, size_t vl) { + return vlseg3e8_v_u8mf4_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg4e8_v_u8mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv2i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg4e8_v_u8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e8_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, const uint8_t *base, size_t vl) { + return vlseg4e8_v_u8mf4_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg5e8_v_u8mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv2i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * 
[[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg5e8_v_u8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg5e8_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, const uint8_t *base, size_t vl) { + return vlseg5e8_v_u8mf4_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg6e8_v_u8mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv2i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg6e8_v_u8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = 
extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg6e8_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, const uint8_t *base, size_t vl) { + return vlseg6e8_v_u8mf4_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg7e8_v_u8mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv2i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg7e8_v_u8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: 
[[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg7e8_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, const uint8_t *base, size_t vl) { + return vlseg7e8_v_u8mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg8e8_v_u8mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv2i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg8e8_v_u8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// 
CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg8e8_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, vuint8mf4_t maskedoff7, const uint8_t *base, size_t vl) { + return vlseg8e8_v_u8mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg2e8_v_u8mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv4i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg2e8_v_u8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e8_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, const uint8_t *base, size_t vl) { + return vlseg2e8_v_u8mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg3e8_v_u8mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv4i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg3e8_v_u8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e8_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vbool16_t mask, vuint8mf2_t 
maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, const uint8_t *base, size_t vl) { + return vlseg3e8_v_u8mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg4e8_v_u8mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv4i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg4e8_v_u8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e8_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, const uint8_t *base, size_t vl) { + return vlseg4e8_v_u8mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg5e8_v_u8mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv4i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg5e8_v_u8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg5e8_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, const uint8_t *base, size_t vl) { + return vlseg5e8_v_u8mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg6e8_v_u8mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv4i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg6e8_v_u8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg6e8_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, 
vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, const uint8_t *base, size_t vl) { + return vlseg6e8_v_u8mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg7e8_v_u8mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv4i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg7e8_v_u8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg7e8_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, const uint8_t *base, size_t vl) { + return vlseg7e8_v_u8mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, 
maskedoff4, maskedoff5, maskedoff6, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg8e8_v_u8mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv4i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg8e8_v_u8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg8e8_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, vuint8mf2_t maskedoff7, const uint8_t *base, size_t vl) { + return vlseg8e8_v_u8mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, 
mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlseg2e8_v_u8m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x i8>, <vscale x 8 x i8> } @llvm.riscv.vlseg2.mask.nxv8i8.i32(<vscale x 8 x i8> [[MASKEDOFF0:%.*]], <vscale x 8 x i8> [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 0
+// CHECK-RV32-NEXT:    store <vscale x 8 x i8> [[TMP1]], <vscale x 8 x i8>* [[V0:%.*]], align 4
+// CHECK-RV32-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 1
+// CHECK-RV32-NEXT:    store <vscale x 8 x i8> [[TMP2]], <vscale x 8 x i8>* [[V1:%.*]], align 4
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_vlseg2e8_v_u8m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x i8>, <vscale x 8 x i8> } @llvm.riscv.vlseg2.mask.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF0:%.*]], <vscale x 8 x i8> [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP1]], <vscale x 8 x i8>* [[V0:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP2]], <vscale x 8 x i8>* [[V1:%.*]], align 8
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg2e8_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, const uint8_t *base, size_t vl) {
+  return vlseg2e8_v_u8m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlseg3e8_v_u8m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } @llvm.riscv.vlseg3.mask.nxv8i8.i32(<vscale x 8 x i8> [[MASKEDOFF0:%.*]], <vscale x 8 x i8> [[MASKEDOFF1:%.*]], <vscale x 8 x i8> [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 0
+// CHECK-RV32-NEXT:    store <vscale x 8 x i8> [[TMP1]], <vscale x 8 x i8>* [[V0:%.*]], align 4
+// CHECK-RV32-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 1
+// CHECK-RV32-NEXT:    store <vscale x 8 x i8> [[TMP2]], <vscale x 8 x i8>* [[V1:%.*]], align 4
+// CHECK-RV32-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 2
+// CHECK-RV32-NEXT:    store <vscale x 8 x i8> [[TMP3]], <vscale x 8 x i8>* [[V2:%.*]], align 4
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_vlseg3e8_v_u8m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } @llvm.riscv.vlseg3.mask.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF0:%.*]], <vscale x 8 x i8> [[MASKEDOFF1:%.*]], <vscale x 8 x i8> [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP1]], <vscale x 8 x i8>* [[V0:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP2]], <vscale x 8 x i8>* [[V1:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP3]], <vscale x 8 x i8>* [[V2:%.*]], align 8
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg3e8_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, const uint8_t *base, size_t vl) {
+  return vlseg3e8_v_u8m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlseg4e8_v_u8m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } @llvm.riscv.vlseg4.mask.nxv8i8.i32(<vscale x 8 x i8> [[MASKEDOFF0:%.*]], <vscale x 8 x i8> [[MASKEDOFF1:%.*]], <vscale x 8 x i8> [[MASKEDOFF2:%.*]], <vscale x 8 x i8> [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 0
+// CHECK-RV32-NEXT:    store <vscale x 8 x i8> [[TMP1]], <vscale x 8 x i8>* [[V0:%.*]], align 4
+// CHECK-RV32-NEXT:    [[TMP2:%.*]] = extractvalue
{ , , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg4e8_v_u8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e8_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, const uint8_t *base, size_t vl) { + return vlseg4e8_v_u8m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg5e8_v_u8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv8i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg5e8_v_u8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// 
CHECK-RV64-NEXT: ret void +// +void test_vlseg5e8_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, const uint8_t *base, size_t vl) { + return vlseg5e8_v_u8m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg6e8_v_u8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv8i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg6e8_v_u8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg6e8_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, const uint8_t *base, size_t vl) { + return vlseg6e8_v_u8m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg7e8_v_u8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv8i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* 
[[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg7e8_v_u8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg7e8_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, const uint8_t *base, size_t vl) { + return vlseg7e8_v_u8m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg8e8_v_u8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv8i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// 
CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg8e8_v_u8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg8e8_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vuint8m1_t *v7, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, vuint8m1_t maskedoff7, const uint8_t *base, size_t vl) { + return vlseg8e8_v_u8m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg2e8_v_u8m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv16i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg2e8_v_u8m2_m( +// CHECK-RV64-NEXT: entry: +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv16i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e8_v_u8m2_m (vuint8m2_t *v0, vuint8m2_t *v1, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, const uint8_t *base, size_t vl) { + return vlseg2e8_v_u8m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg3e8_v_u8m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv16i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg3e8_v_u8m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv16i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e8_v_u8m2_m (vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, const uint8_t *base, size_t vl) { + return vlseg3e8_v_u8m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg4e8_v_u8m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv16i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg4e8_v_u8m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv16i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } 
[[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e8_v_u8m2_m (vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vuint8m2_t *v3, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, vuint8m2_t maskedoff3, const uint8_t *base, size_t vl) { + return vlseg4e8_v_u8m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg2e8_v_u8m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv32i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg2e8_v_u8m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv32i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e8_v_u8m4_m (vuint8m4_t *v0, vuint8m4_t *v1, vbool2_t mask, vuint8m4_t maskedoff0, vuint8m4_t maskedoff1, const uint8_t *base, size_t vl) { + return vlseg2e8_v_u8m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg2e16_v_u16mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv1i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg2e16_v_u16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e16_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, const uint16_t *base, size_t vl) { + return vlseg2e16_v_u16mf4_m(v0, v1, mask, maskedoff0, maskedoff1, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg3e16_v_u16mf4_m( +// CHECK-RV32-NEXT: entry: 
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv1i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg3e16_v_u16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e16_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, const uint16_t *base, size_t vl) { + return vlseg3e16_v_u16mf4_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg4e16_v_u16mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv1i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg4e16_v_u16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e16_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, const uint16_t 
*base, size_t vl) { + return vlseg4e16_v_u16mf4_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg5e16_v_u16mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv1i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg5e16_v_u16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg5e16_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, const uint16_t *base, size_t vl) { + return vlseg5e16_v_u16mf4_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg6e16_v_u16mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv1i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// 
CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg6e16_v_u16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg6e16_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, const uint16_t *base, size_t vl) { + return vlseg6e16_v_u16mf4_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg7e16_v_u16mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv1i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg7e16_v_u16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* 
[[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg7e16_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, const uint16_t *base, size_t vl) { + return vlseg7e16_v_u16mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg8e16_v_u16mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv1i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg8e16_v_u16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } 
[[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg8e16_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, vuint16mf4_t maskedoff7, const uint16_t *base, size_t vl) { + return vlseg8e16_v_u16mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg2e16_v_u16mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv2i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg2e16_v_u16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e16_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, const uint16_t *base, size_t vl) { + return vlseg2e16_v_u16mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg3e16_v_u16mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv2i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// 
CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg3e16_v_u16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e16_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, const uint16_t *base, size_t vl) { + return vlseg3e16_v_u16mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg4e16_v_u16mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv2i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg4e16_v_u16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e16_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, const uint16_t *base, size_t vl) { + return vlseg4e16_v_u16mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg5e16_v_u16mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv2i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// 
CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg5e16_v_u16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg5e16_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, const uint16_t *base, size_t vl) { + return vlseg5e16_v_u16mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg6e16_v_u16mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv2i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg6e16_v_u16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], 
[[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg6e16_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, const uint16_t *base, size_t vl) { + return vlseg6e16_v_u16mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg7e16_v_u16mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv2i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg7e16_v_u16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg7e16_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, const uint16_t *base, size_t vl) { + return vlseg7e16_v_u16mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg8e16_v_u16mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv2i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg8e16_v_u16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store 
[[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg8e16_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, vuint16mf2_t maskedoff7, const uint16_t *base, size_t vl) { + return vlseg8e16_v_u16mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg2e16_v_u16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv4i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg2e16_v_u16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e16_v_u16m1_m (vuint16m1_t *v0, vuint16m1_t *v1, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, const uint16_t *base, size_t vl) { + return vlseg2e16_v_u16m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg3e16_v_u16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv4i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg3e16_v_u16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = 
extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e16_v_u16m1_m (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, const uint16_t *base, size_t vl) { + return vlseg3e16_v_u16m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg4e16_v_u16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv4i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg4e16_v_u16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e16_v_u16m1_m (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, const uint16_t *base, size_t vl) { + return vlseg4e16_v_u16m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg5e16_v_u16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv4i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV32-NEXT: 
store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg5e16_v_u16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg5e16_v_u16m1_m (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, const uint16_t *base, size_t vl) { + return vlseg5e16_v_u16m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg6e16_v_u16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv4i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg6e16_v_u16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// 
CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg6e16_v_u16m1_m (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, const uint16_t *base, size_t vl) { + return vlseg6e16_v_u16m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg7e16_v_u16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv4i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg7e16_v_u16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * 
[[V6:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg7e16_v_u16m1_m (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, const uint16_t *base, size_t vl) { + return vlseg7e16_v_u16m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg8e16_v_u16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv4i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg8e16_v_u16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: 
store [[TMP8]], * [[V7:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg8e16_v_u16m1_m (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vuint16m1_t *v7, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, vuint16m1_t maskedoff7, const uint16_t *base, size_t vl) { + return vlseg8e16_v_u16m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg2e16_v_u16m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv8i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg2e16_v_u16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e16_v_u16m2_m (vuint16m2_t *v0, vuint16m2_t *v1, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, const uint16_t *base, size_t vl) { + return vlseg2e16_v_u16m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg3e16_v_u16m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv8i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg3e16_v_u16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e16_v_u16m2_m (vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, const uint16_t *base, 
size_t vl) { + return vlseg3e16_v_u16m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg4e16_v_u16m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv8i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg4e16_v_u16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e16_v_u16m2_m (vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vuint16m2_t *v3, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, vuint16m2_t maskedoff3, const uint16_t *base, size_t vl) { + return vlseg4e16_v_u16m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg2e16_v_u16m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv16i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg2e16_v_u16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv16i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e16_v_u16m4_m (vuint16m4_t *v0, vuint16m4_t *v1, vbool4_t mask, vuint16m4_t maskedoff0, vuint16m4_t maskedoff1, const uint16_t *base, size_t vl) { + return vlseg2e16_v_u16m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg2e32_v_u32mf2_m( +// 
CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv1i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg2e32_v_u32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e32_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, const uint32_t *base, size_t vl) { + return vlseg2e32_v_u32mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg3e32_v_u32mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv1i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg3e32_v_u32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e32_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, const uint32_t *base, size_t vl) { + return vlseg3e32_v_u32mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg4e32_v_u32mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv1i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: 
[[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg4e32_v_u32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e32_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, const uint32_t *base, size_t vl) { + return vlseg4e32_v_u32mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg5e32_v_u32mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv1i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg5e32_v_u32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg5e32_v_u32mf2_m 
(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, const uint32_t *base, size_t vl) { + return vlseg5e32_v_u32mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg6e32_v_u32mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv1i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg6e32_v_u32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg6e32_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, const uint32_t *base, size_t vl) { + return vlseg6e32_v_u32mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg7e32_v_u32mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv1i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], 
i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg7e32_v_u32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg7e32_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, const uint32_t *base, size_t vl) { + return vlseg7e32_v_u32mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg8e32_v_u32mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv1i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: 
store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg8e32_v_u32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg8e32_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, vuint32mf2_t maskedoff7, const uint32_t *base, size_t vl) { + return vlseg8e32_v_u32mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg2e32_v_u32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv2i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: 
ret void +// +// CHECK-RV64-LABEL: @test_vlseg2e32_v_u32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e32_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, const uint32_t *base, size_t vl) { + return vlseg2e32_v_u32m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg3e32_v_u32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv2i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg3e32_v_u32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e32_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, const uint32_t *base, size_t vl) { + return vlseg3e32_v_u32m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg4e32_v_u32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv2i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg4e32_v_u32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], 
[[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e32_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, const uint32_t *base, size_t vl) { + return vlseg4e32_v_u32m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg5e32_v_u32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv2i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg5e32_v_u32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg5e32_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, const uint32_t *base, size_t vl) { + return vlseg5e32_v_u32m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg6e32_v_u32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , 
} @llvm.riscv.vlseg6.mask.nxv2i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg6e32_v_u32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg6e32_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, const uint32_t *base, size_t vl) { + return vlseg6e32_v_u32m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg7e32_v_u32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv2i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// 
CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg7e32_v_u32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg7e32_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, const uint32_t *base, size_t vl) { + return vlseg7e32_v_u32m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg8e32_v_u32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv2i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV32-NEXT: store 
[[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg8e32_v_u32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg8e32_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vuint32m1_t *v7, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, vuint32m1_t maskedoff7, const uint32_t *base, size_t vl) { + return vlseg8e32_v_u32m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg2e32_v_u32m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv4i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg2e32_v_u32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e32_v_u32m2_m 
(vuint32m2_t *v0, vuint32m2_t *v1, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, const uint32_t *base, size_t vl) { + return vlseg2e32_v_u32m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg3e32_v_u32m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv4i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg3e32_v_u32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e32_v_u32m2_m (vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, const uint32_t *base, size_t vl) { + return vlseg3e32_v_u32m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg4e32_v_u32m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv4i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg4e32_v_u32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * 
[[V3:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e32_v_u32m2_m (vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vuint32m2_t *v3, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, vuint32m2_t maskedoff3, const uint32_t *base, size_t vl) { + return vlseg4e32_v_u32m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg2e32_v_u32m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv8i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg2e32_v_u32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e32_v_u32m4_m (vuint32m4_t *v0, vuint32m4_t *v1, vbool8_t mask, vuint32m4_t maskedoff0, vuint32m4_t maskedoff1, const uint32_t *base, size_t vl) { + return vlseg2e32_v_u32m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg2e64_v_u64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv1i64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg2e64_v_u64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e64_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, const uint64_t *base, size_t vl) { + return vlseg2e64_v_u64m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg3e64_v_u64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv1i64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store 
[[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg3e64_v_u64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e64_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, const uint64_t *base, size_t vl) { + return vlseg3e64_v_u64m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg4e64_v_u64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv1i64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg4e64_v_u64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e64_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, const uint64_t *base, size_t vl) { + return vlseg4e64_v_u64m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg5e64_v_u64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv1i64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i32 
[[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg5e64_v_u64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg5e64_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, const uint64_t *base, size_t vl) { + return vlseg5e64_v_u64m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg6e64_v_u64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv1i64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg6e64_v_u64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], 
[[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg6e64_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, const uint64_t *base, size_t vl) { + return vlseg6e64_v_u64m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg7e64_v_u64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv1i64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg7e64_v_u64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// 
CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg7e64_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, const uint64_t *base, size_t vl) { + return vlseg7e64_v_u64m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg8e64_v_u64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv1i64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg8e64_v_u64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], 
align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg8e64_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vuint64m1_t *v7, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, vuint64m1_t maskedoff7, const uint64_t *base, size_t vl) { + return vlseg8e64_v_u64m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg2e64_v_u64m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv2i64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg2e64_v_u64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e64_v_u64m2_m (vuint64m2_t *v0, vuint64m2_t *v1, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, const uint64_t *base, size_t vl) { + return vlseg2e64_v_u64m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg3e64_v_u64m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv2i64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg3e64_v_u64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// 
CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e64_v_u64m2_m (vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, const uint64_t *base, size_t vl) { + return vlseg3e64_v_u64m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg4e64_v_u64m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv2i64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg4e64_v_u64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e64_v_u64m2_m (vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vuint64m2_t *v3, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, vuint64m2_t maskedoff3, const uint64_t *base, size_t vl) { + return vlseg4e64_v_u64m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg2e64_v_u64m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv4i64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg2e64_v_u64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } 
[[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e64_v_u64m4_m (vuint64m4_t *v0, vuint64m4_t *v1, vbool16_t mask, vuint64m4_t maskedoff0, vuint64m4_t maskedoff1, const uint64_t *base, size_t vl) { + return vlseg2e64_v_u64m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg2e32_v_f32mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv1f32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg2e32_v_f32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv1f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e32_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, const float *base, size_t vl) { + return vlseg2e32_v_f32mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg3e32_v_f32mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv1f32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg3e32_v_f32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv1f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e32_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, const float *base, size_t vl) { + return vlseg3e32_v_f32mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); +} + +// CHECK-RV32-LABEL: 
@test_vlseg4e32_v_f32mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv1f32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg4e32_v_f32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv1f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e32_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, const float *base, size_t vl) { + return vlseg4e32_v_f32mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg5e32_v_f32mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv1f32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg5e32_v_f32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv1f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] 
= extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg5e32_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, const float *base, size_t vl) { + return vlseg5e32_v_f32mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg6e32_v_f32mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv1f32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg6e32_v_f32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv1f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg6e32_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t 
maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, const float *base, size_t vl) { + return vlseg6e32_v_f32mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg7e32_v_f32mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv1f32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg7e32_v_f32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv1f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg7e32_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, const float *base, size_t vl) { + return vlseg7e32_v_f32mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); +} + +// CHECK-RV32-LABEL: 
@test_vlseg8e32_v_f32mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv1f32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg8e32_v_f32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv1f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg8e32_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, vfloat32mf2_t maskedoff7, const float *base, size_t vl) { + return vlseg8e32_v_f32mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, 
maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg2e32_v_f32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv2f32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg2e32_v_f32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv2f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e32_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, const float *base, size_t vl) { + return vlseg2e32_v_f32m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg3e32_v_f32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv2f32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg3e32_v_f32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv2f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e32_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, const float *base, size_t vl) { + return vlseg3e32_v_f32m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg4e32_v_f32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv2f32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// 
CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg4e32_v_f32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv2f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e32_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, const float *base, size_t vl) { + return vlseg4e32_v_f32m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg5e32_v_f32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv2f32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg5e32_v_f32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv2f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , 
} [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg5e32_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, const float *base, size_t vl) { + return vlseg5e32_v_f32m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg6e32_v_f32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv2f32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg6e32_v_f32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv2f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg6e32_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, const float *base, size_t vl) { + return vlseg6e32_v_f32m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg7e32_v_f32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv2f32.i32( 
[[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg7e32_v_f32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv2f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg7e32_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, const float *base, size_t vl) { + return vlseg7e32_v_f32m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg8e32_v_f32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv2f32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: 
store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg8e32_v_f32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv2f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg8e32_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t *v7, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, vfloat32m1_t maskedoff7, const float *base, size_t vl) { + return vlseg8e32_v_f32m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg2e32_v_f32m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv4f32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// 
CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x float>, <vscale x 4 x float> } [[TMP0]], 1
+// CHECK-RV32-NEXT: store <vscale x 4 x float> [[TMP2]], <vscale x 4 x float>* [[V1:%.*]], align 4
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vlseg2e32_v_f32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x float>, <vscale x 4 x float> } @llvm.riscv.vlseg2.mask.nxv4f32.i64(<vscale x 4 x float> [[MASKEDOFF0:%.*]], <vscale x 4 x float> [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 4 x float>, <vscale x 4 x float> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 4 x float> [[TMP1]], <vscale x 4 x float>* [[V0:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x float>, <vscale x 4 x float> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 4 x float> [[TMP2]], <vscale x 4 x float>* [[V1:%.*]], align 8
+// CHECK-RV64-NEXT: ret void
+//
+void test_vlseg2e32_v_f32m2_m (vfloat32m2_t *v0, vfloat32m2_t *v1, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, const float *base, size_t vl) {
+  return vlseg2e32_v_f32m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlseg3e32_v_f32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } @llvm.riscv.vlseg3.mask.nxv4f32.i32(<vscale x 4 x float> [[MASKEDOFF0:%.*]], <vscale x 4 x float> [[MASKEDOFF1:%.*]], <vscale x 4 x float> [[MASKEDOFF2:%.*]], float* [[BASE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } [[TMP0]], 0
+// CHECK-RV32-NEXT: store <vscale x 4 x float> [[TMP1]], <vscale x 4 x float>* [[V0:%.*]], align 4
+// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } [[TMP0]], 1
+// CHECK-RV32-NEXT: store <vscale x 4 x float> [[TMP2]], <vscale x 4 x float>* [[V1:%.*]], align 4
+// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } [[TMP0]], 2
+// CHECK-RV32-NEXT: store <vscale x 4 x float> [[TMP3]], <vscale x 4 x float>* [[V2:%.*]], align 4
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vlseg3e32_v_f32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } @llvm.riscv.vlseg3.mask.nxv4f32.i64(<vscale x 4 x float> [[MASKEDOFF0:%.*]], <vscale x 4 x float> [[MASKEDOFF1:%.*]], <vscale x 4 x float> [[MASKEDOFF2:%.*]], float* [[BASE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 4 x float> [[TMP1]], <vscale x 4 x float>* [[V0:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 4 x float> [[TMP2]], <vscale x 4 x float>* [[V1:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 4 x float> [[TMP3]], <vscale x 4 x float>* [[V2:%.*]], align 8
+// CHECK-RV64-NEXT: ret void
+//
+void test_vlseg3e32_v_f32m2_m (vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, const float *base, size_t vl) {
+  return vlseg3e32_v_f32m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlseg4e32_v_f32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } @llvm.riscv.vlseg4.mask.nxv4f32.i32(<vscale x 4 x float> [[MASKEDOFF0:%.*]], <vscale x 4 x float> [[MASKEDOFF1:%.*]], <vscale x 4 x float> [[MASKEDOFF2:%.*]], <vscale x 4 x float> [[MASKEDOFF3:%.*]], float* [[BASE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } [[TMP0]], 0
+// CHECK-RV32-NEXT: store <vscale x 4 x float> [[TMP1]], <vscale x 4 x float>* [[V0:%.*]], align 4
+// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } [[TMP0]], 1
+// CHECK-RV32-NEXT: store <vscale x 4 x float> [[TMP2]], <vscale x 4 x float>* [[V1:%.*]], align 4
+// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } [[TMP0]], 2
+// CHECK-RV32-NEXT: store <vscale x 4 x float> [[TMP3]], <vscale x 4 x float>* [[V2:%.*]], align 4
+// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } [[TMP0]], 3
+// CHECK-RV32-NEXT: store <vscale x 4 x float> [[TMP4]], <vscale x 4 x float>* [[V3:%.*]], align 4
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vlseg4e32_v_f32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } @llvm.riscv.vlseg4.mask.nxv4f32.i64(<vscale x 4 x float> [[MASKEDOFF0:%.*]], <vscale x 4 x float> [[MASKEDOFF1:%.*]], <vscale x 4 x float> [[MASKEDOFF2:%.*]], <vscale x 4 x float> [[MASKEDOFF3:%.*]], float* [[BASE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 4 x float> [[TMP1]], <vscale x 4 x float>* [[V0:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 4 x float> [[TMP2]], <vscale x 4 x float>* [[V1:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 4 x float> [[TMP3]], <vscale x 4 x float>* [[V2:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } [[TMP0]], 3
+// CHECK-RV64-NEXT: store <vscale x 4 x float> [[TMP4]], <vscale x 4 x float>* [[V3:%.*]], align 8
+// CHECK-RV64-NEXT: ret void
+//
+void test_vlseg4e32_v_f32m2_m (vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t *v3, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, vfloat32m2_t maskedoff3, const float *base, size_t vl) {
+  return vlseg4e32_v_f32m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlseg2e32_v_f32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { <vscale x 8 x float>, <vscale x 8 x float> } @llvm.riscv.vlseg2.mask.nxv8f32.i32(<vscale x 8 x float> [[MASKEDOFF0:%.*]], <vscale x 8 x float> [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 8 x float>, <vscale x 8 x float> } [[TMP0]], 0
+// CHECK-RV32-NEXT: store <vscale x 8 x float> [[TMP1]], <vscale x 8 x float>* [[V0:%.*]], align 4
+// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 8 x float>, <vscale x 8 x float> } [[TMP0]], 1
+// CHECK-RV32-NEXT: store <vscale x 8 x float> [[TMP2]], <vscale x 8 x float>* [[V1:%.*]], align 4
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vlseg2e32_v_f32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 8 x float>, <vscale x 8 x float> } @llvm.riscv.vlseg2.mask.nxv8f32.i64(<vscale x 8 x float> [[MASKEDOFF0:%.*]], <vscale x 8 x float> [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 8 x float>, <vscale x 8 x float> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 8 x float> [[TMP1]], <vscale x 8 x float>* [[V0:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 8 x float>, <vscale x 8 x float> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 8 x float> [[TMP2]], <vscale x 8 x float>* [[V1:%.*]], align 8
+// CHECK-RV64-NEXT: ret void
+//
+void test_vlseg2e32_v_f32m4_m (vfloat32m4_t *v0, vfloat32m4_t *v1, vbool8_t mask, vfloat32m4_t maskedoff0, vfloat32m4_t maskedoff1, const float *base, size_t vl) {
+  return vlseg2e32_v_f32m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlseg2e64_v_f64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x double>, <vscale x 1 x double> } @llvm.riscv.vlseg2.mask.nxv1f64.i32(<vscale x 1 x double> [[MASKEDOFF0:%.*]], <vscale x 1 x double> [[MASKEDOFF1:%.*]], double* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 0
+// CHECK-RV32-NEXT: store <vscale x 1 x double> [[TMP1]], <vscale x 1 x double>* [[V0:%.*]], align 4
+// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 1
+// CHECK-RV32-NEXT: store <vscale x 1 x double> [[TMP2]], <vscale x 1 x double>* [[V1:%.*]], align 4
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vlseg2e64_v_f64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x double>, <vscale x 1 x double> } @llvm.riscv.vlseg2.mask.nxv1f64.i64(<vscale x 1 x double> [[MASKEDOFF0:%.*]], <vscale x 1 x double> [[MASKEDOFF1:%.*]], double* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 1 x double> [[TMP1]], <vscale x 1 x double>* [[V0:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 1 x double> [[TMP2]], <vscale x 1 x double>* [[V1:%.*]], align 8
+// CHECK-RV64-NEXT: ret void
+//
+void 
test_vlseg2e64_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, const double *base, size_t vl) { + return vlseg2e64_v_f64m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg3e64_v_f64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv1f64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg3e64_v_f64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv1f64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e64_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, const double *base, size_t vl) { + return vlseg3e64_v_f64m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg4e64_v_f64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv1f64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg4e64_v_f64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv1f64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// 
CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e64_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, const double *base, size_t vl) { + return vlseg4e64_v_f64m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg5e64_v_f64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv1f64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg5e64_v_f64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv1f64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg5e64_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, const double *base, size_t vl) { + return vlseg5e64_v_f64m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg6e64_v_f64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv1f64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = 
extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg6e64_v_f64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv1f64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg6e64_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, const double *base, size_t vl) { + return vlseg6e64_v_f64m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg7e64_v_f64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv1f64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// 
CHECK-RV64-LABEL: @test_vlseg7e64_v_f64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv1f64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg7e64_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, const double *base, size_t vl) { + return vlseg7e64_v_f64m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg8e64_v_f64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv1f64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg8e64_v_f64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } 
@llvm.riscv.vlseg8.mask.nxv1f64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg8e64_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t *v7, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, vfloat64m1_t maskedoff7, const double *base, size_t vl) { + return vlseg8e64_v_f64m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg2e64_v_f64m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv2f64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg2e64_v_f64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv2f64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e64_v_f64m2_m (vfloat64m2_t *v0, vfloat64m2_t *v1, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, const double *base, size_t vl) { + return vlseg2e64_v_f64m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg3e64_v_f64m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv2f64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], double* 
[[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg3e64_v_f64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv2f64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e64_v_f64m2_m (vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, const double *base, size_t vl) { + return vlseg3e64_v_f64m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg4e64_v_f64m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv2f64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg4e64_v_f64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv2f64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e64_v_f64m2_m (vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t *v3, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, vfloat64m2_t maskedoff3, const double *base, size_t vl) { + return vlseg4e64_v_f64m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); +} + +// 
CHECK-RV32-LABEL: @test_vlseg2e64_v_f64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x double>, <vscale x 4 x double> } @llvm.riscv.vlseg2.mask.nxv4f64.i32(<vscale x 4 x double> [[MASKEDOFF0:%.*]], <vscale x 4 x double> [[MASKEDOFF1:%.*]], double* [[BASE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 4 x double>, <vscale x 4 x double> } [[TMP0]], 0
+// CHECK-RV32-NEXT: store <vscale x 4 x double> [[TMP1]], <vscale x 4 x double>* [[V0:%.*]], align 4
+// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x double>, <vscale x 4 x double> } [[TMP0]], 1
+// CHECK-RV32-NEXT: store <vscale x 4 x double> [[TMP2]], <vscale x 4 x double>* [[V1:%.*]], align 4
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vlseg2e64_v_f64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x double>, <vscale x 4 x double> } @llvm.riscv.vlseg2.mask.nxv4f64.i64(<vscale x 4 x double> [[MASKEDOFF0:%.*]], <vscale x 4 x double> [[MASKEDOFF1:%.*]], double* [[BASE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 4 x double>, <vscale x 4 x double> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 4 x double> [[TMP1]], <vscale x 4 x double>* [[V0:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x double>, <vscale x 4 x double> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 4 x double> [[TMP2]], <vscale x 4 x double>* [[V1:%.*]], align 8
+// CHECK-RV64-NEXT: ret void
+//
+void test_vlseg2e64_v_f64m4_m (vfloat64m4_t *v0, vfloat64m4_t *v1, vbool16_t mask, vfloat64m4_t maskedoff0, vfloat64m4_t maskedoff1, const double *base, size_t vl) {
+  return vlseg2e64_v_f64m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vlsegff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vlsegff.c
new file mode 100644
index 000000000000..856db27392ba
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vlsegff.c
@@ -0,0 +1,14568 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +f -target-feature +d \
+// RUN:   -target-feature +experimental-v -target-feature +experimental-zfh \
+// RUN:   -disable-O0-optnone -fallow-half-arguments-and-returns -emit-llvm %s \
+// RUN:   -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d \
+// RUN:   -target-feature +experimental-v -target-feature +experimental-zfh \
+// RUN:   -disable-O0-optnone -fallow-half-arguments-and-returns -emit-llvm %s \
+// RUN:   -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV32-LABEL: @test_vlseg2e8ff_v_i8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, i32 } @llvm.riscv.vlseg2ff.nxv1i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, i32 } [[TMP0]], 0
+// CHECK-RV32-NEXT: store <vscale x 1 x i8> [[TMP1]], <vscale x 1 x i8>* [[V0:%.*]], align 4
+// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, i32 } [[TMP0]], 1
+// CHECK-RV32-NEXT: store <vscale x 1 x i8> [[TMP2]], <vscale x 1 x i8>* [[V1:%.*]], align 4
+// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, i32 } [[TMP0]], 2
+// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 4
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vlseg2e8ff_v_i8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, i64 } @llvm.riscv.vlseg2ff.nxv1i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP1]], <vscale x 1 x i8>* [[V0:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP2]], <vscale x 1 x i8>* [[V1:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]],
align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e8ff_v_i8mf8 (vint8mf8_t *v0, vint8mf8_t *v1, const int8_t *base, size_t *new_vl, size_t vl) { + return vlseg2e8ff_v_i8mf8(v0, v1, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg3e8ff_v_i8mf8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , i32 } @llvm.riscv.vlseg3ff.nxv1i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg3e8ff_v_i8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.nxv1i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e8ff_v_i8mf8 (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, const int8_t *base, size_t *new_vl, size_t vl) { + return vlseg3e8ff_v_i8mf8(v0, v1, v2, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg4e8ff_v_i8mf8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , i32 } @llvm.riscv.vlseg4ff.nxv1i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store i32 [[TMP5]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg4e8ff_v_i8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.nxv1i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , 
i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e8ff_v_i8mf8 (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, const int8_t *base, size_t *new_vl, size_t vl) { + return vlseg4e8ff_v_i8mf8(v0, v1, v2, v3, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg5e8ff_v_i8mf8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , i32 } @llvm.riscv.vlseg5ff.nxv1i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 5 +// CHECK-RV32-NEXT: store i32 [[TMP6]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg5e8ff_v_i8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.nxv1i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg5e8ff_v_i8mf8 (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, const int8_t *base, size_t *new_vl, size_t vl) { + return vlseg5e8ff_v_i8mf8(v0, v1, v2, v3, v4, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg6e8ff_v_i8mf8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , i32 } @llvm.riscv.vlseg6ff.nxv1i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 3 
+// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 6 +// CHECK-RV32-NEXT: store i32 [[TMP7]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg6e8ff_v_i8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.nxv1i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg6e8ff_v_i8mf8 (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, const int8_t *base, size_t *new_vl, size_t vl) { + return vlseg6e8ff_v_i8mf8(v0, v1, v2, v3, v4, v5, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg7e8ff_v_i8mf8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , i32 } @llvm.riscv.vlseg7ff.nxv1i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 6 +// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 7 +// CHECK-RV32-NEXT: store i32 [[TMP8]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg7e8ff_v_i8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } 
@llvm.riscv.vlseg7ff.nxv1i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg7e8ff_v_i8mf8 (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, const int8_t *base, size_t *new_vl, size_t vl) { + return vlseg7e8ff_v_i8mf8(v0, v1, v2, v3, v4, v5, v6, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg8e8ff_v_i8mf8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i32 } @llvm.riscv.vlseg8ff.nxv1i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 6 +// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 7 +// CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 8 +// CHECK-RV32-NEXT: store i32 [[TMP9]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg8e8ff_v_i8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.nxv1i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], 
align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 8 +// CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg8e8ff_v_i8mf8 (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t *v7, const int8_t *base, size_t *new_vl, size_t vl) { + return vlseg8e8ff_v_i8mf8(v0, v1, v2, v3, v4, v5, v6, v7, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg2e8ff_v_i8mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.nxv2i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg2e8ff_v_i8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.nxv2i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e8ff_v_i8mf4 (vint8mf4_t *v0, vint8mf4_t *v1, const int8_t *base, size_t *new_vl, size_t vl) { + return vlseg2e8ff_v_i8mf4(v0, v1, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg3e8ff_v_i8mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , i32 } @llvm.riscv.vlseg3ff.nxv2i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL:%.*]], align 4 +// 
CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg3e8ff_v_i8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.nxv2i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e8ff_v_i8mf4 (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, const int8_t *base, size_t *new_vl, size_t vl) { + return vlseg3e8ff_v_i8mf4(v0, v1, v2, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg4e8ff_v_i8mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , i32 } @llvm.riscv.vlseg4ff.nxv2i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store i32 [[TMP5]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg4e8ff_v_i8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.nxv2i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e8ff_v_i8mf4 (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, const int8_t *base, size_t *new_vl, size_t vl) { + return vlseg4e8ff_v_i8mf4(v0, v1, v2, v3, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg5e8ff_v_i8mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , i32 } @llvm.riscv.vlseg5ff.nxv2i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// 
CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 5 +// CHECK-RV32-NEXT: store i32 [[TMP6]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg5e8ff_v_i8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.nxv2i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg5e8ff_v_i8mf4 (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, const int8_t *base, size_t *new_vl, size_t vl) { + return vlseg5e8ff_v_i8mf4(v0, v1, v2, v3, v4, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg6e8ff_v_i8mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , i32 } @llvm.riscv.vlseg6ff.nxv2i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 6 +// CHECK-RV32-NEXT: store i32 [[TMP7]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg6e8ff_v_i8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.nxv2i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1 +// 
CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg6e8ff_v_i8mf4 (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, const int8_t *base, size_t *new_vl, size_t vl) { + return vlseg6e8ff_v_i8mf4(v0, v1, v2, v3, v4, v5, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg7e8ff_v_i8mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , i32 } @llvm.riscv.vlseg7ff.nxv2i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 6 +// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 7 +// CHECK-RV32-NEXT: store i32 [[TMP8]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg7e8ff_v_i8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.nxv2i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , 
, , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg7e8ff_v_i8mf4 (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, const int8_t *base, size_t *new_vl, size_t vl) { + return vlseg7e8ff_v_i8mf4(v0, v1, v2, v3, v4, v5, v6, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg8e8ff_v_i8mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i32 } @llvm.riscv.vlseg8ff.nxv2i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 6 +// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 7 +// CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 8 +// CHECK-RV32-NEXT: store i32 [[TMP9]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg8e8ff_v_i8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.nxv2i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 8 +// CHECK-RV64-NEXT: 
store i64 [[TMP9]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg8e8ff_v_i8mf4 (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t *v7, const int8_t *base, size_t *new_vl, size_t vl) { + return vlseg8e8ff_v_i8mf4(v0, v1, v2, v3, v4, v5, v6, v7, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg2e8ff_v_i8mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.nxv4i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg2e8ff_v_i8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.nxv4i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e8ff_v_i8mf2 (vint8mf2_t *v0, vint8mf2_t *v1, const int8_t *base, size_t *new_vl, size_t vl) { + return vlseg2e8ff_v_i8mf2(v0, v1, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg3e8ff_v_i8mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , i32 } @llvm.riscv.vlseg3ff.nxv4i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg3e8ff_v_i8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.nxv4i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e8ff_v_i8mf2 (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, const int8_t *base, size_t *new_vl, size_t vl) { + return vlseg3e8ff_v_i8mf2(v0, v1, v2, base, 
new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg4e8ff_v_i8mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , i32 } @llvm.riscv.vlseg4ff.nxv4i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store i32 [[TMP5]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg4e8ff_v_i8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.nxv4i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e8ff_v_i8mf2 (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, const int8_t *base, size_t *new_vl, size_t vl) { + return vlseg4e8ff_v_i8mf2(v0, v1, v2, v3, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg5e8ff_v_i8mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , i32 } @llvm.riscv.vlseg5ff.nxv4i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 5 +// CHECK-RV32-NEXT: store i32 [[TMP6]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg5e8ff_v_i8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.nxv4i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , 
i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg5e8ff_v_i8mf2 (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, const int8_t *base, size_t *new_vl, size_t vl) { + return vlseg5e8ff_v_i8mf2(v0, v1, v2, v3, v4, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg6e8ff_v_i8mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , i32 } @llvm.riscv.vlseg6ff.nxv4i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 6 +// CHECK-RV32-NEXT: store i32 [[TMP7]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg6e8ff_v_i8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.nxv4i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg6e8ff_v_i8mf2 (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, const int8_t *base, size_t *new_vl, size_t vl) { + return 
vlseg6e8ff_v_i8mf2(v0, v1, v2, v3, v4, v5, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg7e8ff_v_i8mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , i32 } @llvm.riscv.vlseg7ff.nxv4i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 6 +// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 7 +// CHECK-RV32-NEXT: store i32 [[TMP8]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg7e8ff_v_i8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.nxv4i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg7e8ff_v_i8mf2 (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, const int8_t *base, size_t *new_vl, size_t vl) { + return vlseg7e8ff_v_i8mf2(v0, v1, v2, v3, v4, v5, v6, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg8e8ff_v_i8mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i32 } @llvm.riscv.vlseg8ff.nxv4i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 1 +// 
CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 6 +// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 7 +// CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 8 +// CHECK-RV32-NEXT: store i32 [[TMP9]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg8e8ff_v_i8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.nxv4i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 8 +// CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg8e8ff_v_i8mf2 (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t *v7, const int8_t *base, size_t *new_vl, size_t vl) { + return vlseg8e8ff_v_i8mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg2e8ff_v_i8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.nxv8i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 4 +// 
CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg2e8ff_v_i8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.nxv8i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e8ff_v_i8m1 (vint8m1_t *v0, vint8m1_t *v1, const int8_t *base, size_t *new_vl, size_t vl) { + return vlseg2e8ff_v_i8m1(v0, v1, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg3e8ff_v_i8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , i32 } @llvm.riscv.vlseg3ff.nxv8i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg3e8ff_v_i8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.nxv8i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e8ff_v_i8m1 (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, const int8_t *base, size_t *new_vl, size_t vl) { + return vlseg3e8ff_v_i8m1(v0, v1, v2, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg4e8ff_v_i8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , i32 } @llvm.riscv.vlseg4ff.nxv8i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store i32 [[TMP5]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg4e8ff_v_i8m1( +// 
CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.nxv8i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e8ff_v_i8m1 (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, const int8_t *base, size_t *new_vl, size_t vl) { + return vlseg4e8ff_v_i8m1(v0, v1, v2, v3, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg5e8ff_v_i8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , i32 } @llvm.riscv.vlseg5ff.nxv8i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 5 +// CHECK-RV32-NEXT: store i32 [[TMP6]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg5e8ff_v_i8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.nxv8i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg5e8ff_v_i8m1 (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, const int8_t *base, size_t *new_vl, size_t vl) { + return vlseg5e8ff_v_i8m1(v0, v1, v2, v3, v4, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg6e8ff_v_i8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: 
[[TMP0:%.*]] = call { , , , , , , i32 } @llvm.riscv.vlseg6ff.nxv8i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 6 +// CHECK-RV32-NEXT: store i32 [[TMP7]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg6e8ff_v_i8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.nxv8i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg6e8ff_v_i8m1 (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, const int8_t *base, size_t *new_vl, size_t vl) { + return vlseg6e8ff_v_i8m1(v0, v1, v2, v3, v4, v5, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg7e8ff_v_i8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , i32 } @llvm.riscv.vlseg7ff.nxv8i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = 
extractvalue { , , , , , , , i32 } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 6 +// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 7 +// CHECK-RV32-NEXT: store i32 [[TMP8]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg7e8ff_v_i8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.nxv8i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg7e8ff_v_i8m1 (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, const int8_t *base, size_t *new_vl, size_t vl) { + return vlseg7e8ff_v_i8m1(v0, v1, v2, v3, v4, v5, v6, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg8e8ff_v_i8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i32 } @llvm.riscv.vlseg8ff.nxv8i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 6 +// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 7 +// CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 8 +// CHECK-RV32-NEXT: store 
i32 [[TMP9]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg8e8ff_v_i8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.nxv8i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 8 +// CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg8e8ff_v_i8m1 (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vint8m1_t *v7, const int8_t *base, size_t *new_vl, size_t vl) { + return vlseg8e8ff_v_i8m1(v0, v1, v2, v3, v4, v5, v6, v7, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg2e8ff_v_i8m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.nxv16i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg2e8ff_v_i8m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.nxv16i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e8ff_v_i8m2 (vint8m2_t *v0, vint8m2_t *v1, const int8_t *base, size_t *new_vl, size_t vl) { + return vlseg2e8ff_v_i8m2(v0, v1, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg3e8ff_v_i8m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , i32 } @llvm.riscv.vlseg3ff.nxv16i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) +// 
CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg3e8ff_v_i8m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.nxv16i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e8ff_v_i8m2 (vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, const int8_t *base, size_t *new_vl, size_t vl) { + return vlseg3e8ff_v_i8m2(v0, v1, v2, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg4e8ff_v_i8m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , i32 } @llvm.riscv.vlseg4ff.nxv16i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store i32 [[TMP5]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg4e8ff_v_i8m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.nxv16i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e8ff_v_i8m2 (vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vint8m2_t *v3, const int8_t *base, size_t *new_vl, size_t vl) { + return 
vlseg4e8ff_v_i8m2(v0, v1, v2, v3, base, new_vl, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlseg2e8ff_v_i8m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { <vscale x 32 x i8>, <vscale x 32 x i8>, i32 } @llvm.riscv.vlseg2ff.nxv32i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 32 x i8>, <vscale x 32 x i8>, i32 } [[TMP0]], 0
+// CHECK-RV32-NEXT:    store <vscale x 32 x i8> [[TMP1]], <vscale x 32 x i8>* [[V0:%.*]], align 4
+// CHECK-RV32-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 32 x i8>, <vscale x 32 x i8>, i32 } [[TMP0]], 1
+// CHECK-RV32-NEXT:    store <vscale x 32 x i8> [[TMP2]], <vscale x 32 x i8>* [[V1:%.*]], align 4
+// CHECK-RV32-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 32 x i8>, <vscale x 32 x i8>, i32 } [[TMP0]], 2
+// CHECK-RV32-NEXT:    store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 4
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_vlseg2e8ff_v_i8m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 32 x i8>, <vscale x 32 x i8>, i64 } @llvm.riscv.vlseg2ff.nxv32i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 32 x i8>, <vscale x 32 x i8>, i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 32 x i8> [[TMP1]], <vscale x 32 x i8>* [[V0:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 32 x i8>, <vscale x 32 x i8>, i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 32 x i8> [[TMP2]], <vscale x 32 x i8>* [[V1:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 32 x i8>, <vscale x 32 x i8>, i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg2e8ff_v_i8m4 (vint8m4_t *v0, vint8m4_t *v1, const int8_t *base, size_t *new_vl, size_t vl) {
+  return vlseg2e8ff_v_i8m4(v0, v1, base, new_vl, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlseg2e16ff_v_i16mf4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i16>, <vscale x 1 x i16>, i32 } @llvm.riscv.vlseg2ff.nxv1i16.i32(i16* [[BASE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, i32 } [[TMP0]], 0
+// CHECK-RV32-NEXT:    store <vscale x 1 x i16> [[TMP1]], <vscale x 1 x i16>* [[V0:%.*]], align 4
+// CHECK-RV32-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, i32 } [[TMP0]], 1
+// CHECK-RV32-NEXT:    store <vscale x 1 x i16> [[TMP2]], <vscale x 1 x i16>* [[V1:%.*]], align 4
+// CHECK-RV32-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, i32 } [[TMP0]], 2
+// CHECK-RV32-NEXT:    store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 4
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_vlseg2e16ff_v_i16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i16>, <vscale x 1 x i16>, i64 } @llvm.riscv.vlseg2ff.nxv1i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP1]], <vscale x 1 x i16>* [[V0:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP2]], <vscale x 1 x i16>* [[V1:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg2e16ff_v_i16mf4 (vint16mf4_t *v0, vint16mf4_t *v1, const int16_t *base, size_t *new_vl, size_t vl) {
+  return vlseg2e16ff_v_i16mf4(v0, v1, base, new_vl, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlseg3e16ff_v_i16mf4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, i32 } @llvm.riscv.vlseg3ff.nxv1i16.i32(i16* [[BASE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, i32 } [[TMP0]], 0
+// CHECK-RV32-NEXT:    store <vscale x 1 x i16> [[TMP1]], <vscale x 1 x i16>* [[V0:%.*]], align 4
+// CHECK-RV32-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, i32 } [[TMP0]], 1
+// CHECK-RV32-NEXT:    store <vscale x 1 x i16> [[TMP2]], <vscale x 1 x i16>* [[V1:%.*]], align 4
+// CHECK-RV32-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, i32 } [[TMP0]], 2
+// CHECK-RV32-NEXT:    store <vscale x 1 x i16> [[TMP3]], <vscale x 1 x i16>* [[V2:%.*]],
align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg3e16ff_v_i16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.nxv1i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e16ff_v_i16mf4 (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, const int16_t *base, size_t *new_vl, size_t vl) { + return vlseg3e16ff_v_i16mf4(v0, v1, v2, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg4e16ff_v_i16mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , i32 } @llvm.riscv.vlseg4ff.nxv1i16.i32(i16* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store i32 [[TMP5]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg4e16ff_v_i16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.nxv1i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e16ff_v_i16mf4 (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, const int16_t *base, size_t *new_vl, size_t vl) { + return vlseg4e16ff_v_i16mf4(v0, v1, v2, v3, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg5e16ff_v_i16mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , i32 } @llvm.riscv.vlseg5ff.nxv1i16.i32(i16* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: 
store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 5 +// CHECK-RV32-NEXT: store i32 [[TMP6]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg5e16ff_v_i16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.nxv1i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg5e16ff_v_i16mf4 (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, const int16_t *base, size_t *new_vl, size_t vl) { + return vlseg5e16ff_v_i16mf4(v0, v1, v2, v3, v4, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg6e16ff_v_i16mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , i32 } @llvm.riscv.vlseg6ff.nxv1i16.i32(i16* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 6 +// CHECK-RV32-NEXT: store i32 [[TMP7]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg6e16ff_v_i16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.nxv1i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: 
[[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg6e16ff_v_i16mf4 (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, const int16_t *base, size_t *new_vl, size_t vl) { + return vlseg6e16ff_v_i16mf4(v0, v1, v2, v3, v4, v5, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg7e16ff_v_i16mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , i32 } @llvm.riscv.vlseg7ff.nxv1i16.i32(i16* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 6 +// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 7 +// CHECK-RV32-NEXT: store i32 [[TMP8]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg7e16ff_v_i16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.nxv1i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], 
* [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg7e16ff_v_i16mf4 (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, const int16_t *base, size_t *new_vl, size_t vl) { + return vlseg7e16ff_v_i16mf4(v0, v1, v2, v3, v4, v5, v6, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg8e16ff_v_i16mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i32 } @llvm.riscv.vlseg8ff.nxv1i16.i32(i16* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 6 +// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 7 +// CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 8 +// CHECK-RV32-NEXT: store i32 [[TMP9]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg8e16ff_v_i16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.nxv1i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// 
CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 8 +// CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg8e16ff_v_i16mf4 (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t *v7, const int16_t *base, size_t *new_vl, size_t vl) { + return vlseg8e16ff_v_i16mf4(v0, v1, v2, v3, v4, v5, v6, v7, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg2e16ff_v_i16mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.nxv2i16.i32(i16* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg2e16ff_v_i16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.nxv2i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e16ff_v_i16mf2 (vint16mf2_t *v0, vint16mf2_t *v1, const int16_t *base, size_t *new_vl, size_t vl) { + return vlseg2e16ff_v_i16mf2(v0, v1, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg3e16ff_v_i16mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , i32 } @llvm.riscv.vlseg3ff.nxv2i16.i32(i16* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg3e16ff_v_i16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.nxv2i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 
+// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e16ff_v_i16mf2 (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, const int16_t *base, size_t *new_vl, size_t vl) { + return vlseg3e16ff_v_i16mf2(v0, v1, v2, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg4e16ff_v_i16mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , i32 } @llvm.riscv.vlseg4ff.nxv2i16.i32(i16* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store i32 [[TMP5]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg4e16ff_v_i16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.nxv2i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e16ff_v_i16mf2 (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, const int16_t *base, size_t *new_vl, size_t vl) { + return vlseg4e16ff_v_i16mf2(v0, v1, v2, v3, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg5e16ff_v_i16mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , i32 } @llvm.riscv.vlseg5ff.nxv2i16.i32(i16* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 5 +// CHECK-RV32-NEXT: store i32 [[TMP6]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg5e16ff_v_i16mf2( +// CHECK-RV64-NEXT: entry: +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.nxv2i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg5e16ff_v_i16mf2 (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, const int16_t *base, size_t *new_vl, size_t vl) { + return vlseg5e16ff_v_i16mf2(v0, v1, v2, v3, v4, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg6e16ff_v_i16mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , i32 } @llvm.riscv.vlseg6ff.nxv2i16.i32(i16* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 6 +// CHECK-RV32-NEXT: store i32 [[TMP7]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg6e16ff_v_i16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.nxv2i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = 
extractvalue { , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg6e16ff_v_i16mf2 (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, const int16_t *base, size_t *new_vl, size_t vl) { + return vlseg6e16ff_v_i16mf2(v0, v1, v2, v3, v4, v5, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg7e16ff_v_i16mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , i32 } @llvm.riscv.vlseg7ff.nxv2i16.i32(i16* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 6 +// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 7 +// CHECK-RV32-NEXT: store i32 [[TMP8]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg7e16ff_v_i16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.nxv2i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg7e16ff_v_i16mf2 (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, const int16_t *base, size_t *new_vl, size_t vl) { + return vlseg7e16ff_v_i16mf2(v0, v1, v2, v3, v4, v5, v6, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg8e16ff_v_i16mf2( +// CHECK-RV32-NEXT: entry: 
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i32 } @llvm.riscv.vlseg8ff.nxv2i16.i32(i16* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 6 +// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 7 +// CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 8 +// CHECK-RV32-NEXT: store i32 [[TMP9]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg8e16ff_v_i16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.nxv2i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 8 +// CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg8e16ff_v_i16mf2 (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t *v7, const int16_t *base, size_t *new_vl, size_t vl) { + return vlseg8e16ff_v_i16mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg2e16ff_v_i16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.nxv4i16.i32(i16* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: 
[[TMP1:%.*]] = extractvalue { , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg2e16ff_v_i16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.nxv4i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e16ff_v_i16m1 (vint16m1_t *v0, vint16m1_t *v1, const int16_t *base, size_t *new_vl, size_t vl) { + return vlseg2e16ff_v_i16m1(v0, v1, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg3e16ff_v_i16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , i32 } @llvm.riscv.vlseg3ff.nxv4i16.i32(i16* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg3e16ff_v_i16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.nxv4i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e16ff_v_i16m1 (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, const int16_t *base, size_t *new_vl, size_t vl) { + return vlseg3e16ff_v_i16m1(v0, v1, v2, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg4e16ff_v_i16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , i32 } @llvm.riscv.vlseg4ff.nxv4i16.i32(i16* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 2 +// 
CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store i32 [[TMP5]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg4e16ff_v_i16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.nxv4i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e16ff_v_i16m1 (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, const int16_t *base, size_t *new_vl, size_t vl) { + return vlseg4e16ff_v_i16m1(v0, v1, v2, v3, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg5e16ff_v_i16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , i32 } @llvm.riscv.vlseg5ff.nxv4i16.i32(i16* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 5 +// CHECK-RV32-NEXT: store i32 [[TMP6]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg5e16ff_v_i16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.nxv4i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i64 } 
[[TMP0]], 5 +// CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg5e16ff_v_i16m1 (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, const int16_t *base, size_t *new_vl, size_t vl) { + return vlseg5e16ff_v_i16m1(v0, v1, v2, v3, v4, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg6e16ff_v_i16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , i32 } @llvm.riscv.vlseg6ff.nxv4i16.i32(i16* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 6 +// CHECK-RV32-NEXT: store i32 [[TMP7]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg6e16ff_v_i16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.nxv4i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg6e16ff_v_i16m1 (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, const int16_t *base, size_t *new_vl, size_t vl) { + return vlseg6e16ff_v_i16m1(v0, v1, v2, v3, v4, v5, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg7e16ff_v_i16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , i32 } @llvm.riscv.vlseg7ff.nxv4i16.i32(i16* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], 
align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 6 +// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 7 +// CHECK-RV32-NEXT: store i32 [[TMP8]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg7e16ff_v_i16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.nxv4i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg7e16ff_v_i16m1 (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, const int16_t *base, size_t *new_vl, size_t vl) { + return vlseg7e16ff_v_i16m1(v0, v1, v2, v3, v4, v5, v6, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg8e16ff_v_i16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i32 } @llvm.riscv.vlseg8ff.nxv4i16.i32(i16* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , 
, , , , , , i32 } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 6 +// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 7 +// CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 8 +// CHECK-RV32-NEXT: store i32 [[TMP9]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg8e16ff_v_i16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.nxv4i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 8 +// CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg8e16ff_v_i16m1 (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vint16m1_t *v7, const int16_t *base, size_t *new_vl, size_t vl) { + return vlseg8e16ff_v_i16m1(v0, v1, v2, v3, v4, v5, v6, v7, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg2e16ff_v_i16m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.nxv8i16.i32(i16* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg2e16ff_v_i16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.nxv8i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = 
extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg2e16ff_v_i16m2 (vint16m2_t *v0, vint16m2_t *v1, const int16_t *base, size_t *new_vl, size_t vl) {
+  return vlseg2e16ff_v_i16m2(v0, v1, base, new_vl, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlseg3e16ff_v_i16m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, i32 } @llvm.riscv.vlseg3ff.nxv8i16.i32(i16* [[BASE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, i32 } [[TMP0]], 0
+// CHECK-RV32-NEXT:    store <vscale x 8 x i16> [[TMP1]], <vscale x 8 x i16>* [[V0:%.*]], align 4
+// CHECK-RV32-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, i32 } [[TMP0]], 1
+// CHECK-RV32-NEXT:    store <vscale x 8 x i16> [[TMP2]], <vscale x 8 x i16>* [[V1:%.*]], align 4
+// CHECK-RV32-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, i32 } [[TMP0]], 2
+// CHECK-RV32-NEXT:    store <vscale x 8 x i16> [[TMP3]], <vscale x 8 x i16>* [[V2:%.*]], align 4
+// CHECK-RV32-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, i32 } [[TMP0]], 3
+// CHECK-RV32-NEXT:    store i32 [[TMP4]], i32* [[NEW_VL:%.*]], align 4
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_vlseg3e16ff_v_i16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, i64 } @llvm.riscv.vlseg3ff.nxv8i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 8 x i16> [[TMP1]], <vscale x 8 x i16>* [[V0:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 8 x i16> [[TMP2]], <vscale x 8 x i16>* [[V1:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 8 x i16> [[TMP3]], <vscale x 8 x i16>* [[V2:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, i64 } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 8
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg3e16ff_v_i16m2 (vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, const int16_t *base, size_t *new_vl, size_t vl) {
+  return vlseg3e16ff_v_i16m2(v0, v1, v2, base, new_vl, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlseg4e16ff_v_i16m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, i32 } @llvm.riscv.vlseg4ff.nxv8i16.i32(i16* [[BASE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, i32 } [[TMP0]], 0
+// CHECK-RV32-NEXT:    store <vscale x 8 x i16> [[TMP1]], <vscale x 8 x i16>* [[V0:%.*]], align 4
+// CHECK-RV32-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, i32 } [[TMP0]], 1
+// CHECK-RV32-NEXT:    store <vscale x 8 x i16> [[TMP2]], <vscale x 8 x i16>* [[V1:%.*]], align 4
+// CHECK-RV32-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, i32 } [[TMP0]], 2
+// CHECK-RV32-NEXT:    store <vscale x 8 x i16> [[TMP3]], <vscale x 8 x i16>* [[V2:%.*]], align 4
+// CHECK-RV32-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, i32 } [[TMP0]], 3
+// CHECK-RV32-NEXT:    store <vscale x 8 x i16> [[TMP4]], <vscale x 8 x i16>* [[V3:%.*]], align 4
+// CHECK-RV32-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, i32 } [[TMP0]], 4
+// CHECK-RV32-NEXT:    store i32 [[TMP5]], i32* [[NEW_VL:%.*]], align 4
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_vlseg4e16ff_v_i16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, i64 } @llvm.riscv.vlseg4ff.nxv8i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 8 x i16> [[TMP1]], <vscale x 8 x i16>* [[V0:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 8 x i16> [[TMP2]], <vscale x 8 x i16>* [[V1:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, i64 } [[TMP0]], 2
+//
CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e16ff_v_i16m2 (vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vint16m2_t *v3, const int16_t *base, size_t *new_vl, size_t vl) { + return vlseg4e16ff_v_i16m2(v0, v1, v2, v3, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg2e16ff_v_i16m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.nxv16i16.i32(i16* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg2e16ff_v_i16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.nxv16i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e16ff_v_i16m4 (vint16m4_t *v0, vint16m4_t *v1, const int16_t *base, size_t *new_vl, size_t vl) { + return vlseg2e16ff_v_i16m4(v0, v1, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg2e32ff_v_i32mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.nxv1i32.i32(i32* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg2e32ff_v_i32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.nxv1i32.i64(i32* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e32ff_v_i32mf2 (vint32mf2_t *v0, vint32mf2_t *v1, const int32_t *base, size_t *new_vl, size_t vl) { + return vlseg2e32ff_v_i32mf2(v0, v1, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg3e32ff_v_i32mf2( +// 
CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , i32 } @llvm.riscv.vlseg3ff.nxv1i32.i32(i32* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg3e32ff_v_i32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.nxv1i32.i64(i32* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e32ff_v_i32mf2 (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, const int32_t *base, size_t *new_vl, size_t vl) { + return vlseg3e32ff_v_i32mf2(v0, v1, v2, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg4e32ff_v_i32mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , i32 } @llvm.riscv.vlseg4ff.nxv1i32.i32(i32* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store i32 [[TMP5]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg4e32ff_v_i32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.nxv1i32.i64(i32* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: 
ret void +// +void test_vlseg4e32ff_v_i32mf2 (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, const int32_t *base, size_t *new_vl, size_t vl) { + return vlseg4e32ff_v_i32mf2(v0, v1, v2, v3, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg5e32ff_v_i32mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , i32 } @llvm.riscv.vlseg5ff.nxv1i32.i32(i32* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 5 +// CHECK-RV32-NEXT: store i32 [[TMP6]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg5e32ff_v_i32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.nxv1i32.i64(i32* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg5e32ff_v_i32mf2 (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, const int32_t *base, size_t *new_vl, size_t vl) { + return vlseg5e32ff_v_i32mf2(v0, v1, v2, v3, v4, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg6e32ff_v_i32mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , i32 } @llvm.riscv.vlseg6ff.nxv1i32.i32(i32* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// 
CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 6 +// CHECK-RV32-NEXT: store i32 [[TMP7]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg6e32ff_v_i32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.nxv1i32.i64(i32* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg6e32ff_v_i32mf2 (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, const int32_t *base, size_t *new_vl, size_t vl) { + return vlseg6e32ff_v_i32mf2(v0, v1, v2, v3, v4, v5, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg7e32ff_v_i32mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , i32 } @llvm.riscv.vlseg7ff.nxv1i32.i32(i32* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 6 +// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 7 +// CHECK-RV32-NEXT: store i32 [[TMP8]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg7e32ff_v_i32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.nxv1i32.i64(i32* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 
8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg7e32ff_v_i32mf2 (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, const int32_t *base, size_t *new_vl, size_t vl) { + return vlseg7e32ff_v_i32mf2(v0, v1, v2, v3, v4, v5, v6, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg8e32ff_v_i32mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i32 } @llvm.riscv.vlseg8ff.nxv1i32.i32(i32* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 6 +// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 7 +// CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 8 +// CHECK-RV32-NEXT: store i32 [[TMP9]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg8e32ff_v_i32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.nxv1i32.i64(i32* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] 
= extractvalue { , , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 8 +// CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg8e32ff_v_i32mf2 (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t *v7, const int32_t *base, size_t *new_vl, size_t vl) { + return vlseg8e32ff_v_i32mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg2e32ff_v_i32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.nxv2i32.i32(i32* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg2e32ff_v_i32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.nxv2i32.i64(i32* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e32ff_v_i32m1 (vint32m1_t *v0, vint32m1_t *v1, const int32_t *base, size_t *new_vl, size_t vl) { + return vlseg2e32ff_v_i32m1(v0, v1, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg3e32ff_v_i32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , i32 } @llvm.riscv.vlseg3ff.nxv2i32.i32(i32* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg3e32ff_v_i32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } 
@llvm.riscv.vlseg3ff.nxv2i32.i64(i32* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e32ff_v_i32m1 (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, const int32_t *base, size_t *new_vl, size_t vl) { + return vlseg3e32ff_v_i32m1(v0, v1, v2, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg4e32ff_v_i32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , i32 } @llvm.riscv.vlseg4ff.nxv2i32.i32(i32* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store i32 [[TMP5]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg4e32ff_v_i32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.nxv2i32.i64(i32* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e32ff_v_i32m1 (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, const int32_t *base, size_t *new_vl, size_t vl) { + return vlseg4e32ff_v_i32m1(v0, v1, v2, v3, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg5e32ff_v_i32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , i32 } @llvm.riscv.vlseg5ff.nxv2i32.i32(i32* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: 
[[TMP4:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 5 +// CHECK-RV32-NEXT: store i32 [[TMP6]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg5e32ff_v_i32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.nxv2i32.i64(i32* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg5e32ff_v_i32m1 (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, const int32_t *base, size_t *new_vl, size_t vl) { + return vlseg5e32ff_v_i32m1(v0, v1, v2, v3, v4, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg6e32ff_v_i32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , i32 } @llvm.riscv.vlseg6ff.nxv2i32.i32(i32* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 6 +// CHECK-RV32-NEXT: store i32 [[TMP7]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg6e32ff_v_i32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.nxv2i32.i64(i32* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 2 +// 
CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg6e32ff_v_i32m1 (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, const int32_t *base, size_t *new_vl, size_t vl) { + return vlseg6e32ff_v_i32m1(v0, v1, v2, v3, v4, v5, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg7e32ff_v_i32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , i32 } @llvm.riscv.vlseg7ff.nxv2i32.i32(i32* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 6 +// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 7 +// CHECK-RV32-NEXT: store i32 [[TMP8]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg7e32ff_v_i32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.nxv2i32.i64(i32* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue 
{ , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg7e32ff_v_i32m1 (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, const int32_t *base, size_t *new_vl, size_t vl) { + return vlseg7e32ff_v_i32m1(v0, v1, v2, v3, v4, v5, v6, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg8e32ff_v_i32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i32 } @llvm.riscv.vlseg8ff.nxv2i32.i32(i32* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 6 +// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 7 +// CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 8 +// CHECK-RV32-NEXT: store i32 [[TMP9]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg8e32ff_v_i32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.nxv2i32.i64(i32* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 8 +// CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg8e32ff_v_i32m1 
(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vint32m1_t *v7, const int32_t *base, size_t *new_vl, size_t vl) { + return vlseg8e32ff_v_i32m1(v0, v1, v2, v3, v4, v5, v6, v7, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg2e32ff_v_i32m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.nxv4i32.i32(i32* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg2e32ff_v_i32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.nxv4i32.i64(i32* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e32ff_v_i32m2 (vint32m2_t *v0, vint32m2_t *v1, const int32_t *base, size_t *new_vl, size_t vl) { + return vlseg2e32ff_v_i32m2(v0, v1, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg3e32ff_v_i32m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , i32 } @llvm.riscv.vlseg3ff.nxv4i32.i32(i32* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg3e32ff_v_i32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.nxv4i32.i64(i32* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e32ff_v_i32m2 (vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, const int32_t *base, size_t *new_vl, size_t vl) { + return vlseg3e32ff_v_i32m2(v0, v1, v2, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg4e32ff_v_i32m2( +// CHECK-RV32-NEXT: entry: +// 
CHECK-RV32-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, i32 } @llvm.riscv.vlseg4ff.nxv4i32.i32(i32* [[BASE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, i32 } [[TMP0]], 0
+// CHECK-RV32-NEXT: store <vscale x 4 x i32> [[TMP1]], <vscale x 4 x i32>* [[V0:%.*]], align 4
+// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, i32 } [[TMP0]], 1
+// CHECK-RV32-NEXT: store <vscale x 4 x i32> [[TMP2]], <vscale x 4 x i32>* [[V1:%.*]], align 4
+// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, i32 } [[TMP0]], 2
+// CHECK-RV32-NEXT: store <vscale x 4 x i32> [[TMP3]], <vscale x 4 x i32>* [[V2:%.*]], align 4
+// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, i32 } [[TMP0]], 3
+// CHECK-RV32-NEXT: store <vscale x 4 x i32> [[TMP4]], <vscale x 4 x i32>* [[V3:%.*]], align 4
+// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, i32 } [[TMP0]], 4
+// CHECK-RV32-NEXT: store i32 [[TMP5]], i32* [[NEW_VL:%.*]], align 4
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vlseg4e32ff_v_i32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, i64 } @llvm.riscv.vlseg4ff.nxv4i32.i64(i32* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 4 x i32> [[TMP1]], <vscale x 4 x i32>* [[V0:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 4 x i32> [[TMP2]], <vscale x 4 x i32>* [[V1:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 4 x i32> [[TMP3]], <vscale x 4 x i32>* [[V2:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, i64 } [[TMP0]], 3
+// CHECK-RV64-NEXT: store <vscale x 4 x i32> [[TMP4]], <vscale x 4 x i32>* [[V3:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, i64 } [[TMP0]], 4
+// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 8
+// CHECK-RV64-NEXT: ret void
+//
+void test_vlseg4e32ff_v_i32m2 (vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vint32m2_t *v3, const int32_t *base, size_t *new_vl, size_t vl) {
+  return vlseg4e32ff_v_i32m2(v0, v1, v2, v3, base, new_vl, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlseg2e32ff_v_i32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { <vscale x 8 x i32>, <vscale x 8 x i32>, i32 } @llvm.riscv.vlseg2ff.nxv8i32.i32(i32* [[BASE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 8 x i32>, <vscale x 8 x i32>, i32 } [[TMP0]], 0
+// CHECK-RV32-NEXT: store <vscale x 8 x i32> [[TMP1]], <vscale x 8 x i32>* [[V0:%.*]], align 4
+// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 8 x i32>, <vscale x 8 x i32>, i32 } [[TMP0]], 1
+// CHECK-RV32-NEXT: store <vscale x 8 x i32> [[TMP2]], <vscale x 8 x i32>* [[V1:%.*]], align 4
+// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 8 x i32>, <vscale x 8 x i32>, i32 } [[TMP0]], 2
+// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 4
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vlseg2e32ff_v_i32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 8 x i32>, <vscale x 8 x i32>, i64 } @llvm.riscv.vlseg2ff.nxv8i32.i64(i32* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 8 x i32>, <vscale x 8 x i32>, i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 8 x i32> [[TMP1]], <vscale x 8 x i32>* [[V0:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 8 x i32>, <vscale x 8 x i32>, i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 8 x i32> [[TMP2]], <vscale x 8 x i32>* [[V1:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 8 x i32>, <vscale x 8 x i32>, i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
+// CHECK-RV64-NEXT: ret void
+//
+void test_vlseg2e32ff_v_i32m4 (vint32m4_t *v0, vint32m4_t *v1, const int32_t *base, size_t *new_vl, size_t vl) {
+  return vlseg2e32ff_v_i32m4(v0, v1, base, new_vl, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlseg2e64ff_v_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] =
call { , , i32 } @llvm.riscv.vlseg2ff.nxv1i64.i32(i64* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg2e64ff_v_i64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.nxv1i64.i64(i64* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e64ff_v_i64m1 (vint64m1_t *v0, vint64m1_t *v1, const int64_t *base, size_t *new_vl, size_t vl) { + return vlseg2e64ff_v_i64m1(v0, v1, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg3e64ff_v_i64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , i32 } @llvm.riscv.vlseg3ff.nxv1i64.i32(i64* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg3e64ff_v_i64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.nxv1i64.i64(i64* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e64ff_v_i64m1 (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, const int64_t *base, size_t *new_vl, size_t vl) { + return vlseg3e64ff_v_i64m1(v0, v1, v2, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg4e64ff_v_i64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , i32 } @llvm.riscv.vlseg4ff.nxv1i64.i32(i64* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * 
[[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store i32 [[TMP5]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg4e64ff_v_i64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.nxv1i64.i64(i64* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e64ff_v_i64m1 (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, const int64_t *base, size_t *new_vl, size_t vl) { + return vlseg4e64ff_v_i64m1(v0, v1, v2, v3, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg5e64ff_v_i64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , i32 } @llvm.riscv.vlseg5ff.nxv1i64.i32(i64* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 5 +// CHECK-RV32-NEXT: store i32 [[TMP6]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg5e64ff_v_i64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.nxv1i64.i64(i64* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store 
[[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg5e64ff_v_i64m1 (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, const int64_t *base, size_t *new_vl, size_t vl) { + return vlseg5e64ff_v_i64m1(v0, v1, v2, v3, v4, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg6e64ff_v_i64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , i32 } @llvm.riscv.vlseg6ff.nxv1i64.i32(i64* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 6 +// CHECK-RV32-NEXT: store i32 [[TMP7]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg6e64ff_v_i64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.nxv1i64.i64(i64* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg6e64ff_v_i64m1 (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, const int64_t *base, size_t *new_vl, size_t vl) { + return vlseg6e64ff_v_i64m1(v0, v1, v2, v3, v4, v5, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg7e64ff_v_i64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , i32 } @llvm.riscv.vlseg7ff.nxv1i64.i32(i64* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = 
extractvalue { , , , , , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 6 +// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 7 +// CHECK-RV32-NEXT: store i32 [[TMP8]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg7e64ff_v_i64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.nxv1i64.i64(i64* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg7e64ff_v_i64m1 (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, const int64_t *base, size_t *new_vl, size_t vl) { + return vlseg7e64ff_v_i64m1(v0, v1, v2, v3, v4, v5, v6, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg8e64ff_v_i64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i32 } @llvm.riscv.vlseg8ff.nxv1i64.i32(i64* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 4 +// 
CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 6 +// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 7 +// CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 8 +// CHECK-RV32-NEXT: store i32 [[TMP9]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg8e64ff_v_i64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.nxv1i64.i64(i64* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 8 +// CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg8e64ff_v_i64m1 (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vint64m1_t *v7, const int64_t *base, size_t *new_vl, size_t vl) { + return vlseg8e64ff_v_i64m1(v0, v1, v2, v3, v4, v5, v6, v7, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg2e64ff_v_i64m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.nxv2i64.i32(i64* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg2e64ff_v_i64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.nxv2i64.i64(i64* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } 
[[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e64ff_v_i64m2 (vint64m2_t *v0, vint64m2_t *v1, const int64_t *base, size_t *new_vl, size_t vl) { + return vlseg2e64ff_v_i64m2(v0, v1, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg3e64ff_v_i64m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , i32 } @llvm.riscv.vlseg3ff.nxv2i64.i32(i64* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg3e64ff_v_i64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.nxv2i64.i64(i64* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e64ff_v_i64m2 (vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, const int64_t *base, size_t *new_vl, size_t vl) { + return vlseg3e64ff_v_i64m2(v0, v1, v2, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg4e64ff_v_i64m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , i32 } @llvm.riscv.vlseg4ff.nxv2i64.i32(i64* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store i32 [[TMP5]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg4e64ff_v_i64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.nxv2i64.i64(i64* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store 
[[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e64ff_v_i64m2 (vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vint64m2_t *v3, const int64_t *base, size_t *new_vl, size_t vl) { + return vlseg4e64ff_v_i64m2(v0, v1, v2, v3, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg2e64ff_v_i64m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.nxv4i64.i32(i64* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg2e64ff_v_i64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.nxv4i64.i64(i64* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e64ff_v_i64m4 (vint64m4_t *v0, vint64m4_t *v1, const int64_t *base, size_t *new_vl, size_t vl) { + return vlseg2e64ff_v_i64m4(v0, v1, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg2e8ff_v_u8mf8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.nxv1i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg2e8ff_v_u8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.nxv1i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e8ff_v_u8mf8 (vuint8mf8_t *v0, vuint8mf8_t *v1, const uint8_t *base, size_t *new_vl, size_t vl) { + return 
vlseg2e8ff_v_u8mf8(v0, v1, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg3e8ff_v_u8mf8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , i32 } @llvm.riscv.vlseg3ff.nxv1i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg3e8ff_v_u8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.nxv1i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e8ff_v_u8mf8 (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, const uint8_t *base, size_t *new_vl, size_t vl) { + return vlseg3e8ff_v_u8mf8(v0, v1, v2, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg4e8ff_v_u8mf8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , i32 } @llvm.riscv.vlseg4ff.nxv1i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store i32 [[TMP5]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg4e8ff_v_u8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.nxv1i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 +// 
CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e8ff_v_u8mf8 (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, const uint8_t *base, size_t *new_vl, size_t vl) { + return vlseg4e8ff_v_u8mf8(v0, v1, v2, v3, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg5e8ff_v_u8mf8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , i32 } @llvm.riscv.vlseg5ff.nxv1i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 5 +// CHECK-RV32-NEXT: store i32 [[TMP6]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg5e8ff_v_u8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.nxv1i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg5e8ff_v_u8mf8 (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, const uint8_t *base, size_t *new_vl, size_t vl) { + return vlseg5e8ff_v_u8mf8(v0, v1, v2, v3, v4, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg6e8ff_v_u8mf8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , i32 } @llvm.riscv.vlseg6ff.nxv1i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 4 +// 
CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 6 +// CHECK-RV32-NEXT: store i32 [[TMP7]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg6e8ff_v_u8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.nxv1i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg6e8ff_v_u8mf8 (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, const uint8_t *base, size_t *new_vl, size_t vl) { + return vlseg6e8ff_v_u8mf8(v0, v1, v2, v3, v4, v5, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg7e8ff_v_u8mf8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , i32 } @llvm.riscv.vlseg7ff.nxv1i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 6 +// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 7 +// CHECK-RV32-NEXT: store i32 [[TMP8]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg7e8ff_v_u8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.nxv1i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0 +// 
CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg7e8ff_v_u8mf8 (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, const uint8_t *base, size_t *new_vl, size_t vl) { + return vlseg7e8ff_v_u8mf8(v0, v1, v2, v3, v4, v5, v6, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg8e8ff_v_u8mf8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i32 } @llvm.riscv.vlseg8ff.nxv1i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 6 +// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 7 +// CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 8 +// CHECK-RV32-NEXT: store i32 [[TMP9]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg8e8ff_v_u8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.nxv1i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], 
align 8
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, i64 } [[TMP0]], 3
+// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP4]], <vscale x 1 x i8>* [[V3:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, i64 } [[TMP0]], 4
+// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP5]], <vscale x 1 x i8>* [[V4:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, i64 } [[TMP0]], 5
+// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP6]], <vscale x 1 x i8>* [[V5:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, i64 } [[TMP0]], 6
+// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP7]], <vscale x 1 x i8>* [[V6:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, i64 } [[TMP0]], 7
+// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP8]], <vscale x 1 x i8>* [[V7:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, i64 } [[TMP0]], 8
+// CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL:%.*]], align 8
+// CHECK-RV64-NEXT: ret void
+//
+void test_vlseg8e8ff_v_u8mf8 (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, const uint8_t *base, size_t *new_vl, size_t vl) {
+  return vlseg8e8ff_v_u8mf8(v0, v1, v2, v3, v4, v5, v6, v7, base, new_vl, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlseg2e8ff_v_u8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x i8>, <vscale x 2 x i8>, i32 } @llvm.riscv.vlseg2ff.nxv2i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, i32 } [[TMP0]], 0
+// CHECK-RV32-NEXT: store <vscale x 2 x i8> [[TMP1]], <vscale x 2 x i8>* [[V0:%.*]], align 4
+// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, i32 } [[TMP0]], 1
+// CHECK-RV32-NEXT: store <vscale x 2 x i8> [[TMP2]], <vscale x 2 x i8>* [[V1:%.*]], align 4
+// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, i32 } [[TMP0]], 2
+// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 4
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vlseg2e8ff_v_u8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x i8>, <vscale x 2 x i8>, i64 } @llvm.riscv.vlseg2ff.nxv2i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP1]], <vscale x 2 x i8>* [[V0:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP2]], <vscale x 2 x i8>* [[V1:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
+// CHECK-RV64-NEXT: ret void
+//
+void test_vlseg2e8ff_v_u8mf4 (vuint8mf4_t *v0, vuint8mf4_t *v1, const uint8_t *base, size_t *new_vl, size_t vl) {
+  return vlseg2e8ff_v_u8mf4(v0, v1, base, new_vl, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlseg3e8ff_v_u8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, i32 } @llvm.riscv.vlseg3ff.nxv2i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, i32 } [[TMP0]], 0
+// CHECK-RV32-NEXT: store <vscale x 2 x i8> [[TMP1]], <vscale x 2 x i8>* [[V0:%.*]], align 4
+// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, i32 } [[TMP0]], 1
+// CHECK-RV32-NEXT: store <vscale x 2 x i8> [[TMP2]], <vscale x 2 x i8>* [[V1:%.*]], align 4
+// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, i32 } [[TMP0]], 2
+// CHECK-RV32-NEXT: store <vscale x 2 x i8> [[TMP3]], <vscale x 2 x i8>* [[V2:%.*]], align 4
+// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, i32 } [[TMP0]], 3
+// CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL:%.*]], align 4
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vlseg3e8ff_v_u8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, i64 } @llvm.riscv.vlseg3ff.nxv2i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP1]], <vscale x 2 x i8>* [[V0:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP2]], <vscale x 2 x i8>* [[V1:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP3]], <vscale x 2 x i8>* [[V2:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, i64 } [[TMP0]], 3
+// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 8
+// CHECK-RV64-NEXT: ret void
+//
+void test_vlseg3e8ff_v_u8mf4 (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, const uint8_t *base, size_t *new_vl, size_t vl) {
+  return vlseg3e8ff_v_u8mf4(v0, v1, v2, base, new_vl, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlseg4e8ff_v_u8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, i32 } @llvm.riscv.vlseg4ff.nxv2i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, i32 } [[TMP0]], 0
+// CHECK-RV32-NEXT: store <vscale x 2 x i8> [[TMP1]], <vscale x 2 x i8>* [[V0:%.*]], align 4
+// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, i32 } [[TMP0]], 1
+// CHECK-RV32-NEXT: store <vscale x 2 x i8> [[TMP2]], <vscale x 2 x i8>* [[V1:%.*]], align 4
+// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, i32 } [[TMP0]], 2
+// CHECK-RV32-NEXT: store <vscale x 2 x i8> [[TMP3]], <vscale x 2 x i8>* [[V2:%.*]], align 4
+// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, i32 } [[TMP0]], 3
+// CHECK-RV32-NEXT: store <vscale x 2 x i8> [[TMP4]], <vscale x 2 x i8>* [[V3:%.*]], align 4
+// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, i32 } [[TMP0]], 4
+// CHECK-RV32-NEXT: store i32 [[TMP5]], i32* [[NEW_VL:%.*]], align 4
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vlseg4e8ff_v_u8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, i64 } @llvm.riscv.vlseg4ff.nxv2i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP1]], <vscale x 2 x i8>* [[V0:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP2]], <vscale x 2 x i8>* [[V1:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP3]], <vscale x 2 x i8>* [[V2:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, i64 } [[TMP0]], 3
+// CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP4]], <vscale x 2 x i8>* [[V3:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, i64 } [[TMP0]], 4
+// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 8
+// CHECK-RV64-NEXT: ret void
+//
+void test_vlseg4e8ff_v_u8mf4 (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, const uint8_t *base, size_t *new_vl, size_t vl) {
+  return vlseg4e8ff_v_u8mf4(v0, v1, v2, v3, base, new_vl, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlseg5e8ff_v_u8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, i32 } @llvm.riscv.vlseg5ff.nxv2i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, i32 } [[TMP0]], 0
+// CHECK-RV32-NEXT: store <vscale x 2 x i8> [[TMP1]], <vscale x 2 x i8>* [[V0:%.*]], align 4
+// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, i32 } [[TMP0]], 1
+// CHECK-RV32-NEXT: store <vscale x 2 x i8> [[TMP2]], <vscale x 2 x i8>* [[V1:%.*]], align 4
+// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, i32 } [[TMP0]], 2
+// CHECK-RV32-NEXT: store <vscale x 2 x i8> [[TMP3]], <vscale x 2 x i8>* [[V2:%.*]], align 4
+//
CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 5 +// CHECK-RV32-NEXT: store i32 [[TMP6]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg5e8ff_v_u8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.nxv2i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg5e8ff_v_u8mf4 (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, const uint8_t *base, size_t *new_vl, size_t vl) { + return vlseg5e8ff_v_u8mf4(v0, v1, v2, v3, v4, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg6e8ff_v_u8mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , i32 } @llvm.riscv.vlseg6ff.nxv2i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 6 +// CHECK-RV32-NEXT: store i32 [[TMP7]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg6e8ff_v_u8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.nxv2i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i64 } 
[[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg6e8ff_v_u8mf4 (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, const uint8_t *base, size_t *new_vl, size_t vl) { + return vlseg6e8ff_v_u8mf4(v0, v1, v2, v3, v4, v5, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg7e8ff_v_u8mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , i32 } @llvm.riscv.vlseg7ff.nxv2i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 6 +// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 7 +// CHECK-RV32-NEXT: store i32 [[TMP8]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg7e8ff_v_u8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.nxv2i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP8:%.*]] 
= extractvalue { , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg7e8ff_v_u8mf4 (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, const uint8_t *base, size_t *new_vl, size_t vl) { + return vlseg7e8ff_v_u8mf4(v0, v1, v2, v3, v4, v5, v6, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg8e8ff_v_u8mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i32 } @llvm.riscv.vlseg8ff.nxv2i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 6 +// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 7 +// CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 8 +// CHECK-RV32-NEXT: store i32 [[TMP9]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg8e8ff_v_u8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.nxv2i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 8 +// CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void 
test_vlseg8e8ff_v_u8mf4 (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, const uint8_t *base, size_t *new_vl, size_t vl) { + return vlseg8e8ff_v_u8mf4(v0, v1, v2, v3, v4, v5, v6, v7, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg2e8ff_v_u8mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.nxv4i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg2e8ff_v_u8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.nxv4i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e8ff_v_u8mf2 (vuint8mf2_t *v0, vuint8mf2_t *v1, const uint8_t *base, size_t *new_vl, size_t vl) { + return vlseg2e8ff_v_u8mf2(v0, v1, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg3e8ff_v_u8mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , i32 } @llvm.riscv.vlseg3ff.nxv4i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg3e8ff_v_u8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.nxv4i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e8ff_v_u8mf2 (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, const uint8_t *base, size_t *new_vl, size_t vl) { + return vlseg3e8ff_v_u8mf2(v0, v1, v2, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg4e8ff_v_u8mf2( +// 
CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , i32 } @llvm.riscv.vlseg4ff.nxv4i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store i32 [[TMP5]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg4e8ff_v_u8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.nxv4i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e8ff_v_u8mf2 (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, const uint8_t *base, size_t *new_vl, size_t vl) { + return vlseg4e8ff_v_u8mf2(v0, v1, v2, v3, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg5e8ff_v_u8mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , i32 } @llvm.riscv.vlseg5ff.nxv4i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 5 +// CHECK-RV32-NEXT: store i32 [[TMP6]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg5e8ff_v_u8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.nxv4i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * 
[[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg5e8ff_v_u8mf2 (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, const uint8_t *base, size_t *new_vl, size_t vl) { + return vlseg5e8ff_v_u8mf2(v0, v1, v2, v3, v4, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg6e8ff_v_u8mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , i32 } @llvm.riscv.vlseg6ff.nxv4i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 6 +// CHECK-RV32-NEXT: store i32 [[TMP7]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg6e8ff_v_u8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.nxv4i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg6e8ff_v_u8mf2 (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, const uint8_t *base, size_t *new_vl, size_t vl) { + return vlseg6e8ff_v_u8mf2(v0, v1, v2, v3, v4, v5, base, 
new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg7e8ff_v_u8mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , i32 } @llvm.riscv.vlseg7ff.nxv4i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 6 +// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 7 +// CHECK-RV32-NEXT: store i32 [[TMP8]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg7e8ff_v_u8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.nxv4i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg7e8ff_v_u8mf2 (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, const uint8_t *base, size_t *new_vl, size_t vl) { + return vlseg7e8ff_v_u8mf2(v0, v1, v2, v3, v4, v5, v6, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg8e8ff_v_u8mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i32 } @llvm.riscv.vlseg8ff.nxv4i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * 
[[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 6 +// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 7 +// CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 8 +// CHECK-RV32-NEXT: store i32 [[TMP9]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg8e8ff_v_u8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.nxv4i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 8 +// CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg8e8ff_v_u8mf2 (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, const uint8_t *base, size_t *new_vl, size_t vl) { + return vlseg8e8ff_v_u8mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg2e8ff_v_u8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.nxv8i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void 
+// +// CHECK-RV64-LABEL: @test_vlseg2e8ff_v_u8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.nxv8i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e8ff_v_u8m1 (vuint8m1_t *v0, vuint8m1_t *v1, const uint8_t *base, size_t *new_vl, size_t vl) { + return vlseg2e8ff_v_u8m1(v0, v1, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg3e8ff_v_u8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , i32 } @llvm.riscv.vlseg3ff.nxv8i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg3e8ff_v_u8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.nxv8i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e8ff_v_u8m1 (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, const uint8_t *base, size_t *new_vl, size_t vl) { + return vlseg3e8ff_v_u8m1(v0, v1, v2, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg4e8ff_v_u8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , i32 } @llvm.riscv.vlseg4ff.nxv8i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store i32 [[TMP5]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg4e8ff_v_u8m1( +// CHECK-RV64-NEXT: entry: 
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.nxv8i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e8ff_v_u8m1 (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, const uint8_t *base, size_t *new_vl, size_t vl) { + return vlseg4e8ff_v_u8m1(v0, v1, v2, v3, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg5e8ff_v_u8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , i32 } @llvm.riscv.vlseg5ff.nxv8i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 5 +// CHECK-RV32-NEXT: store i32 [[TMP6]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg5e8ff_v_u8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.nxv8i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg5e8ff_v_u8m1 (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, const uint8_t *base, size_t *new_vl, size_t vl) { + return vlseg5e8ff_v_u8m1(v0, v1, v2, v3, v4, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg6e8ff_v_u8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { 
, , , , , , i32 } @llvm.riscv.vlseg6ff.nxv8i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 6 +// CHECK-RV32-NEXT: store i32 [[TMP7]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg6e8ff_v_u8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.nxv8i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg6e8ff_v_u8m1 (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, const uint8_t *base, size_t *new_vl, size_t vl) { + return vlseg6e8ff_v_u8m1(v0, v1, v2, v3, v4, v5, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg7e8ff_v_u8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , i32 } @llvm.riscv.vlseg7ff.nxv8i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , 
, , , , , , i32 } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 6 +// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 7 +// CHECK-RV32-NEXT: store i32 [[TMP8]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg7e8ff_v_u8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.nxv8i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg7e8ff_v_u8m1 (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, const uint8_t *base, size_t *new_vl, size_t vl) { + return vlseg7e8ff_v_u8m1(v0, v1, v2, v3, v4, v5, v6, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg8e8ff_v_u8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i32 } @llvm.riscv.vlseg8ff.nxv8i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 6 +// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 7 +// CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 8 +// CHECK-RV32-NEXT: store i32 
[[TMP9]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg8e8ff_v_u8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.nxv8i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 8 +// CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg8e8ff_v_u8m1 (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vuint8m1_t *v7, const uint8_t *base, size_t *new_vl, size_t vl) { + return vlseg8e8ff_v_u8m1(v0, v1, v2, v3, v4, v5, v6, v7, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg2e8ff_v_u8m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.nxv16i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg2e8ff_v_u8m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.nxv16i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e8ff_v_u8m2 (vuint8m2_t *v0, vuint8m2_t *v1, const uint8_t *base, size_t *new_vl, size_t vl) { + return vlseg2e8ff_v_u8m2(v0, v1, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg3e8ff_v_u8m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , i32 } @llvm.riscv.vlseg3ff.nxv16i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) +// 
CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg3e8ff_v_u8m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.nxv16i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e8ff_v_u8m2 (vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, const uint8_t *base, size_t *new_vl, size_t vl) { + return vlseg3e8ff_v_u8m2(v0, v1, v2, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg4e8ff_v_u8m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , i32 } @llvm.riscv.vlseg4ff.nxv16i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store i32 [[TMP5]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg4e8ff_v_u8m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.nxv16i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e8ff_v_u8m2 (vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vuint8m2_t *v3, const uint8_t *base, size_t *new_vl, size_t vl) { + return 
vlseg4e8ff_v_u8m2(v0, v1, v2, v3, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg2e8ff_v_u8m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.nxv32i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg2e8ff_v_u8m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.nxv32i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e8ff_v_u8m4 (vuint8m4_t *v0, vuint8m4_t *v1, const uint8_t *base, size_t *new_vl, size_t vl) { + return vlseg2e8ff_v_u8m4(v0, v1, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg2e16ff_v_u16mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.nxv1i16.i32(i16* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg2e16ff_v_u16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.nxv1i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e16ff_v_u16mf4 (vuint16mf4_t *v0, vuint16mf4_t *v1, const uint16_t *base, size_t *new_vl, size_t vl) { + return vlseg2e16ff_v_u16mf4(v0, v1, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg3e16ff_v_u16mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , i32 } @llvm.riscv.vlseg3ff.nxv1i16.i32(i16* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * 
[[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg3e16ff_v_u16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.nxv1i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e16ff_v_u16mf4 (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, const uint16_t *base, size_t *new_vl, size_t vl) { + return vlseg3e16ff_v_u16mf4(v0, v1, v2, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg4e16ff_v_u16mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , i32 } @llvm.riscv.vlseg4ff.nxv1i16.i32(i16* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store i32 [[TMP5]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg4e16ff_v_u16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.nxv1i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e16ff_v_u16mf4 (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, const uint16_t *base, size_t *new_vl, size_t vl) { + return vlseg4e16ff_v_u16mf4(v0, v1, v2, v3, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg5e16ff_v_u16mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , i32 } @llvm.riscv.vlseg5ff.nxv1i16.i32(i16* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 0 +// 
CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 5 +// CHECK-RV32-NEXT: store i32 [[TMP6]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg5e16ff_v_u16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.nxv1i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg5e16ff_v_u16mf4 (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, const uint16_t *base, size_t *new_vl, size_t vl) { + return vlseg5e16ff_v_u16mf4(v0, v1, v2, v3, v4, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg6e16ff_v_u16mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , i32 } @llvm.riscv.vlseg6ff.nxv1i16.i32(i16* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 6 +// CHECK-RV32-NEXT: store i32 [[TMP7]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg6e16ff_v_u16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.nxv1i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]]) 
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg6e16ff_v_u16mf4 (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, const uint16_t *base, size_t *new_vl, size_t vl) { + return vlseg6e16ff_v_u16mf4(v0, v1, v2, v3, v4, v5, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg7e16ff_v_u16mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , i32 } @llvm.riscv.vlseg7ff.nxv1i16.i32(i16* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 6 +// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 7 +// CHECK-RV32-NEXT: store i32 [[TMP8]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg7e16ff_v_u16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.nxv1i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 4 +// 
CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg7e16ff_v_u16mf4 (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, const uint16_t *base, size_t *new_vl, size_t vl) { + return vlseg7e16ff_v_u16mf4(v0, v1, v2, v3, v4, v5, v6, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg8e16ff_v_u16mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i32 } @llvm.riscv.vlseg8ff.nxv1i16.i32(i16* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 6 +// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 7 +// CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 8 +// CHECK-RV32-NEXT: store i32 [[TMP9]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg8e16ff_v_u16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.nxv1i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: 
store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 8 +// CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg8e16ff_v_u16mf4 (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, const uint16_t *base, size_t *new_vl, size_t vl) { + return vlseg8e16ff_v_u16mf4(v0, v1, v2, v3, v4, v5, v6, v7, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg2e16ff_v_u16mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.nxv2i16.i32(i16* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg2e16ff_v_u16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.nxv2i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e16ff_v_u16mf2 (vuint16mf2_t *v0, vuint16mf2_t *v1, const uint16_t *base, size_t *new_vl, size_t vl) { + return vlseg2e16ff_v_u16mf2(v0, v1, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg3e16ff_v_u16mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , i32 } @llvm.riscv.vlseg3ff.nxv2i16.i32(i16* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg3e16ff_v_u16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.nxv2i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: 
[[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e16ff_v_u16mf2 (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, const uint16_t *base, size_t *new_vl, size_t vl) { + return vlseg3e16ff_v_u16mf2(v0, v1, v2, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg4e16ff_v_u16mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , i32 } @llvm.riscv.vlseg4ff.nxv2i16.i32(i16* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store i32 [[TMP5]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg4e16ff_v_u16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.nxv2i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e16ff_v_u16mf2 (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, const uint16_t *base, size_t *new_vl, size_t vl) { + return vlseg4e16ff_v_u16mf2(v0, v1, v2, v3, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg5e16ff_v_u16mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , i32 } @llvm.riscv.vlseg5ff.nxv2i16.i32(i16* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 5 +// CHECK-RV32-NEXT: store i32 [[TMP6]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: 
@test_vlseg5e16ff_v_u16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.nxv2i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg5e16ff_v_u16mf2 (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, const uint16_t *base, size_t *new_vl, size_t vl) { + return vlseg5e16ff_v_u16mf2(v0, v1, v2, v3, v4, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg6e16ff_v_u16mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , i32 } @llvm.riscv.vlseg6ff.nxv2i16.i32(i16* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 6 +// CHECK-RV32-NEXT: store i32 [[TMP7]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg6e16ff_v_u16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.nxv2i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store 
[[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg6e16ff_v_u16mf2 (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, const uint16_t *base, size_t *new_vl, size_t vl) { + return vlseg6e16ff_v_u16mf2(v0, v1, v2, v3, v4, v5, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg7e16ff_v_u16mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , i32 } @llvm.riscv.vlseg7ff.nxv2i16.i32(i16* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 6 +// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 7 +// CHECK-RV32-NEXT: store i32 [[TMP8]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg7e16ff_v_u16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.nxv2i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg7e16ff_v_u16mf2 (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, const uint16_t *base, size_t *new_vl, size_t vl) { + return vlseg7e16ff_v_u16mf2(v0, v1, v2, v3, v4, v5, v6, base, new_vl, vl); 
+} + +// CHECK-RV32-LABEL: @test_vlseg8e16ff_v_u16mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i32 } @llvm.riscv.vlseg8ff.nxv2i16.i32(i16* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 6 +// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 7 +// CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 8 +// CHECK-RV32-NEXT: store i32 [[TMP9]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg8e16ff_v_u16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.nxv2i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 8 +// CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg8e16ff_v_u16mf2 (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, const uint16_t *base, size_t *new_vl, size_t vl) { + return vlseg8e16ff_v_u16mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg2e16ff_v_u16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } 
@llvm.riscv.vlseg2ff.nxv4i16.i32(i16* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg2e16ff_v_u16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.nxv4i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e16ff_v_u16m1 (vuint16m1_t *v0, vuint16m1_t *v1, const uint16_t *base, size_t *new_vl, size_t vl) { + return vlseg2e16ff_v_u16m1(v0, v1, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg3e16ff_v_u16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , i32 } @llvm.riscv.vlseg3ff.nxv4i16.i32(i16* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg3e16ff_v_u16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.nxv4i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e16ff_v_u16m1 (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, const uint16_t *base, size_t *new_vl, size_t vl) { + return vlseg3e16ff_v_u16m1(v0, v1, v2, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg4e16ff_v_u16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , i32 } @llvm.riscv.vlseg4ff.nxv4i16.i32(i16* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], 
align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store i32 [[TMP5]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg4e16ff_v_u16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.nxv4i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e16ff_v_u16m1 (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, const uint16_t *base, size_t *new_vl, size_t vl) { + return vlseg4e16ff_v_u16m1(v0, v1, v2, v3, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg5e16ff_v_u16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , i32 } @llvm.riscv.vlseg5ff.nxv4i16.i32(i16* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 5 +// CHECK-RV32-NEXT: store i32 [[TMP6]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg5e16ff_v_u16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.nxv4i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store 
[[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg5e16ff_v_u16m1 (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, const uint16_t *base, size_t *new_vl, size_t vl) { + return vlseg5e16ff_v_u16m1(v0, v1, v2, v3, v4, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg6e16ff_v_u16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , i32 } @llvm.riscv.vlseg6ff.nxv4i16.i32(i16* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 6 +// CHECK-RV32-NEXT: store i32 [[TMP7]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg6e16ff_v_u16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.nxv4i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg6e16ff_v_u16m1 (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, const uint16_t *base, size_t *new_vl, size_t vl) { + return vlseg6e16ff_v_u16m1(v0, v1, v2, v3, v4, v5, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg7e16ff_v_u16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , i32 } @llvm.riscv.vlseg7ff.nxv4i16.i32(i16* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: 
[[TMP2:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 6 +// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 7 +// CHECK-RV32-NEXT: store i32 [[TMP8]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg7e16ff_v_u16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.nxv4i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg7e16ff_v_u16m1 (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, const uint16_t *base, size_t *new_vl, size_t vl) { + return vlseg7e16ff_v_u16m1(v0, v1, v2, v3, v4, v5, v6, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg8e16ff_v_u16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i32 } @llvm.riscv.vlseg8ff.nxv4i16.i32(i16* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , , i32 } 
[[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 6 +// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 7 +// CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 8 +// CHECK-RV32-NEXT: store i32 [[TMP9]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg8e16ff_v_u16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.nxv4i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 8 +// CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg8e16ff_v_u16m1 (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vuint16m1_t *v7, const uint16_t *base, size_t *new_vl, size_t vl) { + return vlseg8e16ff_v_u16m1(v0, v1, v2, v3, v4, v5, v6, v7, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg2e16ff_v_u16m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.nxv8i16.i32(i16* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg2e16ff_v_u16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.nxv8i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = 
extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 8 x i16> [[TMP2]], <vscale x 8 x i16>* [[V1:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg2e16ff_v_u16m2 (vuint16m2_t *v0, vuint16m2_t *v1, const uint16_t *base, size_t *new_vl, size_t vl) {
+  return vlseg2e16ff_v_u16m2(v0, v1, base, new_vl, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlseg3e16ff_v_u16m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, i32 } @llvm.riscv.vlseg3ff.nxv8i16.i32(i16* [[BASE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, i32 } [[TMP0]], 0
+// CHECK-RV32-NEXT:    store <vscale x 8 x i16> [[TMP1]], <vscale x 8 x i16>* [[V0:%.*]], align 4
+// CHECK-RV32-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, i32 } [[TMP0]], 1
+// CHECK-RV32-NEXT:    store <vscale x 8 x i16> [[TMP2]], <vscale x 8 x i16>* [[V1:%.*]], align 4
+// CHECK-RV32-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, i32 } [[TMP0]], 2
+// CHECK-RV32-NEXT:    store <vscale x 8 x i16> [[TMP3]], <vscale x 8 x i16>* [[V2:%.*]], align 4
+// CHECK-RV32-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, i32 } [[TMP0]], 3
+// CHECK-RV32-NEXT:    store i32 [[TMP4]], i32* [[NEW_VL:%.*]], align 4
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_vlseg3e16ff_v_u16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, i64 } @llvm.riscv.vlseg3ff.nxv8i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 8 x i16> [[TMP1]], <vscale x 8 x i16>* [[V0:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 8 x i16> [[TMP2]], <vscale x 8 x i16>* [[V1:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 8 x i16> [[TMP3]], <vscale x 8 x i16>* [[V2:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, i64 } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 8
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg3e16ff_v_u16m2 (vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, const uint16_t *base, size_t *new_vl, size_t vl) {
+  return vlseg3e16ff_v_u16m2(v0, v1, v2, base, new_vl, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlseg4e16ff_v_u16m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, i32 } @llvm.riscv.vlseg4ff.nxv8i16.i32(i16* [[BASE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, i32 } [[TMP0]], 0
+// CHECK-RV32-NEXT:    store <vscale x 8 x i16> [[TMP1]], <vscale x 8 x i16>* [[V0:%.*]], align 4
+// CHECK-RV32-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, i32 } [[TMP0]], 1
+// CHECK-RV32-NEXT:    store <vscale x 8 x i16> [[TMP2]], <vscale x 8 x i16>* [[V1:%.*]], align 4
+// CHECK-RV32-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, i32 } [[TMP0]], 2
+// CHECK-RV32-NEXT:    store <vscale x 8 x i16> [[TMP3]], <vscale x 8 x i16>* [[V2:%.*]], align 4
+// CHECK-RV32-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, i32 } [[TMP0]], 3
+// CHECK-RV32-NEXT:    store <vscale x 8 x i16> [[TMP4]], <vscale x 8 x i16>* [[V3:%.*]], align 4
+// CHECK-RV32-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, i32 } [[TMP0]], 4
+// CHECK-RV32-NEXT:    store i32 [[TMP5]], i32* [[NEW_VL:%.*]], align 4
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_vlseg4e16ff_v_u16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, i64 } @llvm.riscv.vlseg4ff.nxv8i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 8 x i16> [[TMP1]], <vscale x 8 x i16>* [[V0:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 8 x i16> [[TMP2]], <vscale x 8 x i16>* [[V1:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 8 x i16> [[TMP3]], <vscale x 8 x i16>* [[V2:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, i64 } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 8 x i16> [[TMP4]], <vscale x 8 x i16>* [[V3:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, i64 } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 8
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg4e16ff_v_u16m2 (vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vuint16m2_t *v3, const uint16_t *base, size_t *new_vl, size_t vl) {
+  return vlseg4e16ff_v_u16m2(v0, v1, v2, v3, base, new_vl, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlseg2e16ff_v_u16m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { <vscale x 16 x i16>, <vscale x 16 x i16>, i32 } @llvm.riscv.vlseg2ff.nxv16i16.i32(i16* [[BASE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 16 x i16>, <vscale x 16 x i16>, i32 } [[TMP0]], 0
+// CHECK-RV32-NEXT:    store <vscale x 16 x i16> [[TMP1]], <vscale x 16 x i16>* [[V0:%.*]], align 4
+// CHECK-RV32-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 16 x i16>, <vscale x 16 x i16>, i32 } [[TMP0]], 1
+// CHECK-RV32-NEXT:    store <vscale x 16 x i16> [[TMP2]], <vscale x 16 x i16>* [[V1:%.*]], align 4
+// CHECK-RV32-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 16 x i16>, <vscale x 16 x i16>, i32 } [[TMP0]], 2
+// CHECK-RV32-NEXT:    store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 4
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_vlseg2e16ff_v_u16m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 16 x i16>, <vscale x 16 x i16>, i64 } @llvm.riscv.vlseg2ff.nxv16i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 16 x i16>, <vscale x 16 x i16>, i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 16 x i16> [[TMP1]], <vscale x 16 x i16>* [[V0:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 16 x i16>, <vscale x 16 x i16>, i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 16 x i16> [[TMP2]], <vscale x 16 x i16>* [[V1:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 16 x i16>, <vscale x 16 x i16>, i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg2e16ff_v_u16m4 (vuint16m4_t *v0, vuint16m4_t *v1, const uint16_t *base, size_t *new_vl, size_t vl) {
+  return vlseg2e16ff_v_u16m4(v0, v1, base, new_vl, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlseg2e32ff_v_u32mf2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32>, i32 } @llvm.riscv.vlseg2ff.nxv1i32.i32(i32* [[BASE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, i32 } [[TMP0]], 0
+// CHECK-RV32-NEXT:    store <vscale x 1 x i32> [[TMP1]], <vscale x 1 x i32>* [[V0:%.*]], align 4
+// CHECK-RV32-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, i32 } [[TMP0]], 1
+// CHECK-RV32-NEXT:    store <vscale x 1 x i32> [[TMP2]], <vscale x 1 x i32>* [[V1:%.*]], align 4
+// CHECK-RV32-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, i32 } [[TMP0]], 2
+// CHECK-RV32-NEXT:    store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 4
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_vlseg2e32ff_v_u32mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32>, i64 } @llvm.riscv.vlseg2ff.nxv1i32.i64(i32* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP1]], <vscale x 1 x i32>* [[V0:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP2]], <vscale x 1 x i32>* [[V1:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg2e32ff_v_u32mf2 (vuint32mf2_t *v0, vuint32mf2_t *v1, const uint32_t *base, size_t *new_vl, size_t vl) {
+  return vlseg2e32ff_v_u32mf2(v0, v1, base, new_vl, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlseg3e32ff_v_u32mf2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, i32 } @llvm.riscv.vlseg3ff.nxv1i32.i32(i32* [[BASE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, i32 } [[TMP0]], 0
+// CHECK-RV32-NEXT:    store <vscale x 1 x i32> [[TMP1]], <vscale x 1 x i32>* [[V0:%.*]], align 4
+// CHECK-RV32-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, i32 } [[TMP0]], 1
+// CHECK-RV32-NEXT:    store <vscale x 1 x i32> [[TMP2]], <vscale x 1 x i32>* [[V1:%.*]], align 4
+// CHECK-RV32-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, i32 } [[TMP0]], 2
+// CHECK-RV32-NEXT:    store <vscale x 1 x i32> [[TMP3]], <vscale x 1 x i32>* [[V2:%.*]], align 4
+// CHECK-RV32-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, i32 } [[TMP0]], 3
+// CHECK-RV32-NEXT:    store i32 [[TMP4]], i32* [[NEW_VL:%.*]], align 4
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_vlseg3e32ff_v_u32mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, i64 } @llvm.riscv.vlseg3ff.nxv1i32.i64(i32* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP1]], <vscale x 1 x i32>* [[V0:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP2]], <vscale x 1 x i32>* [[V1:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP3]], <vscale x 1 x i32>* [[V2:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, i64 } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 8
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg3e32ff_v_u32mf2 (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, const uint32_t *base, size_t *new_vl, size_t vl) {
+  return vlseg3e32ff_v_u32mf2(v0, v1, v2, base, new_vl, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlseg4e32ff_v_u32mf2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, i32 } @llvm.riscv.vlseg4ff.nxv1i32.i32(i32* [[BASE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, i32 } [[TMP0]], 0
+// CHECK-RV32-NEXT:    store <vscale x 1 x i32> [[TMP1]], <vscale x 1 x i32>* [[V0:%.*]], align 4
+// CHECK-RV32-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, i32 } [[TMP0]], 1
+// CHECK-RV32-NEXT:    store <vscale x 1 x i32> [[TMP2]], <vscale x 1 x i32>* [[V1:%.*]], align 4
+// CHECK-RV32-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, i32 } [[TMP0]], 2
+// CHECK-RV32-NEXT:    store <vscale x 1 x i32> [[TMP3]], <vscale x 1 x i32>* [[V2:%.*]], align 4
+// CHECK-RV32-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, i32 } [[TMP0]], 3
+// CHECK-RV32-NEXT:    store <vscale x 1 x i32> [[TMP4]], <vscale x 1 x i32>* [[V3:%.*]], align 4
+// CHECK-RV32-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, i32 } [[TMP0]], 4
+// CHECK-RV32-NEXT:    store i32 [[TMP5]], i32* [[NEW_VL:%.*]], align 4
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_vlseg4e32ff_v_u32mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, i64 } @llvm.riscv.vlseg4ff.nxv1i32.i64(i32* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP1]], <vscale x 1 x i32>* [[V0:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP2]], <vscale x 1 x i32>* [[V1:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP3]], <vscale x 1 x i32>* [[V2:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, i64 } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP4]], <vscale x 1 x i32>* [[V3:%.*]], align 8
+//
CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e32ff_v_u32mf2 (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, const uint32_t *base, size_t *new_vl, size_t vl) { + return vlseg4e32ff_v_u32mf2(v0, v1, v2, v3, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg5e32ff_v_u32mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , i32 } @llvm.riscv.vlseg5ff.nxv1i32.i32(i32* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 5 +// CHECK-RV32-NEXT: store i32 [[TMP6]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg5e32ff_v_u32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.nxv1i32.i64(i32* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg5e32ff_v_u32mf2 (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, const uint32_t *base, size_t *new_vl, size_t vl) { + return vlseg5e32ff_v_u32mf2(v0, v1, v2, v3, v4, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg6e32ff_v_u32mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , i32 } @llvm.riscv.vlseg6ff.nxv1i32.i32(i32* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * 
[[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 6 +// CHECK-RV32-NEXT: store i32 [[TMP7]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg6e32ff_v_u32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.nxv1i32.i64(i32* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg6e32ff_v_u32mf2 (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, const uint32_t *base, size_t *new_vl, size_t vl) { + return vlseg6e32ff_v_u32mf2(v0, v1, v2, v3, v4, v5, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg7e32ff_v_u32mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , i32 } @llvm.riscv.vlseg7ff.nxv1i32.i32(i32* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 6 +// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 7 +// CHECK-RV32-NEXT: store i32 [[TMP8]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg7e32ff_v_u32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } 
@llvm.riscv.vlseg7ff.nxv1i32.i64(i32* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg7e32ff_v_u32mf2 (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, const uint32_t *base, size_t *new_vl, size_t vl) { + return vlseg7e32ff_v_u32mf2(v0, v1, v2, v3, v4, v5, v6, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg8e32ff_v_u32mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i32 } @llvm.riscv.vlseg8ff.nxv1i32.i32(i32* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 6 +// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 7 +// CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 8 +// CHECK-RV32-NEXT: store i32 [[TMP9]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg8e32ff_v_u32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.nxv1i32.i64(i32* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: 
store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 8 +// CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg8e32ff_v_u32mf2 (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, const uint32_t *base, size_t *new_vl, size_t vl) { + return vlseg8e32ff_v_u32mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg2e32ff_v_u32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.nxv2i32.i32(i32* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg2e32ff_v_u32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.nxv2i32.i64(i32* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e32ff_v_u32m1 (vuint32m1_t *v0, vuint32m1_t *v1, const uint32_t *base, size_t *new_vl, size_t vl) { + return vlseg2e32ff_v_u32m1(v0, v1, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg3e32ff_v_u32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , i32 } @llvm.riscv.vlseg3ff.nxv2i32.i32(i32* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , i32 } [[TMP0]], 3 +// 
CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg3e32ff_v_u32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.nxv2i32.i64(i32* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e32ff_v_u32m1 (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, const uint32_t *base, size_t *new_vl, size_t vl) { + return vlseg3e32ff_v_u32m1(v0, v1, v2, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg4e32ff_v_u32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , i32 } @llvm.riscv.vlseg4ff.nxv2i32.i32(i32* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store i32 [[TMP5]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg4e32ff_v_u32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.nxv2i32.i64(i32* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e32ff_v_u32m1 (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, const uint32_t *base, size_t *new_vl, size_t vl) { + return vlseg4e32ff_v_u32m1(v0, v1, v2, v3, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg5e32ff_v_u32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , i32 } @llvm.riscv.vlseg5ff.nxv2i32.i32(i32* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , 
, i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 5 +// CHECK-RV32-NEXT: store i32 [[TMP6]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg5e32ff_v_u32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.nxv2i32.i64(i32* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg5e32ff_v_u32m1 (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, const uint32_t *base, size_t *new_vl, size_t vl) { + return vlseg5e32ff_v_u32m1(v0, v1, v2, v3, v4, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg6e32ff_v_u32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , i32 } @llvm.riscv.vlseg6ff.nxv2i32.i32(i32* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 6 +// CHECK-RV32-NEXT: store i32 [[TMP7]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg6e32ff_v_u32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.nxv2i32.i64(i32* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * 
[[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg6e32ff_v_u32m1 (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, const uint32_t *base, size_t *new_vl, size_t vl) { + return vlseg6e32ff_v_u32m1(v0, v1, v2, v3, v4, v5, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg7e32ff_v_u32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , i32 } @llvm.riscv.vlseg7ff.nxv2i32.i32(i32* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 6 +// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 7 +// CHECK-RV32-NEXT: store i32 [[TMP8]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg7e32ff_v_u32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.nxv2i32.i64(i32* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 
5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg7e32ff_v_u32m1 (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, const uint32_t *base, size_t *new_vl, size_t vl) { + return vlseg7e32ff_v_u32m1(v0, v1, v2, v3, v4, v5, v6, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg8e32ff_v_u32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i32 } @llvm.riscv.vlseg8ff.nxv2i32.i32(i32* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 6 +// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 7 +// CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 8 +// CHECK-RV32-NEXT: store i32 [[TMP9]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg8e32ff_v_u32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.nxv2i32.i64(i32* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store 
[[TMP8]], * [[V7:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 8 +// CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg8e32ff_v_u32m1 (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vuint32m1_t *v7, const uint32_t *base, size_t *new_vl, size_t vl) { + return vlseg8e32ff_v_u32m1(v0, v1, v2, v3, v4, v5, v6, v7, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg2e32ff_v_u32m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.nxv4i32.i32(i32* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg2e32ff_v_u32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.nxv4i32.i64(i32* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e32ff_v_u32m2 (vuint32m2_t *v0, vuint32m2_t *v1, const uint32_t *base, size_t *new_vl, size_t vl) { + return vlseg2e32ff_v_u32m2(v0, v1, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg3e32ff_v_u32m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , i32 } @llvm.riscv.vlseg3ff.nxv4i32.i32(i32* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg3e32ff_v_u32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.nxv4i32.i64(i32* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void 
test_vlseg3e32ff_v_u32m2 (vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, const uint32_t *base, size_t *new_vl, size_t vl) {
+  return vlseg3e32ff_v_u32m2(v0, v1, v2, base, new_vl, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlseg4e32ff_v_u32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, i32 } @llvm.riscv.vlseg4ff.nxv4i32.i32(i32* [[BASE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, i32 } [[TMP0]], 0
+// CHECK-RV32-NEXT: store <vscale x 4 x i32> [[TMP1]], <vscale x 4 x i32>* [[V0:%.*]], align 4
+// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, i32 } [[TMP0]], 1
+// CHECK-RV32-NEXT: store <vscale x 4 x i32> [[TMP2]], <vscale x 4 x i32>* [[V1:%.*]], align 4
+// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, i32 } [[TMP0]], 2
+// CHECK-RV32-NEXT: store <vscale x 4 x i32> [[TMP3]], <vscale x 4 x i32>* [[V2:%.*]], align 4
+// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, i32 } [[TMP0]], 3
+// CHECK-RV32-NEXT: store <vscale x 4 x i32> [[TMP4]], <vscale x 4 x i32>* [[V3:%.*]], align 4
+// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, i32 } [[TMP0]], 4
+// CHECK-RV32-NEXT: store i32 [[TMP5]], i32* [[NEW_VL:%.*]], align 4
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vlseg4e32ff_v_u32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, i64 } @llvm.riscv.vlseg4ff.nxv4i32.i64(i32* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 4 x i32> [[TMP1]], <vscale x 4 x i32>* [[V0:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 4 x i32> [[TMP2]], <vscale x 4 x i32>* [[V1:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 4 x i32> [[TMP3]], <vscale x 4 x i32>* [[V2:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, i64 } [[TMP0]], 3
+// CHECK-RV64-NEXT: store <vscale x 4 x i32> [[TMP4]], <vscale x 4 x i32>* [[V3:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, i64 } [[TMP0]], 4
+// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 8
+// CHECK-RV64-NEXT: ret void
+//
+void test_vlseg4e32ff_v_u32m2 (vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vuint32m2_t *v3, const uint32_t *base, size_t *new_vl, size_t vl) {
+  return vlseg4e32ff_v_u32m2(v0, v1, v2, v3, base, new_vl, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlseg2e32ff_v_u32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { <vscale x 8 x i32>, <vscale x 8 x i32>, i32 } @llvm.riscv.vlseg2ff.nxv8i32.i32(i32* [[BASE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 8 x i32>, <vscale x 8 x i32>, i32 } [[TMP0]], 0
+// CHECK-RV32-NEXT: store <vscale x 8 x i32> [[TMP1]], <vscale x 8 x i32>* [[V0:%.*]], align 4
+// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 8 x i32>, <vscale x 8 x i32>, i32 } [[TMP0]], 1
+// CHECK-RV32-NEXT: store <vscale x 8 x i32> [[TMP2]], <vscale x 8 x i32>* [[V1:%.*]], align 4
+// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 8 x i32>, <vscale x 8 x i32>, i32 } [[TMP0]], 2
+// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 4
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vlseg2e32ff_v_u32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 8 x i32>, <vscale x 8 x i32>, i64 } @llvm.riscv.vlseg2ff.nxv8i32.i64(i32* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 8 x i32>, <vscale x 8 x i32>, i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 8 x i32> [[TMP1]], <vscale x 8 x i32>* [[V0:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 8 x i32>, <vscale x 8 x i32>, i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 8 x i32> [[TMP2]], <vscale x 8 x i32>* [[V1:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 8 x i32>, <vscale x 8 x i32>, i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
+// CHECK-RV64-NEXT: ret void
+//
+void
test_vlseg2e32ff_v_u32m4 (vuint32m4_t *v0, vuint32m4_t *v1, const uint32_t *base, size_t *new_vl, size_t vl) {
+  return vlseg2e32ff_v_u32m4(v0, v1, base, new_vl, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlseg2e64ff_v_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i64>, <vscale x 1 x i64>, i32 } @llvm.riscv.vlseg2ff.nxv1i64.i32(i64* [[BASE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, i32 } [[TMP0]], 0
+// CHECK-RV32-NEXT: store <vscale x 1 x i64> [[TMP1]], <vscale x 1 x i64>* [[V0:%.*]], align 4
+// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, i32 } [[TMP0]], 1
+// CHECK-RV32-NEXT: store <vscale x 1 x i64> [[TMP2]], <vscale x 1 x i64>* [[V1:%.*]], align 4
+// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, i32 } [[TMP0]], 2
+// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 4
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vlseg2e64ff_v_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i64>, <vscale x 1 x i64>, i64 } @llvm.riscv.vlseg2ff.nxv1i64.i64(i64* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 1 x i64> [[TMP1]], <vscale x 1 x i64>* [[V0:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 1 x i64> [[TMP2]], <vscale x 1 x i64>* [[V1:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
+// CHECK-RV64-NEXT: ret void
+//
+void test_vlseg2e64ff_v_u64m1 (vuint64m1_t *v0, vuint64m1_t *v1, const uint64_t *base, size_t *new_vl, size_t vl) {
+  return vlseg2e64ff_v_u64m1(v0, v1, base, new_vl, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlseg3e64ff_v_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, i32 } @llvm.riscv.vlseg3ff.nxv1i64.i32(i64* [[BASE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, i32 } [[TMP0]], 0
+// CHECK-RV32-NEXT: store <vscale x 1 x i64> [[TMP1]], <vscale x 1 x i64>* [[V0:%.*]], align 4
+// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, i32 } [[TMP0]], 1
+// CHECK-RV32-NEXT: store <vscale x 1 x i64> [[TMP2]], <vscale x 1 x i64>* [[V1:%.*]], align 4
+// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, i32 } [[TMP0]], 2
+// CHECK-RV32-NEXT: store <vscale x 1 x i64> [[TMP3]], <vscale x 1 x i64>* [[V2:%.*]], align 4
+// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, i32 } [[TMP0]], 3
+// CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL:%.*]], align 4
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vlseg3e64ff_v_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, i64 } @llvm.riscv.vlseg3ff.nxv1i64.i64(i64* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 1 x i64> [[TMP1]], <vscale x 1 x i64>* [[V0:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 1 x i64> [[TMP2]], <vscale x 1 x i64>* [[V1:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 1 x i64> [[TMP3]], <vscale x 1 x i64>* [[V2:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, i64 } [[TMP0]], 3
+// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 8
+// CHECK-RV64-NEXT: ret void
+//
+void test_vlseg3e64ff_v_u64m1 (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, const uint64_t *base, size_t *new_vl, size_t vl) {
+  return vlseg3e64ff_v_u64m1(v0, v1, v2, base, new_vl, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlseg4e64ff_v_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, i32 }
@llvm.riscv.vlseg4ff.nxv1i64.i32(i64* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store i32 [[TMP5]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg4e64ff_v_u64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.nxv1i64.i64(i64* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e64ff_v_u64m1 (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, const uint64_t *base, size_t *new_vl, size_t vl) { + return vlseg4e64ff_v_u64m1(v0, v1, v2, v3, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg5e64ff_v_u64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , i32 } @llvm.riscv.vlseg5ff.nxv1i64.i32(i64* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 5 +// CHECK-RV32-NEXT: store i32 [[TMP6]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg5e64ff_v_u64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.nxv1i64.i64(i64* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , 
, , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg5e64ff_v_u64m1 (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, const uint64_t *base, size_t *new_vl, size_t vl) { + return vlseg5e64ff_v_u64m1(v0, v1, v2, v3, v4, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg6e64ff_v_u64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , i32 } @llvm.riscv.vlseg6ff.nxv1i64.i32(i64* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 6 +// CHECK-RV32-NEXT: store i32 [[TMP7]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg6e64ff_v_u64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.nxv1i64.i64(i64* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg6e64ff_v_u64m1 (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, const uint64_t *base, size_t *new_vl, size_t vl) { + return vlseg6e64ff_v_u64m1(v0, v1, v2, v3, v4, v5, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg7e64ff_v_u64m1( 
+// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , i32 } @llvm.riscv.vlseg7ff.nxv1i64.i32(i64* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 6 +// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 7 +// CHECK-RV32-NEXT: store i32 [[TMP8]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg7e64ff_v_u64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.nxv1i64.i64(i64* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg7e64ff_v_u64m1 (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, const uint64_t *base, size_t *new_vl, size_t vl) { + return vlseg7e64ff_v_u64m1(v0, v1, v2, v3, v4, v5, v6, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg8e64ff_v_u64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i32 } @llvm.riscv.vlseg8ff.nxv1i64.i32(i64* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = 
extractvalue { , , , , , , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 6 +// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 7 +// CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 8 +// CHECK-RV32-NEXT: store i32 [[TMP9]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg8e64ff_v_u64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.nxv1i64.i64(i64* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 8 +// CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg8e64ff_v_u64m1 (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vuint64m1_t *v7, const uint64_t *base, size_t *new_vl, size_t vl) { + return vlseg8e64ff_v_u64m1(v0, v1, v2, v3, v4, v5, v6, v7, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg2e64ff_v_u64m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.nxv2i64.i32(i64* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: 
@test_vlseg2e64ff_v_u64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.nxv2i64.i64(i64* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e64ff_v_u64m2 (vuint64m2_t *v0, vuint64m2_t *v1, const uint64_t *base, size_t *new_vl, size_t vl) { + return vlseg2e64ff_v_u64m2(v0, v1, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg3e64ff_v_u64m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , i32 } @llvm.riscv.vlseg3ff.nxv2i64.i32(i64* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg3e64ff_v_u64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.nxv2i64.i64(i64* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e64ff_v_u64m2 (vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, const uint64_t *base, size_t *new_vl, size_t vl) { + return vlseg3e64ff_v_u64m2(v0, v1, v2, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg4e64ff_v_u64m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , i32 } @llvm.riscv.vlseg4ff.nxv2i64.i32(i64* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store i32 [[TMP5]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg4e64ff_v_u64m2( +// CHECK-RV64-NEXT: 
entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.nxv2i64.i64(i64* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e64ff_v_u64m2 (vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vuint64m2_t *v3, const uint64_t *base, size_t *new_vl, size_t vl) { + return vlseg4e64ff_v_u64m2(v0, v1, v2, v3, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg2e64ff_v_u64m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.nxv4i64.i32(i64* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg2e64ff_v_u64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.nxv4i64.i64(i64* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e64ff_v_u64m4 (vuint64m4_t *v0, vuint64m4_t *v1, const uint64_t *base, size_t *new_vl, size_t vl) { + return vlseg2e64ff_v_u64m4(v0, v1, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg2e32ff_v_f32mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.nxv1f32.i32(float* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg2e32ff_v_f32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.nxv1f32.i64(float* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = 
extractvalue { , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e32ff_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, const float *base, size_t *new_vl, size_t vl) { + return vlseg2e32ff_v_f32mf2(v0, v1, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg3e32ff_v_f32mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , i32 } @llvm.riscv.vlseg3ff.nxv1f32.i32(float* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg3e32ff_v_f32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.nxv1f32.i64(float* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e32ff_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, const float *base, size_t *new_vl, size_t vl) { + return vlseg3e32ff_v_f32mf2(v0, v1, v2, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg4e32ff_v_f32mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , i32 } @llvm.riscv.vlseg4ff.nxv1f32.i32(float* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store i32 [[TMP5]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg4e32ff_v_f32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.nxv1f32.i64(float* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , 
, i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e32ff_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, const float *base, size_t *new_vl, size_t vl) { + return vlseg4e32ff_v_f32mf2(v0, v1, v2, v3, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg5e32ff_v_f32mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , i32 } @llvm.riscv.vlseg5ff.nxv1f32.i32(float* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 5 +// CHECK-RV32-NEXT: store i32 [[TMP6]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg5e32ff_v_f32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.nxv1f32.i64(float* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg5e32ff_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, const float *base, size_t *new_vl, size_t vl) { + return vlseg5e32ff_v_f32mf2(v0, v1, v2, v3, v4, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg6e32ff_v_f32mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , i32 } @llvm.riscv.vlseg6ff.nxv1f32.i32(float* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = 
extractvalue { , , , , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 6 +// CHECK-RV32-NEXT: store i32 [[TMP7]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg6e32ff_v_f32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.nxv1f32.i64(float* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg6e32ff_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, const float *base, size_t *new_vl, size_t vl) { + return vlseg6e32ff_v_f32mf2(v0, v1, v2, v3, v4, v5, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg7e32ff_v_f32mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , i32 } @llvm.riscv.vlseg7ff.nxv1f32.i32(float* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 6 +// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], 
align 4 +// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 7 +// CHECK-RV32-NEXT: store i32 [[TMP8]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg7e32ff_v_f32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.nxv1f32.i64(float* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg7e32ff_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, const float *base, size_t *new_vl, size_t vl) { + return vlseg7e32ff_v_f32mf2(v0, v1, v2, v3, v4, v5, v6, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg8e32ff_v_f32mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i32 } @llvm.riscv.vlseg8ff.nxv1f32.i32(float* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 6 +// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 7 +// CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 8 +// CHECK-RV32-NEXT: store i32 [[TMP9]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg8e32ff_v_f32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.nxv1f32.i64(float* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 8 +// CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg8e32ff_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, const float *base, size_t *new_vl, size_t vl) { + return vlseg8e32ff_v_f32mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg2e32ff_v_f32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.nxv2f32.i32(float* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg2e32ff_v_f32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.nxv2f32.i64(float* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e32ff_v_f32m1 (vfloat32m1_t *v0, vfloat32m1_t *v1, const float *base, size_t *new_vl, size_t vl) { + return vlseg2e32ff_v_f32m1(v0, v1, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg3e32ff_v_f32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , i32 } @llvm.riscv.vlseg3ff.nxv2f32.i32(float* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 
+// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg3e32ff_v_f32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.nxv2f32.i64(float* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e32ff_v_f32m1 (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, const float *base, size_t *new_vl, size_t vl) { + return vlseg3e32ff_v_f32m1(v0, v1, v2, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg4e32ff_v_f32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , i32 } @llvm.riscv.vlseg4ff.nxv2f32.i32(float* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store i32 [[TMP5]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg4e32ff_v_f32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.nxv2f32.i64(float* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e32ff_v_f32m1 (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, const float *base, size_t *new_vl, size_t vl) { + return vlseg4e32ff_v_f32m1(v0, v1, v2, v3, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: 
@test_vlseg5e32ff_v_f32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , i32 } @llvm.riscv.vlseg5ff.nxv2f32.i32(float* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 5 +// CHECK-RV32-NEXT: store i32 [[TMP6]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg5e32ff_v_f32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.nxv2f32.i64(float* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg5e32ff_v_f32m1 (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, const float *base, size_t *new_vl, size_t vl) { + return vlseg5e32ff_v_f32m1(v0, v1, v2, v3, v4, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg6e32ff_v_f32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , i32 } @llvm.riscv.vlseg6ff.nxv2f32.i32(float* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 6 +// CHECK-RV32-NEXT: store i32 [[TMP7]], i32* 
[[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg6e32ff_v_f32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.nxv2f32.i64(float* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg6e32ff_v_f32m1 (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, const float *base, size_t *new_vl, size_t vl) { + return vlseg6e32ff_v_f32m1(v0, v1, v2, v3, v4, v5, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg7e32ff_v_f32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , i32 } @llvm.riscv.vlseg7ff.nxv2f32.i32(float* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 6 +// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 7 +// CHECK-RV32-NEXT: store i32 [[TMP8]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg7e32ff_v_f32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.nxv2f32.i64(float* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store 
[[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg7e32ff_v_f32m1 (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, const float *base, size_t *new_vl, size_t vl) { + return vlseg7e32ff_v_f32m1(v0, v1, v2, v3, v4, v5, v6, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg8e32ff_v_f32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i32 } @llvm.riscv.vlseg8ff.nxv2f32.i32(float* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 6 +// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 7 +// CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 8 +// CHECK-RV32-NEXT: store i32 [[TMP9]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg8e32ff_v_f32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.nxv2f32.i64(float* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], 
align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 8 +// CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg8e32ff_v_f32m1 (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t *v7, const float *base, size_t *new_vl, size_t vl) { + return vlseg8e32ff_v_f32m1(v0, v1, v2, v3, v4, v5, v6, v7, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg2e32ff_v_f32m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.nxv4f32.i32(float* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg2e32ff_v_f32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.nxv4f32.i64(float* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e32ff_v_f32m2 (vfloat32m2_t *v0, vfloat32m2_t *v1, const float *base, size_t *new_vl, size_t vl) { + return vlseg2e32ff_v_f32m2(v0, v1, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg3e32ff_v_f32m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , i32 } @llvm.riscv.vlseg3ff.nxv4f32.i32(float* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg3e32ff_v_f32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.nxv4f32.i64(float* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = 
extractvalue { , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e32ff_v_f32m2 (vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, const float *base, size_t *new_vl, size_t vl) { + return vlseg3e32ff_v_f32m2(v0, v1, v2, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg4e32ff_v_f32m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , i32 } @llvm.riscv.vlseg4ff.nxv4f32.i32(float* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store i32 [[TMP5]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg4e32ff_v_f32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.nxv4f32.i64(float* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e32ff_v_f32m2 (vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t *v3, const float *base, size_t *new_vl, size_t vl) { + return vlseg4e32ff_v_f32m2(v0, v1, v2, v3, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg2e32ff_v_f32m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.nxv8f32.i32(float* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg2e32ff_v_f32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.nxv8f32.i64(float* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: 
[[TMP1:%.*]] = extractvalue { <vscale x 8 x float>, <vscale x 8 x float>, i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 8 x float> [[TMP1]], <vscale x 8 x float>* [[V0:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 8 x float>, <vscale x 8 x float>, i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 8 x float> [[TMP2]], <vscale x 8 x float>* [[V1:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 8 x float>, <vscale x 8 x float>, i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
+// CHECK-RV64-NEXT: ret void
+//
+void test_vlseg2e32ff_v_f32m4 (vfloat32m4_t *v0, vfloat32m4_t *v1, const float *base, size_t *new_vl, size_t vl) {
+  return vlseg2e32ff_v_f32m4(v0, v1, base, new_vl, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlseg2e64ff_v_f64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x double>, <vscale x 1 x double>, i32 } @llvm.riscv.vlseg2ff.nxv1f64.i32(double* [[BASE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, i32 } [[TMP0]], 0
+// CHECK-RV32-NEXT: store <vscale x 1 x double> [[TMP1]], <vscale x 1 x double>* [[V0:%.*]], align 4
+// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, i32 } [[TMP0]], 1
+// CHECK-RV32-NEXT: store <vscale x 1 x double> [[TMP2]], <vscale x 1 x double>* [[V1:%.*]], align 4
+// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, i32 } [[TMP0]], 2
+// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 4
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vlseg2e64ff_v_f64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x double>, <vscale x 1 x double>, i64 } @llvm.riscv.vlseg2ff.nxv1f64.i64(double* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 1 x double> [[TMP1]], <vscale x 1 x double>* [[V0:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 1 x double> [[TMP2]], <vscale x 1 x double>* [[V1:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
+// CHECK-RV64-NEXT: ret void
+//
+void test_vlseg2e64ff_v_f64m1 (vfloat64m1_t *v0, vfloat64m1_t *v1, const double *base, size_t *new_vl, size_t vl) {
+  return vlseg2e64ff_v_f64m1(v0, v1, base, new_vl, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlseg3e64ff_v_f64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, i32 } @llvm.riscv.vlseg3ff.nxv1f64.i32(double* [[BASE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, i32 } [[TMP0]], 0
+// CHECK-RV32-NEXT: store <vscale x 1 x double> [[TMP1]], <vscale x 1 x double>* [[V0:%.*]], align 4
+// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, i32 } [[TMP0]], 1
+// CHECK-RV32-NEXT: store <vscale x 1 x double> [[TMP2]], <vscale x 1 x double>* [[V1:%.*]], align 4
+// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, i32 } [[TMP0]], 2
+// CHECK-RV32-NEXT: store <vscale x 1 x double> [[TMP3]], <vscale x 1 x double>* [[V2:%.*]], align 4
+// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, i32 } [[TMP0]], 3
+// CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL:%.*]], align 4
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vlseg3e64ff_v_f64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, i64 } @llvm.riscv.vlseg3ff.nxv1f64.i64(double* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 1 x double> [[TMP1]], <vscale x 1 x double>* [[V0:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 1 x double> [[TMP2]], <vscale x 1 x double>* [[V1:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 1 x double> [[TMP3]], <vscale x 1 x double>* [[V2:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, i64 } [[TMP0]], 3
+// CHECK-RV64-NEXT: store i64
[[TMP4]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e64ff_v_f64m1 (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, const double *base, size_t *new_vl, size_t vl) { + return vlseg3e64ff_v_f64m1(v0, v1, v2, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg4e64ff_v_f64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , i32 } @llvm.riscv.vlseg4ff.nxv1f64.i32(double* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store i32 [[TMP5]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg4e64ff_v_f64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.nxv1f64.i64(double* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e64ff_v_f64m1 (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, const double *base, size_t *new_vl, size_t vl) { + return vlseg4e64ff_v_f64m1(v0, v1, v2, v3, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg5e64ff_v_f64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , i32 } @llvm.riscv.vlseg5ff.nxv1f64.i32(double* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 5 +// CHECK-RV32-NEXT: store i32 [[TMP6]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg5e64ff_v_f64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { 
, , , , , i64 } @llvm.riscv.vlseg5ff.nxv1f64.i64(double* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg5e64ff_v_f64m1 (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, const double *base, size_t *new_vl, size_t vl) { + return vlseg5e64ff_v_f64m1(v0, v1, v2, v3, v4, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg6e64ff_v_f64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , i32 } @llvm.riscv.vlseg6ff.nxv1f64.i32(double* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 6 +// CHECK-RV32-NEXT: store i32 [[TMP7]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg6e64ff_v_f64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.nxv1f64.i64(double* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i64 
} [[TMP0]], 6 +// CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg6e64ff_v_f64m1 (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, const double *base, size_t *new_vl, size_t vl) { + return vlseg6e64ff_v_f64m1(v0, v1, v2, v3, v4, v5, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg7e64ff_v_f64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , i32 } @llvm.riscv.vlseg7ff.nxv1f64.i32(double* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 6 +// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 7 +// CHECK-RV32-NEXT: store i32 [[TMP8]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg7e64ff_v_f64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.nxv1f64.i64(double* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg7e64ff_v_f64m1 (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, const double *base, size_t *new_vl, size_t vl) { + return vlseg7e64ff_v_f64m1(v0, v1, v2, v3, v4, v5, v6, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg8e64ff_v_f64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: 
[[TMP0:%.*]] = call { , , , , , , , , i32 } @llvm.riscv.vlseg8ff.nxv1f64.i32(double* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 6 +// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 7 +// CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 8 +// CHECK-RV32-NEXT: store i32 [[TMP9]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg8e64ff_v_f64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.nxv1f64.i64(double* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 8 +// CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg8e64ff_v_f64m1 (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t *v7, const double *base, size_t *new_vl, size_t vl) { + return vlseg8e64ff_v_f64m1(v0, v1, v2, v3, v4, v5, v6, v7, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg2e64ff_v_f64m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.nxv2f64.i32(double* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] 
= extractvalue { , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg2e64ff_v_f64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.nxv2f64.i64(double* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e64ff_v_f64m2 (vfloat64m2_t *v0, vfloat64m2_t *v1, const double *base, size_t *new_vl, size_t vl) { + return vlseg2e64ff_v_f64m2(v0, v1, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg3e64ff_v_f64m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , i32 } @llvm.riscv.vlseg3ff.nxv2f64.i32(double* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg3e64ff_v_f64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.nxv2f64.i64(double* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e64ff_v_f64m2 (vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, const double *base, size_t *new_vl, size_t vl) { + return vlseg3e64ff_v_f64m2(v0, v1, v2, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg4e64ff_v_f64m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , i32 } @llvm.riscv.vlseg4ff.nxv2f64.i32(double* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 2 +// 
CHECK-RV32-NEXT: store <vscale x 2 x double> [[TMP3]], <vscale x 2 x double>* [[V2:%.*]], align 4
+// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, i32 } [[TMP0]], 3
+// CHECK-RV32-NEXT: store <vscale x 2 x double> [[TMP4]], <vscale x 2 x double>* [[V3:%.*]], align 4
+// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, i32 } [[TMP0]], 4
+// CHECK-RV32-NEXT: store i32 [[TMP5]], i32* [[NEW_VL:%.*]], align 4
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vlseg4e64ff_v_f64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, i64 } @llvm.riscv.vlseg4ff.nxv2f64.i64(double* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 2 x double> [[TMP1]], <vscale x 2 x double>* [[V0:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 2 x double> [[TMP2]], <vscale x 2 x double>* [[V1:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 2 x double> [[TMP3]], <vscale x 2 x double>* [[V2:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, i64 } [[TMP0]], 3
+// CHECK-RV64-NEXT: store <vscale x 2 x double> [[TMP4]], <vscale x 2 x double>* [[V3:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, i64 } [[TMP0]], 4
+// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 8
+// CHECK-RV64-NEXT: ret void
+//
+void test_vlseg4e64ff_v_f64m2 (vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t *v3, const double *base, size_t *new_vl, size_t vl) {
+  return vlseg4e64ff_v_f64m2(v0, v1, v2, v3, base, new_vl, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlseg2e64ff_v_f64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x double>, <vscale x 4 x double>, i32 } @llvm.riscv.vlseg2ff.nxv4f64.i32(double* [[BASE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 4 x double>, <vscale x 4 x double>, i32 } [[TMP0]], 0
+// CHECK-RV32-NEXT: store <vscale x 4 x double> [[TMP1]], <vscale x 4 x double>* [[V0:%.*]], align 4
+// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x double>, <vscale x 4 x double>, i32 } [[TMP0]], 1
+// CHECK-RV32-NEXT: store <vscale x 4 x double> [[TMP2]], <vscale x 4 x double>* [[V1:%.*]], align 4
+// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 4 x double>, <vscale x 4 x double>, i32 } [[TMP0]], 2
+// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 4
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vlseg2e64ff_v_f64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x double>, <vscale x 4 x double>, i64 } @llvm.riscv.vlseg2ff.nxv4f64.i64(double* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 4 x double>, <vscale x 4 x double>, i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 4 x double> [[TMP1]], <vscale x 4 x double>* [[V0:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x double>, <vscale x 4 x double>, i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 4 x double> [[TMP2]], <vscale x 4 x double>* [[V1:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 4 x double>, <vscale x 4 x double>, i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
+// CHECK-RV64-NEXT: ret void
+//
+void test_vlseg2e64ff_v_f64m4 (vfloat64m4_t *v0, vfloat64m4_t *v1, const double *base, size_t *new_vl, size_t vl) {
+  return vlseg2e64ff_v_f64m4(v0, v1, base, new_vl, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlseg2e8ff_v_i8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, i32 } @llvm.riscv.vlseg2ff.mask.nxv1i8.i32(<vscale x 1 x i8> [[MASKEDOFF0:%.*]], <vscale x 1 x i8> [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, i32 } [[TMP0]], 0
+// CHECK-RV32-NEXT: store <vscale x 1 x i8> [[TMP1]], <vscale x 1 x i8>* [[V0:%.*]], align 4
+// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, i32 } [[TMP0]], 1
+// CHECK-RV32-NEXT: store <vscale x 1 x i8> [[TMP2]], <vscale x 1 x i8>* [[V1:%.*]], align 4
+// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, i32 } [[TMP0]], 2
+// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 4
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vlseg2e8ff_v_i8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, i64 } @llvm.riscv.vlseg2ff.mask.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF0:%.*]], <vscale x 1 x i8> [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP1]], <vscale x 1 x i8>* [[V0:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP2]], <vscale x 1 x i8>* [[V1:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
+// CHECK-RV64-NEXT: ret void
+//
+void test_vlseg2e8ff_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, const int8_t *base, size_t *new_vl, size_t vl) {
+  return vlseg2e8ff_v_i8mf8_m(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlseg3e8ff_v_i8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, i32 } @llvm.riscv.vlseg3ff.mask.nxv1i8.i32(<vscale x 1 x i8> [[MASKEDOFF0:%.*]], <vscale x 1 x i8> [[MASKEDOFF1:%.*]], <vscale x 1 x i8> [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, i32 } [[TMP0]], 0
+// CHECK-RV32-NEXT: store <vscale x 1 x i8> [[TMP1]], <vscale x 1 x i8>* [[V0:%.*]], align 4
+// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, i32 } [[TMP0]], 1
+// CHECK-RV32-NEXT: store <vscale x 1 x i8> [[TMP2]], <vscale x 1 x i8>* [[V1:%.*]], align 4
+// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, i32 } [[TMP0]], 2
+// CHECK-RV32-NEXT: store <vscale x 1 x i8> [[TMP3]], <vscale x 1 x i8>* [[V2:%.*]], align 4
+// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, i32 } [[TMP0]], 3
+// CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL:%.*]], align 4
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vlseg3e8ff_v_i8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, i64 } @llvm.riscv.vlseg3ff.mask.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF0:%.*]], <vscale x 1 x i8> [[MASKEDOFF1:%.*]], <vscale x 1 x i8> [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP1]], <vscale x 1 x i8>* [[V0:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP2]], <vscale x 1 x i8>* [[V1:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP3]], <vscale x 1 x i8>* [[V2:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, i64 } [[TMP0]], 3
+// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 8
+// CHECK-RV64-NEXT: ret void
+//
+void test_vlseg3e8ff_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, const int8_t *base, size_t *new_vl, size_t vl) {
+  return vlseg3e8ff_v_i8mf8_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlseg4e8ff_v_i8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, i32 } @llvm.riscv.vlseg4ff.mask.nxv1i8.i32(<vscale x 1 x i8> [[MASKEDOFF0:%.*]], <vscale x 1 x i8> [[MASKEDOFF1:%.*]], <vscale x 1 x i8> [[MASKEDOFF2:%.*]], <vscale x 1 x i8> [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, i32 } [[TMP0]], 0
+// CHECK-RV32-NEXT: store
[[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store i32 [[TMP5]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg4e8ff_v_i8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e8ff_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, const int8_t *base, size_t *new_vl, size_t vl) { + return vlseg4e8ff_v_i8mf8_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg5e8ff_v_i8mf8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , i32 } @llvm.riscv.vlseg5ff.mask.nxv1i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 5 +// CHECK-RV32-NEXT: store i32 [[TMP6]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg5e8ff_v_i8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// 
CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg5e8ff_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, const int8_t *base, size_t *new_vl, size_t vl) { + return vlseg5e8ff_v_i8mf8_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg6e8ff_v_i8mf8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , i32 } @llvm.riscv.vlseg6ff.mask.nxv1i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 6 +// CHECK-RV32-NEXT: store i32 [[TMP7]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg6e8ff_v_i8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], 
* [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg6e8ff_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, const int8_t *base, size_t *new_vl, size_t vl) { + return vlseg6e8ff_v_i8mf8_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg7e8ff_v_i8mf8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , i32 } @llvm.riscv.vlseg7ff.mask.nxv1i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 6 +// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 7 +// CHECK-RV32-NEXT: store i32 [[TMP8]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg7e8ff_v_i8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 4 +// 
CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg7e8ff_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, const int8_t *base, size_t *new_vl, size_t vl) { + return vlseg7e8ff_v_i8mf8_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg8e8ff_v_i8mf8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i32 } @llvm.riscv.vlseg8ff.mask.nxv1i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 6 +// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 7 +// CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 8 +// CHECK-RV32-NEXT: store i32 [[TMP9]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg8e8ff_v_i8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , , i64 } 
[[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 8 +// CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg8e8ff_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t *v7, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, vint8mf8_t maskedoff7, const int8_t *base, size_t *new_vl, size_t vl) { + return vlseg8e8ff_v_i8mf8_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg2e8ff_v_i8mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.mask.nxv2i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg2e8ff_v_i8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e8ff_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, const int8_t *base, size_t *new_vl, size_t vl) { + return vlseg2e8ff_v_i8mf4_m(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg3e8ff_v_i8mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , i32 } @llvm.riscv.vlseg3ff.mask.nxv2i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] 
= extractvalue { , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg3e8ff_v_i8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e8ff_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, const int8_t *base, size_t *new_vl, size_t vl) { + return vlseg3e8ff_v_i8mf4_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg4e8ff_v_i8mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , i32 } @llvm.riscv.vlseg4ff.mask.nxv2i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store i32 [[TMP5]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg4e8ff_v_i8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3 +// 
CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e8ff_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, const int8_t *base, size_t *new_vl, size_t vl) { + return vlseg4e8ff_v_i8mf4_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg5e8ff_v_i8mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , i32 } @llvm.riscv.vlseg5ff.mask.nxv2i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 5 +// CHECK-RV32-NEXT: store i32 [[TMP6]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg5e8ff_v_i8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg5e8ff_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, const int8_t *base, size_t *new_vl, size_t vl) { + return vlseg5e8ff_v_i8mf4_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg6e8ff_v_i8mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = 
call { , , , , , , i32 } @llvm.riscv.vlseg6ff.mask.nxv2i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 6 +// CHECK-RV32-NEXT: store i32 [[TMP7]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg6e8ff_v_i8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg6e8ff_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, const int8_t *base, size_t *new_vl, size_t vl) { + return vlseg6e8ff_v_i8mf4_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg7e8ff_v_i8mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , i32 } @llvm.riscv.vlseg7ff.mask.nxv2i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i32 } 
[[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 6 +// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 7 +// CHECK-RV32-NEXT: store i32 [[TMP8]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg7e8ff_v_i8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg7e8ff_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, const int8_t *base, size_t *new_vl, size_t vl) { + return vlseg7e8ff_v_i8mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg8e8ff_v_i8mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i32 } @llvm.riscv.vlseg8ff.mask.nxv2i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* 
[[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 6 +// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 7 +// CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 8 +// CHECK-RV32-NEXT: store i32 [[TMP9]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg8e8ff_v_i8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 8 +// CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg8e8ff_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t *v7, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, vint8mf4_t maskedoff7, const int8_t *base, size_t *new_vl, size_t vl) { + return 
vlseg8e8ff_v_i8mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg2e8ff_v_i8mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.mask.nxv4i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg2e8ff_v_i8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e8ff_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, const int8_t *base, size_t *new_vl, size_t vl) { + return vlseg2e8ff_v_i8mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg3e8ff_v_i8mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , i32 } @llvm.riscv.vlseg3ff.mask.nxv4i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg3e8ff_v_i8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret 
void +// +void test_vlseg3e8ff_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, const int8_t *base, size_t *new_vl, size_t vl) { + return vlseg3e8ff_v_i8mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg4e8ff_v_i8mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , i32 } @llvm.riscv.vlseg4ff.mask.nxv4i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store i32 [[TMP5]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg4e8ff_v_i8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e8ff_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, const int8_t *base, size_t *new_vl, size_t vl) { + return vlseg4e8ff_v_i8mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg5e8ff_v_i8mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , i32 } @llvm.riscv.vlseg5ff.mask.nxv4i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , 
, , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 5 +// CHECK-RV32-NEXT: store i32 [[TMP6]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg5e8ff_v_i8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg5e8ff_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, const int8_t *base, size_t *new_vl, size_t vl) { + return vlseg5e8ff_v_i8mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg6e8ff_v_i8mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , i32 } @llvm.riscv.vlseg6ff.mask.nxv4i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 6 +// CHECK-RV32-NEXT: store i32 [[TMP7]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg6e8ff_v_i8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } 
@llvm.riscv.vlseg6ff.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg6e8ff_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, const int8_t *base, size_t *new_vl, size_t vl) { + return vlseg6e8ff_v_i8mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg7e8ff_v_i8mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , i32 } @llvm.riscv.vlseg7ff.mask.nxv4i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 6 +// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 7 +// CHECK-RV32-NEXT: store i32 [[TMP8]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg7e8ff_v_i8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], 
[[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg7e8ff_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, const int8_t *base, size_t *new_vl, size_t vl) { + return vlseg7e8ff_v_i8mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg8e8ff_v_i8mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i32 } @llvm.riscv.vlseg8ff.mask.nxv4i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 6 +// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 7 +// CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 8 +// CHECK-RV32-NEXT: store i32 [[TMP9]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// 
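+// NOTE (editorial, illustrative only; not part of the generated FileCheck lines):
+// vlseg<nf>e<eew>ff_v_* are fault-only-first segment loads: on return, *new_vl
+// holds the number of elements actually loaded, which can be smaller than vl
+// when an element after the first faults. For the masked _m forms exercised
+// here, the builtin arguments (v0.., mask, maskedoff0.., base, new_vl, vl) are
+// lowered to an intrinsic call of the form (maskedoff0.., base, mask, vl), as
+// the calls above show. A hypothetical caller, reusing the signature from the
+// test_vlseg2e8ff_v_i8mf2_m case in this file:
+//
+//   size_t got;                       // elements actually loaded
+//   vint8mf2_t s0, s1;
+//   vlseg2e8ff_v_i8mf2_m(&s0, &s1, mask, maskedoff0, maskedoff1, base, &got, vl);
+//   if (got < vl) { /* a fault trimmed the load after element 0 */ }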
CHECK-RV64-LABEL: @test_vlseg8e8ff_v_i8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 8 +// CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg8e8ff_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t *v7, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, vint8mf2_t maskedoff7, const int8_t *base, size_t *new_vl, size_t vl) { + return vlseg8e8ff_v_i8mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg2e8ff_v_i8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.mask.nxv8i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg2e8ff_v_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// 
CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e8ff_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, const int8_t *base, size_t *new_vl, size_t vl) { + return vlseg2e8ff_v_i8m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg3e8ff_v_i8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , i32 } @llvm.riscv.vlseg3ff.mask.nxv8i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg3e8ff_v_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e8ff_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, const int8_t *base, size_t *new_vl, size_t vl) { + return vlseg3e8ff_v_i8m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg4e8ff_v_i8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , i32 } @llvm.riscv.vlseg4ff.mask.nxv8i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store i32 [[TMP5]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg4e8ff_v_i8m1_m( +// 
CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e8ff_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, const int8_t *base, size_t *new_vl, size_t vl) { + return vlseg4e8ff_v_i8m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg5e8ff_v_i8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , i32 } @llvm.riscv.vlseg5ff.mask.nxv8i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 5 +// CHECK-RV32-NEXT: store i32 [[TMP6]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg5e8ff_v_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// 
CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg5e8ff_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, const int8_t *base, size_t *new_vl, size_t vl) { + return vlseg5e8ff_v_i8m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg6e8ff_v_i8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , i32 } @llvm.riscv.vlseg6ff.mask.nxv8i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 6 +// CHECK-RV32-NEXT: store i32 [[TMP7]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg6e8ff_v_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg6e8ff_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t 
maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, const int8_t *base, size_t *new_vl, size_t vl) { + return vlseg6e8ff_v_i8m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg7e8ff_v_i8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , i32 } @llvm.riscv.vlseg7ff.mask.nxv8i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 6 +// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 7 +// CHECK-RV32-NEXT: store i32 [[TMP8]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg7e8ff_v_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg7e8ff_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, 
vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, const int8_t *base, size_t *new_vl, size_t vl) { + return vlseg7e8ff_v_i8m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg8e8ff_v_i8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i32 } @llvm.riscv.vlseg8ff.mask.nxv8i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 6 +// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 7 +// CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 8 +// CHECK-RV32-NEXT: store i32 [[TMP9]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg8e8ff_v_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 7 +// 
CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 8 +// CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg8e8ff_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vint8m1_t *v7, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, vint8m1_t maskedoff7, const int8_t *base, size_t *new_vl, size_t vl) { + return vlseg8e8ff_v_i8m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg2e8ff_v_i8m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.mask.nxv16i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg2e8ff_v_i8m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv16i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e8ff_v_i8m2_m (vint8m2_t *v0, vint8m2_t *v1, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, const int8_t *base, size_t *new_vl, size_t vl) { + return vlseg2e8ff_v_i8m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg3e8ff_v_i8m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , i32 } @llvm.riscv.vlseg3ff.mask.nxv16i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg3e8ff_v_i8m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv16i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], 
[[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e8ff_v_i8m2_m (vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, const int8_t *base, size_t *new_vl, size_t vl) { + return vlseg3e8ff_v_i8m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg4e8ff_v_i8m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , i32 } @llvm.riscv.vlseg4ff.mask.nxv16i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store i32 [[TMP5]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg4e8ff_v_i8m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv16i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e8ff_v_i8m2_m (vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vint8m2_t *v3, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, vint8m2_t maskedoff3, const int8_t *base, size_t *new_vl, size_t vl) { + return vlseg4e8ff_v_i8m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg2e8ff_v_i8m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.mask.nxv32i8.i32( 
[[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg2e8ff_v_i8m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv32i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e8ff_v_i8m4_m (vint8m4_t *v0, vint8m4_t *v1, vbool2_t mask, vint8m4_t maskedoff0, vint8m4_t maskedoff1, const int8_t *base, size_t *new_vl, size_t vl) { + return vlseg2e8ff_v_i8m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg2e16ff_v_i16mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.mask.nxv1i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg2e16ff_v_i16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e16ff_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, const int16_t *base, size_t *new_vl, size_t vl) { + return vlseg2e16ff_v_i16mf4_m(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg3e16ff_v_i16mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , i32 } @llvm.riscv.vlseg3ff.mask.nxv1i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store 
[[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg3e16ff_v_i16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e16ff_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, const int16_t *base, size_t *new_vl, size_t vl) { + return vlseg3e16ff_v_i16mf4_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg4e16ff_v_i16mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , i32 } @llvm.riscv.vlseg4ff.mask.nxv1i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store i32 [[TMP5]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg4e16ff_v_i16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 
8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e16ff_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, const int16_t *base, size_t *new_vl, size_t vl) { + return vlseg4e16ff_v_i16mf4_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg5e16ff_v_i16mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , i32 } @llvm.riscv.vlseg5ff.mask.nxv1i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 5 +// CHECK-RV32-NEXT: store i32 [[TMP6]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg5e16ff_v_i16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg5e16ff_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, const int16_t *base, size_t *new_vl, size_t vl) { + return vlseg5e16ff_v_i16mf4_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg6e16ff_v_i16mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , 
, i32 } @llvm.riscv.vlseg6ff.mask.nxv1i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 6 +// CHECK-RV32-NEXT: store i32 [[TMP7]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg6e16ff_v_i16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg6e16ff_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, const int16_t *base, size_t *new_vl, size_t vl) { + return vlseg6e16ff_v_i16mf4_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg7e16ff_v_i16mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , i32 } @llvm.riscv.vlseg7ff.mask.nxv1i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , 
, i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 6 +// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 7 +// CHECK-RV32-NEXT: store i32 [[TMP8]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg7e16ff_v_i16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg7e16ff_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, const int16_t *base, size_t *new_vl, size_t vl) { + return vlseg7e16ff_v_i16mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg8e16ff_v_i16mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i32 } @llvm.riscv.vlseg8ff.mask.nxv1i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], 
[[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 6 +// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 7 +// CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 8 +// CHECK-RV32-NEXT: store i32 [[TMP9]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg8e16ff_v_i16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 8 +// CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg8e16ff_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t *v7, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, vint16mf4_t maskedoff7, const 
int16_t *base, size_t *new_vl, size_t vl) { + return vlseg8e16ff_v_i16mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg2e16ff_v_i16mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.mask.nxv2i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg2e16ff_v_i16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e16ff_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, const int16_t *base, size_t *new_vl, size_t vl) { + return vlseg2e16ff_v_i16mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg3e16ff_v_i16mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , i32 } @llvm.riscv.vlseg3ff.mask.nxv2i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg3e16ff_v_i16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 +// 
CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e16ff_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, const int16_t *base, size_t *new_vl, size_t vl) { + return vlseg3e16ff_v_i16mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg4e16ff_v_i16mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , i32 } @llvm.riscv.vlseg4ff.mask.nxv2i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store i32 [[TMP5]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg4e16ff_v_i16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e16ff_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, const int16_t *base, size_t *new_vl, size_t vl) { + return vlseg4e16ff_v_i16mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg5e16ff_v_i16mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , i32 } @llvm.riscv.vlseg5ff.mask.nxv2i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , i32 } 
[[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 5 +// CHECK-RV32-NEXT: store i32 [[TMP6]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg5e16ff_v_i16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg5e16ff_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, const int16_t *base, size_t *new_vl, size_t vl) { + return vlseg5e16ff_v_i16mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg6e16ff_v_i16mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , i32 } @llvm.riscv.vlseg6ff.mask.nxv2i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 6 +// CHECK-RV32-NEXT: store i32 [[TMP7]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// 
+// CHECK-RV64-LABEL: @test_vlseg6e16ff_v_i16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg6e16ff_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, const int16_t *base, size_t *new_vl, size_t vl) { + return vlseg6e16ff_v_i16mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg7e16ff_v_i16mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , i32 } @llvm.riscv.vlseg7ff.mask.nxv2i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 6 +// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 7 +// CHECK-RV32-NEXT: store i32 [[TMP8]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg7e16ff_v_i16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { 
, , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg7e16ff_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, const int16_t *base, size_t *new_vl, size_t vl) { + return vlseg7e16ff_v_i16mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg8e16ff_v_i16mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i32 } @llvm.riscv.vlseg8ff.mask.nxv2i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 6 +// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 7 +// CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// 
CHECK-RV32-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 8 +// CHECK-RV32-NEXT: store i32 [[TMP9]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg8e16ff_v_i16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 8 +// CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg8e16ff_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t *v7, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, vint16mf2_t maskedoff7, const int16_t *base, size_t *new_vl, size_t vl) { + return vlseg8e16ff_v_i16mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg2e16ff_v_i16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.mask.nxv4i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg2e16ff_v_i16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , 
i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e16ff_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, const int16_t *base, size_t *new_vl, size_t vl) { + return vlseg2e16ff_v_i16m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg3e16ff_v_i16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , i32 } @llvm.riscv.vlseg3ff.mask.nxv4i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg3e16ff_v_i16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e16ff_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, const int16_t *base, size_t *new_vl, size_t vl) { + return vlseg3e16ff_v_i16m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg4e16ff_v_i16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , i32 } @llvm.riscv.vlseg4ff.mask.nxv4i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 
4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store i32 [[TMP5]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg4e16ff_v_i16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e16ff_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, const int16_t *base, size_t *new_vl, size_t vl) { + return vlseg4e16ff_v_i16m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg5e16ff_v_i16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , i32 } @llvm.riscv.vlseg5ff.mask.nxv4i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 5 +// CHECK-RV32-NEXT: store i32 [[TMP6]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg5e16ff_v_i16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: 
[[TMP4:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg5e16ff_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, const int16_t *base, size_t *new_vl, size_t vl) { + return vlseg5e16ff_v_i16m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg6e16ff_v_i16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , i32 } @llvm.riscv.vlseg6ff.mask.nxv4i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 6 +// CHECK-RV32-NEXT: store i32 [[TMP7]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg6e16ff_v_i16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store i64 
[[TMP7]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg6e16ff_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, const int16_t *base, size_t *new_vl, size_t vl) { + return vlseg6e16ff_v_i16m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg7e16ff_v_i16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , i32 } @llvm.riscv.vlseg7ff.mask.nxv4i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 6 +// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 7 +// CHECK-RV32-NEXT: store i32 [[TMP8]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg7e16ff_v_i16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i64 } 
[[TMP0]], 7 +// CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg7e16ff_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, const int16_t *base, size_t *new_vl, size_t vl) { + return vlseg7e16ff_v_i16m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg8e16ff_v_i16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i32 } @llvm.riscv.vlseg8ff.mask.nxv4i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 6 +// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 7 +// CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 8 +// CHECK-RV32-NEXT: store i32 [[TMP9]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg8e16ff_v_i16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = 
extractvalue { , , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 8 +// CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg8e16ff_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vint16m1_t *v7, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, vint16m1_t maskedoff7, const int16_t *base, size_t *new_vl, size_t vl) { + return vlseg8e16ff_v_i16m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg2e16ff_v_i16m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.mask.nxv8i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg2e16ff_v_i16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e16ff_v_i16m2_m (vint16m2_t *v0, vint16m2_t *v1, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, const int16_t *base, size_t *new_vl, size_t vl) { + return vlseg2e16ff_v_i16m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg3e16ff_v_i16m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , i32 } @llvm.riscv.vlseg3ff.mask.nxv8i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// 
CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg3e16ff_v_i16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e16ff_v_i16m2_m (vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, const int16_t *base, size_t *new_vl, size_t vl) { + return vlseg3e16ff_v_i16m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg4e16ff_v_i16m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , i32 } @llvm.riscv.vlseg4ff.mask.nxv8i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store i32 [[TMP5]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg4e16ff_v_i16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e16ff_v_i16m2_m (vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vint16m2_t *v3, vbool8_t mask, vint16m2_t 
maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, vint16m2_t maskedoff3, const int16_t *base, size_t *new_vl, size_t vl) { + return vlseg4e16ff_v_i16m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg2e16ff_v_i16m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.mask.nxv16i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg2e16ff_v_i16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv16i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e16ff_v_i16m4_m (vint16m4_t *v0, vint16m4_t *v1, vbool4_t mask, vint16m4_t maskedoff0, vint16m4_t maskedoff1, const int16_t *base, size_t *new_vl, size_t vl) { + return vlseg2e16ff_v_i16m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg2e32ff_v_i32mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.mask.nxv1i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg2e32ff_v_i32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e32ff_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, const int32_t *base, size_t *new_vl, size_t vl) { + return vlseg2e32ff_v_i32mf2_m(v0, 
v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg3e32ff_v_i32mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , i32 } @llvm.riscv.vlseg3ff.mask.nxv1i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg3e32ff_v_i32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e32ff_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, const int32_t *base, size_t *new_vl, size_t vl) { + return vlseg3e32ff_v_i32mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg4e32ff_v_i32mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , i32 } @llvm.riscv.vlseg4ff.mask.nxv1i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store i32 [[TMP5]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg4e32ff_v_i32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: 
store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e32ff_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, const int32_t *base, size_t *new_vl, size_t vl) { + return vlseg4e32ff_v_i32mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg5e32ff_v_i32mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , i32 } @llvm.riscv.vlseg5ff.mask.nxv1i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 5 +// CHECK-RV32-NEXT: store i32 [[TMP6]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg5e32ff_v_i32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg5e32ff_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vbool64_t mask, 
vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, const int32_t *base, size_t *new_vl, size_t vl) { + return vlseg5e32ff_v_i32mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg6e32ff_v_i32mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , i32 } @llvm.riscv.vlseg6ff.mask.nxv1i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 6 +// CHECK-RV32-NEXT: store i32 [[TMP7]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg6e32ff_v_i32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg6e32ff_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, const int32_t *base, size_t *new_vl, size_t vl) { + return vlseg6e32ff_v_i32mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, 
vl); +} + +// CHECK-RV32-LABEL: @test_vlseg7e32ff_v_i32mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , i32 } @llvm.riscv.vlseg7ff.mask.nxv1i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 6 +// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 7 +// CHECK-RV32-NEXT: store i32 [[TMP8]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg7e32ff_v_i32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg7e32ff_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, const int32_t *base, size_t *new_vl, size_t vl) { + return vlseg7e32ff_v_i32mf2_m(v0, v1, v2, v3, v4, 
v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg8e32ff_v_i32mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i32 } @llvm.riscv.vlseg8ff.mask.nxv1i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 6 +// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 7 +// CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 8 +// CHECK-RV32-NEXT: store i32 [[TMP9]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg8e32ff_v_i32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 8 +// CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL:%.*]], align 
8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg8e32ff_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t *v7, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, vint32mf2_t maskedoff7, const int32_t *base, size_t *new_vl, size_t vl) { + return vlseg8e32ff_v_i32mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg2e32ff_v_i32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.mask.nxv2i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg2e32ff_v_i32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e32ff_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, const int32_t *base, size_t *new_vl, size_t vl) { + return vlseg2e32ff_v_i32m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg3e32ff_v_i32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , i32 } @llvm.riscv.vlseg3ff.mask.nxv2i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg3e32ff_v_i32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 +// 
CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e32ff_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, const int32_t *base, size_t *new_vl, size_t vl) { + return vlseg3e32ff_v_i32m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg4e32ff_v_i32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , i32 } @llvm.riscv.vlseg4ff.mask.nxv2i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store i32 [[TMP5]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg4e32ff_v_i32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e32ff_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, const int32_t *base, size_t *new_vl, size_t vl) { + return vlseg4e32ff_v_i32m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg5e32ff_v_i32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , i32 } @llvm.riscv.vlseg5ff.mask.nxv2i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* 
[[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 5 +// CHECK-RV32-NEXT: store i32 [[TMP6]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg5e32ff_v_i32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg5e32ff_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, const int32_t *base, size_t *new_vl, size_t vl) { + return vlseg5e32ff_v_i32m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg6e32ff_v_i32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , i32 } @llvm.riscv.vlseg6ff.mask.nxv2i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 4 +// 
CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 6 +// CHECK-RV32-NEXT: store i32 [[TMP7]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg6e32ff_v_i32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg6e32ff_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, const int32_t *base, size_t *new_vl, size_t vl) { + return vlseg6e32ff_v_i32m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg7e32ff_v_i32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , i32 } @llvm.riscv.vlseg7ff.mask.nxv2i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 6 +// 
CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 7 +// CHECK-RV32-NEXT: store i32 [[TMP8]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg7e32ff_v_i32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg7e32ff_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, const int32_t *base, size_t *new_vl, size_t vl) { + return vlseg7e32ff_v_i32m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg8e32ff_v_i32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i32 } @llvm.riscv.vlseg8ff.mask.nxv2i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 5 +// 
CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 6 +// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 7 +// CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 8 +// CHECK-RV32-NEXT: store i32 [[TMP9]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg8e32ff_v_i32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 8 +// CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg8e32ff_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vint32m1_t *v7, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, vint32m1_t maskedoff7, const int32_t *base, size_t *new_vl, size_t vl) { + return vlseg8e32ff_v_i32m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg2e32ff_v_i32m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.mask.nxv4i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: 
ret void +// +// CHECK-RV64-LABEL: @test_vlseg2e32ff_v_i32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e32ff_v_i32m2_m (vint32m2_t *v0, vint32m2_t *v1, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, const int32_t *base, size_t *new_vl, size_t vl) { + return vlseg2e32ff_v_i32m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg3e32ff_v_i32m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , i32 } @llvm.riscv.vlseg3ff.mask.nxv4i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg3e32ff_v_i32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e32ff_v_i32m2_m (vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, const int32_t *base, size_t *new_vl, size_t vl) { + return vlseg3e32ff_v_i32m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg4e32ff_v_i32m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , i32 } @llvm.riscv.vlseg4ff.mask.nxv4i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 1 
+// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store i32 [[TMP5]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg4e32ff_v_i32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e32ff_v_i32m2_m (vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vint32m2_t *v3, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, vint32m2_t maskedoff3, const int32_t *base, size_t *new_vl, size_t vl) { + return vlseg4e32ff_v_i32m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg2e32ff_v_i32m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.mask.nxv8i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg2e32ff_v_i32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e32ff_v_i32m4_m (vint32m4_t *v0, vint32m4_t *v1, vbool8_t mask, vint32m4_t maskedoff0, vint32m4_t maskedoff1, const int32_t *base, size_t *new_vl, size_t vl) { + return vlseg2e32ff_v_i32m4_m(v0, v1, mask, 
maskedoff0, maskedoff1, base, new_vl, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlseg2e64ff_v_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i64>, <vscale x 1 x i64>, i32 } @llvm.riscv.vlseg2ff.mask.nxv1i64.i32(<vscale x 1 x i64> [[MASKEDOFF0:%.*]], <vscale x 1 x i64> [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, i32 } [[TMP0]], 0
+// CHECK-RV32-NEXT: store <vscale x 1 x i64> [[TMP1]], <vscale x 1 x i64>* [[V0:%.*]], align 4
+// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, i32 } [[TMP0]], 1
+// CHECK-RV32-NEXT: store <vscale x 1 x i64> [[TMP2]], <vscale x 1 x i64>* [[V1:%.*]], align 4
+// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, i32 } [[TMP0]], 2
+// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 4
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vlseg2e64ff_v_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i64>, <vscale x 1 x i64>, i64 } @llvm.riscv.vlseg2ff.mask.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF0:%.*]], <vscale x 1 x i64> [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 1 x i64> [[TMP1]], <vscale x 1 x i64>* [[V0:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 1 x i64> [[TMP2]], <vscale x 1 x i64>* [[V1:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
+// CHECK-RV64-NEXT: ret void
+//
+void test_vlseg2e64ff_v_i64m1_m (vint64m1_t *v0, vint64m1_t *v1, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, const int64_t *base, size_t *new_vl, size_t vl) {
+  return vlseg2e64ff_v_i64m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlseg3e64ff_v_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, i32 } @llvm.riscv.vlseg3ff.mask.nxv1i64.i32(<vscale x 1 x i64> [[MASKEDOFF0:%.*]], <vscale x 1 x i64> [[MASKEDOFF1:%.*]], <vscale x 1 x i64> [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, i32 } [[TMP0]], 0
+// CHECK-RV32-NEXT: store <vscale x 1 x i64> [[TMP1]], <vscale x 1 x i64>* [[V0:%.*]], align 4
+// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, i32 } [[TMP0]], 1
+// CHECK-RV32-NEXT: store <vscale x 1 x i64> [[TMP2]], <vscale x 1 x i64>* [[V1:%.*]], align 4
+// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, i32 } [[TMP0]], 2
+// CHECK-RV32-NEXT: store <vscale x 1 x i64> [[TMP3]], <vscale x 1 x i64>* [[V2:%.*]], align 4
+// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, i32 } [[TMP0]], 3
+// CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL:%.*]], align 4
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vlseg3e64ff_v_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, i64 } @llvm.riscv.vlseg3ff.mask.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF0:%.*]], <vscale x 1 x i64> [[MASKEDOFF1:%.*]], <vscale x 1 x i64> [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 1 x i64> [[TMP1]], <vscale x 1 x i64>* [[V0:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 1 x i64> [[TMP2]], <vscale x 1 x i64>* [[V1:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 1 x i64> [[TMP3]], <vscale x 1 x i64>* [[V2:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, i64 } [[TMP0]], 3
+// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 8
+// CHECK-RV64-NEXT: ret void
+//
+void test_vlseg3e64ff_v_i64m1_m (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vbool64_t mask,
vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, const int64_t *base, size_t *new_vl, size_t vl) { + return vlseg3e64ff_v_i64m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg4e64ff_v_i64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , i32 } @llvm.riscv.vlseg4ff.mask.nxv1i64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store i32 [[TMP5]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg4e64ff_v_i64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e64ff_v_i64m1_m (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, const int64_t *base, size_t *new_vl, size_t vl) { + return vlseg4e64ff_v_i64m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg5e64ff_v_i64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , i32 } @llvm.riscv.vlseg5ff.mask.nxv1i64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// 
CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 5 +// CHECK-RV32-NEXT: store i32 [[TMP6]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg5e64ff_v_i64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg5e64ff_v_i64m1_m (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, const int64_t *base, size_t *new_vl, size_t vl) { + return vlseg5e64ff_v_i64m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg6e64ff_v_i64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , i32 } @llvm.riscv.vlseg6ff.mask.nxv1i64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 6 +// CHECK-RV32-NEXT: store i32 [[TMP7]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg6e64ff_v_i64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], 
[[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg6e64ff_v_i64m1_m (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, const int64_t *base, size_t *new_vl, size_t vl) { + return vlseg6e64ff_v_i64m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg7e64ff_v_i64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , i32 } @llvm.riscv.vlseg7ff.mask.nxv1i64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 6 +// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 7 +// CHECK-RV32-NEXT: store i32 [[TMP8]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg7e64ff_v_i64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg7e64ff_v_i64m1_m (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, const int64_t *base, size_t *new_vl, size_t vl) { + return vlseg7e64ff_v_i64m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg8e64ff_v_i64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i32 } @llvm.riscv.vlseg8ff.mask.nxv1i64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 6 +// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 7 +// CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 8 +// CHECK-RV32-NEXT: store i32 [[TMP9]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg8e64ff_v_i64m1_m( +// CHECK-RV64-NEXT: entry: +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 8 +// CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg8e64ff_v_i64m1_m (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vint64m1_t *v7, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, vint64m1_t maskedoff7, const int64_t *base, size_t *new_vl, size_t vl) { + return vlseg8e64ff_v_i64m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg2e64ff_v_i64m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.mask.nxv2i64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg2e64ff_v_i64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// 
CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
+// CHECK-RV64-NEXT: ret void
+//
+void test_vlseg2e64ff_v_i64m2_m (vint64m2_t *v0, vint64m2_t *v1, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, const int64_t *base, size_t *new_vl, size_t vl) {
+  return vlseg2e64ff_v_i64m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlseg3e64ff_v_i64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, i32 } @llvm.riscv.vlseg3ff.mask.nxv2i64.i32(<vscale x 2 x i64> [[MASKEDOFF0:%.*]], <vscale x 2 x i64> [[MASKEDOFF1:%.*]], <vscale x 2 x i64> [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, i32 } [[TMP0]], 0
+// CHECK-RV32-NEXT: store <vscale x 2 x i64> [[TMP1]], <vscale x 2 x i64>* [[V0:%.*]], align 4
+// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, i32 } [[TMP0]], 1
+// CHECK-RV32-NEXT: store <vscale x 2 x i64> [[TMP2]], <vscale x 2 x i64>* [[V1:%.*]], align 4
+// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, i32 } [[TMP0]], 2
+// CHECK-RV32-NEXT: store <vscale x 2 x i64> [[TMP3]], <vscale x 2 x i64>* [[V2:%.*]], align 4
+// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, i32 } [[TMP0]], 3
+// CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL:%.*]], align 4
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vlseg3e64ff_v_i64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, i64 } @llvm.riscv.vlseg3ff.mask.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF0:%.*]], <vscale x 2 x i64> [[MASKEDOFF1:%.*]], <vscale x 2 x i64> [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 2 x i64> [[TMP1]], <vscale x 2 x i64>* [[V0:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 2 x i64> [[TMP2]], <vscale x 2 x i64>* [[V1:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 2 x i64> [[TMP3]], <vscale x 2 x i64>* [[V2:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, i64 } [[TMP0]], 3
+// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 8
+// CHECK-RV64-NEXT: ret void
+//
+void test_vlseg3e64ff_v_i64m2_m (vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, const int64_t *base, size_t *new_vl, size_t vl) {
+  return vlseg3e64ff_v_i64m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlseg4e64ff_v_i64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, i32 } @llvm.riscv.vlseg4ff.mask.nxv2i64.i32(<vscale x 2 x i64> [[MASKEDOFF0:%.*]], <vscale x 2 x i64> [[MASKEDOFF1:%.*]], <vscale x 2 x i64> [[MASKEDOFF2:%.*]], <vscale x 2 x i64> [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, i32 } [[TMP0]], 0
+// CHECK-RV32-NEXT: store <vscale x 2 x i64> [[TMP1]], <vscale x 2 x i64>* [[V0:%.*]], align 4
+// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, i32 } [[TMP0]], 1
+// CHECK-RV32-NEXT: store <vscale x 2 x i64> [[TMP2]], <vscale x 2 x i64>* [[V1:%.*]], align 4
+// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, i32 } [[TMP0]], 2
+// CHECK-RV32-NEXT: store <vscale x 2 x i64> [[TMP3]], <vscale x 2 x i64>* [[V2:%.*]], align 4
+// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, i32 } [[TMP0]], 3
+// CHECK-RV32-NEXT: store <vscale x 2 x i64> [[TMP4]], <vscale x 2 x i64>* [[V3:%.*]], align 4
+// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, i32 } [[TMP0]], 4
+// CHECK-RV32-NEXT: store i32 [[TMP5]], i32* [[NEW_VL:%.*]], align 4
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vlseg4e64ff_v_i64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT:
[[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e64ff_v_i64m2_m (vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vint64m2_t *v3, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, vint64m2_t maskedoff3, const int64_t *base, size_t *new_vl, size_t vl) { + return vlseg4e64ff_v_i64m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg2e64ff_v_i64m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.mask.nxv4i64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg2e64ff_v_i64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e64ff_v_i64m4_m (vint64m4_t *v0, vint64m4_t *v1, vbool16_t mask, vint64m4_t maskedoff0, vint64m4_t maskedoff1, const int64_t *base, size_t *new_vl, size_t vl) { + return vlseg2e64ff_v_i64m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg2e8ff_v_u8mf8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.mask.nxv1i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { 
, , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg2e8ff_v_u8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e8ff_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, const uint8_t *base, size_t *new_vl, size_t vl) { + return vlseg2e8ff_v_u8mf8_m(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg3e8ff_v_u8mf8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , i32 } @llvm.riscv.vlseg3ff.mask.nxv1i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg3e8ff_v_u8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e8ff_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, const uint8_t *base, size_t *new_vl, size_t vl) { + return vlseg3e8ff_v_u8mf8_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg4e8ff_v_u8mf8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , i32 } @llvm.riscv.vlseg4ff.mask.nxv1i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store 
[[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store i32 [[TMP5]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg4e8ff_v_u8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e8ff_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, const uint8_t *base, size_t *new_vl, size_t vl) { + return vlseg4e8ff_v_u8mf8_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg5e8ff_v_u8mf8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , i32 } @llvm.riscv.vlseg5ff.mask.nxv1i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 5 +// CHECK-RV32-NEXT: store i32 [[TMP6]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg5e8ff_v_u8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) 
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg5e8ff_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, const uint8_t *base, size_t *new_vl, size_t vl) { + return vlseg5e8ff_v_u8mf8_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg6e8ff_v_u8mf8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , i32 } @llvm.riscv.vlseg6ff.mask.nxv1i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 6 +// CHECK-RV32-NEXT: store i32 [[TMP7]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg6e8ff_v_u8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: 
store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg6e8ff_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, const uint8_t *base, size_t *new_vl, size_t vl) { + return vlseg6e8ff_v_u8mf8_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg7e8ff_v_u8mf8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , i32 } @llvm.riscv.vlseg7ff.mask.nxv1i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 6 +// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 7 +// CHECK-RV32-NEXT: store i32 [[TMP8]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg7e8ff_v_u8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , 
i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg7e8ff_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, const uint8_t *base, size_t *new_vl, size_t vl) { + return vlseg7e8ff_v_u8mf8_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg8e8ff_v_u8mf8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i32 } @llvm.riscv.vlseg8ff.mask.nxv1i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 6 +// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 7 +// CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 8 +// CHECK-RV32-NEXT: store i32 [[TMP9]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg8e8ff_v_u8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = 
extractvalue { , , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 8 +// CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg8e8ff_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, vuint8mf8_t maskedoff7, const uint8_t *base, size_t *new_vl, size_t vl) { + return vlseg8e8ff_v_u8mf8_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg2e8ff_v_u8mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.mask.nxv2i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg2e8ff_v_u8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e8ff_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, const uint8_t *base, size_t *new_vl, size_t vl) { + return vlseg2e8ff_v_u8mf4_m(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg3e8ff_v_u8mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , i32 } @llvm.riscv.vlseg3ff.mask.nxv2i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], 
[[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg3e8ff_v_u8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e8ff_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, const uint8_t *base, size_t *new_vl, size_t vl) { + return vlseg3e8ff_v_u8mf4_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg4e8ff_v_u8mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , i32 } @llvm.riscv.vlseg4ff.mask.nxv2i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store i32 [[TMP5]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg4e8ff_v_u8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// 
CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e8ff_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, const uint8_t *base, size_t *new_vl, size_t vl) { + return vlseg4e8ff_v_u8mf4_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg5e8ff_v_u8mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , i32 } @llvm.riscv.vlseg5ff.mask.nxv2i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 5 +// CHECK-RV32-NEXT: store i32 [[TMP6]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg5e8ff_v_u8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg5e8ff_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, const uint8_t *base, size_t *new_vl, size_t vl) { + return vlseg5e8ff_v_u8mf4_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); +} + +// 
CHECK-RV32-LABEL: @test_vlseg6e8ff_v_u8mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , i32 } @llvm.riscv.vlseg6ff.mask.nxv2i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 6 +// CHECK-RV32-NEXT: store i32 [[TMP7]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg6e8ff_v_u8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg6e8ff_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, const uint8_t *base, size_t *new_vl, size_t vl) { + return vlseg6e8ff_v_u8mf4_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg7e8ff_v_u8mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , i32 } @llvm.riscv.vlseg7ff.mask.nxv2i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], 
[[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 6 +// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 7 +// CHECK-RV32-NEXT: store i32 [[TMP8]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg7e8ff_v_u8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg7e8ff_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, const uint8_t *base, size_t *new_vl, size_t vl) { + return vlseg7e8ff_v_u8mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg8e8ff_v_u8mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i32 } @llvm.riscv.vlseg8ff.mask.nxv2i8.i32( 
[[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 6 +// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 7 +// CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 8 +// CHECK-RV32-NEXT: store i32 [[TMP9]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg8e8ff_v_u8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 8 +// CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg8e8ff_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t 
maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, vuint8mf4_t maskedoff7, const uint8_t *base, size_t *new_vl, size_t vl) { + return vlseg8e8ff_v_u8mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg2e8ff_v_u8mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.mask.nxv4i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg2e8ff_v_u8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e8ff_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, const uint8_t *base, size_t *new_vl, size_t vl) { + return vlseg2e8ff_v_u8mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg3e8ff_v_u8mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , i32 } @llvm.riscv.vlseg3ff.mask.nxv4i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg3e8ff_v_u8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 
8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e8ff_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, const uint8_t *base, size_t *new_vl, size_t vl) { + return vlseg3e8ff_v_u8mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg4e8ff_v_u8mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , i32 } @llvm.riscv.vlseg4ff.mask.nxv4i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store i32 [[TMP5]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg4e8ff_v_u8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e8ff_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, const uint8_t *base, size_t *new_vl, size_t vl) { + return vlseg4e8ff_v_u8mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg5e8ff_v_u8mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , i32 } @llvm.riscv.vlseg5ff.mask.nxv4i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// 
CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 5 +// CHECK-RV32-NEXT: store i32 [[TMP6]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg5e8ff_v_u8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg5e8ff_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, const uint8_t *base, size_t *new_vl, size_t vl) { + return vlseg5e8ff_v_u8mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg6e8ff_v_u8mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , i32 } @llvm.riscv.vlseg6ff.mask.nxv4i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 6 +// CHECK-RV32-NEXT: store i32 [[TMP7]], i32* 
[[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg6e8ff_v_u8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg6e8ff_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, const uint8_t *base, size_t *new_vl, size_t vl) { + return vlseg6e8ff_v_u8mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg7e8ff_v_u8mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , i32 } @llvm.riscv.vlseg7ff.mask.nxv4i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 6 +// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 7 +// CHECK-RV32-NEXT: store i32 [[TMP8]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg7e8ff_v_u8mf2_m( +// CHECK-RV64-NEXT: entry: 
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg7e8ff_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, const uint8_t *base, size_t *new_vl, size_t vl) { + return vlseg7e8ff_v_u8mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg8e8ff_v_u8mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i32 } @llvm.riscv.vlseg8ff.mask.nxv4i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 6 +// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 7 +// CHECK-RV32-NEXT: store [[TMP8]], * 
[[V7:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 8 +// CHECK-RV32-NEXT: store i32 [[TMP9]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg8e8ff_v_u8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 8 +// CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg8e8ff_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, vuint8mf2_t maskedoff7, const uint8_t *base, size_t *new_vl, size_t vl) { + return vlseg8e8ff_v_u8mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg2e8ff_v_u8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.mask.nxv8i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg2e8ff_v_u8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = 
extractvalue { , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e8ff_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, const uint8_t *base, size_t *new_vl, size_t vl) { + return vlseg2e8ff_v_u8m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg3e8ff_v_u8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , i32 } @llvm.riscv.vlseg3ff.mask.nxv8i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg3e8ff_v_u8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e8ff_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, const uint8_t *base, size_t *new_vl, size_t vl) { + return vlseg3e8ff_v_u8m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg4e8ff_v_u8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , i32 } @llvm.riscv.vlseg4ff.mask.nxv8i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 
+// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store i32 [[TMP5]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg4e8ff_v_u8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e8ff_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, const uint8_t *base, size_t *new_vl, size_t vl) { + return vlseg4e8ff_v_u8m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg5e8ff_v_u8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , i32 } @llvm.riscv.vlseg5ff.mask.nxv8i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 5 +// CHECK-RV32-NEXT: store i32 [[TMP6]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg5e8ff_v_u8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = 
extractvalue { , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg5e8ff_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, const uint8_t *base, size_t *new_vl, size_t vl) { + return vlseg5e8ff_v_u8m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg6e8ff_v_u8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , i32 } @llvm.riscv.vlseg6ff.mask.nxv8i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 6 +// CHECK-RV32-NEXT: store i32 [[TMP7]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg6e8ff_v_u8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL:%.*]], 
align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg6e8ff_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, const uint8_t *base, size_t *new_vl, size_t vl) { + return vlseg6e8ff_v_u8m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg7e8ff_v_u8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , i32 } @llvm.riscv.vlseg7ff.mask.nxv8i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 6 +// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 7 +// CHECK-RV32-NEXT: store i32 [[TMP8]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg7e8ff_v_u8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store i64 
[[TMP8]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg7e8ff_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, const uint8_t *base, size_t *new_vl, size_t vl) { + return vlseg7e8ff_v_u8m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg8e8ff_v_u8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i32 } @llvm.riscv.vlseg8ff.mask.nxv8i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 6 +// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 7 +// CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 8 +// CHECK-RV32-NEXT: store i32 [[TMP9]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg8e8ff_v_u8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 5 +// 
CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 8 +// CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg8e8ff_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vuint8m1_t *v7, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, vuint8m1_t maskedoff7, const uint8_t *base, size_t *new_vl, size_t vl) { + return vlseg8e8ff_v_u8m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg2e8ff_v_u8m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.mask.nxv16i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg2e8ff_v_u8m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv16i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e8ff_v_u8m2_m (vuint8m2_t *v0, vuint8m2_t *v1, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, const uint8_t *base, size_t *new_vl, size_t vl) { + return vlseg2e8ff_v_u8m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg3e8ff_v_u8m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , i32 } @llvm.riscv.vlseg3ff.mask.nxv16i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , i32 } [[TMP0]], 3 +// 
CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg3e8ff_v_u8m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv16i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e8ff_v_u8m2_m (vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, const uint8_t *base, size_t *new_vl, size_t vl) { + return vlseg3e8ff_v_u8m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg4e8ff_v_u8m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , i32 } @llvm.riscv.vlseg4ff.mask.nxv16i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store i32 [[TMP5]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg4e8ff_v_u8m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv16i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e8ff_v_u8m2_m (vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vuint8m2_t *v3, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, vuint8m2_t maskedoff3, const uint8_t 
*base, size_t *new_vl, size_t vl) {
+  return vlseg4e8ff_v_u8m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlseg2e8ff_v_u8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { <vscale x 32 x i8>, <vscale x 32 x i8>, i32 } @llvm.riscv.vlseg2ff.mask.nxv32i8.i32(<vscale x 32 x i8> [[MASKEDOFF0:%.*]], <vscale x 32 x i8> [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 32 x i8>, <vscale x 32 x i8>, i32 } [[TMP0]], 0
+// CHECK-RV32-NEXT: store <vscale x 32 x i8> [[TMP1]], <vscale x 32 x i8>* [[V0:%.*]], align 4
+// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 32 x i8>, <vscale x 32 x i8>, i32 } [[TMP0]], 1
+// CHECK-RV32-NEXT: store <vscale x 32 x i8> [[TMP2]], <vscale x 32 x i8>* [[V1:%.*]], align 4
+// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 32 x i8>, <vscale x 32 x i8>, i32 } [[TMP0]], 2
+// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 4
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vlseg2e8ff_v_u8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 32 x i8>, <vscale x 32 x i8>, i64 } @llvm.riscv.vlseg2ff.mask.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF0:%.*]], <vscale x 32 x i8> [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 32 x i8>, <vscale x 32 x i8>, i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 32 x i8> [[TMP1]], <vscale x 32 x i8>* [[V0:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 32 x i8>, <vscale x 32 x i8>, i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 32 x i8> [[TMP2]], <vscale x 32 x i8>* [[V1:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 32 x i8>, <vscale x 32 x i8>, i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
+// CHECK-RV64-NEXT: ret void
+//
+void test_vlseg2e8ff_v_u8m4_m (vuint8m4_t *v0, vuint8m4_t *v1, vbool2_t mask, vuint8m4_t maskedoff0, vuint8m4_t maskedoff1, const uint8_t *base, size_t *new_vl, size_t vl) {
+  return vlseg2e8ff_v_u8m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlseg2e16ff_v_u16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i16>, <vscale x 1 x i16>, i32 } @llvm.riscv.vlseg2ff.mask.nxv1i16.i32(<vscale x 1 x i16> [[MASKEDOFF0:%.*]], <vscale x 1 x i16> [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, i32 } [[TMP0]], 0
+// CHECK-RV32-NEXT: store <vscale x 1 x i16> [[TMP1]], <vscale x 1 x i16>* [[V0:%.*]], align 4
+// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, i32 } [[TMP0]], 1
+// CHECK-RV32-NEXT: store <vscale x 1 x i16> [[TMP2]], <vscale x 1 x i16>* [[V1:%.*]], align 4
+// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, i32 } [[TMP0]], 2
+// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 4
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vlseg2e16ff_v_u16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i16>, <vscale x 1 x i16>, i64 } @llvm.riscv.vlseg2ff.mask.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF0:%.*]], <vscale x 1 x i16> [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 1 x i16> [[TMP1]], <vscale x 1 x i16>* [[V0:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 1 x i16> [[TMP2]], <vscale x 1 x i16>* [[V1:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
+// CHECK-RV64-NEXT: ret void
+//
+void test_vlseg2e16ff_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, const uint16_t *base, size_t *new_vl, size_t vl) {
+  return vlseg2e16ff_v_u16mf4_m(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl);
+}
+
+// CHECK-RV32-LABEL: 
@test_vlseg3e16ff_v_u16mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , i32 } @llvm.riscv.vlseg3ff.mask.nxv1i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg3e16ff_v_u16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e16ff_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, const uint16_t *base, size_t *new_vl, size_t vl) { + return vlseg3e16ff_v_u16mf4_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg4e16ff_v_u16mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , i32 } @llvm.riscv.vlseg4ff.mask.nxv1i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store i32 [[TMP5]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg4e16ff_v_u16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = 
extractvalue { , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e16ff_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, const uint16_t *base, size_t *new_vl, size_t vl) { + return vlseg4e16ff_v_u16mf4_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg5e16ff_v_u16mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , i32 } @llvm.riscv.vlseg5ff.mask.nxv1i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 5 +// CHECK-RV32-NEXT: store i32 [[TMP6]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg5e16ff_v_u16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg5e16ff_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, 
vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, const uint16_t *base, size_t *new_vl, size_t vl) { + return vlseg5e16ff_v_u16mf4_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg6e16ff_v_u16mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , i32 } @llvm.riscv.vlseg6ff.mask.nxv1i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 6 +// CHECK-RV32-NEXT: store i32 [[TMP7]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg6e16ff_v_u16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg6e16ff_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, const uint16_t *base, size_t *new_vl, size_t vl) { + return vlseg6e16ff_v_u16mf4_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); +} + +// 
CHECK-RV32-LABEL: @test_vlseg7e16ff_v_u16mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , i32 } @llvm.riscv.vlseg7ff.mask.nxv1i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 6 +// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 7 +// CHECK-RV32-NEXT: store i32 [[TMP8]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg7e16ff_v_u16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg7e16ff_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, const uint16_t *base, size_t *new_vl, size_t vl) { + return vlseg7e16ff_v_u16mf4_m(v0, v1, v2, v3, v4, 
v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg8e16ff_v_u16mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i32 } @llvm.riscv.vlseg8ff.mask.nxv1i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 6 +// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 7 +// CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 8 +// CHECK-RV32-NEXT: store i32 [[TMP9]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg8e16ff_v_u16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 8 +// CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL:%.*]], align 
8
+// CHECK-RV64-NEXT: ret void
+//
+void test_vlseg8e16ff_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, vuint16mf4_t maskedoff7, const uint16_t *base, size_t *new_vl, size_t vl) {
+  return vlseg8e16ff_v_u16mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlseg2e16ff_v_u16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x i16>, <vscale x 2 x i16>, i32 } @llvm.riscv.vlseg2ff.mask.nxv2i16.i32(<vscale x 2 x i16> [[MASKEDOFF0:%.*]], <vscale x 2 x i16> [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, i32 } [[TMP0]], 0
+// CHECK-RV32-NEXT: store <vscale x 2 x i16> [[TMP1]], <vscale x 2 x i16>* [[V0:%.*]], align 4
+// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, i32 } [[TMP0]], 1
+// CHECK-RV32-NEXT: store <vscale x 2 x i16> [[TMP2]], <vscale x 2 x i16>* [[V1:%.*]], align 4
+// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, i32 } [[TMP0]], 2
+// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 4
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vlseg2e16ff_v_u16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x i16>, <vscale x 2 x i16>, i64 } @llvm.riscv.vlseg2ff.mask.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF0:%.*]], <vscale x 2 x i16> [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP1]], <vscale x 2 x i16>* [[V0:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP2]], <vscale x 2 x i16>* [[V1:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
+// CHECK-RV64-NEXT: ret void
+//
+void test_vlseg2e16ff_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, const uint16_t *base, size_t *new_vl, size_t vl) {
+  return vlseg2e16ff_v_u16mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlseg3e16ff_v_u16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, i32 } @llvm.riscv.vlseg3ff.mask.nxv2i16.i32(<vscale x 2 x i16> [[MASKEDOFF0:%.*]], <vscale x 2 x i16> [[MASKEDOFF1:%.*]], <vscale x 2 x i16> [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, i32 } [[TMP0]], 0
+// CHECK-RV32-NEXT: store <vscale x 2 x i16> [[TMP1]], <vscale x 2 x i16>* [[V0:%.*]], align 4
+// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, i32 } [[TMP0]], 1
+// CHECK-RV32-NEXT: store <vscale x 2 x i16> [[TMP2]], <vscale x 2 x i16>* [[V1:%.*]], align 4
+// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, i32 } [[TMP0]], 2
+// CHECK-RV32-NEXT: store <vscale x 2 x i16> [[TMP3]], <vscale x 2 x i16>* [[V2:%.*]], align 4
+// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, i32 } [[TMP0]], 3
+// CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL:%.*]], align 4
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vlseg3e16ff_v_u16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, i64 } @llvm.riscv.vlseg3ff.mask.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF0:%.*]], <vscale x 2 x i16> [[MASKEDOFF1:%.*]], <vscale x 2 x i16> [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue 
{ , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e16ff_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, const uint16_t *base, size_t *new_vl, size_t vl) { + return vlseg3e16ff_v_u16mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg4e16ff_v_u16mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , i32 } @llvm.riscv.vlseg4ff.mask.nxv2i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store i32 [[TMP5]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg4e16ff_v_u16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e16ff_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, const uint16_t *base, size_t *new_vl, size_t vl) { + return vlseg4e16ff_v_u16mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg5e16ff_v_u16mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , i32 } @llvm.riscv.vlseg5ff.mask.nxv2i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], 
[[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 5 +// CHECK-RV32-NEXT: store i32 [[TMP6]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg5e16ff_v_u16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg5e16ff_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, const uint16_t *base, size_t *new_vl, size_t vl) { + return vlseg5e16ff_v_u16mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg6e16ff_v_u16mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , i32 } @llvm.riscv.vlseg6ff.mask.nxv2i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// 
CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 6 +// CHECK-RV32-NEXT: store i32 [[TMP7]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg6e16ff_v_u16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg6e16ff_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, const uint16_t *base, size_t *new_vl, size_t vl) { + return vlseg6e16ff_v_u16mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg7e16ff_v_u16mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , i32 } @llvm.riscv.vlseg7ff.mask.nxv2i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], 
* [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 6 +// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 7 +// CHECK-RV32-NEXT: store i32 [[TMP8]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg7e16ff_v_u16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg7e16ff_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, const uint16_t *base, size_t *new_vl, size_t vl) { + return vlseg7e16ff_v_u16mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg8e16ff_v_u16mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i32 } @llvm.riscv.vlseg8ff.mask.nxv2i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: 
store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 6 +// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 7 +// CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 8 +// CHECK-RV32-NEXT: store i32 [[TMP9]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg8e16ff_v_u16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 8 +// CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg8e16ff_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, vuint16mf2_t maskedoff7, const uint16_t *base, size_t *new_vl, size_t vl) { + return vlseg8e16ff_v_u16mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg2e16ff_v_u16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.mask.nxv4i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 
+// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, i32 } [[TMP0]], 2
+// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 4
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vlseg2e16ff_v_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x i16>, <vscale x 4 x i16>, i64 } @llvm.riscv.vlseg2ff.mask.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF0:%.*]], <vscale x 4 x i16> [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 4 x i16> [[TMP1]], <vscale x 4 x i16>* [[V0:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 4 x i16> [[TMP2]], <vscale x 4 x i16>* [[V1:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
+// CHECK-RV64-NEXT: ret void
+//
+void test_vlseg2e16ff_v_u16m1_m (vuint16m1_t *v0, vuint16m1_t *v1, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, const uint16_t *base, size_t *new_vl, size_t vl) {
+  return vlseg2e16ff_v_u16m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlseg3e16ff_v_u16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, i32 } @llvm.riscv.vlseg3ff.mask.nxv4i16.i32(<vscale x 4 x i16> [[MASKEDOFF0:%.*]], <vscale x 4 x i16> [[MASKEDOFF1:%.*]], <vscale x 4 x i16> [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, i32 } [[TMP0]], 0
+// CHECK-RV32-NEXT: store <vscale x 4 x i16> [[TMP1]], <vscale x 4 x i16>* [[V0:%.*]], align 4
+// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, i32 } [[TMP0]], 1
+// CHECK-RV32-NEXT: store <vscale x 4 x i16> [[TMP2]], <vscale x 4 x i16>* [[V1:%.*]], align 4
+// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, i32 } [[TMP0]], 2
+// CHECK-RV32-NEXT: store <vscale x 4 x i16> [[TMP3]], <vscale x 4 x i16>* [[V2:%.*]], align 4
+// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, i32 } [[TMP0]], 3
+// CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL:%.*]], align 4
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vlseg3e16ff_v_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, i64 } @llvm.riscv.vlseg3ff.mask.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF0:%.*]], <vscale x 4 x i16> [[MASKEDOFF1:%.*]], <vscale x 4 x i16> [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 4 x i16> [[TMP1]], <vscale x 4 x i16>* [[V0:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 4 x i16> [[TMP2]], <vscale x 4 x i16>* [[V1:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 4 x i16> [[TMP3]], <vscale x 4 x i16>* [[V2:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, i64 } [[TMP0]], 3
+// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 8
+// CHECK-RV64-NEXT: ret void
+//
+void test_vlseg3e16ff_v_u16m1_m (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, const uint16_t *base, size_t *new_vl, size_t vl) {
+  return vlseg3e16ff_v_u16m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlseg4e16ff_v_u16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, i32 } @llvm.riscv.vlseg4ff.mask.nxv4i16.i32(<vscale x 4 x i16> [[MASKEDOFF0:%.*]], <vscale x 4 x i16> [[MASKEDOFF1:%.*]], <vscale x 4 x i16> [[MASKEDOFF2:%.*]], <vscale x 4 x i16> [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = 
extractvalue { , , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store i32 [[TMP5]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg4e16ff_v_u16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e16ff_v_u16m1_m (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, const uint16_t *base, size_t *new_vl, size_t vl) { + return vlseg4e16ff_v_u16m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg5e16ff_v_u16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , i32 } @llvm.riscv.vlseg5ff.mask.nxv4i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 5 +// CHECK-RV32-NEXT: store i32 [[TMP6]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg5e16ff_v_u16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], 
[[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg5e16ff_v_u16m1_m (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, const uint16_t *base, size_t *new_vl, size_t vl) { + return vlseg5e16ff_v_u16m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg6e16ff_v_u16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , i32 } @llvm.riscv.vlseg6ff.mask.nxv4i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 6 +// CHECK-RV32-NEXT: store i32 [[TMP7]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg6e16ff_v_u16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// 
CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg6e16ff_v_u16m1_m (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, const uint16_t *base, size_t *new_vl, size_t vl) { + return vlseg6e16ff_v_u16m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg7e16ff_v_u16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , i32 } @llvm.riscv.vlseg7ff.mask.nxv4i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 6 +// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 7 +// CHECK-RV32-NEXT: store i32 [[TMP8]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg7e16ff_v_u16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 3 +// 
CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg7e16ff_v_u16m1_m (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, const uint16_t *base, size_t *new_vl, size_t vl) { + return vlseg7e16ff_v_u16m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg8e16ff_v_u16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i32 } @llvm.riscv.vlseg8ff.mask.nxv4i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 6 +// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 7 +// CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 8 +// CHECK-RV32-NEXT: store i32 [[TMP9]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg8e16ff_v_u16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , 
, , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 8 +// CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg8e16ff_v_u16m1_m (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vuint16m1_t *v7, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, vuint16m1_t maskedoff7, const uint16_t *base, size_t *new_vl, size_t vl) { + return vlseg8e16ff_v_u16m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg2e16ff_v_u16m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.mask.nxv8i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg2e16ff_v_u16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e16ff_v_u16m2_m (vuint16m2_t *v0, vuint16m2_t *v1, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, const uint16_t *base, size_t *new_vl, size_t vl) { + return vlseg2e16ff_v_u16m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg3e16ff_v_u16m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call 
{ , , , i32 } @llvm.riscv.vlseg3ff.mask.nxv8i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg3e16ff_v_u16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e16ff_v_u16m2_m (vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, const uint16_t *base, size_t *new_vl, size_t vl) { + return vlseg3e16ff_v_u16m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg4e16ff_v_u16m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , i32 } @llvm.riscv.vlseg4ff.mask.nxv8i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store i32 [[TMP5]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg4e16ff_v_u16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// 
CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e16ff_v_u16m2_m (vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vuint16m2_t *v3, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, vuint16m2_t maskedoff3, const uint16_t *base, size_t *new_vl, size_t vl) { + return vlseg4e16ff_v_u16m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg2e16ff_v_u16m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.mask.nxv16i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg2e16ff_v_u16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv16i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e16ff_v_u16m4_m (vuint16m4_t *v0, vuint16m4_t *v1, vbool4_t mask, vuint16m4_t maskedoff0, vuint16m4_t maskedoff1, const uint16_t *base, size_t *new_vl, size_t vl) { + return vlseg2e16ff_v_u16m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg2e32ff_v_u32mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.mask.nxv1i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg2e32ff_v_u32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e32ff_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, const uint32_t *base, size_t *new_vl, size_t vl) { + return vlseg2e32ff_v_u32mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg3e32ff_v_u32mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , i32 } @llvm.riscv.vlseg3ff.mask.nxv1i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg3e32ff_v_u32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e32ff_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, const uint32_t *base, size_t *new_vl, size_t vl) { + return vlseg3e32ff_v_u32mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg4e32ff_v_u32mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , i32 } @llvm.riscv.vlseg4ff.mask.nxv1i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * 
[[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store i32 [[TMP5]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg4e32ff_v_u32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e32ff_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, const uint32_t *base, size_t *new_vl, size_t vl) { + return vlseg4e32ff_v_u32mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg5e32ff_v_u32mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , i32 } @llvm.riscv.vlseg5ff.mask.nxv1i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 5 +// CHECK-RV32-NEXT: store i32 [[TMP6]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg5e32ff_v_u32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * 
[[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg5e32ff_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, const uint32_t *base, size_t *new_vl, size_t vl) { + return vlseg5e32ff_v_u32mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg6e32ff_v_u32mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , i32 } @llvm.riscv.vlseg6ff.mask.nxv1i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 6 +// CHECK-RV32-NEXT: store i32 [[TMP7]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg6e32ff_v_u32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , 
, , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg6e32ff_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, const uint32_t *base, size_t *new_vl, size_t vl) { + return vlseg6e32ff_v_u32mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg7e32ff_v_u32mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , i32 } @llvm.riscv.vlseg7ff.mask.nxv1i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 6 +// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 7 +// CHECK-RV32-NEXT: store i32 [[TMP8]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg7e32ff_v_u32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * 
[[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg7e32ff_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, const uint32_t *base, size_t *new_vl, size_t vl) { + return vlseg7e32ff_v_u32mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg8e32ff_v_u32mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i32 } @llvm.riscv.vlseg8ff.mask.nxv1i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 6 +// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 7 +// CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 8 +// CHECK-RV32-NEXT: store i32 [[TMP9]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg8e32ff_v_u32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , , 
i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 8 +// CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg8e32ff_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, vuint32mf2_t maskedoff7, const uint32_t *base, size_t *new_vl, size_t vl) { + return vlseg8e32ff_v_u32mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg2e32ff_v_u32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.mask.nxv2i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg2e32ff_v_u32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e32ff_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, const uint32_t *base, size_t *new_vl, size_t vl) { + return vlseg2e32ff_v_u32m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg3e32ff_v_u32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , i32 } @llvm.riscv.vlseg3ff.mask.nxv2i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], 
align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg3e32ff_v_u32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e32ff_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, const uint32_t *base, size_t *new_vl, size_t vl) { + return vlseg3e32ff_v_u32m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg4e32ff_v_u32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , i32 } @llvm.riscv.vlseg4ff.mask.nxv2i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store i32 [[TMP5]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg4e32ff_v_u32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 8 +// 
CHECK-RV64-NEXT: ret void +// +void test_vlseg4e32ff_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, const uint32_t *base, size_t *new_vl, size_t vl) { + return vlseg4e32ff_v_u32m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg5e32ff_v_u32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , i32 } @llvm.riscv.vlseg5ff.mask.nxv2i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 5 +// CHECK-RV32-NEXT: store i32 [[TMP6]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg5e32ff_v_u32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg5e32ff_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, const uint32_t *base, size_t *new_vl, size_t vl) { + return vlseg5e32ff_v_u32m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg6e32ff_v_u32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , i32 } @llvm.riscv.vlseg6ff.mask.nxv2i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], 
[[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 6 +// CHECK-RV32-NEXT: store i32 [[TMP7]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg6e32ff_v_u32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg6e32ff_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, const uint32_t *base, size_t *new_vl, size_t vl) { + return vlseg6e32ff_v_u32m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg7e32ff_v_u32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , i32 } @llvm.riscv.vlseg7ff.mask.nxv2i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i32 } 
[[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 6 +// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 7 +// CHECK-RV32-NEXT: store i32 [[TMP8]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg7e32ff_v_u32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg7e32ff_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, const uint32_t *base, size_t *new_vl, size_t vl) { + return vlseg7e32ff_v_u32m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg8e32ff_v_u32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i32 } @llvm.riscv.vlseg8ff.mask.nxv2i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 
0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 6 +// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 7 +// CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 8 +// CHECK-RV32-NEXT: store i32 [[TMP9]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg8e32ff_v_u32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 8 +// CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg8e32ff_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vuint32m1_t *v7, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, vuint32m1_t maskedoff7, const uint32_t *base, size_t *new_vl, size_t vl) { + return vlseg8e32ff_v_u32m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, 
maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg2e32ff_v_u32m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.mask.nxv4i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg2e32ff_v_u32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e32ff_v_u32m2_m (vuint32m2_t *v0, vuint32m2_t *v1, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, const uint32_t *base, size_t *new_vl, size_t vl) { + return vlseg2e32ff_v_u32m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg3e32ff_v_u32m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , i32 } @llvm.riscv.vlseg3ff.mask.nxv4i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg3e32ff_v_u32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e32ff_v_u32m2_m (vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, 
vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, const uint32_t *base, size_t *new_vl, size_t vl) { + return vlseg3e32ff_v_u32m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg4e32ff_v_u32m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , i32 } @llvm.riscv.vlseg4ff.mask.nxv4i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store i32 [[TMP5]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg4e32ff_v_u32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e32ff_v_u32m2_m (vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vuint32m2_t *v3, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, vuint32m2_t maskedoff3, const uint32_t *base, size_t *new_vl, size_t vl) { + return vlseg4e32ff_v_u32m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg2e32ff_v_u32m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.mask.nxv8i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg2e32ff_v_u32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } 
@llvm.riscv.vlseg2ff.mask.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e32ff_v_u32m4_m (vuint32m4_t *v0, vuint32m4_t *v1, vbool8_t mask, vuint32m4_t maskedoff0, vuint32m4_t maskedoff1, const uint32_t *base, size_t *new_vl, size_t vl) { + return vlseg2e32ff_v_u32m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg2e64ff_v_u64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.mask.nxv1i64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg2e64ff_v_u64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e64ff_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, const uint64_t *base, size_t *new_vl, size_t vl) { + return vlseg2e64ff_v_u64m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg3e64ff_v_u64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , i32 } @llvm.riscv.vlseg3ff.mask.nxv1i64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg3e64ff_v_u64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } 
@llvm.riscv.vlseg3ff.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e64ff_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, const uint64_t *base, size_t *new_vl, size_t vl) { + return vlseg3e64ff_v_u64m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg4e64ff_v_u64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , i32 } @llvm.riscv.vlseg4ff.mask.nxv1i64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store i32 [[TMP5]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg4e64ff_v_u64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e64ff_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, const uint64_t *base, size_t *new_vl, size_t vl) { + return vlseg4e64ff_v_u64m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: 
@test_vlseg5e64ff_v_u64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , i32 } @llvm.riscv.vlseg5ff.mask.nxv1i64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 5 +// CHECK-RV32-NEXT: store i32 [[TMP6]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg5e64ff_v_u64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg5e64ff_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, const uint64_t *base, size_t *new_vl, size_t vl) { + return vlseg5e64ff_v_u64m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg6e64ff_v_u64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , i32 } @llvm.riscv.vlseg6ff.mask.nxv1i64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: 
store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 6 +// CHECK-RV32-NEXT: store i32 [[TMP7]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg6e64ff_v_u64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg6e64ff_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, const uint64_t *base, size_t *new_vl, size_t vl) { + return vlseg6e64ff_v_u64m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg7e64ff_v_u64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , i32 } @llvm.riscv.vlseg7ff.mask.nxv1i64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 4 +// 
CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 6 +// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 7 +// CHECK-RV32-NEXT: store i32 [[TMP8]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg7e64ff_v_u64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg7e64ff_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, const uint64_t *base, size_t *new_vl, size_t vl) { + return vlseg7e64ff_v_u64m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg8e64ff_v_u64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i32 } @llvm.riscv.vlseg8ff.mask.nxv1i64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 3 +// 
CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 6 +// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 7 +// CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 8 +// CHECK-RV32-NEXT: store i32 [[TMP9]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg8e64ff_v_u64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 8 +// CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg8e64ff_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vuint64m1_t *v7, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, vuint64m1_t maskedoff7, const uint64_t *base, size_t *new_vl, size_t vl) { + return vlseg8e64ff_v_u64m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg2e64ff_v_u64m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.mask.nxv2i64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], 
align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg2e64ff_v_u64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e64ff_v_u64m2_m (vuint64m2_t *v0, vuint64m2_t *v1, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, const uint64_t *base, size_t *new_vl, size_t vl) { + return vlseg2e64ff_v_u64m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg3e64ff_v_u64m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , i32 } @llvm.riscv.vlseg3ff.mask.nxv2i64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg3e64ff_v_u64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e64ff_v_u64m2_m (vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, const uint64_t *base, size_t *new_vl, size_t vl) { + return vlseg3e64ff_v_u64m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg4e64ff_v_u64m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , i32 } @llvm.riscv.vlseg4ff.mask.nxv2i64.i32( [[MASKEDOFF0:%.*]], 
[[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store i32 [[TMP5]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg4e64ff_v_u64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e64ff_v_u64m2_m (vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vuint64m2_t *v3, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, vuint64m2_t maskedoff3, const uint64_t *base, size_t *new_vl, size_t vl) { + return vlseg4e64ff_v_u64m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg2e64ff_v_u64m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.mask.nxv4i64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg2e64ff_v_u64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// 
CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e64ff_v_u64m4_m (vuint64m4_t *v0, vuint64m4_t *v1, vbool16_t mask, vuint64m4_t maskedoff0, vuint64m4_t maskedoff1, const uint64_t *base, size_t *new_vl, size_t vl) { + return vlseg2e64ff_v_u64m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg2e32ff_v_f32mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.mask.nxv1f32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg2e32ff_v_f32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv1f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e32ff_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, const float *base, size_t *new_vl, size_t vl) { + return vlseg2e32ff_v_f32mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg3e32ff_v_f32mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , i32 } @llvm.riscv.vlseg3ff.mask.nxv1f32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg3e32ff_v_f32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv1f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } 
[[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e32ff_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, const float *base, size_t *new_vl, size_t vl) { + return vlseg3e32ff_v_f32mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg4e32ff_v_f32mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , i32 } @llvm.riscv.vlseg4ff.mask.nxv1f32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store i32 [[TMP5]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg4e32ff_v_f32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv1f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e32ff_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, const float *base, size_t *new_vl, size_t vl) { + return vlseg4e32ff_v_f32mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg5e32ff_v_f32mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , i32 } @llvm.riscv.vlseg5ff.mask.nxv1f32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: 
[[TMP2:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 5 +// CHECK-RV32-NEXT: store i32 [[TMP6]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg5e32ff_v_f32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv1f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg5e32ff_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, const float *base, size_t *new_vl, size_t vl) { + return vlseg5e32ff_v_f32mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg6e32ff_v_f32mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , i32 } @llvm.riscv.vlseg6ff.mask.nxv1f32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * 
[[V5:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 6 +// CHECK-RV32-NEXT: store i32 [[TMP7]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg6e32ff_v_f32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv1f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg6e32ff_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, const float *base, size_t *new_vl, size_t vl) { + return vlseg6e32ff_v_f32mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg7e32ff_v_f32mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , i32 } @llvm.riscv.vlseg7ff.mask.nxv1f32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 6 +// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i32 } 
[[TMP0]], 7 +// CHECK-RV32-NEXT: store i32 [[TMP8]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg7e32ff_v_f32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv1f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg7e32ff_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, const float *base, size_t *new_vl, size_t vl) { + return vlseg7e32ff_v_f32mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg8e32ff_v_f32mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i32 } @llvm.riscv.vlseg8ff.mask.nxv1f32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = 
extractvalue { , , , , , , , , i32 } [[TMP0]], 6 +// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 7 +// CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 8 +// CHECK-RV32-NEXT: store i32 [[TMP9]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg8e32ff_v_f32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv1f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 8 +// CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg8e32ff_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, vfloat32mf2_t maskedoff7, const float *base, size_t *new_vl, size_t vl) { + return vlseg8e32ff_v_f32mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg2e32ff_v_f32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.mask.nxv2f32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: 
@test_vlseg2e32ff_v_f32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv2f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e32ff_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, const float *base, size_t *new_vl, size_t vl) { + return vlseg2e32ff_v_f32m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg3e32ff_v_f32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , i32 } @llvm.riscv.vlseg3ff.mask.nxv2f32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg3e32ff_v_f32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv2f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e32ff_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, const float *base, size_t *new_vl, size_t vl) { + return vlseg3e32ff_v_f32m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg4e32ff_v_f32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , i32 } @llvm.riscv.vlseg4ff.mask.nxv2f32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 1 +// 
CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store i32 [[TMP5]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg4e32ff_v_f32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv2f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e32ff_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, const float *base, size_t *new_vl, size_t vl) { + return vlseg4e32ff_v_f32m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg5e32ff_v_f32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , i32 } @llvm.riscv.vlseg5ff.mask.nxv2f32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 5 +// CHECK-RV32-NEXT: store i32 [[TMP6]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg5e32ff_v_f32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv2f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0 +// 
CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg5e32ff_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, const float *base, size_t *new_vl, size_t vl) { + return vlseg5e32ff_v_f32m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg6e32ff_v_f32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , i32 } @llvm.riscv.vlseg6ff.mask.nxv2f32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 6 +// CHECK-RV32-NEXT: store i32 [[TMP7]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg6e32ff_v_f32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv2f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: 
[[TMP5:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg6e32ff_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, const float *base, size_t *new_vl, size_t vl) { + return vlseg6e32ff_v_f32m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg7e32ff_v_f32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , i32 } @llvm.riscv.vlseg7ff.mask.nxv2f32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 6 +// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 7 +// CHECK-RV32-NEXT: store i32 [[TMP8]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg7e32ff_v_f32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv2f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: 
store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg7e32ff_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, const float *base, size_t *new_vl, size_t vl) { + return vlseg7e32ff_v_f32m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg8e32ff_v_f32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i32 } @llvm.riscv.vlseg8ff.mask.nxv2f32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 6 +// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 7 +// CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 8 +// CHECK-RV32-NEXT: store i32 [[TMP9]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg8e32ff_v_f32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv2f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , 
, , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 8 +// CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg8e32ff_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t *v7, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, vfloat32m1_t maskedoff7, const float *base, size_t *new_vl, size_t vl) { + return vlseg8e32ff_v_f32m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg2e32ff_v_f32m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.mask.nxv4f32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg2e32ff_v_f32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv4f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e32ff_v_f32m2_m (vfloat32m2_t *v0, vfloat32m2_t *v1, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, const float *base, size_t *new_vl, size_t vl) { + return vlseg2e32ff_v_f32m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg3e32ff_v_f32m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , i32 } @llvm.riscv.vlseg3ff.mask.nxv4f32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], float* 
[[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg3e32ff_v_f32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv4f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e32ff_v_f32m2_m (vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, const float *base, size_t *new_vl, size_t vl) { + return vlseg3e32ff_v_f32m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg4e32ff_v_f32m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , i32 } @llvm.riscv.vlseg4ff.mask.nxv4f32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store i32 [[TMP5]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg4e32ff_v_f32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv4f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * 
[[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e32ff_v_f32m2_m (vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t *v3, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, vfloat32m2_t maskedoff3, const float *base, size_t *new_vl, size_t vl) { + return vlseg4e32ff_v_f32m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg2e32ff_v_f32m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.mask.nxv8f32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg2e32ff_v_f32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv8f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e32ff_v_f32m4_m (vfloat32m4_t *v0, vfloat32m4_t *v1, vbool8_t mask, vfloat32m4_t maskedoff0, vfloat32m4_t maskedoff1, const float *base, size_t *new_vl, size_t vl) { + return vlseg2e32ff_v_f32m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg2e64ff_v_f64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.mask.nxv1f64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg2e64ff_v_f64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv1f64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = 
extractvalue { , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e64ff_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, const double *base, size_t *new_vl, size_t vl) { + return vlseg2e64ff_v_f64m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg3e64ff_v_f64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , i32 } @llvm.riscv.vlseg3ff.mask.nxv1f64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg3e64ff_v_f64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv1f64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e64ff_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, const double *base, size_t *new_vl, size_t vl) { + return vlseg3e64ff_v_f64m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg4e64ff_v_f64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , i32 } @llvm.riscv.vlseg4ff.mask.nxv1f64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 4 +// 
CHECK-RV32-NEXT: store i32 [[TMP5]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg4e64ff_v_f64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv1f64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e64ff_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, const double *base, size_t *new_vl, size_t vl) { + return vlseg4e64ff_v_f64m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg5e64ff_v_f64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , i32 } @llvm.riscv.vlseg5ff.mask.nxv1f64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 5 +// CHECK-RV32-NEXT: store i32 [[TMP6]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg5e64ff_v_f64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv1f64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 3 
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg5e64ff_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, const double *base, size_t *new_vl, size_t vl) { + return vlseg5e64ff_v_f64m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg6e64ff_v_f64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , i32 } @llvm.riscv.vlseg6ff.mask.nxv1f64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 6 +// CHECK-RV32-NEXT: store i32 [[TMP7]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg6e64ff_v_f64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv1f64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL:%.*]], align 8 
+// CHECK-RV64-NEXT: ret void +// +void test_vlseg6e64ff_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, const double *base, size_t *new_vl, size_t vl) { + return vlseg6e64ff_v_f64m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg7e64ff_v_f64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , i32 } @llvm.riscv.vlseg7ff.mask.nxv1f64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 6 +// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 7 +// CHECK-RV32-NEXT: store i32 [[TMP8]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg7e64ff_v_f64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv1f64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 7 
+// CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg7e64ff_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, const double *base, size_t *new_vl, size_t vl) { + return vlseg7e64ff_v_f64m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg8e64ff_v_f64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i32 } @llvm.riscv.vlseg8ff.mask.nxv1f64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 5 +// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 6 +// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 7 +// CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 8 +// CHECK-RV32-NEXT: store i32 [[TMP9]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg8e64ff_v_f64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv1f64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: 
[[TMP6:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 8 +// CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg8e64ff_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t *v7, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, vfloat64m1_t maskedoff7, const double *base, size_t *new_vl, size_t vl) { + return vlseg8e64ff_v_f64m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg2e64ff_v_f64m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.mask.nxv2f64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg2e64ff_v_f64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv2f64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e64ff_v_f64m2_m (vfloat64m2_t *v0, vfloat64m2_t *v1, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, const double *base, size_t *new_vl, size_t vl) { + return vlseg2e64ff_v_f64m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg3e64ff_v_f64m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , i32 } @llvm.riscv.vlseg3ff.mask.nxv2f64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , i32 } [[TMP0]], 2 +// 
CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg3e64ff_v_f64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv2f64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e64ff_v_f64m2_m (vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, const double *base, size_t *new_vl, size_t vl) { + return vlseg3e64ff_v_f64m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); +} + +// CHECK-RV32-LABEL: @test_vlseg4e64ff_v_f64m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , i32 } @llvm.riscv.vlseg4ff.mask.nxv2f64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 0 +// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 1 +// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store i32 [[TMP5]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vlseg4e64ff_v_f64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv2f64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e64ff_v_f64m2_m (vfloat64m2_t *v0, 
vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t *v3, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, vfloat64m2_t maskedoff3, const double *base, size_t *new_vl, size_t vl) {
+  return vlseg4e64ff_v_f64m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlseg2e64ff_v_f64m4_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x double>, <vscale x 4 x double>, i32 } @llvm.riscv.vlseg2ff.mask.nxv4f64.i32(<vscale x 4 x double> [[MASKEDOFF0:%.*]], <vscale x 4 x double> [[MASKEDOFF1:%.*]], double* [[BASE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x double>, <vscale x 4 x double>, i32 } [[TMP0]], 0
+// CHECK-RV32-NEXT:    store <vscale x 4 x double> [[TMP1]], <vscale x 4 x double>* [[V0:%.*]], align 4
+// CHECK-RV32-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x double>, <vscale x 4 x double>, i32 } [[TMP0]], 1
+// CHECK-RV32-NEXT:    store <vscale x 4 x double> [[TMP2]], <vscale x 4 x double>* [[V1:%.*]], align 4
+// CHECK-RV32-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x double>, <vscale x 4 x double>, i32 } [[TMP0]], 2
+// CHECK-RV32-NEXT:    store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 4
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_vlseg2e64ff_v_f64m4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x double>, <vscale x 4 x double>, i64 } @llvm.riscv.vlseg2ff.mask.nxv4f64.i64(<vscale x 4 x double> [[MASKEDOFF0:%.*]], <vscale x 4 x double> [[MASKEDOFF1:%.*]], double* [[BASE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x double>, <vscale x 4 x double>, i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x double> [[TMP1]], <vscale x 4 x double>* [[V0:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x double>, <vscale x 4 x double>, i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x double> [[TMP2]], <vscale x 4 x double>* [[V1:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x double>, <vscale x 4 x double>, i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg2e64ff_v_f64m4_m (vfloat64m4_t *v0, vfloat64m4_t *v1, vbool16_t mask, vfloat64m4_t maskedoff0, vfloat64m4_t maskedoff1, const double *base, size_t *new_vl, size_t vl) {
+  return vlseg2e64ff_v_f64m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl);
+}
diff --git a/clang/utils/TableGen/RISCVVEmitter.cpp b/clang/utils/TableGen/RISCVVEmitter.cpp
index 467a8b2f52ac..2381ab526792 100644
--- a/clang/utils/TableGen/RISCVVEmitter.cpp
+++ b/clang/utils/TableGen/RISCVVEmitter.cpp
@@ -165,6 +165,7 @@ private:
   // InputTypes. -1 means the return type.
   std::vector<int64_t> IntrinsicTypes;
   uint8_t RISCVExtensions = 0;
+  unsigned NF = 1;
 
 public:
   RVVIntrinsic(StringRef Name, StringRef Suffix, StringRef MangledName,
@@ -172,7 +173,7 @@ public:
                bool HasMaskedOffOperand, bool HasVL, bool HasNoMaskedOverloaded,
                bool HasAutoDef, StringRef ManualCodegen, const RVVTypes &Types,
                const std::vector<int64_t> &IntrinsicTypes,
-               StringRef RequiredExtension);
+               StringRef RequiredExtension, unsigned NF);
   ~RVVIntrinsic() = default;
 
   StringRef getName() const { return Name; }
@@ -187,6 +188,7 @@ public:
   StringRef getIRName() const { return IRName; }
   StringRef getManualCodegen() const { return ManualCodegen; }
   uint8_t getRISCVExtensions() const { return RISCVExtensions; }
+  unsigned getNF() const { return NF; }
 
   // Return the type string for a BUILTIN() macro in Builtins.def.
   std::string getBuiltinTypeStr() const;
@@ -231,7 +233,7 @@ private:
   /// and LMUL with type transformers). It also record result of type in legal
   /// or illegal set to avoid compute the same config again. The result maybe
   /// have illegal RVVType.
-  Optional<RVVTypes> computeTypes(BasicType BT, int Log2LMUL,
+  Optional<RVVTypes> computeTypes(BasicType BT, int Log2LMUL, unsigned NF,
                                   ArrayRef<std::string> PrototypeSeq);
   Optional<RVVTypePtr> computeType(BasicType BT, int Log2LMUL, StringRef Proto);
 
@@ -436,6 +438,11 @@ void RVVType::initBuiltinStr() {
     return;
   }
   BuiltinStr = "q" + utostr(Scale.getValue()) + BuiltinStr;
+  // Pointer to vector types. Defined for Zvlsseg load intrinsics.
+  // Zvlsseg load intrinsics have pointer type arguments to store the loaded
+  // vector values.
+  if (IsPointer)
+    BuiltinStr += "*";
 }
 
 void RVVType::initClangBuiltinStr() {
@@ -749,11 +756,11 @@ RVVIntrinsic::RVVIntrinsic(StringRef NewName, StringRef Suffix,
                            bool HasNoMaskedOverloaded, bool HasAutoDef,
                            StringRef ManualCodegen, const RVVTypes &OutInTypes,
                            const std::vector<int64_t> &NewIntrinsicTypes,
-                           StringRef RequiredExtension)
+                           StringRef RequiredExtension, unsigned NF)
     : IRName(IRName), HasSideEffects(HasSideEffects), IsMask(IsMask),
       HasMaskedOffOperand(HasMaskedOffOperand), HasVL(HasVL),
       HasNoMaskedOverloaded(HasNoMaskedOverloaded), HasAutoDef(HasAutoDef),
-      ManualCodegen(ManualCodegen.str()) {
+      ManualCodegen(ManualCodegen.str()), NF(NF) {
   // Init Name and MangledName
   Name = NewName.str();
@@ -788,7 +795,7 @@ RVVIntrinsic::RVVIntrinsic(StringRef NewName, StringRef Suffix,
   if (IsMask && HasMaskedOffOperand) {
     for (auto &I : IntrinsicTypes) {
       if (I >= 0)
-        I += 1;
+        I += NF;
     }
   }
 }
@@ -805,6 +812,8 @@ std::string RVVIntrinsic::getBuiltinTypeStr() const {
 void RVVIntrinsic::emitCodeGenSwitchBody(raw_ostream &OS) const {
   if (!getIRName().empty())
     OS << "  ID = Intrinsic::riscv_" + getIRName() + ";\n";
+  if (NF >= 2)
+    OS << "  NF = " + utostr(getNF()) + ";\n";
   if (hasManualCodegen()) {
     OS << ManualCodegen;
     OS << "break;\n";
@@ -1076,6 +1085,7 @@ void RVVEmitter::createRVVIntrinsics(
     StringRef RequiredExtension = R->getValueAsString("RequiredExtension");
     StringRef IRName = R->getValueAsString("IRName");
     StringRef IRNameMask = R->getValueAsString("IRNameMask");
+    unsigned NF = R->getValueAsInt("NF");
     StringRef HeaderCodeStr = R->getValueAsString("HeaderCode");
 
     bool HasAutoDef = HeaderCodeStr.empty();
@@ -1093,10 +1103,31 @@ void RVVEmitter::createRVVIntrinsics(
     SmallVector<std::string, 8> ProtoMaskSeq = ProtoSeq;
     if (HasMask) {
       // If HasMaskedOffOperand, insert result type as first input operand.
-      if (HasMaskedOffOperand)
-        ProtoMaskSeq.insert(ProtoMaskSeq.begin() + 1, ProtoSeq[0]);
-      // If HasMask, insert 'm' as first input operand.
-      ProtoMaskSeq.insert(ProtoMaskSeq.begin() + 1, "m");
+      if (HasMaskedOffOperand) {
+        if (NF == 1) {
+          ProtoMaskSeq.insert(ProtoMaskSeq.begin() + 1, ProtoSeq[0]);
+        } else {
+          // Convert
+          // (void, op0 address, op1 address, ...)
+          // to
+          // (void, op0 address, op1 address, ..., maskedoff0, maskedoff1, ...)
+          for (unsigned I = 0; I < NF; ++I)
+            ProtoMaskSeq.insert(
+                ProtoMaskSeq.begin() + NF + 1,
+                ProtoSeq[1].substr(1)); // Use substr(1) to skip '*'
+        }
+      }
+      if (HasMaskedOffOperand && NF > 1) {
+        // Convert
+        // (void, op0 address, op1 address, ..., maskedoff0, maskedoff1, ...)
+        // to
+        // (void, op0 address, op1 address, ..., mask, maskedoff0, maskedoff1,
+        // ...)
+        ProtoMaskSeq.insert(ProtoMaskSeq.begin() + NF + 1, "m");
+      } else {
+        // If HasMask, insert 'm' as first input operand.
+        ProtoMaskSeq.insert(ProtoMaskSeq.begin() + 1, "m");
+      }
     }
     // If HasVL, append 'z' to last operand
     if (HasVL) {
@@ -1107,7 +1138,7 @@
     // Create Intrinsics for each type and LMUL.
     for (char I : TypeRange) {
       for (int Log2LMUL : Log2LMULList) {
-        Optional<RVVTypes> Types = computeTypes(I, Log2LMUL, ProtoSeq);
+        Optional<RVVTypes> Types = computeTypes(I, Log2LMUL, NF, ProtoSeq);
         // Ignored to create new intrinsic if there are any illegal types.
         if (!Types.hasValue())
           continue;
@@ -1118,16 +1149,16 @@
           Name, SuffixStr, MangledName, IRName, HasSideEffects,
           /*IsMask=*/false, /*HasMaskedOffOperand=*/false, HasVL,
           HasNoMaskedOverloaded, HasAutoDef, ManualCodegen, Types.getValue(),
-          IntrinsicTypes, RequiredExtension));
+          IntrinsicTypes, RequiredExtension, NF));
       if (HasMask) {
         // Create a mask intrinsic
         Optional<RVVTypes> MaskTypes =
-            computeTypes(I, Log2LMUL, ProtoMaskSeq);
+            computeTypes(I, Log2LMUL, NF, ProtoMaskSeq);
         Out.push_back(std::make_unique<RVVIntrinsic>(
             Name, SuffixStr, MangledName, IRNameMask, HasSideEffects,
             /*IsMask=*/true, HasMaskedOffOperand, HasVL,
             HasNoMaskedOverloaded, HasAutoDef, ManualCodegenMask,
-            MaskTypes.getValue(), IntrinsicTypes, RequiredExtension));
+            MaskTypes.getValue(), IntrinsicTypes, RequiredExtension, NF));
       }
     } // end for Log2LMULList
   } // end for TypeRange
@@ -1135,8 +1166,12 @@
 }
 
 Optional<RVVTypes>
-RVVEmitter::computeTypes(BasicType BT, int Log2LMUL,
+RVVEmitter::computeTypes(BasicType BT, int Log2LMUL, unsigned NF,
                          ArrayRef<std::string> PrototypeSeq) {
+  // LMUL x NF must be less than or equal to 8.
+  if ((Log2LMUL >= 1) && (1 << Log2LMUL) * NF > 8)
+    return llvm::None;
+
   RVVTypes Types;
   for (const std::string &Proto : PrototypeSeq) {
     auto T = computeType(BT, Log2LMUL, Proto);
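The new guard at the top of computeTypes encodes the Zvlsseg register-group constraint: a segment type occupies LMUL x NF vector registers, and that product must fit in the 8 architectural vector registers, so oversized (LMUL, NF) combinations are skipped instead of generating intrinsics for them. A minimal standalone sketch of that rule follows; the helper name and the main driver are illustrative only and are not part of the patch.

// seg_config_sketch.cpp - standalone illustration of the LMUL x NF <= 8 rule.
#include <cstdio>

// Hypothetical helper mirroring the check added to computeTypes above.
static bool isLegalZvlssegConfig(int Log2LMUL, unsigned NF) {
  // Fractional and m1 LMUL (Log2LMUL <= 0) are never rejected by this check;
  // for m2/m4/m8 the NF fields must fit in the 8 vector registers.
  return Log2LMUL < 1 || (1u << Log2LMUL) * NF <= 8;
}

int main() {
  std::printf("%d\n", isLegalZvlssegConfig(1, 4)); // 1: LMUL=2, NF=4 uses 8 regs
  std::printf("%d\n", isLegalZvlssegConfig(1, 5)); // 0: LMUL=2, NF=5 would need 10 regs
  std::printf("%d\n", isLegalZvlssegConfig(3, 2)); // 0: LMUL=8 leaves no room for a second field
  return 0;
}

Per this check, for example, vlseg5 variants are only generated up to LMUL=1, while vlseg2 variants exist up to LMUL=4.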