[RISCV][Clang] Add more RVV load/store intrinsic functions.

Support the following instruction groups (see the usage sketch after this list):
1. Mask load and store
2. Vector strided instructions
3. Vector indexed store instructions
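
A minimal C sketch exercising each group through the new intrinsics; the
variant suffixes (_i8m1, _b8) follow the v0.x naming used by the tests
below and are assumptions for illustration, not part of this diff:

#include <riscv_vector.h>
#include <stddef.h>
#include <stdint.h>

// Hedged sketch; variant names are assumed, see note above.
void sketch(const int8_t *src, int8_t *dst, const uint8_t *mask_src,
            uint8_t *mask_dst, vuint8m1_t bindex, ptrdiff_t bstride,
            size_t vl) {
  vbool8_t m = vle1_v_b8(mask_src, vl);         // 1. mask load
  vse1_v_b8(mask_dst, m, vl);                   //    mask store
  vint8m1_t v = vlse8_v_i8m1(src, bstride, vl); // 2. strided load
  vsse8_v_i8m1(dst, bstride, v, vl);            //    strided store
  vsoxei8_v_i8m1(dst, bindex, v, vl);           // 3. indexed (scatter) store
}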

Authored-by: Roger Ferrer Ibanez <rofirrim@gmail.com>
Co-Authored-by: Zakk Chen <zakk.chen@sifive.com>

Reviewed By: craig.topper

Differential Revision: https://reviews.llvm.org/D99965
Zakk Chen 2021-04-11 07:25:06 -07:00
parent c680b0dabf
commit e5a8219264
12 changed files with 33246 additions and 265 deletions

@@ -499,49 +499,31 @@ class IsFloat<string type> {
bit val = !or(!eq(type, "h"), !eq(type, "f"), !eq(type, "d"));
}
multiclass RVVVLEBuiltin<list<string> types> {
let Name = NAME # "_v",
IRName = "vle",
IRNameMask ="vle_mask",
HasNoMaskedOverloaded = false,
ManualCodegen = [{
IntrinsicTypes = {ResultType, Ops[1]->getType()};
Ops[0] = Builder.CreateBitCast(Ops[0], ResultType->getPointerTo());
}],
ManualCodegenMask= [{
IntrinsicTypes = {ResultType, Ops[3]->getType()};
Ops[1] = Builder.CreateBitCast(Ops[1], ResultType->getPointerTo());
}] in {
foreach type = types in {
def : RVVBuiltin<"v", "vPCe", type>;
if !not(IsFloat<type>.val) then {
def : RVVBuiltin<"Uv", "UvPCUe", type>;
}
}
let HasNoMaskedOverloaded = false,
ManualCodegen = [{
IntrinsicTypes = {ResultType, Ops[1]->getType()};
Ops[0] = Builder.CreateBitCast(Ops[0], ResultType->getPointerTo());
}],
ManualCodegenMask= [{
IntrinsicTypes = {ResultType, Ops[3]->getType()};
Ops[1] = Builder.CreateBitCast(Ops[1], ResultType->getPointerTo());
}] in {
class RVVVLEMaskBuiltin : RVVBuiltin<"m", "mPCUe", "c"> {
let Name = "vle1_v";
let IRName = "vle1";
let HasMask = false;
}
}
multiclass RVVVLEBuiltin<list<string> types> {
let Name = NAME # "_v",
IRName = "vle",
IRNameMask ="vle_mask" in {
foreach type = types in {
def : RVVBuiltin<"v", "vPCe", type>;
if !not(IsFloat<type>.val) then {
def : RVVBuiltin<"Uv", "UvPCUe", type>;
}
}
}
}
}
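
For reference, a hedged C sketch of what the hoisted let block plus
RVVVLEMaskBuiltin produce; the vle1_v_b8 variant name is assumed from the
"m"/"c" prototype and is not spelled out in this hunk:

#include <riscv_vector.h>
#include <stddef.h>
#include <stdint.h>

// vle1 loads vl mask bits (one per element); HasMask = false, so it
// takes no mask operand of its own. The _b8 suffix is an assumption.
vbool8_t load_mask(const uint8_t *base, size_t vl) {
  return vle1_v_b8(base, vl);
}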
@@ -594,29 +576,130 @@ multiclass RVVVLEFFBuiltin<list<string> types> {
}
}
multiclass RVVVSEBuiltin<list<string> types> {
multiclass RVVVLSEBuiltin<list<string> types> {
let Name = NAME # "_v",
IRName = "vse",
IRNameMask = "vse_mask",
IRName = "vlse",
IRNameMask ="vlse_mask",
HasNoMaskedOverloaded = false,
ManualCodegen = [{
IntrinsicTypes = {ResultType, Ops[2]->getType()};
Ops[0] = Builder.CreateBitCast(Ops[0], ResultType->getPointerTo());
}],
ManualCodegenMask= [{
IntrinsicTypes = {ResultType, Ops[4]->getType()};
Ops[1] = Builder.CreateBitCast(Ops[1], ResultType->getPointerTo());
}] in {
foreach type = types in {
def : RVVBuiltin<"v", "vPCet", type>;
if !not(IsFloat<type>.val) then {
def : RVVBuiltin<"Uv", "UvPCUet", type>;
}
}
}
}
multiclass RVVIndexedLoad<string op> {
let ManualCodegen = [{
IntrinsicTypes = {ResultType, Ops[1]->getType(), Ops[2]->getType()};
Ops[0] = Builder.CreateBitCast(Ops[0], ResultType->getPointerTo());
}],
ManualCodegenMask = [{
IntrinsicTypes = {ResultType, Ops[2]->getType(), Ops[4]->getType()};
Ops[1] = Builder.CreateBitCast(Ops[1], ResultType->getPointerTo());
}] in {
foreach type = TypeList in {
foreach eew_list = EEWList in {
defvar eew = eew_list[0];
defvar eew_type = eew_list[1];
let Name = op # eew # "_v", IRName = op, IRNameMask = op # "_mask" in {
def: RVVBuiltin<"v", "vPCe" # eew_type # "Uv", type>;
if !not(IsFloat<type>.val) then {
def: RVVBuiltin<"Uv", "UvPCUe" # eew_type # "Uv", type>;
}
}
}
}
}
}
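
RVVIndexedLoad crosses TypeList with EEWList, so each data type gets one
builtin per index EEW. A hedged sketch of the simplest case, where the
index EEW equals the data EEW and the index vector keeps the data LMUL
(the vloxei8_v_i8m1 variant name is an assumption):

#include <riscv_vector.h>
#include <stddef.h>
#include <stdint.h>

// Ordered indexed load (gather); vluxei8 would be the unordered form.
vint8m1_t gather_i8(const int8_t *base, vuint8m1_t bindex, size_t vl) {
  return vloxei8_v_i8m1(base, bindex, vl);
}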
let HasMaskedOffOperand = false,
PermuteOperands = [1, 0], // C/C++ Operand: (ptr, value, vl). Builtin: (value, ptr, vl)
ManualCodegen = [{
Ops[1] = Builder.CreateBitCast(Ops[1], Ops[0]->getType()->getPointerTo());
IntrinsicTypes = {Ops[0]->getType(), Ops[2]->getType()};
}],
ManualCodegenMask= [{
Ops[1] = Builder.CreateBitCast(Ops[1], Ops[0]->getType()->getPointerTo());
IntrinsicTypes = {Ops[0]->getType(), Ops[3]->getType()};
}] in {
class RVVVSEMaskBuiltin : RVVBuiltin<"m", "0mPUe", "c"> {
let Name = "vse1_v";
let IRName = "vse1";
let HasMask = false;
}
multiclass RVVVSEBuiltin<list<string> types> {
let Name = NAME # "_v",
IRName = "vse",
IRNameMask = "vse_mask" in {
foreach type = types in {
def : RVVBuiltin<"v", "0vPe", type>;
if !not(IsFloat<type>.val) then {
def : RVVBuiltin<"Uv", "0UvPUe", type>;
}
}
}
}
}
multiclass RVVVSSEBuiltin<list<string> types> {
let Name = NAME # "_v",
IRName = "vsse",
IRNameMask = "vsse_mask",
HasMaskedOffOperand = false,
PermuteOperands = [1, 0], // C/C++ Operand: (ptr, value, vl). Builtin: (value, ptr, vl)
PermuteOperands = [1, 2, 0], // C/C++ Operand: (ptr, stride, value, vl). Builtin: (value, ptr, stride, vl)
ManualCodegen = [{
Ops[1] = Builder.CreateBitCast(Ops[1], Ops[0]->getType()->getPointerTo());
IntrinsicTypes = {Ops[0]->getType(), Ops[2]->getType()};
IntrinsicTypes = {Ops[0]->getType(), Ops[3]->getType()};
}],
ManualCodegenMask= [{
Ops[1] = Builder.CreateBitCast(Ops[1], Ops[0]->getType()->getPointerTo());
IntrinsicTypes = {Ops[0]->getType(), Ops[3]->getType()};
IntrinsicTypes = {Ops[0]->getType(), Ops[4]->getType()};
}] in {
foreach type = types in {
def : RVVBuiltin<"v", "0vPe", type>;
def : RVVBuiltin<"v", "0vPet", type>;
if !not(IsFloat<type>.val) then {
def : RVVBuiltin<"Uv", "0UvPUe", type>;
def : RVVBuiltin<"Uv", "0UvPUet", type>;
}
}
}
}
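
Per the PermuteOperands comment above, the user-facing strided store takes
(ptr, stride, value, vl) while the builtin is rebuilt as (value, ptr,
stride, vl). A hedged sketch of one generated signature (the _i8m1 variant
name is an assumption):

#include <riscv_vector.h>
#include <stddef.h>
#include <stdint.h>

// Strided store: consecutive elements land bstride bytes apart.
void store_strided_i8(int8_t *base, ptrdiff_t bstride, vint8m1_t value,
                      size_t vl) {
  vsse8_v_i8m1(base, bstride, value, vl);
}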
multiclass RVVIndexedStore<string op> {
let HasMaskedOffOperand = false,
PermuteOperands = [1, 2, 0], // C/C++ Operand: (ptr, index, value, vl). Builtin: (value, ptr, index, vl)
ManualCodegen = [{
Ops[1] = Builder.CreateBitCast(Ops[1],Ops[0]->getType()->getPointerTo());
IntrinsicTypes = {Ops[0]->getType(), Ops[2]->getType(), Ops[3]->getType()};
}],
ManualCodegenMask= [{
Ops[1] = Builder.CreateBitCast(Ops[1], Ops[0]->getType()->getPointerTo());
IntrinsicTypes = {Ops[0]->getType(), Ops[2]->getType(), Ops[4]->getType()};
}] in {
foreach type = TypeList in {
foreach eew_list = EEWList in {
defvar eew = eew_list[0];
defvar eew_type = eew_list[1];
let Name = op # eew # "_v", IRName = op, IRNameMask = op # "_mask" in {
def : RVVBuiltin<"v", "0vPe" # eew_type # "Uv", type>;
if !not(IsFloat<type>.val) then {
def : RVVBuiltin<"Uv", "0UvPUe" # eew_type # "Uv", type>;
}
}
}
}
}
}
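
Likewise for indexed stores: the user-facing order is (ptr, index, value,
vl), and PermuteOperands = [1, 2, 0] rebuilds the builtin order (value,
ptr, index, vl). A hedged sketch of the ordered (scatter) form, with an
assumed variant name:

#include <riscv_vector.h>
#include <stddef.h>
#include <stdint.h>

// Ordered indexed store; vsuxei8 would be the unordered form.
void scatter_i8(int8_t *base, vuint8m1_t bindex, vint8m1_t value,
                size_t vl) {
  vsoxei8_v_i8m1(base, bindex, value, vl);
}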
// 6. Configuration-Setting Instructions
// 6.1. vsetvli/vsetvl instructions
let HasVL = false,
@@ -691,25 +774,42 @@ let HasVL = false,
// 7. Vector Loads and Stores
// 7.4. Vector Unit-Stride Instructions
def vle1: RVVVLEMaskBuiltin;
defm vle8: RVVVLEBuiltin<["c"]>;
defm vle16: RVVVLEBuiltin<["s"]>;
defm vle32: RVVVLEBuiltin<["i","f"]>;
defm vle64: RVVVLEBuiltin<["l","d"]>;
defm vle8ff: RVVVLEFFBuiltin<["c"]>;
defm vle16ff: RVVVLEFFBuiltin<["s"]>;
defm vle32ff: RVVVLEFFBuiltin<["i", "f"]>;
defm vle64ff: RVVVLEFFBuiltin<["l", "d"]>;
def vse1 : RVVVSEMaskBuiltin;
defm vse8 : RVVVSEBuiltin<["c"]>;
defm vse16: RVVVSEBuiltin<["s"]>;
defm vse32: RVVVSEBuiltin<["i","f"]>;
defm vse64: RVVVSEBuiltin<["l","d"]>;
// 7.5. Vector Strided Instructions
defm vlse8: RVVVLSEBuiltin<["c"]>;
defm vlse16: RVVVLSEBuiltin<["s"]>;
defm vlse32: RVVVLSEBuiltin<["i","f"]>;
defm vlse64: RVVVLSEBuiltin<["l","d"]>;
defm vsse8 : RVVVSSEBuiltin<["c"]>;
defm vsse16: RVVVSSEBuiltin<["s"]>;
defm vsse32: RVVVSSEBuiltin<["i","f"]>;
defm vsse64: RVVVSSEBuiltin<["l","d"]>;
// 7.6. Vector Indexed Instructions
defm : RVVIndexedLoad<"vluxei">;
defm : RVVIndexedLoad<"vloxei">;
defm : RVVIndexedStore<"vsuxei">;
defm : RVVIndexedStore<"vsoxei">;
// 7.7. Unit-stride Fault-Only-First Loads
defm vle8ff: RVVVLEFFBuiltin<["c"]>;
defm vle16ff: RVVVLEFFBuiltin<["s"]>;
defm vle32ff: RVVVLEFFBuiltin<["i", "f"]>;
defm vle64ff: RVVVLEFFBuiltin<["l", "d"]>;
// 12. Vector Integer Arithmetic Instructions
// 12.1. Vector Single-Width Integer Add and Subtract
defm vadd : RVVIntBinBuiltinSet;

@@ -0,0 +1,965 @@
// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
// REQUIRES: riscv-registered-target
// RUN: %clang_cc1 -triple riscv32 -target-feature +f -target-feature +d -target-feature +experimental-v \
// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \
// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +f -target-feature +d -target-feature +experimental-v \
// RUN: -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
// ASM-NOT: warning
#include <riscv_vector.h>
// CHECK-RV32-LABEL: @test_vlse8_v_i8mf8_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vlse.mask.nxv1i8.i32(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8>* [[TMP0]], i32 [[BSTRIDE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8:#.*]]
// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vlse8_v_i8mf8_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vlse.mask.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8:#.*]]
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP1]]
//
vint8mf8_t test_vlse8_v_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff,
const int8_t *base, ptrdiff_t bstride,
size_t vl) {
return vlse8(mask, maskedoff, base, bstride, vl);
}
// CHECK-RV32-LABEL: @test_vlse8_v_i8mf4_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vlse.mask.nxv2i8.i32(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8>* [[TMP0]], i32 [[BSTRIDE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vlse8_v_i8mf4_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vlse.mask.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP1]]
//
vint8mf4_t test_vlse8_v_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff,
const int8_t *base, ptrdiff_t bstride,
size_t vl) {
return vlse8(mask, maskedoff, base, bstride, vl);
}
// CHECK-RV32-LABEL: @test_vlse8_v_i8mf2_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vlse.mask.nxv4i8.i32(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8>* [[TMP0]], i32 [[BSTRIDE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vlse8_v_i8mf2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vlse.mask.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP1]]
//
vint8mf2_t test_vlse8_v_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff,
const int8_t *base, ptrdiff_t bstride,
size_t vl) {
return vlse8(mask, maskedoff, base, bstride, vl);
}
// CHECK-RV32-LABEL: @test_vlse8_v_i8m1_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vlse.mask.nxv8i8.i32(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8>* [[TMP0]], i32 [[BSTRIDE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vlse8_v_i8m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vlse.mask.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP1]]
//
vint8m1_t test_vlse8_v_i8m1_m(vbool8_t mask, vint8m1_t maskedoff,
const int8_t *base, ptrdiff_t bstride,
size_t vl) {
return vlse8(mask, maskedoff, base, bstride, vl);
}
// CHECK-RV32-LABEL: @test_vlse8_v_i8m2_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vlse.mask.nxv16i8.i32(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8>* [[TMP0]], i32 [[BSTRIDE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vlse8_v_i8m2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vlse.mask.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP1]]
//
vint8m2_t test_vlse8_v_i8m2_m(vbool4_t mask, vint8m2_t maskedoff,
const int8_t *base, ptrdiff_t bstride,
size_t vl) {
return vlse8(mask, maskedoff, base, bstride, vl);
}
// CHECK-RV32-LABEL: @test_vlse8_v_i8m4_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 32 x i8>*
// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vlse.mask.nxv32i8.i32(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8>* [[TMP0]], i32 [[BSTRIDE:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vlse8_v_i8m4_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 32 x i8>*
// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vlse.mask.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP1]]
//
vint8m4_t test_vlse8_v_i8m4_m(vbool2_t mask, vint8m4_t maskedoff,
const int8_t *base, ptrdiff_t bstride,
size_t vl) {
return vlse8(mask, maskedoff, base, bstride, vl);
}
// CHECK-RV32-LABEL: @test_vlse8_v_i8m8_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 64 x i8>*
// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vlse.mask.nxv64i8.i32(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8>* [[TMP0]], i32 [[BSTRIDE:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vlse8_v_i8m8_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 64 x i8>*
// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vlse.mask.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP1]]
//
vint8m8_t test_vlse8_v_i8m8_m(vbool1_t mask, vint8m8_t maskedoff,
const int8_t *base, ptrdiff_t bstride,
size_t vl) {
return vlse8(mask, maskedoff, base, bstride, vl);
}
// CHECK-RV32-LABEL: @test_vlse16_v_i16mf4_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vlse.mask.nxv1i16.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16>* [[TMP0]], i32 [[BSTRIDE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vlse16_v_i16mf4_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vlse.mask.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP1]]
//
vint16mf4_t test_vlse16_v_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff,
const int16_t *base, ptrdiff_t bstride,
size_t vl) {
return vlse16(mask, maskedoff, base, bstride, vl);
}
// CHECK-RV32-LABEL: @test_vlse16_v_i16mf2_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vlse.mask.nxv2i16.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16>* [[TMP0]], i32 [[BSTRIDE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vlse16_v_i16mf2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vlse.mask.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP1]]
//
vint16mf2_t test_vlse16_v_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff,
const int16_t *base, ptrdiff_t bstride,
size_t vl) {
return vlse16(mask, maskedoff, base, bstride, vl);
}
// CHECK-RV32-LABEL: @test_vlse16_v_i16m1_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vlse.mask.nxv4i16.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16>* [[TMP0]], i32 [[BSTRIDE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vlse16_v_i16m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vlse.mask.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP1]]
//
vint16m1_t test_vlse16_v_i16m1_m(vbool16_t mask, vint16m1_t maskedoff,
const int16_t *base, ptrdiff_t bstride,
size_t vl) {
return vlse16(mask, maskedoff, base, bstride, vl);
}
// CHECK-RV32-LABEL: @test_vlse16_v_i16m2_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vlse.mask.nxv8i16.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16>* [[TMP0]], i32 [[BSTRIDE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vlse16_v_i16m2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vlse.mask.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP1]]
//
vint16m2_t test_vlse16_v_i16m2_m(vbool8_t mask, vint16m2_t maskedoff,
const int16_t *base, ptrdiff_t bstride,
size_t vl) {
return vlse16(mask, maskedoff, base, bstride, vl);
}
// CHECK-RV32-LABEL: @test_vlse16_v_i16m4_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vlse.mask.nxv16i16.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16>* [[TMP0]], i32 [[BSTRIDE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vlse16_v_i16m4_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vlse.mask.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP1]]
//
vint16m4_t test_vlse16_v_i16m4_m(vbool4_t mask, vint16m4_t maskedoff,
const int16_t *base, ptrdiff_t bstride,
size_t vl) {
return vlse16(mask, maskedoff, base, bstride, vl);
}
// CHECK-RV32-LABEL: @test_vlse16_v_i16m8_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 32 x i16>*
// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vlse.mask.nxv32i16.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16>* [[TMP0]], i32 [[BSTRIDE:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vlse16_v_i16m8_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 32 x i16>*
// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vlse.mask.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP1]]
//
vint16m8_t test_vlse16_v_i16m8_m(vbool2_t mask, vint16m8_t maskedoff,
const int16_t *base, ptrdiff_t bstride,
size_t vl) {
return vlse16(mask, maskedoff, base, bstride, vl);
}
// CHECK-RV32-LABEL: @test_vlse32_v_i32mf2_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vlse.mask.nxv1i32.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32>* [[TMP0]], i32 [[BSTRIDE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vlse32_v_i32mf2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vlse.mask.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP1]]
//
vint32mf2_t test_vlse32_v_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff,
const int32_t *base, ptrdiff_t bstride,
size_t vl) {
return vlse32(mask, maskedoff, base, bstride, vl);
}
// CHECK-RV32-LABEL: @test_vlse32_v_i32m1_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vlse.mask.nxv2i32.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32>* [[TMP0]], i32 [[BSTRIDE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vlse32_v_i32m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vlse.mask.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP1]]
//
vint32m1_t test_vlse32_v_i32m1_m(vbool32_t mask, vint32m1_t maskedoff,
const int32_t *base, ptrdiff_t bstride,
size_t vl) {
return vlse32(mask, maskedoff, base, bstride, vl);
}
// CHECK-RV32-LABEL: @test_vlse32_v_i32m2_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vlse.mask.nxv4i32.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32>* [[TMP0]], i32 [[BSTRIDE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vlse32_v_i32m2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vlse.mask.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP1]]
//
vint32m2_t test_vlse32_v_i32m2_m(vbool16_t mask, vint32m2_t maskedoff,
const int32_t *base, ptrdiff_t bstride,
size_t vl) {
return vlse32(mask, maskedoff, base, bstride, vl);
}
// CHECK-RV32-LABEL: @test_vlse32_v_i32m4_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vlse.mask.nxv8i32.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32>* [[TMP0]], i32 [[BSTRIDE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vlse32_v_i32m4_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vlse.mask.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP1]]
//
vint32m4_t test_vlse32_v_i32m4_m(vbool8_t mask, vint32m4_t maskedoff,
const int32_t *base, ptrdiff_t bstride,
size_t vl) {
return vlse32(mask, maskedoff, base, bstride, vl);
}
// CHECK-RV32-LABEL: @test_vlse32_v_i32m8_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vlse.mask.nxv16i32.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32>* [[TMP0]], i32 [[BSTRIDE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vlse32_v_i32m8_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vlse.mask.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP1]]
//
vint32m8_t test_vlse32_v_i32m8_m(vbool4_t mask, vint32m8_t maskedoff,
const int32_t *base, ptrdiff_t bstride,
size_t vl) {
return vlse32(mask, maskedoff, base, bstride, vl);
}
// CHECK-RV32-LABEL: @test_vlse64_v_i64m1_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vlse.mask.nxv1i64.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64>* [[TMP0]], i32 [[BSTRIDE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vlse64_v_i64m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vlse.mask.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP1]]
//
vint64m1_t test_vlse64_v_i64m1_m(vbool64_t mask, vint64m1_t maskedoff,
const int64_t *base, ptrdiff_t bstride,
size_t vl) {
return vlse64(mask, maskedoff, base, bstride, vl);
}
// CHECK-RV32-LABEL: @test_vlse64_v_i64m2_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vlse.mask.nxv2i64.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64>* [[TMP0]], i32 [[BSTRIDE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vlse64_v_i64m2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vlse.mask.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP1]]
//
vint64m2_t test_vlse64_v_i64m2_m(vbool32_t mask, vint64m2_t maskedoff,
const int64_t *base, ptrdiff_t bstride,
size_t vl) {
return vlse64(mask, maskedoff, base, bstride, vl);
}
// CHECK-RV32-LABEL: @test_vlse64_v_i64m4_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vlse.mask.nxv4i64.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64>* [[TMP0]], i32 [[BSTRIDE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vlse64_v_i64m4_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vlse.mask.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP1]]
//
vint64m4_t test_vlse64_v_i64m4_m(vbool16_t mask, vint64m4_t maskedoff,
const int64_t *base, ptrdiff_t bstride,
size_t vl) {
return vlse64(mask, maskedoff, base, bstride, vl);
}
// CHECK-RV32-LABEL: @test_vlse64_v_i64m8_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vlse.mask.nxv8i64.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64>* [[TMP0]], i32 [[BSTRIDE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vlse64_v_i64m8_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vlse.mask.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP1]]
//
vint64m8_t test_vlse64_v_i64m8_m(vbool8_t mask, vint64m8_t maskedoff,
const int64_t *base, ptrdiff_t bstride,
size_t vl) {
return vlse64(mask, maskedoff, base, bstride, vl);
}
// CHECK-RV32-LABEL: @test_vlse8_v_u8mf8_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vlse.mask.nxv1i8.i32(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8>* [[TMP0]], i32 [[BSTRIDE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vlse8_v_u8mf8_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vlse.mask.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP1]]
//
vuint8mf8_t test_vlse8_v_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff,
const uint8_t *base, ptrdiff_t bstride,
size_t vl) {
return vlse8(mask, maskedoff, base, bstride, vl);
}
// CHECK-RV32-LABEL: @test_vlse8_v_u8mf4_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vlse.mask.nxv2i8.i32(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8>* [[TMP0]], i32 [[BSTRIDE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vlse8_v_u8mf4_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vlse.mask.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP1]]
//
vuint8mf4_t test_vlse8_v_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff,
const uint8_t *base, ptrdiff_t bstride,
size_t vl) {
return vlse8(mask, maskedoff, base, bstride, vl);
}
// CHECK-RV32-LABEL: @test_vlse8_v_u8mf2_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vlse.mask.nxv4i8.i32(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8>* [[TMP0]], i32 [[BSTRIDE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vlse8_v_u8mf2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vlse.mask.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP1]]
//
vuint8mf2_t test_vlse8_v_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff,
const uint8_t *base, ptrdiff_t bstride,
size_t vl) {
return vlse8(mask, maskedoff, base, bstride, vl);
}
// CHECK-RV32-LABEL: @test_vlse8_v_u8m1_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vlse.mask.nxv8i8.i32(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8>* [[TMP0]], i32 [[BSTRIDE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vlse8_v_u8m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vlse.mask.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP1]]
//
vuint8m1_t test_vlse8_v_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff,
const uint8_t *base, ptrdiff_t bstride,
size_t vl) {
return vlse8(mask, maskedoff, base, bstride, vl);
}
// CHECK-RV32-LABEL: @test_vlse8_v_u8m2_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vlse.mask.nxv16i8.i32(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8>* [[TMP0]], i32 [[BSTRIDE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vlse8_v_u8m2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vlse.mask.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP1]]
//
vuint8m2_t test_vlse8_v_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff,
const uint8_t *base, ptrdiff_t bstride,
size_t vl) {
return vlse8(mask, maskedoff, base, bstride, vl);
}
// CHECK-RV32-LABEL: @test_vlse8_v_u8m4_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 32 x i8>*
// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vlse.mask.nxv32i8.i32(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8>* [[TMP0]], i32 [[BSTRIDE:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vlse8_v_u8m4_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 32 x i8>*
// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vlse.mask.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP1]]
//
vuint8m4_t test_vlse8_v_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff,
const uint8_t *base, ptrdiff_t bstride,
size_t vl) {
return vlse8(mask, maskedoff, base, bstride, vl);
}
// CHECK-RV32-LABEL: @test_vlse8_v_u8m8_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 64 x i8>*
// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vlse.mask.nxv64i8.i32(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8>* [[TMP0]], i32 [[BSTRIDE:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vlse8_v_u8m8_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 64 x i8>*
// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vlse.mask.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP1]]
//
vuint8m8_t test_vlse8_v_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff,
const uint8_t *base, ptrdiff_t bstride,
size_t vl) {
return vlse8(mask, maskedoff, base, bstride, vl);
}
// CHECK-RV32-LABEL: @test_vlse16_v_u16mf4_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vlse.mask.nxv1i16.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16>* [[TMP0]], i32 [[BSTRIDE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vlse16_v_u16mf4_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vlse.mask.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP1]]
//
vuint16mf4_t test_vlse16_v_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff,
const uint16_t *base, ptrdiff_t bstride,
size_t vl) {
return vlse16(mask, maskedoff, base, bstride, vl);
}
// CHECK-RV32-LABEL: @test_vlse16_v_u16mf2_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vlse.mask.nxv2i16.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16>* [[TMP0]], i32 [[BSTRIDE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vlse16_v_u16mf2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vlse.mask.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP1]]
//
vuint16mf2_t test_vlse16_v_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff,
const uint16_t *base, ptrdiff_t bstride,
size_t vl) {
return vlse16(mask, maskedoff, base, bstride, vl);
}
// CHECK-RV32-LABEL: @test_vlse16_v_u16m1_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vlse.mask.nxv4i16.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16>* [[TMP0]], i32 [[BSTRIDE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vlse16_v_u16m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vlse.mask.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP1]]
//
vuint16m1_t test_vlse16_v_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff,
const uint16_t *base, ptrdiff_t bstride,
size_t vl) {
return vlse16(mask, maskedoff, base, bstride, vl);
}
// CHECK-RV32-LABEL: @test_vlse16_v_u16m2_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vlse.mask.nxv8i16.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16>* [[TMP0]], i32 [[BSTRIDE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vlse16_v_u16m2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vlse.mask.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP1]]
//
vuint16m2_t test_vlse16_v_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff,
const uint16_t *base, ptrdiff_t bstride,
size_t vl) {
return vlse16(mask, maskedoff, base, bstride, vl);
}
// CHECK-RV32-LABEL: @test_vlse16_v_u16m4_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vlse.mask.nxv16i16.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16>* [[TMP0]], i32 [[BSTRIDE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vlse16_v_u16m4_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vlse.mask.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP1]]
//
vuint16m4_t test_vlse16_v_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff,
const uint16_t *base, ptrdiff_t bstride,
size_t vl) {
return vlse16(mask, maskedoff, base, bstride, vl);
}
// CHECK-RV32-LABEL: @test_vlse16_v_u16m8_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 32 x i16>*
// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vlse.mask.nxv32i16.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16>* [[TMP0]], i32 [[BSTRIDE:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vlse16_v_u16m8_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 32 x i16>*
// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vlse.mask.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP1]]
//
vuint16m8_t test_vlse16_v_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff,
const uint16_t *base, ptrdiff_t bstride,
size_t vl) {
return vlse16(mask, maskedoff, base, bstride, vl);
}
// CHECK-RV32-LABEL: @test_vlse32_v_u32mf2_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vlse.mask.nxv1i32.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32>* [[TMP0]], i32 [[BSTRIDE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vlse32_v_u32mf2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vlse.mask.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP1]]
//
vuint32mf2_t test_vlse32_v_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff,
const uint32_t *base, ptrdiff_t bstride,
size_t vl) {
return vlse32(mask, maskedoff, base, bstride, vl);
}
// CHECK-RV32-LABEL: @test_vlse32_v_u32m1_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vlse.mask.nxv2i32.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32>* [[TMP0]], i32 [[BSTRIDE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vlse32_v_u32m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vlse.mask.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP1]]
//
vuint32m1_t test_vlse32_v_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff,
const uint32_t *base, ptrdiff_t bstride,
size_t vl) {
return vlse32(mask, maskedoff, base, bstride, vl);
}
// CHECK-RV32-LABEL: @test_vlse32_v_u32m2_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vlse.mask.nxv4i32.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32>* [[TMP0]], i32 [[BSTRIDE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vlse32_v_u32m2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vlse.mask.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP1]]
//
vuint32m2_t test_vlse32_v_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff,
const uint32_t *base, ptrdiff_t bstride,
size_t vl) {
return vlse32(mask, maskedoff, base, bstride, vl);
}
// CHECK-RV32-LABEL: @test_vlse32_v_u32m4_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vlse.mask.nxv8i32.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32>* [[TMP0]], i32 [[BSTRIDE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vlse32_v_u32m4_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vlse.mask.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP1]]
//
vuint32m4_t test_vlse32_v_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff,
const uint32_t *base, ptrdiff_t bstride,
size_t vl) {
return vlse32(mask, maskedoff, base, bstride, vl);
}
// CHECK-RV32-LABEL: @test_vlse32_v_u32m8_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vlse.mask.nxv16i32.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32>* [[TMP0]], i32 [[BSTRIDE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vlse32_v_u32m8_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vlse.mask.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP1]]
//
vuint32m8_t test_vlse32_v_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff,
const uint32_t *base, ptrdiff_t bstride,
size_t vl) {
return vlse32(mask, maskedoff, base, bstride, vl);
}
// CHECK-RV32-LABEL: @test_vlse64_v_u64m1_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vlse.mask.nxv1i64.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64>* [[TMP0]], i32 [[BSTRIDE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vlse64_v_u64m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vlse.mask.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP1]]
//
vuint64m1_t test_vlse64_v_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff,
const uint64_t *base, ptrdiff_t bstride,
size_t vl) {
return vlse64(mask, maskedoff, base, bstride, vl);
}
// CHECK-RV32-LABEL: @test_vlse64_v_u64m2_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vlse.mask.nxv2i64.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64>* [[TMP0]], i32 [[BSTRIDE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vlse64_v_u64m2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vlse.mask.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP1]]
//
vuint64m2_t test_vlse64_v_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff,
const uint64_t *base, ptrdiff_t bstride,
size_t vl) {
return vlse64(mask, maskedoff, base, bstride, vl);
}
// CHECK-RV32-LABEL: @test_vlse64_v_u64m4_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vlse.mask.nxv4i64.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64>* [[TMP0]], i32 [[BSTRIDE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vlse64_v_u64m4_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vlse.mask.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP1]]
//
vuint64m4_t test_vlse64_v_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff,
const uint64_t *base, ptrdiff_t bstride,
size_t vl) {
return vlse64(mask, maskedoff, base, bstride, vl);
}
// CHECK-RV32-LABEL: @test_vlse64_v_u64m8_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vlse.mask.nxv8i64.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64>* [[TMP0]], i32 [[BSTRIDE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vlse64_v_u64m8_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vlse.mask.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP1]]
//
vuint64m8_t test_vlse64_v_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff,
const uint64_t *base, ptrdiff_t bstride,
size_t vl) {
return vlse64(mask, maskedoff, base, bstride, vl);
}
// CHECK-RV32-LABEL: @test_vlse32_v_f32mf2_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 1 x float>*
// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x float> @llvm.riscv.vlse.mask.nxv1f32.i32(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float>* [[TMP0]], i32 [[BSTRIDE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
// CHECK-RV32-NEXT: ret <vscale x 1 x float> [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vlse32_v_f32mf2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 1 x float>*
// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x float> @llvm.riscv.vlse.mask.nxv1f32.i64(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP1]]
//
vfloat32mf2_t test_vlse32_v_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff,
const float *base, ptrdiff_t bstride,
size_t vl) {
return vlse32(mask, maskedoff, base, bstride, vl);
}
// CHECK-RV32-LABEL: @test_vlse32_v_f32m1_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 2 x float>*
// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x float> @llvm.riscv.vlse.mask.nxv2f32.i32(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float>* [[TMP0]], i32 [[BSTRIDE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vlse32_v_f32m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 2 x float>*
// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x float> @llvm.riscv.vlse.mask.nxv2f32.i64(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP1]]
//
vfloat32m1_t test_vlse32_v_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff,
const float *base, ptrdiff_t bstride,
size_t vl) {
return vlse32(mask, maskedoff, base, bstride, vl);
}
// CHECK-RV32-LABEL: @test_vlse32_v_f32m2_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 4 x float>*
// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x float> @llvm.riscv.vlse.mask.nxv4f32.i32(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float>* [[TMP0]], i32 [[BSTRIDE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
// CHECK-RV32-NEXT: ret <vscale x 4 x float> [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vlse32_v_f32m2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 4 x float>*
// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x float> @llvm.riscv.vlse.mask.nxv4f32.i64(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP1]]
//
vfloat32m2_t test_vlse32_v_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff,
const float *base, ptrdiff_t bstride,
size_t vl) {
return vlse32(mask, maskedoff, base, bstride, vl);
}
// CHECK-RV32-LABEL: @test_vlse32_v_f32m4_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 8 x float>*
// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x float> @llvm.riscv.vlse.mask.nxv8f32.i32(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float>* [[TMP0]], i32 [[BSTRIDE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
// CHECK-RV32-NEXT: ret <vscale x 8 x float> [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vlse32_v_f32m4_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 8 x float>*
// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x float> @llvm.riscv.vlse.mask.nxv8f32.i64(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP1]]
//
vfloat32m4_t test_vlse32_v_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff,
const float *base, ptrdiff_t bstride,
size_t vl) {
return vlse32(mask, maskedoff, base, bstride, vl);
}
// CHECK-RV32-LABEL: @test_vlse32_v_f32m8_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 16 x float>*
// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x float> @llvm.riscv.vlse.mask.nxv16f32.i32(<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float>* [[TMP0]], i32 [[BSTRIDE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
// CHECK-RV32-NEXT: ret <vscale x 16 x float> [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vlse32_v_f32m8_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 16 x float>*
// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x float> @llvm.riscv.vlse.mask.nxv16f32.i64(<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP1]]
//
vfloat32m8_t test_vlse32_v_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff,
const float *base, ptrdiff_t bstride,
size_t vl) {
return vlse32(mask, maskedoff, base, bstride, vl);
}
// CHECK-RV32-LABEL: @test_vlse64_v_f64m1_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 1 x double>*
// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x double> @llvm.riscv.vlse.mask.nxv1f64.i32(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double>* [[TMP0]], i32 [[BSTRIDE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vlse64_v_f64m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 1 x double>*
// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x double> @llvm.riscv.vlse.mask.nxv1f64.i64(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP1]]
//
vfloat64m1_t test_vlse64_v_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff,
const double *base, ptrdiff_t bstride,
size_t vl) {
return vlse64(mask, maskedoff, base, bstride, vl);
}
// CHECK-RV32-LABEL: @test_vlse64_v_f64m2_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 2 x double>*
// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x double> @llvm.riscv.vlse.mask.nxv2f64.i32(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double>* [[TMP0]], i32 [[BSTRIDE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vlse64_v_f64m2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 2 x double>*
// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x double> @llvm.riscv.vlse.mask.nxv2f64.i64(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP1]]
//
vfloat64m2_t test_vlse64_v_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff,
const double *base, ptrdiff_t bstride,
size_t vl) {
return vlse64(mask, maskedoff, base, bstride, vl);
}
// CHECK-RV32-LABEL: @test_vlse64_v_f64m4_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 4 x double>*
// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x double> @llvm.riscv.vlse.mask.nxv4f64.i32(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double>* [[TMP0]], i32 [[BSTRIDE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vlse64_v_f64m4_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 4 x double>*
// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x double> @llvm.riscv.vlse.mask.nxv4f64.i64(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP1]]
//
vfloat64m4_t test_vlse64_v_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff,
const double *base, ptrdiff_t bstride,
size_t vl) {
return vlse64(mask, maskedoff, base, bstride, vl);
}
// CHECK-RV32-LABEL: @test_vlse64_v_f64m8_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 8 x double>*
// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x double> @llvm.riscv.vlse.mask.nxv8f64.i32(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double>* [[TMP0]], i32 [[BSTRIDE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vlse64_v_f64m8_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 8 x double>*
// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x double> @llvm.riscv.vlse.mask.nxv8f64.i64(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP1]]
//
vfloat64m8_t test_vlse64_v_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff,
const double *base, ptrdiff_t bstride,
size_t vl) {
return vlse64(mask, maskedoff, base, bstride, vl);
}
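For readers skimming the generated checks, here is a minimal usage sketch (not part of the test file) of the masked strided load exercised above: lane i reads from base + i * bstride bytes, and lanes whose mask bit is clear keep their value from the maskedoff operand. The matrix-column framing and all names below are illustrative assumptions; only the overloaded vlse64 signature is taken from the tests.

// Illustrative sketch, not from the patch: use the overloaded vlse64
// intrinsic tested above to load one column of a row-major double matrix.
#include <riscv_vector.h>
#include <stddef.h>

vfloat64m1_t load_column(vbool64_t mask, vfloat64m1_t maskedoff,
                         const double *m, size_t cols, size_t col,
                         size_t vl) {
  // Lane i reads m[i * cols + col]; the stride is given in bytes.
  ptrdiff_t bstride = (ptrdiff_t)(cols * sizeof(double));
  return vlse64(mask, maskedoff, &m[col], bstride, vl);
}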

@@ -1705,3 +1705,115 @@ vfloat64m4_t test_vle64_v_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, const
vfloat64m8_t test_vle64_v_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, const double *base, size_t vl) {
return vle64_v_f64m8_m(mask, maskedoff, base, vl);
}
// CHECK-RV32-LABEL: @test_vle1_v_b1(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 64 x i1>*
// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vle1.nxv64i1.i32(<vscale x 64 x i1>* [[TMP0]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 64 x i1> [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vle1_v_b1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 64 x i1>*
// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vle1.nxv64i1.i64(<vscale x 64 x i1>* [[TMP0]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 64 x i1> [[TMP1]]
//
vbool1_t test_vle1_v_b1(const uint8_t *base, size_t vl) {
return vle1_v_b1(base, vl);
}
// CHECK-RV32-LABEL: @test_vle1_v_b2(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 32 x i1>*
// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vle1.nxv32i1.i32(<vscale x 32 x i1>* [[TMP0]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 32 x i1> [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vle1_v_b2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 32 x i1>*
// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vle1.nxv32i1.i64(<vscale x 32 x i1>* [[TMP0]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP1]]
//
vbool2_t test_vle1_v_b2(const uint8_t *base, size_t vl) {
return vle1_v_b2(base, vl);
}
// CHECK-RV32-LABEL: @test_vle1_v_b4(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i1>*
// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vle1.nxv16i1.i32(<vscale x 16 x i1>* [[TMP0]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vle1_v_b4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i1>*
// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vle1.nxv16i1.i64(<vscale x 16 x i1>* [[TMP0]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP1]]
//
vbool4_t test_vle1_v_b4(const uint8_t *base, size_t vl) {
return vle1_v_b4(base, vl);
}
// CHECK-RV32-LABEL: @test_vle1_v_b8(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i1>*
// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vle1.nxv8i1.i32(<vscale x 8 x i1>* [[TMP0]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vle1_v_b8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i1>*
// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vle1.nxv8i1.i64(<vscale x 8 x i1>* [[TMP0]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP1]]
//
vbool8_t test_vle1_v_b8(const uint8_t *base, size_t vl) {
return vle1_v_b8(base, vl);
}
// CHECK-RV32-LABEL: @test_vle1_v_b16(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i1>*
// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vle1.nxv4i1.i32(<vscale x 4 x i1>* [[TMP0]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vle1_v_b16(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i1>*
// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vle1.nxv4i1.i64(<vscale x 4 x i1>* [[TMP0]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP1]]
//
vbool16_t test_vle1_v_b16(const uint8_t *base, size_t vl) {
return vle1_v_b16(base, vl);
}
// CHECK-RV32-LABEL: @test_vle1_v_b32(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i1>*
// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vle1.nxv2i1.i32(<vscale x 2 x i1>* [[TMP0]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vle1_v_b32(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i1>*
// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vle1.nxv2i1.i64(<vscale x 2 x i1>* [[TMP0]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP1]]
//
vbool32_t test_vle1_v_b32(const uint8_t *base, size_t vl) {
return vle1_v_b32(base, vl);
}
// CHECK-RV32-LABEL: @test_vle1_v_b64(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i1>*
// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vle1.nxv1i1.i32(<vscale x 1 x i1>* [[TMP0]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vle1_v_b64(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i1>*
// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vle1.nxv1i1.i64(<vscale x 1 x i1>* [[TMP0]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP1]]
//
vbool64_t test_vle1_v_b64(const uint8_t *base, size_t vl) {
return vle1_v_b64(base, vl);
}
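As a usage note (an illustrative sketch, not part of the test file): vle1 reloads a predicate that was previously stored in packed form, one bit per lane, so a mask covering vl lanes occupies (vl + 7) / 8 bytes. The helper name below is an assumption for illustration; the intrinsic name matches the tests.

// Illustrative sketch, not from the patch: reload a predicate that was
// packed into memory one bit per lane. `saved` must hold at least
// (vl + 7) / 8 bytes.
#include <riscv_vector.h>
#include <stdint.h>
#include <stddef.h>

vbool8_t restore_mask(const uint8_t *saved, size_t vl) {
  return vle1_v_b8(saved, vl);
}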

@@ -1705,3 +1705,115 @@ void test_vse64_v_f64m4_m(vbool16_t mask, double *base, vfloat64m4_t value, size
void test_vse64_v_f64m8_m(vbool8_t mask, double *base, vfloat64m8_t value, size_t vl) {
return vse64_v_f64m8_m(mask, base, value, vl);
}
// CHECK-RV32-LABEL: @test_vse1_v_b1(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 64 x i1>*
// CHECK-RV32-NEXT: call void @llvm.riscv.vse1.nxv64i1.i32(<vscale x 64 x i1> [[VALUE:%.*]], <vscale x 64 x i1>* [[TMP0]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret void
//
// CHECK-RV64-LABEL: @test_vse1_v_b1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 64 x i1>*
// CHECK-RV64-NEXT: call void @llvm.riscv.vse1.nxv64i1.i64(<vscale x 64 x i1> [[VALUE:%.*]], <vscale x 64 x i1>* [[TMP0]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
void test_vse1_v_b1(uint8_t *base, vbool1_t value, size_t vl) {
return vse1_v_b1(base, value, vl);
}
// CHECK-RV32-LABEL: @test_vse1_v_b2(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 32 x i1>*
// CHECK-RV32-NEXT: call void @llvm.riscv.vse1.nxv32i1.i32(<vscale x 32 x i1> [[VALUE:%.*]], <vscale x 32 x i1>* [[TMP0]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret void
//
// CHECK-RV64-LABEL: @test_vse1_v_b2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 32 x i1>*
// CHECK-RV64-NEXT: call void @llvm.riscv.vse1.nxv32i1.i64(<vscale x 32 x i1> [[VALUE:%.*]], <vscale x 32 x i1>* [[TMP0]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
void test_vse1_v_b2(uint8_t *base, vbool2_t value, size_t vl) {
return vse1_v_b2(base, value, vl);
}
// CHECK-RV32-LABEL: @test_vse1_v_b4(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i1>*
// CHECK-RV32-NEXT: call void @llvm.riscv.vse1.nxv16i1.i32(<vscale x 16 x i1> [[VALUE:%.*]], <vscale x 16 x i1>* [[TMP0]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret void
//
// CHECK-RV64-LABEL: @test_vse1_v_b4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i1>*
// CHECK-RV64-NEXT: call void @llvm.riscv.vse1.nxv16i1.i64(<vscale x 16 x i1> [[VALUE:%.*]], <vscale x 16 x i1>* [[TMP0]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
void test_vse1_v_b4(uint8_t *base, vbool4_t value, size_t vl) {
return vse1_v_b4(base, value, vl);
}
// CHECK-RV32-LABEL: @test_vse1_v_b8(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i1>*
// CHECK-RV32-NEXT: call void @llvm.riscv.vse1.nxv8i1.i32(<vscale x 8 x i1> [[VALUE:%.*]], <vscale x 8 x i1>* [[TMP0]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret void
//
// CHECK-RV64-LABEL: @test_vse1_v_b8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i1>*
// CHECK-RV64-NEXT: call void @llvm.riscv.vse1.nxv8i1.i64(<vscale x 8 x i1> [[VALUE:%.*]], <vscale x 8 x i1>* [[TMP0]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
void test_vse1_v_b8(uint8_t *base, vbool8_t value, size_t vl) {
return vse1_v_b8(base, value, vl);
}
// CHECK-RV32-LABEL: @test_vse1_v_b16(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i1>*
// CHECK-RV32-NEXT: call void @llvm.riscv.vse1.nxv4i1.i32(<vscale x 4 x i1> [[VALUE:%.*]], <vscale x 4 x i1>* [[TMP0]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret void
//
// CHECK-RV64-LABEL: @test_vse1_v_b16(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i1>*
// CHECK-RV64-NEXT: call void @llvm.riscv.vse1.nxv4i1.i64(<vscale x 4 x i1> [[VALUE:%.*]], <vscale x 4 x i1>* [[TMP0]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
void test_vse1_v_b16(uint8_t *base, vbool16_t value, size_t vl) {
return vse1_v_b16(base, value, vl);
}
// CHECK-RV32-LABEL: @test_vse1_v_b32(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i1>*
// CHECK-RV32-NEXT: call void @llvm.riscv.vse1.nxv2i1.i32(<vscale x 2 x i1> [[VALUE:%.*]], <vscale x 2 x i1>* [[TMP0]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret void
//
// CHECK-RV64-LABEL: @test_vse1_v_b32(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i1>*
// CHECK-RV64-NEXT: call void @llvm.riscv.vse1.nxv2i1.i64(<vscale x 2 x i1> [[VALUE:%.*]], <vscale x 2 x i1>* [[TMP0]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
void test_vse1_v_b32(uint8_t *base, vbool32_t value, size_t vl) {
return vse1_v_b32(base, value, vl);
}
// CHECK-RV32-LABEL: @test_vse1_v_b64(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i1>*
// CHECK-RV32-NEXT: call void @llvm.riscv.vse1.nxv1i1.i32(<vscale x 1 x i1> [[VALUE:%.*]], <vscale x 1 x i1>* [[TMP0]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret void
//
// CHECK-RV64-LABEL: @test_vse1_v_b64(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i1>*
// CHECK-RV64-NEXT: call void @llvm.riscv.vse1.nxv1i1.i64(<vscale x 1 x i1> [[VALUE:%.*]], <vscale x 1 x i1>* [[TMP0]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
void test_vse1_v_b64(uint8_t *base, vbool64_t value, size_t vl) {
return vse1_v_b64(base, value, vl);
}
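Complementing the mask loads above, a minimal sketch (names assumed, not from the patch) of spilling a predicate with vse1: it writes the mask in the same packed, one-bit-per-lane layout that vle1 reads back.

// Illustrative sketch, not from the patch: pack a predicate into memory,
// one bit per lane. `dst` must have room for (vl + 7) / 8 bytes; the
// restore_mask sketch above reads this layout back with vle1_v_b8.
#include <riscv_vector.h>
#include <stdint.h>
#include <stddef.h>

void spill_mask(uint8_t *dst, vbool8_t m, size_t vl) {
  vse1_v_b8(dst, m, vl);
}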
