[RISCV] Define vlse/vsse intrinsics.
Define vlse/vsse intrinsics and lower them to V instructions. We worked with @rogfer01 from BSC to produce this patch.

Authored-by: Roger Ferrer Ibanez <rofirrim@gmail.com>
Co-Authored-by: Zakk Chen <zakk.chen@sifive.com>
Reviewed By: craig.topper
Differential Revision: https://reviews.llvm.org/D93445
commit 4b07c515ef (parent 13261f4c03)
@@ -95,6 +95,21 @@ let TargetPrefix = "riscv" in {
                     LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                     llvm_anyint_ty],
                    [NoCapture<ArgIndex<1>>, IntrReadMem]>, RISCVVIntrinsic;
  // For strided load
  // Input: (pointer, stride, vl)
  class RISCVSLoad
        : Intrinsic<[llvm_anyvector_ty],
                    [LLVMPointerType<LLVMMatchType<0>>,
                     llvm_anyint_ty, LLVMMatchType<1>],
                    [NoCapture<ArgIndex<0>>, IntrReadMem]>, RISCVVIntrinsic;
  // For strided load with mask
  // Input: (maskedoff, pointer, stride, mask, vl)
  class RISCVSLoadMask
        : Intrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>,
                     LLVMPointerType<LLVMMatchType<0>>, llvm_anyint_ty,
                     LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, LLVMMatchType<1>],
                    [NoCapture<ArgIndex<1>>, IntrReadMem]>, RISCVVIntrinsic;
  // For unit stride store
  // Input: (vector_in, pointer, vl)
  class RISCVUSStore
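At the IR level these classes produce overloaded intrinsics. A minimal sketch of the declarations they yield for one element type follows; the mangled suffixes are an assumption here, not taken from this patch:

    ; Sketch only: mangled suffixes assumed from the overloaded types above.
    ; Unmasked strided load: (pointer, stride, vl)
    declare <vscale x 2 x i32> @llvm.riscv.vlse.nxv2i32.i64(
      <vscale x 2 x i32>*, i64, i64)

    ; Masked strided load: (maskedoff, pointer, stride, mask, vl)
    declare <vscale x 2 x i32> @llvm.riscv.vlse.mask.nxv2i32.i64(
      <vscale x 2 x i32>, <vscale x 2 x i32>*, i64, <vscale x 2 x i1>, i64)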
@@ -112,6 +127,22 @@ let TargetPrefix = "riscv" in {
                     LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                     llvm_anyint_ty],
                    [NoCapture<ArgIndex<1>>, IntrWriteMem]>, RISCVVIntrinsic;
  // For strided store
  // Input: (vector_in, pointer, stride, vl)
  class RISCVSStore
        : Intrinsic<[],
                    [llvm_anyvector_ty,
                     LLVMPointerType<LLVMMatchType<0>>,
                     llvm_anyint_ty, LLVMMatchType<1>],
                    [NoCapture<ArgIndex<1>>, IntrWriteMem]>, RISCVVIntrinsic;
  // For strided store with mask
  // Input: (vector_in, pointer, stride, mask, vl)
  class RISCVSStoreMask
        : Intrinsic<[],
                    [llvm_anyvector_ty,
                     LLVMPointerType<LLVMMatchType<0>>, llvm_anyint_ty,
                     LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, LLVMMatchType<1>],
                    [NoCapture<ArgIndex<1>>, IntrWriteMem]>, RISCVVIntrinsic;
  // For destination vector type is the same as first source vector.
  // Input: (vector_in, vector_in/scalar_in, vl)
  class RISCVBinaryAAXNoMask
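The store counterparts return void and take the value to store as the first operand, mirroring the Input comments above. Again a sketch with assumed mangling:

    ; Sketch only: mangled suffixes assumed.
    ; Unmasked strided store: (vector_in, pointer, stride, vl)
    declare void @llvm.riscv.vsse.nxv2i32.i64(
      <vscale x 2 x i32>, <vscale x 2 x i32>*, i64, i64)

    ; Masked strided store: (vector_in, pointer, stride, mask, vl)
    declare void @llvm.riscv.vsse.mask.nxv2i32.i64(
      <vscale x 2 x i32>, <vscale x 2 x i32>*, i64, <vscale x 2 x i1>, i64)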
@@ -182,10 +213,18 @@ let TargetPrefix = "riscv" in {
    def "int_riscv_" # NAME : RISCVUSLoad;
    def "int_riscv_" # NAME # "_mask" : RISCVUSLoadMask;
  }
  multiclass RISCVSLoad {
    def "int_riscv_" # NAME : RISCVSLoad;
    def "int_riscv_" # NAME # "_mask" : RISCVSLoadMask;
  }
  multiclass RISCVUSStore {
    def "int_riscv_" # NAME : RISCVUSStore;
    def "int_riscv_" # NAME # "_mask" : RISCVUSStoreMask;
  }
  multiclass RISCVSStore {
    def "int_riscv_" # NAME : RISCVSStore;
    def "int_riscv_" # NAME # "_mask" : RISCVSStoreMask;
  }

  multiclass RISCVBinaryAAX {
    def "int_riscv_" # NAME : RISCVBinaryAAXNoMask;
@@ -207,6 +246,8 @@ let TargetPrefix = "riscv" in {

  defm vle : RISCVUSLoad;
  defm vse : RISCVUSStore;
  defm vlse : RISCVSLoad;
  defm vsse : RISCVSStore;

  defm vadd : RISCVBinaryAAX;
  defm vsub : RISCVBinaryAAX;
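Instantiating these multiclasses yields int_riscv_vlse, int_riscv_vlse_mask, int_riscv_vsse, and int_riscv_vsse_mask. A hypothetical IR use of the unmasked strided load, where every argument is carried explicitly (function and value names here are illustrative, and the mangling is assumed as above):

    ; Hypothetical use: gather the first field of an array of {i32, i32}
    ; pairs, i.e. load every other i32 (a stride of 8 bytes).
    define <vscale x 2 x i32> @load_first_fields(i32* %base, i64 %vl) {
      %p = bitcast i32* %base to <vscale x 2 x i32>*
      %v = call <vscale x 2 x i32> @llvm.riscv.vlse.nxv2i32.i64(
              <vscale x 2 x i32>* %p, i64 8, i64 %vl)
      ret <vscale x 2 x i32> %v
    }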
@@ -310,6 +310,39 @@ class VPseudoUSLoadMask<VReg RetClass>:
  let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
}

class VPseudoSLoadNoMask<VReg RetClass>:
        Pseudo<(outs RetClass:$rd),
               (ins GPR:$rs1, GPR:$rs2, GPR:$vl, ixlenimm:$sew), []>,
        RISCVVPseudo {
  let mayLoad = 1;
  let mayStore = 0;
  let hasSideEffects = 0;
  let usesCustomInserter = 1;
  let Uses = [VL, VTYPE];
  let VLIndex = 3;
  let SEWIndex = 4;
  let HasDummyMask = 1;
  let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
}

class VPseudoSLoadMask<VReg RetClass>:
        Pseudo<(outs GetVRegNoV0<RetClass>.R:$rd),
               (ins GetVRegNoV0<RetClass>.R:$merge,
                    GPR:$rs1, GPR:$rs2,
                    VMaskOp:$vm, GPR:$vl, ixlenimm:$sew), []>,
        RISCVVPseudo {
  let mayLoad = 1;
  let mayStore = 0;
  let hasSideEffects = 0;
  let usesCustomInserter = 1;
  let Constraints = "$rd = $merge";
  let Uses = [VL, VTYPE];
  let VLIndex = 5;
  let SEWIndex = 6;
  let MergeOpIndex = 1;
  let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
}

class VPseudoUSStoreNoMask<VReg StClass>:
        Pseudo<(outs),
               (ins StClass:$rd, GPR:$rs1, GPR:$vl, ixlenimm:$sew), []>,
@@ -339,6 +372,35 @@ class VPseudoUSStoreMask<VReg StClass>:
  let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
}

class VPseudoSStoreNoMask<VReg StClass>:
        Pseudo<(outs),
               (ins StClass:$rd, GPR:$rs1, GPR:$rs2, GPR:$vl, ixlenimm:$sew), []>,
        RISCVVPseudo {
  let mayLoad = 0;
  let mayStore = 1;
  let hasSideEffects = 0;
  let usesCustomInserter = 1;
  let Uses = [VL, VTYPE];
  let VLIndex = 3;
  let SEWIndex = 4;
  let HasDummyMask = 1;
  let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
}

class VPseudoSStoreMask<VReg StClass>:
        Pseudo<(outs),
               (ins StClass:$rd, GPR:$rs1, GPR:$rs2, VMaskOp:$vm, GPR:$vl, ixlenimm:$sew), []>,
        RISCVVPseudo {
  let mayLoad = 0;
  let mayStore = 1;
  let hasSideEffects = 0;
  let usesCustomInserter = 1;
  let Uses = [VL, VTYPE];
  let VLIndex = 4;
  let SEWIndex = 5;
  let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
}

class VPseudoBinaryNoMask<VReg RetClass,
                          VReg Op1Class,
                          DAGOperand Op2Class,
@@ -415,6 +477,17 @@ multiclass VPseudoUSLoad {
  }
}

multiclass VPseudoSLoad {
  foreach lmul = MxList.m in {
    defvar LInfo = lmul.MX;
    defvar vreg = lmul.vrclass;
    let VLMul = lmul.value in {
      def "_V_" # LInfo : VPseudoSLoadNoMask<vreg>;
      def "_V_" # LInfo # "_MASK" : VPseudoSLoadMask<vreg>;
    }
  }
}

multiclass VPseudoUSStore {
  foreach lmul = MxList.m in {
    defvar LInfo = lmul.MX;
@@ -426,6 +499,17 @@ multiclass VPseudoUSStore {
  }
}

multiclass VPseudoSStore {
  foreach lmul = MxList.m in {
    defvar LInfo = lmul.MX;
    defvar vreg = lmul.vrclass;
    let VLMul = lmul.value in {
      def "_V_" # LInfo : VPseudoSStoreNoMask<vreg>;
      def "_V_" # LInfo # "_MASK" : VPseudoSStoreMask<vreg>;
    }
  }
}

multiclass VPseudoBinary<VReg RetClass,
                         VReg Op1Class,
                         DAGOperand Op2Class,
@@ -720,6 +804,26 @@ multiclass VPatUSLoad<string intrinsic,
                        $rs1, (mask_type V0), (NoX0 GPR:$vl), sew)>;
}

multiclass VPatSLoad<string intrinsic,
                     string inst,
                     LLVMType type,
                     LLVMType mask_type,
                     int sew,
                     LMULInfo vlmul,
                     VReg reg_class>
{
  defvar Intr = !cast<Intrinsic>(intrinsic);
  defvar Pseudo = !cast<Instruction>(inst#"_V_"#vlmul.MX);
  def : Pat<(type (Intr GPR:$rs1, GPR:$rs2, GPR:$vl)),
            (Pseudo $rs1, $rs2, (NoX0 GPR:$vl), sew)>;
  defvar IntrMask = !cast<Intrinsic>(intrinsic # "_mask");
  defvar PseudoMask = !cast<Instruction>(inst#"_V_"#vlmul.MX#"_MASK");
  def : Pat<(type (IntrMask (type GetVRegNoV0<reg_class>.R:$merge),
                            GPR:$rs1, GPR:$rs2, (mask_type V0), GPR:$vl)),
            (PseudoMask $merge,
                        $rs1, $rs2, (mask_type V0), (NoX0 GPR:$vl), sew)>;
}

multiclass VPatUSStore<string intrinsic,
                       string inst,
                       LLVMType type,
@@ -738,6 +842,24 @@ multiclass VPatUSStore<string intrinsic,
            (PseudoMask $rs3, $rs1, (mask_type V0), (NoX0 GPR:$vl), sew)>;
}

multiclass VPatSStore<string intrinsic,
                      string inst,
                      LLVMType type,
                      LLVMType mask_type,
                      int sew,
                      LMULInfo vlmul,
                      VReg reg_class>
{
  defvar Intr = !cast<Intrinsic>(intrinsic);
  defvar Pseudo = !cast<Instruction>(inst#"_V_"#vlmul.MX);
  def : Pat<(Intr (type reg_class:$rs3), GPR:$rs1, GPR:$rs2, GPR:$vl),
            (Pseudo $rs3, $rs1, $rs2, (NoX0 GPR:$vl), sew)>;
  defvar IntrMask = !cast<Intrinsic>(intrinsic # "_mask");
  defvar PseudoMask = !cast<Instruction>(inst#"_V_"#vlmul.MX#"_MASK");
  def : Pat<(IntrMask (type reg_class:$rs3), GPR:$rs1, GPR:$rs2, (mask_type V0), GPR:$vl),
            (PseudoMask $rs3, $rs1, $rs2, (mask_type V0), (NoX0 GPR:$vl), sew)>;
}

multiclass VPatBinary<string intrinsic,
                      string inst,
                      string kind,
@@ -1081,6 +1203,16 @@ foreach eew = EEWList in {
  defm PseudoVSE # eew : VPseudoUSStore;
}

//===----------------------------------------------------------------------===//
// 7.5 Vector Strided Instructions
//===----------------------------------------------------------------------===//

// Vector Strided Loads and Stores
foreach eew = EEWList in {
  defm PseudoVLSE # eew : VPseudoSLoad;
  defm PseudoVSSE # eew : VPseudoSStore;
}

//===----------------------------------------------------------------------===//
// Pseudo Instructions
//===----------------------------------------------------------------------===//
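Together with the VPatSLoad/VPatSStore patterns in the next hunk, a call to the intrinsic selects one of these pseudos, which the custom inserter expands into a vsetvli followed by the strided access. A rough expectation in the style of the (suppressed) .ll tests; the exact vsetvli spelling and register allocation are assumptions:

    ; Sketch of expected codegen (assumed; mirrors the style of the tests).
    define <vscale x 2 x i32> @test_vlse(<vscale x 2 x i32>* %p,
                                         i64 %stride, i64 %vl) {
    ; CHECK: vsetvli {{[a-z0-9]+}}, a2, e32,m1
    ; CHECK: vlse32.v {{v[0-9]+}}, (a0), a1
      %v = call <vscale x 2 x i32> @llvm.riscv.vlse.nxv2i32.i64(
              <vscale x 2 x i32>* %p, i64 %stride, i64 %vl)
      ret <vscale x 2 x i32> %v
    }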
@@ -1207,6 +1339,20 @@ foreach vti = AllVectors in
             vti.Vector, vti.Mask, vti.SEW, vti.LMul, vti.RegClass>;
}

//===----------------------------------------------------------------------===//
// 7.5 Vector Strided Instructions
//===----------------------------------------------------------------------===//

foreach vti = AllVectors in
{
  defm : VPatSLoad<"int_riscv_vlse",
                   "PseudoVLSE" # vti.SEW,
                   vti.Vector, vti.Mask, vti.SEW, vti.LMul, vti.RegClass>;
  defm : VPatSStore<"int_riscv_vsse",
                    "PseudoVSSE" # vti.SEW,
                    vti.Vector, vti.Mask, vti.SEW, vti.LMul, vti.RegClass>;
}

//===----------------------------------------------------------------------===//
// 12. Vector Integer Arithmetic Instructions
//===----------------------------------------------------------------------===//
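For the masked variants, the patterns pin the mask to V0 and the pseudo carries the merge/mask operands through to the v0.t form of the instruction. A hypothetical use of the masked strided store (a sketch; names and mangling assumed as before):

    ; Sketch only: store elements where the mask bit is set, skip the rest.
    define void @test_vsse_mask(<vscale x 2 x i32> %val,
                                <vscale x 2 x i32>* %p, i64 %stride,
                                <vscale x 2 x i1> %mask, i64 %vl) {
    ; CHECK: vsse32.v {{v[0-9]+}}, (a0), a1, v0.t
      call void @llvm.riscv.vsse.mask.nxv2i32.i64(
             <vscale x 2 x i32> %val, <vscale x 2 x i32>* %p,
             i64 %stride, <vscale x 2 x i1> %mask, i64 %vl)
      ret void
    }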
(Four additional file diffs suppressed because they are too large to display.)