[RISCV] Split zvlsseg searchable table into 4 separate tables. Index by properties rather than intrinsic ID.

The intrinsic ID is a 32-bit value, which forced each row of the table to be
4-byte aligned. The remaining fields used only 5 bytes, leaving 3 bytes of
padding per row.
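
To make the padding arithmetic concrete, here is a minimal standalone sketch
of the two row layouts (struct names are illustrative, and the sizes assume a
typical ABI where unsigned is 4 bytes):

#include <cstdint>

// Old-style row: the 32-bit intrinsic ID forces 4-byte alignment.
struct OldRow {
  unsigned IntrinsicID; // 4 bytes; aligns the whole row to 4
  uint8_t SEW;
  uint8_t LMUL;
  uint8_t IndexLMUL;
  uint16_t Pseudo;      // 1 padding byte is inserted before this field
};                      // 9 bytes of data, sizeof == 12: 3 bytes of padding

// New-style indexed load/store row: byte-sized keys plus a 16-bit pseudo.
struct NewRow {
  uint8_t NF, Masked, Ordered, SEW, LMUL, IndexLMUL;
  uint16_t Pseudo;
};                      // 8 bytes of data, sizeof == 8: no padding

static_assert(sizeof(OldRow) == 12, "3 padding bytes per row");
static_assert(sizeof(NewRow) == 8, "no padding");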

This patch breaks the table into 4 separate tables and indexes them
by properties we already know about the intrinsic: NF, masked,
strided, ordered, etc. The indexed load/store tables now have no
padding in their rows.
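
The generated accessors then look a row up by those key fields. As a rough
sketch of what such a property-keyed lookup amounts to (a hypothetical,
self-contained rendition; the real code is emitted by TableGen's
SearchableTables backend into RISCVGenSearchableTables.inc and differs in
detail):

#include <algorithm>
#include <array>
#include <cstdint>
#include <tuple>

struct VLXSEGPseudo {
  uint8_t NF, Masked, Ordered, SEW, LMUL, IndexLMUL;
  uint16_t Pseudo;
};

// Rows stay sorted by the primary key (NF, Masked, Ordered, SEW, LMUL,
// IndexLMUL); these two entries are made up for the example.
static const std::array<VLXSEGPseudo, 2> Rows = {{
    {2, 0, 0, 16, 0, 0, 100},
    {2, 0, 1, 16, 0, 0, 101},
}};

const VLXSEGPseudo *getVLXSEGPseudo(uint8_t NF, uint8_t Masked,
                                    uint8_t Ordered, uint8_t SEW,
                                    uint8_t LMUL, uint8_t IndexLMUL) {
  auto KeyOf = [](const VLXSEGPseudo &R) {
    return std::make_tuple(R.NF, R.Masked, R.Ordered, R.SEW, R.LMUL,
                           R.IndexLMUL);
  };
  auto Key = std::make_tuple(NF, Masked, Ordered, SEW, LMUL, IndexLMUL);
  auto It = std::lower_bound(Rows.begin(), Rows.end(), Key,
                             [&](const VLXSEGPseudo &R, decltype(Key) K) {
                               return KeyOf(R) < K;
                             });
  if (It == Rows.end() || KeyOf(*It) != Key)
    return nullptr;
  return &*It;
}

A binary search like this keeps the lookup cheap while each row stays 8 bytes.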

Altogether this reduces the size of the llc binary by ~28K.

I'm considering adding similar tables for isel of non-segment
loads/stores as well, to cut down the size of the isel table and
probably improve our isel performance. Those tables would need to be
indexed by intrinsics, IR loads/stores, gathers/scatters, and
RISCVISD opcodes, so a table that can be indexed without using an
intrinsic ID is more flexible.
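
For example, a hypothetical non-intrinsic caller could resolve a pseudo with
nothing but properties it already has on hand, going through the
getVLSEGPseudo accessor this patch introduces (a sketch only; getSegLoadOpcode
is not part of this patch):

// Hypothetical helper: pick a segment-load pseudo purely from properties,
// so IR loads/stores, gathers/scatters, or RISCVISD opcodes could share it.
static unsigned getSegLoadOpcode(unsigned NF, bool IsMasked, bool IsStrided,
                                 unsigned ScalarSize, RISCVVLMUL LMUL) {
  const RISCV::VLSEGPseudo *P =
      RISCV::getVLSEGPseudo(NF, IsMasked, IsStrided, /*FF*/ false, ScalarSize,
                            static_cast<unsigned>(LMUL));
  return P->Pseudo;
}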

Reviewed By: HsiangKai

Differential Revision: https://reviews.llvm.org/D96894
Author: Craig Topper
Date:   2021-02-18 19:00:48 -08:00
parent cf34559104
commit 8ed3bbbcc3
3 changed files with 213 additions and 137 deletions

llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp

@@ -25,21 +25,15 @@ using namespace llvm;
#define DEBUG_TYPE "riscv-isel"
namespace RISCVZvlssegTable {
struct RISCVZvlsseg {
unsigned IntrinsicID;
uint8_t SEW;
uint8_t LMUL;
uint8_t IndexLMUL;
uint16_t Pseudo;
};
using namespace RISCV;
#define GET_RISCVZvlssegTable_IMPL
namespace llvm {
namespace RISCV {
#define GET_RISCVVSSEGTable_IMPL
#define GET_RISCVVLSEGTable_IMPL
#define GET_RISCVVLXSEGTable_IMPL
#define GET_RISCVVSXSEGTable_IMPL
#include "RISCVGenSearchableTables.inc"
} // namespace RISCVZvlssegTable
} // namespace RISCV
} // namespace llvm
void RISCVDAGToDAGISel::PostprocessISelDAG() {
doPeepholeLoadStoreADDI();
@@ -184,7 +178,7 @@ static SDValue createTuple(SelectionDAG &CurDAG, ArrayRef<SDValue> Regs,
}
}
void RISCVDAGToDAGISel::selectVLSEG(SDNode *Node, unsigned IntNo, bool IsMasked,
void RISCVDAGToDAGISel::selectVLSEG(SDNode *Node, bool IsMasked,
bool IsStrided) {
SDLoc DL(Node);
unsigned NF = Node->getNumValues() - 1;
@@ -210,9 +204,9 @@ void RISCVDAGToDAGISel::selectVLSEG(SDNode *Node, unsigned IntNo, bool IsMasked,
Operands.push_back(Node->getOperand(CurOp++)); // VL.
Operands.push_back(SEW);
Operands.push_back(Node->getOperand(0)); // Chain.
const RISCVZvlssegTable::RISCVZvlsseg *P = RISCVZvlssegTable::getPseudo(
IntNo, ScalarSize, static_cast<unsigned>(LMUL),
static_cast<unsigned>(RISCVVLMUL::LMUL_1));
const RISCV::VLSEGPseudo *P =
RISCV::getVLSEGPseudo(NF, IsMasked, IsStrided, /*FF*/ false, ScalarSize,
static_cast<unsigned>(LMUL));
SDNode *Load =
CurDAG->getMachineNode(P->Pseudo, DL, MVT::Untyped, MVT::Other, Operands);
SDValue SuperReg = SDValue(Load, 0);
@@ -227,7 +221,6 @@ void RISCVDAGToDAGISel::selectVLSEG(SDNode *Node, unsigned IntNo, bool IsMasked,
void RISCVDAGToDAGISel::selectVLSEGFF(SDNode *Node, bool IsMasked) {
SDLoc DL(Node);
unsigned IntNo = cast<ConstantSDNode>(Node->getOperand(1))->getZExtValue();
unsigned NF = Node->getNumValues() - 2; // Do not count VL and Chain.
MVT VT = Node->getSimpleValueType(0);
MVT XLenVT = Subtarget->getXLenVT();
@@ -250,9 +243,9 @@ void RISCVDAGToDAGISel::selectVLSEGFF(SDNode *Node, bool IsMasked) {
Operands.push_back(Node->getOperand(CurOp++)); // VL.
Operands.push_back(SEW);
Operands.push_back(Node->getOperand(0)); // Chain.
const RISCVZvlssegTable::RISCVZvlsseg *P = RISCVZvlssegTable::getPseudo(
IntNo, ScalarSize, static_cast<unsigned>(LMUL),
static_cast<unsigned>(RISCVVLMUL::LMUL_1));
const RISCV::VLSEGPseudo *P =
RISCV::getVLSEGPseudo(NF, IsMasked, /*Strided*/ false, /*FF*/ true,
ScalarSize, static_cast<unsigned>(LMUL));
SDNode *Load = CurDAG->getMachineNode(P->Pseudo, DL, MVT::Untyped, MVT::Other,
MVT::Glue, Operands);
SDNode *ReadVL = CurDAG->getMachineNode(RISCV::PseudoReadVL, DL, XLenVT,
@@ -269,8 +262,8 @@ void RISCVDAGToDAGISel::selectVLSEGFF(SDNode *Node, bool IsMasked) {
CurDAG->RemoveDeadNode(Node);
}
void RISCVDAGToDAGISel::selectVLXSEG(SDNode *Node, unsigned IntNo,
bool IsMasked) {
void RISCVDAGToDAGISel::selectVLXSEG(SDNode *Node, bool IsMasked,
bool IsOrdered) {
SDLoc DL(Node);
unsigned NF = Node->getNumValues() - 1;
MVT VT = Node->getSimpleValueType(0);
@@ -298,8 +291,8 @@ void RISCVDAGToDAGISel::selectVLXSEG(SDNode *Node, unsigned IntNo,
RISCVVLMUL IndexLMUL = getLMUL(IndexVT);
unsigned IndexScalarSize = IndexVT.getScalarSizeInBits();
const RISCVZvlssegTable::RISCVZvlsseg *P = RISCVZvlssegTable::getPseudo(
IntNo, IndexScalarSize, static_cast<unsigned>(LMUL),
const RISCV::VLXSEGPseudo *P = RISCV::getVLXSEGPseudo(
NF, IsMasked, IsOrdered, IndexScalarSize, static_cast<unsigned>(LMUL),
static_cast<unsigned>(IndexLMUL));
SDNode *Load =
CurDAG->getMachineNode(P->Pseudo, DL, MVT::Untyped, MVT::Other, Operands);
@@ -313,7 +306,7 @@ void RISCVDAGToDAGISel::selectVLXSEG(SDNode *Node, unsigned IntNo,
CurDAG->RemoveDeadNode(Node);
}
void RISCVDAGToDAGISel::selectVSSEG(SDNode *Node, unsigned IntNo, bool IsMasked,
void RISCVDAGToDAGISel::selectVSSEG(SDNode *Node, bool IsMasked,
bool IsStrided) {
SDLoc DL(Node);
unsigned NF = Node->getNumOperands() - 4;
@@ -339,16 +332,15 @@ void RISCVDAGToDAGISel::selectVSSEG(SDNode *Node, unsigned IntNo, bool IsMasked,
Operands.push_back(Node->getOperand(CurOp++)); // VL.
Operands.push_back(SEW);
Operands.push_back(Node->getOperand(0)); // Chain.
const RISCVZvlssegTable::RISCVZvlsseg *P = RISCVZvlssegTable::getPseudo(
IntNo, ScalarSize, static_cast<unsigned>(LMUL),
static_cast<unsigned>(RISCVVLMUL::LMUL_1));
const RISCV::VSSEGPseudo *P = RISCV::getVSSEGPseudo(
NF, IsMasked, IsStrided, ScalarSize, static_cast<unsigned>(LMUL));
SDNode *Store =
CurDAG->getMachineNode(P->Pseudo, DL, Node->getValueType(0), Operands);
ReplaceNode(Node, Store);
}
void RISCVDAGToDAGISel::selectVSXSEG(SDNode *Node, unsigned IntNo,
bool IsMasked) {
void RISCVDAGToDAGISel::selectVSXSEG(SDNode *Node, bool IsMasked,
bool IsOrdered) {
SDLoc DL(Node);
unsigned NF = Node->getNumOperands() - 5;
if (IsMasked)
@@ -374,8 +366,8 @@ void RISCVDAGToDAGISel::selectVSXSEG(SDNode *Node, unsigned IntNo,
RISCVVLMUL IndexLMUL = getLMUL(IndexVT);
unsigned IndexScalarSize = IndexVT.getScalarSizeInBits();
const RISCVZvlssegTable::RISCVZvlsseg *P = RISCVZvlssegTable::getPseudo(
IntNo, IndexScalarSize, static_cast<unsigned>(LMUL),
const RISCV::VSXSEGPseudo *P = RISCV::getVSXSEGPseudo(
NF, IsMasked, IsOrdered, IndexScalarSize, static_cast<unsigned>(LMUL),
static_cast<unsigned>(IndexLMUL));
SDNode *Store =
CurDAG->getMachineNode(P->Pseudo, DL, Node->getValueType(0), Operands);
@@ -575,7 +567,7 @@ void RISCVDAGToDAGISel::Select(SDNode *Node) {
case Intrinsic::riscv_vlseg6:
case Intrinsic::riscv_vlseg7:
case Intrinsic::riscv_vlseg8: {
selectVLSEG(Node, IntNo, /*IsMasked*/ false, /*IsStrided*/ false);
selectVLSEG(Node, /*IsMasked*/ false, /*IsStrided*/ false);
return;
}
case Intrinsic::riscv_vlseg2_mask:
@@ -585,7 +577,7 @@ void RISCVDAGToDAGISel::Select(SDNode *Node) {
case Intrinsic::riscv_vlseg6_mask:
case Intrinsic::riscv_vlseg7_mask:
case Intrinsic::riscv_vlseg8_mask: {
selectVLSEG(Node, IntNo, /*IsMasked*/ true, /*IsStrided*/ false);
selectVLSEG(Node, /*IsMasked*/ true, /*IsStrided*/ false);
return;
}
case Intrinsic::riscv_vlsseg2:
@@ -595,7 +587,7 @@ void RISCVDAGToDAGISel::Select(SDNode *Node) {
case Intrinsic::riscv_vlsseg6:
case Intrinsic::riscv_vlsseg7:
case Intrinsic::riscv_vlsseg8: {
selectVLSEG(Node, IntNo, /*IsMasked*/ false, /*IsStrided*/ true);
selectVLSEG(Node, /*IsMasked*/ false, /*IsStrided*/ true);
return;
}
case Intrinsic::riscv_vlsseg2_mask:
@@ -605,7 +597,7 @@ void RISCVDAGToDAGISel::Select(SDNode *Node) {
case Intrinsic::riscv_vlsseg6_mask:
case Intrinsic::riscv_vlsseg7_mask:
case Intrinsic::riscv_vlsseg8_mask: {
selectVLSEG(Node, IntNo, /*IsMasked*/ true, /*IsStrided*/ true);
selectVLSEG(Node, /*IsMasked*/ true, /*IsStrided*/ true);
return;
}
case Intrinsic::riscv_vloxseg2:
@@ -615,16 +607,17 @@ void RISCVDAGToDAGISel::Select(SDNode *Node) {
case Intrinsic::riscv_vloxseg6:
case Intrinsic::riscv_vloxseg7:
case Intrinsic::riscv_vloxseg8:
selectVLXSEG(Node, /*IsMasked*/ false, /*IsOrdered*/ true);
return;
case Intrinsic::riscv_vluxseg2:
case Intrinsic::riscv_vluxseg3:
case Intrinsic::riscv_vluxseg4:
case Intrinsic::riscv_vluxseg5:
case Intrinsic::riscv_vluxseg6:
case Intrinsic::riscv_vluxseg7:
case Intrinsic::riscv_vluxseg8: {
selectVLXSEG(Node, IntNo, /*IsMasked*/ false);
case Intrinsic::riscv_vluxseg8:
selectVLXSEG(Node, /*IsMasked*/ false, /*IsOrdered*/ false);
return;
}
case Intrinsic::riscv_vloxseg2_mask:
case Intrinsic::riscv_vloxseg3_mask:
case Intrinsic::riscv_vloxseg4_mask:
@@ -632,16 +625,17 @@ void RISCVDAGToDAGISel::Select(SDNode *Node) {
case Intrinsic::riscv_vloxseg6_mask:
case Intrinsic::riscv_vloxseg7_mask:
case Intrinsic::riscv_vloxseg8_mask:
selectVLXSEG(Node, /*IsMasked*/ true, /*IsOrdered*/ true);
return;
case Intrinsic::riscv_vluxseg2_mask:
case Intrinsic::riscv_vluxseg3_mask:
case Intrinsic::riscv_vluxseg4_mask:
case Intrinsic::riscv_vluxseg5_mask:
case Intrinsic::riscv_vluxseg6_mask:
case Intrinsic::riscv_vluxseg7_mask:
case Intrinsic::riscv_vluxseg8_mask: {
selectVLXSEG(Node, IntNo, /*IsMasked*/ true);
case Intrinsic::riscv_vluxseg8_mask:
selectVLXSEG(Node, /*IsMasked*/ true, /*IsOrdered*/ false);
return;
}
case Intrinsic::riscv_vlseg8ff:
case Intrinsic::riscv_vlseg7ff:
case Intrinsic::riscv_vlseg6ff:
@@ -675,7 +669,7 @@ void RISCVDAGToDAGISel::Select(SDNode *Node) {
case Intrinsic::riscv_vsseg6:
case Intrinsic::riscv_vsseg7:
case Intrinsic::riscv_vsseg8: {
selectVSSEG(Node, IntNo, /*IsMasked*/ false, /*IsStrided*/ false);
selectVSSEG(Node, /*IsMasked*/ false, /*IsStrided*/ false);
return;
}
case Intrinsic::riscv_vsseg2_mask:
@@ -685,7 +679,7 @@ void RISCVDAGToDAGISel::Select(SDNode *Node) {
case Intrinsic::riscv_vsseg6_mask:
case Intrinsic::riscv_vsseg7_mask:
case Intrinsic::riscv_vsseg8_mask: {
selectVSSEG(Node, IntNo, /*IsMasked*/ true, /*IsStrided*/ false);
selectVSSEG(Node, /*IsMasked*/ true, /*IsStrided*/ false);
return;
}
case Intrinsic::riscv_vssseg2:
@@ -695,7 +689,7 @@ void RISCVDAGToDAGISel::Select(SDNode *Node) {
case Intrinsic::riscv_vssseg6:
case Intrinsic::riscv_vssseg7:
case Intrinsic::riscv_vssseg8: {
selectVSSEG(Node, IntNo, /*IsMasked*/ false, /*IsStrided*/ true);
selectVSSEG(Node, /*IsMasked*/ false, /*IsStrided*/ true);
return;
}
case Intrinsic::riscv_vssseg2_mask:
@@ -705,7 +699,7 @@ void RISCVDAGToDAGISel::Select(SDNode *Node) {
case Intrinsic::riscv_vssseg6_mask:
case Intrinsic::riscv_vssseg7_mask:
case Intrinsic::riscv_vssseg8_mask: {
selectVSSEG(Node, IntNo, /*IsMasked*/ true, /*IsStrided*/ true);
selectVSSEG(Node, /*IsMasked*/ true, /*IsStrided*/ true);
return;
}
case Intrinsic::riscv_vsoxseg2:
@@ -715,16 +709,17 @@ void RISCVDAGToDAGISel::Select(SDNode *Node) {
case Intrinsic::riscv_vsoxseg6:
case Intrinsic::riscv_vsoxseg7:
case Intrinsic::riscv_vsoxseg8:
selectVSXSEG(Node, /*IsMasked*/ false, /*IsOrdered*/ true);
return;
case Intrinsic::riscv_vsuxseg2:
case Intrinsic::riscv_vsuxseg3:
case Intrinsic::riscv_vsuxseg4:
case Intrinsic::riscv_vsuxseg5:
case Intrinsic::riscv_vsuxseg6:
case Intrinsic::riscv_vsuxseg7:
case Intrinsic::riscv_vsuxseg8: {
selectVSXSEG(Node, IntNo, /*IsMasked*/ false);
case Intrinsic::riscv_vsuxseg8:
selectVSXSEG(Node, /*IsMasked*/ false, /*IsOrdered*/ false);
return;
}
case Intrinsic::riscv_vsoxseg2_mask:
case Intrinsic::riscv_vsoxseg3_mask:
case Intrinsic::riscv_vsoxseg4_mask:
@@ -732,17 +727,18 @@ void RISCVDAGToDAGISel::Select(SDNode *Node) {
case Intrinsic::riscv_vsoxseg6_mask:
case Intrinsic::riscv_vsoxseg7_mask:
case Intrinsic::riscv_vsoxseg8_mask:
selectVSXSEG(Node, /*IsMasked*/ true, /*IsOrdered*/ true);
return;
case Intrinsic::riscv_vsuxseg2_mask:
case Intrinsic::riscv_vsuxseg3_mask:
case Intrinsic::riscv_vsuxseg4_mask:
case Intrinsic::riscv_vsuxseg5_mask:
case Intrinsic::riscv_vsuxseg6_mask:
case Intrinsic::riscv_vsuxseg7_mask:
case Intrinsic::riscv_vsuxseg8_mask: {
selectVSXSEG(Node, IntNo, /*IsMasked*/ true);
case Intrinsic::riscv_vsuxseg8_mask:
selectVSXSEG(Node, /*IsMasked*/ true, /*IsOrdered*/ false);
return;
}
}
break;
}
case ISD::BITCAST:

llvm/lib/Target/RISCV/RISCVISelDAGToDAG.h

@@ -73,11 +73,11 @@ public:
return selectRVVUimm5(N, Width, Imm);
}
void selectVLSEG(SDNode *Node, unsigned IntNo, bool IsMasked, bool IsStrided);
void selectVLSEG(SDNode *Node, bool IsMasked, bool IsStrided);
void selectVLSEGFF(SDNode *Node, bool IsMasked);
void selectVLXSEG(SDNode *Node, unsigned IntNo, bool IsMasked);
void selectVSSEG(SDNode *Node, unsigned IntNo, bool IsMasked, bool IsStrided);
void selectVSXSEG(SDNode *Node, unsigned IntNo, bool IsMasked);
void selectVLXSEG(SDNode *Node, bool IsMasked, bool IsOrdered);
void selectVSSEG(SDNode *Node, bool IsMasked, bool IsStrided);
void selectVSXSEG(SDNode *Node, bool IsMasked, bool IsOrdered);
// Include the pieces autogenerated from the target description.
#include "RISCVGenDAGISel.inc"
@@ -85,6 +85,54 @@ public:
private:
void doPeepholeLoadStoreADDI();
};
}
namespace RISCV {
struct VLSEGPseudo {
uint8_t NF;
uint8_t Masked;
uint8_t Strided;
uint8_t FF;
uint8_t SEW;
uint8_t LMUL;
uint16_t Pseudo;
};
struct VLXSEGPseudo {
uint8_t NF;
uint8_t Masked;
uint8_t Ordered;
uint8_t SEW;
uint8_t LMUL;
uint8_t IndexLMUL;
uint16_t Pseudo;
};
struct VSSEGPseudo {
uint8_t NF;
uint8_t Masked;
uint8_t Strided;
uint8_t SEW;
uint8_t LMUL;
uint16_t Pseudo;
};
struct VSXSEGPseudo {
uint8_t NF;
uint8_t Masked;
uint8_t Ordered;
uint8_t SEW;
uint8_t LMUL;
uint8_t IndexLMUL;
uint16_t Pseudo;
};
#define GET_RISCVVSSEGTable_DECL
#define GET_RISCVVLSEGTable_DECL
#define GET_RISCVVLXSEGTable_DECL
#define GET_RISCVVSXSEGTable_DECL
#include "RISCVGenSearchableTables.inc"
} // namespace RISCV
} // namespace llvm
#endif

llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td

@@ -413,19 +413,75 @@ def RISCVVIntrinsicsTable : GenericTable {
let PrimaryKeyName = "getRISCVVIntrinsicInfo";
}
class RISCVZvlsseg<string IntrName, bits<7> S, bits<3> L, bits<3> IL = V_M1.value> {
Intrinsic IntrinsicID = !cast<Intrinsic>(IntrName);
class RISCVVLSEG<bits<4> N, bit M, bit Str, bit F, bits<7> S, bits<3> L> {
bits<4> NF = N;
bits<1> Masked = M;
bits<1> Strided = Str;
bits<1> FF = F;
bits<7> SEW = S;
bits<3> LMUL = L;
Pseudo Pseudo = !cast<Pseudo>(NAME);
}
def RISCVVLSEGTable : GenericTable {
let FilterClass = "RISCVVLSEG";
let CppTypeName = "VLSEGPseudo";
let Fields = ["NF", "Masked", "Strided", "FF", "SEW", "LMUL", "Pseudo"];
let PrimaryKey = ["NF", "Masked", "Strided", "FF", "SEW", "LMUL"];
let PrimaryKeyName = "getVLSEGPseudo";
}
class RISCVVLXSEG<bits<4> N, bit M, bit O, bits<7> S, bits<3> L, bits<3> IL> {
bits<4> NF = N;
bits<1> Masked = M;
bits<1> Ordered = O;
bits<7> SEW = S;
bits<3> LMUL = L;
bits<3> IndexLMUL = IL;
Pseudo Pseudo = !cast<Pseudo>(NAME);
}
def RISCVZvlssegTable : GenericTable {
let FilterClass = "RISCVZvlsseg";
let Fields = ["IntrinsicID", "SEW", "LMUL", "IndexLMUL", "Pseudo"];
let PrimaryKey = ["IntrinsicID", "SEW", "LMUL", "IndexLMUL"];
let PrimaryKeyName = "getPseudo";
def RISCVVLXSEGTable : GenericTable {
let FilterClass = "RISCVVLXSEG";
let CppTypeName = "VLXSEGPseudo";
let Fields = ["NF", "Masked", "Ordered", "SEW", "LMUL", "IndexLMUL", "Pseudo"];
let PrimaryKey = ["NF", "Masked", "Ordered", "SEW", "LMUL", "IndexLMUL"];
let PrimaryKeyName = "getVLXSEGPseudo";
}
class RISCVVSSEG<bits<4> N, bit M, bit Str, bits<7> S, bits<3> L> {
bits<4> NF = N;
bits<1> Masked = M;
bits<1> Strided = Str;
bits<7> SEW = S;
bits<3> LMUL = L;
Pseudo Pseudo = !cast<Pseudo>(NAME);
}
def RISCVVSSEGTable : GenericTable {
let FilterClass = "RISCVVSSEG";
let CppTypeName = "VSSEGPseudo";
let Fields = ["NF", "Masked", "Strided", "SEW", "LMUL", "Pseudo"];
let PrimaryKey = ["NF", "Masked", "Strided", "SEW", "LMUL"];
let PrimaryKeyName = "getVSSEGPseudo";
}
class RISCVVSXSEG<bits<4> N, bit M, bit O, bits<7> S, bits<3> L, bits<3> IL> {
bits<4> NF = N;
bits<1> Masked = M;
bits<1> Ordered = O;
bits<7> SEW = S;
bits<3> LMUL = L;
bits<3> IndexLMUL = IL;
Pseudo Pseudo = !cast<Pseudo>(NAME);
}
def RISCVVSXSEGTable : GenericTable {
let FilterClass = "RISCVVSXSEG";
let CppTypeName = "VSXSEGPseudo";
let Fields = ["NF", "Masked", "Ordered", "SEW", "LMUL", "IndexLMUL", "Pseudo"];
let PrimaryKey = ["NF", "Masked", "Ordered", "SEW", "LMUL", "IndexLMUL"];
let PrimaryKeyName = "getVSXSEGPseudo";
}
//===----------------------------------------------------------------------===//
@@ -455,35 +511,6 @@ class PseudoToVInst<string PseudoInst> {
!subst("Pseudo", "", PseudoInst))))))))))))))))))));
}
class ToLowerCase<string Upper> {
string L = !subst("FF", "ff",
!subst("VLSEG", "vlseg",
!subst("VLSSEG", "vlsseg",
!subst("VSSEG", "vsseg",
!subst("VSSSEG", "vssseg",
!subst("VLOXSEG", "vloxseg",
!subst("VLUXSEG", "vluxseg",
!subst("VSOXSEG", "vsoxseg",
!subst("VSUXSEG", "vsuxseg", Upper)))))))));
}
// Example: PseudoVLSEG2E32_V_M2 -> int_riscv_vlseg2
// Example: PseudoVLSEG2E32_V_M2_MASK -> int_riscv_vlseg2_mask
class PseudoToIntrinsic<string PseudoInst, bit IsMasked> {
string Intrinsic = !strconcat("int_riscv_",
ToLowerCase<
!subst("E8", "",
!subst("E16", "",
!subst("E32", "",
!subst("E64", "",
!subst("EI8", "",
!subst("EI16", "",
!subst("EI32", "",
!subst("EI64", "",
!subst("_V", "", PseudoToVInst<PseudoInst>.VInst)))))))))>.L,
!if(IsMasked, "_mask", ""));
}
// The destination vector register group for a masked vector instruction cannot
// overlap the source mask register (v0), unless the destination vector register
// is being written with a mask value (e.g., comparisons) or the scalar result
@@ -1013,11 +1040,11 @@ multiclass VPseudoAMO {
defm "EI" # eew : VPseudoAMOEI<eew>;
}
class VPseudoUSSegLoadNoMask<VReg RetClass, bits<7> EEW>:
class VPseudoUSSegLoadNoMask<VReg RetClass, bits<7> EEW, bits<4> NF, bit isFF>:
Pseudo<(outs RetClass:$rd),
(ins GPR:$rs1, GPR:$vl, ixlenimm:$sew),[]>,
RISCVVPseudo,
RISCVZvlsseg<PseudoToIntrinsic<NAME, false>.Intrinsic, EEW, VLMul> {
RISCVVLSEG<NF, /*Masked*/0, /*Strided*/0, /*FF*/isFF, EEW, VLMul> {
let mayLoad = 1;
let mayStore = 0;
let hasSideEffects = 0;
@@ -1029,12 +1056,12 @@ class VPseudoUSSegLoadNoMask<VReg RetClass, bits<7> EEW>:
let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
}
class VPseudoUSSegLoadMask<VReg RetClass, bits<7> EEW>:
class VPseudoUSSegLoadMask<VReg RetClass, bits<7> EEW, bits<4> NF, bit isFF>:
Pseudo<(outs GetVRegNoV0<RetClass>.R:$rd),
(ins GetVRegNoV0<RetClass>.R:$merge, GPR:$rs1,
VMaskOp:$vm, GPR:$vl, ixlenimm:$sew),[]>,
RISCVVPseudo,
RISCVZvlsseg<PseudoToIntrinsic<NAME, true>.Intrinsic, EEW, VLMul> {
RISCVVLSEG<NF, /*Masked*/1, /*Strided*/0, /*FF*/isFF, EEW, VLMul> {
let mayLoad = 1;
let mayStore = 0;
let hasSideEffects = 0;
@@ -1047,11 +1074,12 @@ class VPseudoUSSegLoadMask<VReg RetClass, bits<7> EEW>:
let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
}
class VPseudoSSegLoadNoMask<VReg RetClass, bits<7> EEW>:
class VPseudoSSegLoadNoMask<VReg RetClass, bits<7> EEW, bits<4> NF>:
Pseudo<(outs RetClass:$rd),
(ins GPR:$rs1, GPR:$offset, GPR:$vl, ixlenimm:$sew),[]>,
RISCVVPseudo,
RISCVZvlsseg<PseudoToIntrinsic<NAME, false>.Intrinsic, EEW, VLMul> {
RISCVVLSEG<NF, /*Masked*/0, /*Strided*/1, /*FF*/0, EEW, VLMul> {
let mayLoad = 1;
let mayLoad = 1;
let mayStore = 0;
let hasSideEffects = 0;
@@ -1063,12 +1091,12 @@ class VPseudoSSegLoadNoMask<VReg RetClass, bits<7> EEW>:
let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
}
class VPseudoSSegLoadMask<VReg RetClass, bits<7> EEW>:
class VPseudoSSegLoadMask<VReg RetClass, bits<7> EEW, bits<4> NF>:
Pseudo<(outs GetVRegNoV0<RetClass>.R:$rd),
(ins GetVRegNoV0<RetClass>.R:$merge, GPR:$rs1,
GPR:$offset, VMaskOp:$vm, GPR:$vl, ixlenimm:$sew),[]>,
RISCVVPseudo,
RISCVZvlsseg<PseudoToIntrinsic<NAME, true>.Intrinsic, EEW, VLMul> {
RISCVVLSEG<NF, /*Masked*/1, /*Strided*/1, /*FF*/0, EEW, VLMul> {
let mayLoad = 1;
let mayStore = 0;
let hasSideEffects = 0;
@@ -1081,11 +1109,12 @@ class VPseudoSSegLoadMask<VReg RetClass, bits<7> EEW>:
let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
}
class VPseudoISegLoadNoMask<VReg RetClass, VReg IdxClass, bits<7> EEW, bits<3> LMUL>:
class VPseudoISegLoadNoMask<VReg RetClass, VReg IdxClass, bits<7> EEW, bits<3> LMUL,
bits<4> NF, bit Ordered>:
Pseudo<(outs RetClass:$rd),
(ins GPR:$rs1, IdxClass:$offset, GPR:$vl, ixlenimm:$sew),[]>,
RISCVVPseudo,
RISCVZvlsseg<PseudoToIntrinsic<NAME, false>.Intrinsic, EEW, VLMul, LMUL> {
RISCVVLXSEG<NF, /*Masked*/0, Ordered, EEW, VLMul, LMUL> {
let mayLoad = 1;
let mayStore = 0;
let hasSideEffects = 0;
@@ -1100,12 +1129,13 @@ class VPseudoISegLoadNoMask<VReg RetClass, VReg IdxClass, bits<7> EEW, bits<3> L
let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
}
class VPseudoISegLoadMask<VReg RetClass, VReg IdxClass, bits<7> EEW, bits<3> LMUL>:
class VPseudoISegLoadMask<VReg RetClass, VReg IdxClass, bits<7> EEW, bits<3> LMUL,
bits<4> NF, bit Ordered>:
Pseudo<(outs GetVRegNoV0<RetClass>.R:$rd),
(ins GetVRegNoV0<RetClass>.R:$merge, GPR:$rs1,
IdxClass:$offset, VMaskOp:$vm, GPR:$vl, ixlenimm:$sew),[]>,
RISCVVPseudo,
RISCVZvlsseg<PseudoToIntrinsic<NAME, true>.Intrinsic, EEW, VLMul, LMUL> {
RISCVVLXSEG<NF, /*Masked*/1, Ordered, EEW, VLMul, LMUL> {
let mayLoad = 1;
let mayStore = 0;
let hasSideEffects = 0;
@@ -1120,11 +1150,11 @@ class VPseudoISegLoadMask<VReg RetClass, VReg IdxClass, bits<7> EEW, bits<3> LMU
let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
}
class VPseudoUSSegStoreNoMask<VReg ValClass, bits<7> EEW>:
class VPseudoUSSegStoreNoMask<VReg ValClass, bits<7> EEW, bits<4> NF>:
Pseudo<(outs),
(ins ValClass:$rd, GPR:$rs1, GPR:$vl, ixlenimm:$sew),[]>,
RISCVVPseudo,
RISCVZvlsseg<PseudoToIntrinsic<NAME, false>.Intrinsic, EEW, VLMul> {
RISCVVSSEG<NF, /*Masked*/0, /*Strided*/0, EEW, VLMul> {
let mayLoad = 0;
let mayStore = 1;
let hasSideEffects = 0;
@@ -1136,12 +1166,12 @@ class VPseudoUSSegStoreNoMask<VReg ValClass, bits<7> EEW>:
let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
}
class VPseudoUSSegStoreMask<VReg ValClass, bits<7> EEW>:
class VPseudoUSSegStoreMask<VReg ValClass, bits<7> EEW, bits<4> NF>:
Pseudo<(outs),
(ins ValClass:$rd, GPR:$rs1,
VMaskOp:$vm, GPR:$vl, ixlenimm:$sew),[]>,
RISCVVPseudo,
RISCVZvlsseg<PseudoToIntrinsic<NAME, true>.Intrinsic, EEW, VLMul> {
RISCVVSSEG<NF, /*Masked*/1, /*Strided*/0, EEW, VLMul> {
let mayLoad = 0;
let mayStore = 1;
let hasSideEffects = 0;
@@ -1152,11 +1182,11 @@ class VPseudoUSSegStoreMask<VReg ValClass, bits<7> EEW>:
let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
}
class VPseudoSSegStoreNoMask<VReg ValClass, bits<7> EEW>:
class VPseudoSSegStoreNoMask<VReg ValClass, bits<7> EEW, bits<4> NF>:
Pseudo<(outs),
(ins ValClass:$rd, GPR:$rs1, GPR: $offset, GPR:$vl, ixlenimm:$sew),[]>,
RISCVVPseudo,
RISCVZvlsseg<PseudoToIntrinsic<NAME, false>.Intrinsic, EEW, VLMul> {
RISCVVSSEG<NF, /*Masked*/0, /*Strided*/1, EEW, VLMul> {
let mayLoad = 0;
let mayStore = 1;
let hasSideEffects = 0;
@@ -1168,12 +1198,12 @@ class VPseudoSSegStoreNoMask<VReg ValClass, bits<7> EEW>:
let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
}
class VPseudoSSegStoreMask<VReg ValClass, bits<7> EEW>:
class VPseudoSSegStoreMask<VReg ValClass, bits<7> EEW, bits<4> NF>:
Pseudo<(outs),
(ins ValClass:$rd, GPR:$rs1, GPR: $offset,
VMaskOp:$vm, GPR:$vl, ixlenimm:$sew),[]>,
RISCVVPseudo,
RISCVZvlsseg<PseudoToIntrinsic<NAME, true>.Intrinsic, EEW, VLMul> {
RISCVVSSEG<NF, /*Masked*/1, /*Strided*/1, EEW, VLMul> {
let mayLoad = 0;
let mayStore = 1;
let hasSideEffects = 0;
@@ -1184,12 +1214,13 @@ class VPseudoSSegStoreMask<VReg ValClass, bits<7> EEW>:
let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
}
class VPseudoISegStoreNoMask<VReg ValClass, VReg IdxClass, bits<7> EEW, bits<3> LMUL>:
class VPseudoISegStoreNoMask<VReg ValClass, VReg IdxClass, bits<7> EEW, bits<3> LMUL,
bits<4> NF, bit Ordered>:
Pseudo<(outs),
(ins ValClass:$rd, GPR:$rs1, IdxClass: $index,
GPR:$vl, ixlenimm:$sew),[]>,
RISCVVPseudo,
RISCVZvlsseg<PseudoToIntrinsic<NAME, false>.Intrinsic, EEW, VLMul, LMUL> {
RISCVVSXSEG<NF, /*Masked*/0, Ordered, EEW, VLMul, LMUL> {
let mayLoad = 0;
let mayStore = 1;
let hasSideEffects = 0;
@@ -1201,12 +1232,13 @@ class VPseudoISegStoreNoMask<VReg ValClass, VReg IdxClass, bits<7> EEW, bits<3>
let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
}
class VPseudoISegStoreMask<VReg ValClass, VReg IdxClass, bits<7> EEW, bits<3> LMUL>:
class VPseudoISegStoreMask<VReg ValClass, VReg IdxClass, bits<7> EEW, bits<3> LMUL,
bits<4> NF, bit Ordered>:
Pseudo<(outs),
(ins ValClass:$rd, GPR:$rs1, IdxClass: $index,
VMaskOp:$vm, GPR:$vl, ixlenimm:$sew),[]>,
RISCVVPseudo,
RISCVZvlsseg<PseudoToIntrinsic<NAME, true>.Intrinsic, EEW, VLMul, LMUL> {
RISCVVSXSEG<NF, /*Masked*/1, Ordered, EEW, VLMul, LMUL> {
let mayLoad = 0;
let mayStore = 1;
let hasSideEffects = 0;
@@ -1889,9 +1921,9 @@ multiclass VPseudoUSSegLoad<bit isFF> {
defvar vreg = SegRegClass<lmul, nf>.RC;
defvar FFStr = !if(isFF, "FF", "");
def nf # "E" # eew # FFStr # "_V_" # LInfo :
VPseudoUSSegLoadNoMask<vreg, eew>;
VPseudoUSSegLoadNoMask<vreg, eew, nf, isFF>;
def nf # "E" # eew # FFStr # "_V_" # LInfo # "_MASK" :
VPseudoUSSegLoadMask<vreg, eew>;
VPseudoUSSegLoadMask<vreg, eew, nf, isFF>;
}
}
}
@@ -1905,15 +1937,15 @@ multiclass VPseudoSSegLoad {
let VLMul = lmul.value in {
foreach nf = NFSet<lmul>.L in {
defvar vreg = SegRegClass<lmul, nf>.RC;
def nf # "E" # eew # "_V_" # LInfo : VPseudoSSegLoadNoMask<vreg, eew>;
def nf # "E" # eew # "_V_" # LInfo # "_MASK" : VPseudoSSegLoadMask<vreg, eew>;
def nf # "E" # eew # "_V_" # LInfo : VPseudoSSegLoadNoMask<vreg, eew, nf>;
def nf # "E" # eew # "_V_" # LInfo # "_MASK" : VPseudoSSegLoadMask<vreg, eew, nf>;
}
}
}
}
}
multiclass VPseudoISegLoad {
multiclass VPseudoISegLoad<bit Ordered> {
foreach idx_eew = EEWList in { // EEW for index argument.
foreach idx_lmul = MxSet<idx_eew>.m in { // LMUL for index argument.
foreach val_lmul = MxList.m in { // LMUL for the value.
@@ -1924,9 +1956,9 @@ multiclass VPseudoISegLoad {
foreach nf = NFSet<val_lmul>.L in {
defvar ValVreg = SegRegClass<val_lmul, nf>.RC;
def nf # "EI" # idx_eew # "_V_" # IdxLInfo # "_" # ValLInfo :
VPseudoISegLoadNoMask<ValVreg, IdxVreg, idx_eew, idx_lmul.value>;
VPseudoISegLoadNoMask<ValVreg, IdxVreg, idx_eew, idx_lmul.value, nf, Ordered>;
def nf # "EI" # idx_eew # "_V_" # IdxLInfo # "_" # ValLInfo # "_MASK" :
VPseudoISegLoadMask<ValVreg, IdxVreg, idx_eew, idx_lmul.value>;
VPseudoISegLoadMask<ValVreg, IdxVreg, idx_eew, idx_lmul.value, nf, Ordered>;
}
}
}
@@ -1941,8 +1973,8 @@ multiclass VPseudoUSSegStore {
let VLMul = lmul.value in {
foreach nf = NFSet<lmul>.L in {
defvar vreg = SegRegClass<lmul, nf>.RC;
def nf # "E" # eew # "_V_" # LInfo : VPseudoUSSegStoreNoMask<vreg, eew>;
def nf # "E" # eew # "_V_" # LInfo # "_MASK" : VPseudoUSSegStoreMask<vreg, eew>;
def nf # "E" # eew # "_V_" # LInfo : VPseudoUSSegStoreNoMask<vreg, eew, nf>;
def nf # "E" # eew # "_V_" # LInfo # "_MASK" : VPseudoUSSegStoreMask<vreg, eew, nf>;
}
}
}
@@ -1956,15 +1988,15 @@ multiclass VPseudoSSegStore {
let VLMul = lmul.value in {
foreach nf = NFSet<lmul>.L in {
defvar vreg = SegRegClass<lmul, nf>.RC;
def nf # "E" # eew # "_V_" # LInfo : VPseudoSSegStoreNoMask<vreg, eew>;
def nf # "E" # eew # "_V_" # LInfo # "_MASK" : VPseudoSSegStoreMask<vreg, eew>;
def nf # "E" # eew # "_V_" # LInfo : VPseudoSSegStoreNoMask<vreg, eew, nf>;
def nf # "E" # eew # "_V_" # LInfo # "_MASK" : VPseudoSSegStoreMask<vreg, eew, nf>;
}
}
}
}
}
multiclass VPseudoISegStore {
multiclass VPseudoISegStore<bit Ordered> {
foreach idx_eew = EEWList in { // EEW for index argument.
foreach idx_lmul = MxSet<idx_eew>.m in { // LMUL for index argument.
foreach val_lmul = MxList.m in { // LMUL for the value.
@@ -1975,9 +2007,9 @@ multiclass VPseudoISegStore {
foreach nf = NFSet<val_lmul>.L in {
defvar ValVreg = SegRegClass<val_lmul, nf>.RC;
def nf # "EI" # idx_eew # "_V_" # IdxLInfo # "_" # ValLInfo :
VPseudoISegStoreNoMask<ValVreg, IdxVreg, idx_eew, idx_lmul.value>;
VPseudoISegStoreNoMask<ValVreg, IdxVreg, idx_eew, idx_lmul.value, nf, Ordered>;
def nf # "EI" # idx_eew # "_V_" # IdxLInfo # "_" # ValLInfo # "_MASK" :
VPseudoISegStoreMask<ValVreg, IdxVreg, idx_eew, idx_lmul.value>;
VPseudoISegStoreMask<ValVreg, IdxVreg, idx_eew, idx_lmul.value, nf, Ordered>;
}
}
}
@@ -3235,12 +3267,12 @@ defm PseudoVL : VPseudoUSLoad</*isFF=*/true>;
//===----------------------------------------------------------------------===//
defm PseudoVLSEG : VPseudoUSSegLoad</*isFF=*/false>;
defm PseudoVLSSEG : VPseudoSSegLoad;
defm PseudoVLOXSEG : VPseudoISegLoad;
defm PseudoVLUXSEG : VPseudoISegLoad;
defm PseudoVLOXSEG : VPseudoISegLoad</*Ordered=*/true>;
defm PseudoVLUXSEG : VPseudoISegLoad</*Ordered=*/false>;
defm PseudoVSSEG : VPseudoUSSegStore;
defm PseudoVSSSEG : VPseudoSSegStore;
defm PseudoVSOXSEG : VPseudoISegStore;
defm PseudoVSUXSEG : VPseudoISegStore;
defm PseudoVSOXSEG : VPseudoISegStore</*Ordered=*/true>;
defm PseudoVSUXSEG : VPseudoISegStore</*Ordered=*/false>;
// vlseg<nf>e<eew>ff.v may update VL register
let hasSideEffects = 1, Defs = [VL] in