[RISCV] Move instruction information into the RISCVII namespace (NFC)
Move instruction attributes into the `RISCVII` namespace and add associated helper functions.

Differential Revision: https://reviews.llvm.org/D102268
parent 1556540372
commit 3a64b7080d
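Context for the diff below (not part of the patch): the change replaces open-coded TSFlags bit arithmetic and the free-standing RISCVVSEW/RISCVVLMUL enums with enums and named accessors inside the RISCVII namespace. A minimal standalone sketch of the pattern, built only from constants that appear in the diff:

#include <cstdint>

namespace RISCVII {
enum {
  InstFormatShift = 0,
  InstFormatMask = 31,
  ConstraintShift = InstFormatShift + 5,
  ConstraintMask = 0b111 << ConstraintShift,
};

enum VConstraintType {
  NoConstraint = 0,
  VS2Constraint = 0b001,
  VS1Constraint = 0b010,
  VMConstraint = 0b100,
};

// New style: a named accessor hides the shift/mask arithmetic.
static inline VConstraintType getConstraint(uint64_t TSFlags) {
  return static_cast<VConstraintType>((TSFlags & ConstraintMask) >>
                                      ConstraintShift);
}
} // namespace RISCVII

// Old style, repeated at every call site before this patch:
//   unsigned C = (TSFlags & RISCVII::ConstraintMask) >> RISCVII::ConstraintShift;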
@@ -1589,9 +1589,9 @@ OperandMatchResultTy RISCVAsmParser::parseVTypeI(OperandVector &Operands) {
   unsigned SewLog2 = Log2_32(Sew / 8);
   unsigned LmulLog2 = Log2_32(Lmul);
-  RISCVVSEW VSEW = static_cast<RISCVVSEW>(SewLog2);
-  RISCVVLMUL VLMUL =
-      static_cast<RISCVVLMUL>(Fractional ? 8 - LmulLog2 : LmulLog2);
+  RISCVII::VSEW VSEW = static_cast<RISCVII::VSEW>(SewLog2);
+  RISCVII::VLMUL VLMUL =
+      static_cast<RISCVII::VLMUL>(Fractional ? 8 - LmulLog2 : LmulLog2);
 
   unsigned VTypeI =
       RISCVVType::encodeVTYPE(VLMUL, VSEW, TailAgnostic, MaskAgnostic);
@@ -2479,8 +2479,8 @@ bool RISCVAsmParser::validateInstruction(MCInst &Inst,
   }
 
   const MCInstrDesc &MCID = MII.get(Inst.getOpcode());
-  unsigned Constraints =
-      (MCID.TSFlags & RISCVII::ConstraintMask) >> RISCVII::ConstraintShift;
+  RISCVII::VConstraintType Constraints =
+      RISCVII::getConstraint(MCID.TSFlags);
   if (Constraints == RISCVII::NoConstraint)
     return false;
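Not part of the patch, but useful context for the first hunk above: parseVTypeI turns the parsed SEW/LMUL numbers into the enum encodings. A standalone sketch of that arithmetic, with log2u standing in for LLVM's Log2_32 (assumed here, since the parsing code itself is outside this diff):

// SEW 8/16/32/... maps to VSEW 0/1/2/...; integral LMUL m1/m2/m4/m8 maps to
// VLMUL 0-3, and fractional LMUL mf8/mf4/mf2 maps to 5-7 via 8 - log2(LMUL).
unsigned log2u(unsigned V) {
  unsigned L = 0;
  while (V >>= 1)
    ++L;
  return L;
}

unsigned vsewFor(unsigned Sew) { return log2u(Sew / 8); } // e32 -> 2 (SEW_32)

unsigned vlmulFor(unsigned Lmul, bool Fractional) {
  unsigned LmulLog2 = log2u(Lmul);
  return Fractional ? 8 - LmulLog2 : LmulLog2; // mf2 -> 7 (LMUL_F2)
}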
@@ -99,26 +99,26 @@ void validate(const Triple &TT, const FeatureBitset &FeatureBits) {
 } // namespace RISCVFeatures
 
 void RISCVVType::printVType(unsigned VType, raw_ostream &OS) {
-  RISCVVSEW VSEW = getVSEW(VType);
-  RISCVVLMUL VLMUL = getVLMUL(VType);
+  RISCVII::VSEW VSEW = getVSEW(VType);
+  RISCVII::VLMUL VLMUL = getVLMUL(VType);
 
   unsigned Sew = 1 << (static_cast<unsigned>(VSEW) + 3);
   OS << "e" << Sew;
 
   switch (VLMUL) {
-  case RISCVVLMUL::LMUL_RESERVED:
+  case RISCVII::VLMUL::LMUL_RESERVED:
     llvm_unreachable("Unexpected LMUL value!");
-  case RISCVVLMUL::LMUL_1:
-  case RISCVVLMUL::LMUL_2:
-  case RISCVVLMUL::LMUL_4:
-  case RISCVVLMUL::LMUL_8: {
+  case RISCVII::VLMUL::LMUL_1:
+  case RISCVII::VLMUL::LMUL_2:
+  case RISCVII::VLMUL::LMUL_4:
+  case RISCVII::VLMUL::LMUL_8: {
     unsigned LMul = 1 << static_cast<unsigned>(VLMUL);
     OS << ",m" << LMul;
     break;
   }
-  case RISCVVLMUL::LMUL_F2:
-  case RISCVVLMUL::LMUL_F4:
-  case RISCVVLMUL::LMUL_F8: {
+  case RISCVII::VLMUL::LMUL_F2:
+  case RISCVII::VLMUL::LMUL_F4:
+  case RISCVII::VLMUL::LMUL_F8: {
     unsigned LMul = 1 << (8 - static_cast<unsigned>(VLMUL));
     OS << ",mf" << LMul;
     break;
@@ -45,8 +45,9 @@ enum {
   InstFormatOther = 17,
 
   InstFormatMask = 31,
+  InstFormatShift = 0,
 
-  ConstraintShift = 5,
+  ConstraintShift = InstFormatShift + 5,
   ConstraintMask = 0b111 << ConstraintShift,
 
   VLMulShift = ConstraintShift + 3,
@@ -78,13 +79,70 @@ enum {
 };
 
 // Match with the definitions in RISCVInstrFormatsV.td
-enum RVVConstraintType {
+enum VConstraintType {
   NoConstraint = 0,
   VS2Constraint = 0b001,
   VS1Constraint = 0b010,
   VMConstraint = 0b100,
 };
 
+enum VSEW {
+  SEW_8 = 0,
+  SEW_16,
+  SEW_32,
+  SEW_64,
+  SEW_128,
+  SEW_256,
+  SEW_512,
+  SEW_1024,
+};
+
+enum VLMUL {
+  LMUL_1 = 0,
+  LMUL_2,
+  LMUL_4,
+  LMUL_8,
+  LMUL_RESERVED,
+  LMUL_F8,
+  LMUL_F4,
+  LMUL_F2
+};
+
+// Helper functions to read TSFlags.
+/// \returns the format of the instruction.
+static inline unsigned getFormat(uint64_t TSFlags) {
+  return (TSFlags & InstFormatMask) >> InstFormatShift;
+}
+/// \returns the constraint for the instruction.
+static inline VConstraintType getConstraint(uint64_t TSFlags) {
+  return static_cast<VConstraintType>
+      ((TSFlags & ConstraintMask) >> ConstraintShift);
+}
+/// \returns the LMUL for the instruction.
+static inline VLMUL getLMul(uint64_t TSFlags) {
+  return static_cast<VLMUL>((TSFlags & VLMulMask) >> VLMulShift);
+}
+/// \returns true if there is a dummy mask operand for the instruction.
+static inline bool hasDummyMaskOp(uint64_t TSFlags) {
+  return TSFlags & HasDummyMaskOpMask;
+}
+/// \returns true if tail agnostic is enforced for the instruction.
+static inline bool doesForceTailAgnostic(uint64_t TSFlags) {
+  return TSFlags & ForceTailAgnosticMask;
+}
+/// \returns true if there is a merge operand for the instruction.
+static inline bool hasMergeOp(uint64_t TSFlags) {
+  return TSFlags & HasMergeOpMask;
+}
+/// \returns true if there is a SEW operand for the instruction.
+static inline bool hasSEWOp(uint64_t TSFlags) {
+  return TSFlags & HasSEWOpMask;
+}
+/// \returns true if there is a VL operand for the instruction.
+static inline bool hasVLOp(uint64_t TSFlags) {
+  return TSFlags & HasVLOpMask;
+}
+
 // RISC-V Specific Machine Operand Flags
 enum {
   MO_None = 0,
@@ -260,28 +318,6 @@ void validate(const Triple &TT, const FeatureBitset &FeatureBits);
 
 } // namespace RISCVFeatures
 
-enum class RISCVVSEW {
-  SEW_8 = 0,
-  SEW_16,
-  SEW_32,
-  SEW_64,
-  SEW_128,
-  SEW_256,
-  SEW_512,
-  SEW_1024,
-};
-
-enum class RISCVVLMUL {
-  LMUL_1 = 0,
-  LMUL_2,
-  LMUL_4,
-  LMUL_8,
-  LMUL_RESERVED,
-  LMUL_F8,
-  LMUL_F4,
-  LMUL_F2
-};
-
 namespace RISCVVType {
 // Is this a SEW value that can be encoded into the VTYPE format.
 inline static bool isValidSEW(unsigned SEW) {
@@ -302,7 +338,7 @@ inline static bool isValidLMUL(unsigned LMUL, bool Fractional) {
 // 6 | vta | Vector tail agnostic
 // 5:3 | vsew[2:0] | Standard element width (SEW) setting
 // 2:0 | vlmul[2:0] | Vector register group multiplier (LMUL) setting
-inline static unsigned encodeVTYPE(RISCVVLMUL VLMUL, RISCVVSEW VSEW,
+inline static unsigned encodeVTYPE(RISCVII::VLMUL VLMUL, RISCVII::VSEW VSEW,
                                    bool TailAgnostic, bool MaskAgnostic) {
   unsigned VLMULBits = static_cast<unsigned>(VLMUL);
   unsigned VSEWBits = static_cast<unsigned>(VSEW);
@@ -315,14 +351,14 @@ inline static unsigned encodeVTYPE(RISCVVLMUL VLMUL, RISCVVSEW VSEW,
   return VTypeI;
 }
 
-inline static RISCVVLMUL getVLMUL(unsigned VType) {
+inline static RISCVII::VLMUL getVLMUL(unsigned VType) {
   unsigned VLMUL = VType & 0x7;
-  return static_cast<RISCVVLMUL>(VLMUL);
+  return static_cast<RISCVII::VLMUL>(VLMUL);
 }
 
-inline static RISCVVSEW getVSEW(unsigned VType) {
+inline static RISCVII::VSEW getVSEW(unsigned VType) {
   unsigned VSEW = (VType >> 3) & 0x7;
-  return static_cast<RISCVVSEW>(VSEW);
+  return static_cast<RISCVII::VSEW>(VSEW);
 }
 
 inline static bool isTailAgnostic(unsigned VType) { return VType & 0x40; }
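The bit layout in the comment above (vma in bit 7, vta in bit 6, vsew in bits 5:3, vlmul in bits 2:0) is enough to check the encode and decode helpers against each other. A standalone sketch, assuming that layout:

#include <cassert>

unsigned encodeVType(unsigned VLMulBits, unsigned VSewBits, bool TailAgnostic,
                     bool MaskAgnostic) {
  return VLMulBits | (VSewBits << 3) | (unsigned(TailAgnostic) << 6) |
         (unsigned(MaskAgnostic) << 7);
}

int main() {
  // e32 (vsew = 2, since SEW = 8 << vsew), m1 (vlmul = 0), tail agnostic:
  unsigned VType = encodeVType(0, 2, /*ta=*/true, /*ma=*/false);
  assert(VType == 0x50);             // 0b0101'0000
  assert((VType & 0x7) == 0);        // getVLMUL: vlmul bits
  assert(((VType >> 3) & 0x7) == 2); // getVSEW: vsew bits
  assert((VType & 0x40) != 0);       // isTailAgnostic: bit 6
  return 0;
}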
@@ -270,7 +270,7 @@ unsigned RISCVMCCodeEmitter::getImmOpValue(const MCInst &MI, unsigned OpNo,
   const MCOperand &MO = MI.getOperand(OpNo);
 
   MCInstrDesc const &Desc = MCII.get(MI.getOpcode());
-  unsigned MIFrm = Desc.TSFlags & RISCVII::InstFormatMask;
+  unsigned MIFrm = RISCVII::getFormat(Desc.TSFlags);
 
   // If the destination is an immediate, there is nothing to do.
   if (MO.isImm())
@@ -108,18 +108,18 @@ static SDValue createM4Tuple(SelectionDAG &CurDAG, ArrayRef<SDValue> Regs,
 }
 
 static SDValue createTuple(SelectionDAG &CurDAG, ArrayRef<SDValue> Regs,
-                           unsigned NF, RISCVVLMUL LMUL) {
+                           unsigned NF, RISCVII::VLMUL LMUL) {
   switch (LMUL) {
   default:
     llvm_unreachable("Invalid LMUL.");
-  case RISCVVLMUL::LMUL_F8:
-  case RISCVVLMUL::LMUL_F4:
-  case RISCVVLMUL::LMUL_F2:
-  case RISCVVLMUL::LMUL_1:
+  case RISCVII::VLMUL::LMUL_F8:
+  case RISCVII::VLMUL::LMUL_F4:
+  case RISCVII::VLMUL::LMUL_F2:
+  case RISCVII::VLMUL::LMUL_1:
     return createM1Tuple(CurDAG, Regs, NF);
-  case RISCVVLMUL::LMUL_2:
+  case RISCVII::VLMUL::LMUL_2:
     return createM2Tuple(CurDAG, Regs, NF);
-  case RISCVVLMUL::LMUL_4:
+  case RISCVII::VLMUL::LMUL_4:
     return createM4Tuple(CurDAG, Regs, NF);
   }
 }
@@ -166,7 +166,7 @@ void RISCVDAGToDAGISel::selectVLSEG(SDNode *Node, bool IsMasked,
   unsigned NF = Node->getNumValues() - 1;
   MVT VT = Node->getSimpleValueType(0);
   unsigned ScalarSize = VT.getScalarSizeInBits();
-  RISCVVLMUL LMUL = RISCVTargetLowering::getLMUL(VT);
+  RISCVII::VLMUL LMUL = RISCVTargetLowering::getLMUL(VT);
 
   unsigned CurOp = 2;
   SmallVector<SDValue, 8> Operands;
@@ -207,7 +207,7 @@ void RISCVDAGToDAGISel::selectVLSEGFF(SDNode *Node, bool IsMasked) {
   MVT VT = Node->getSimpleValueType(0);
   MVT XLenVT = Subtarget->getXLenVT();
   unsigned ScalarSize = VT.getScalarSizeInBits();
-  RISCVVLMUL LMUL = RISCVTargetLowering::getLMUL(VT);
+  RISCVII::VLMUL LMUL = RISCVTargetLowering::getLMUL(VT);
 
   unsigned CurOp = 2;
   SmallVector<SDValue, 7> Operands;
@@ -251,7 +251,7 @@ void RISCVDAGToDAGISel::selectVLXSEG(SDNode *Node, bool IsMasked,
   unsigned NF = Node->getNumValues() - 1;
   MVT VT = Node->getSimpleValueType(0);
   unsigned ScalarSize = VT.getScalarSizeInBits();
-  RISCVVLMUL LMUL = RISCVTargetLowering::getLMUL(VT);
+  RISCVII::VLMUL LMUL = RISCVTargetLowering::getLMUL(VT);
 
   unsigned CurOp = 2;
   SmallVector<SDValue, 8> Operands;
@@ -270,7 +270,7 @@ void RISCVDAGToDAGISel::selectVLXSEG(SDNode *Node, bool IsMasked,
   assert(VT.getVectorElementCount() == IndexVT.getVectorElementCount() &&
          "Element count mismatch");
 
-  RISCVVLMUL IndexLMUL = RISCVTargetLowering::getLMUL(IndexVT);
+  RISCVII::VLMUL IndexLMUL = RISCVTargetLowering::getLMUL(IndexVT);
   unsigned IndexScalarSize = IndexVT.getScalarSizeInBits();
   const RISCV::VLXSEGPseudo *P = RISCV::getVLXSEGPseudo(
       NF, IsMasked, IsOrdered, IndexScalarSize, static_cast<unsigned>(LMUL),
@@ -302,7 +302,7 @@ void RISCVDAGToDAGISel::selectVSSEG(SDNode *Node, bool IsMasked,
   NF--;
   MVT VT = Node->getOperand(2)->getSimpleValueType(0);
   unsigned ScalarSize = VT.getScalarSizeInBits();
-  RISCVVLMUL LMUL = RISCVTargetLowering::getLMUL(VT);
+  RISCVII::VLMUL LMUL = RISCVTargetLowering::getLMUL(VT);
   SmallVector<SDValue, 8> Regs(Node->op_begin() + 2, Node->op_begin() + 2 + NF);
   SDValue StoreVal = createTuple(*CurDAG, Regs, NF, LMUL);
 
@@ -332,7 +332,7 @@ void RISCVDAGToDAGISel::selectVSXSEG(SDNode *Node, bool IsMasked,
   --NF;
   MVT VT = Node->getOperand(2)->getSimpleValueType(0);
   unsigned ScalarSize = VT.getScalarSizeInBits();
-  RISCVVLMUL LMUL = RISCVTargetLowering::getLMUL(VT);
+  RISCVII::VLMUL LMUL = RISCVTargetLowering::getLMUL(VT);
   SmallVector<SDValue, 8> Regs(Node->op_begin() + 2, Node->op_begin() + 2 + NF);
   SDValue StoreVal = createTuple(*CurDAG, Regs, NF, LMUL);
 
@@ -347,7 +347,7 @@ void RISCVDAGToDAGISel::selectVSXSEG(SDNode *Node, bool IsMasked,
   assert(VT.getVectorElementCount() == IndexVT.getVectorElementCount() &&
          "Element count mismatch");
 
-  RISCVVLMUL IndexLMUL = RISCVTargetLowering::getLMUL(IndexVT);
+  RISCVII::VLMUL IndexLMUL = RISCVTargetLowering::getLMUL(IndexVT);
   unsigned IndexScalarSize = IndexVT.getScalarSizeInBits();
   const RISCV::VSXSEGPseudo *P = RISCV::getVSXSEGPseudo(
       NF, IsMasked, IsOrdered, IndexScalarSize, static_cast<unsigned>(LMUL),
@@ -455,37 +455,37 @@ void RISCVDAGToDAGISel::Select(SDNode *Node) {
     switch (RISCVTargetLowering::getLMUL(Src1VT)) {
     default:
       llvm_unreachable("Unexpected LMUL!");
-    case RISCVVLMUL::LMUL_F8:
+    case RISCVII::VLMUL::LMUL_F8:
       VMSLTOpcode =
           IsUnsigned ? RISCV::PseudoVMSLTU_VX_MF8 : RISCV::PseudoVMSLT_VX_MF8;
       VMNANDOpcode = RISCV::PseudoVMNAND_MM_MF8;
       break;
-    case RISCVVLMUL::LMUL_F4:
+    case RISCVII::VLMUL::LMUL_F4:
       VMSLTOpcode =
          IsUnsigned ? RISCV::PseudoVMSLTU_VX_MF4 : RISCV::PseudoVMSLT_VX_MF4;
       VMNANDOpcode = RISCV::PseudoVMNAND_MM_MF4;
       break;
-    case RISCVVLMUL::LMUL_F2:
+    case RISCVII::VLMUL::LMUL_F2:
       VMSLTOpcode =
          IsUnsigned ? RISCV::PseudoVMSLTU_VX_MF2 : RISCV::PseudoVMSLT_VX_MF2;
       VMNANDOpcode = RISCV::PseudoVMNAND_MM_MF2;
       break;
-    case RISCVVLMUL::LMUL_1:
+    case RISCVII::VLMUL::LMUL_1:
       VMSLTOpcode =
          IsUnsigned ? RISCV::PseudoVMSLTU_VX_M1 : RISCV::PseudoVMSLT_VX_M1;
       VMNANDOpcode = RISCV::PseudoVMNAND_MM_M1;
       break;
-    case RISCVVLMUL::LMUL_2:
+    case RISCVII::VLMUL::LMUL_2:
       VMSLTOpcode =
          IsUnsigned ? RISCV::PseudoVMSLTU_VX_M2 : RISCV::PseudoVMSLT_VX_M2;
       VMNANDOpcode = RISCV::PseudoVMNAND_MM_M2;
       break;
-    case RISCVVLMUL::LMUL_4:
+    case RISCVII::VLMUL::LMUL_4:
       VMSLTOpcode =
          IsUnsigned ? RISCV::PseudoVMSLTU_VX_M4 : RISCV::PseudoVMSLT_VX_M4;
       VMNANDOpcode = RISCV::PseudoVMNAND_MM_M4;
       break;
-    case RISCVVLMUL::LMUL_8:
+    case RISCVII::VLMUL::LMUL_8:
       VMSLTOpcode =
          IsUnsigned ? RISCV::PseudoVMSLTU_VX_M8 : RISCV::PseudoVMSLT_VX_M8;
       VMNANDOpcode = RISCV::PseudoVMNAND_MM_M8;
@@ -524,7 +524,7 @@ void RISCVDAGToDAGISel::Select(SDNode *Node) {
     switch (RISCVTargetLowering::getLMUL(Src1VT)) {
     default:
       llvm_unreachable("Unexpected LMUL!");
-    case RISCVVLMUL::LMUL_F8:
+    case RISCVII::VLMUL::LMUL_F8:
       VMSLTOpcode =
           IsUnsigned ? RISCV::PseudoVMSLTU_VX_MF8 : RISCV::PseudoVMSLT_VX_MF8;
       VMSLTMaskOpcode = IsUnsigned ? RISCV::PseudoVMSLTU_VX_MF8_MASK
@@ -532,7 +532,7 @@ void RISCVDAGToDAGISel::Select(SDNode *Node) {
       VMXOROpcode = RISCV::PseudoVMXOR_MM_MF8;
       VMANDNOTOpcode = RISCV::PseudoVMANDNOT_MM_MF8;
       break;
-    case RISCVVLMUL::LMUL_F4:
+    case RISCVII::VLMUL::LMUL_F4:
       VMSLTOpcode =
           IsUnsigned ? RISCV::PseudoVMSLTU_VX_MF4 : RISCV::PseudoVMSLT_VX_MF4;
       VMSLTMaskOpcode = IsUnsigned ? RISCV::PseudoVMSLTU_VX_MF4_MASK
@@ -540,7 +540,7 @@ void RISCVDAGToDAGISel::Select(SDNode *Node) {
       VMXOROpcode = RISCV::PseudoVMXOR_MM_MF4;
       VMANDNOTOpcode = RISCV::PseudoVMANDNOT_MM_MF4;
       break;
-    case RISCVVLMUL::LMUL_F2:
+    case RISCVII::VLMUL::LMUL_F2:
       VMSLTOpcode =
           IsUnsigned ? RISCV::PseudoVMSLTU_VX_MF2 : RISCV::PseudoVMSLT_VX_MF2;
       VMSLTMaskOpcode = IsUnsigned ? RISCV::PseudoVMSLTU_VX_MF2_MASK
@@ -548,7 +548,7 @@ void RISCVDAGToDAGISel::Select(SDNode *Node) {
       VMXOROpcode = RISCV::PseudoVMXOR_MM_MF2;
       VMANDNOTOpcode = RISCV::PseudoVMANDNOT_MM_MF2;
       break;
-    case RISCVVLMUL::LMUL_1:
+    case RISCVII::VLMUL::LMUL_1:
       VMSLTOpcode =
           IsUnsigned ? RISCV::PseudoVMSLTU_VX_M1 : RISCV::PseudoVMSLT_VX_M1;
       VMSLTMaskOpcode = IsUnsigned ? RISCV::PseudoVMSLTU_VX_M1_MASK
@@ -556,7 +556,7 @@ void RISCVDAGToDAGISel::Select(SDNode *Node) {
       VMXOROpcode = RISCV::PseudoVMXOR_MM_M1;
       VMANDNOTOpcode = RISCV::PseudoVMANDNOT_MM_M1;
       break;
-    case RISCVVLMUL::LMUL_2:
+    case RISCVII::VLMUL::LMUL_2:
       VMSLTOpcode =
           IsUnsigned ? RISCV::PseudoVMSLTU_VX_M2 : RISCV::PseudoVMSLT_VX_M2;
       VMSLTMaskOpcode = IsUnsigned ? RISCV::PseudoVMSLTU_VX_M2_MASK
@@ -564,7 +564,7 @@ void RISCVDAGToDAGISel::Select(SDNode *Node) {
       VMXOROpcode = RISCV::PseudoVMXOR_MM_M2;
       VMANDNOTOpcode = RISCV::PseudoVMANDNOT_MM_M2;
       break;
-    case RISCVVLMUL::LMUL_4:
+    case RISCVII::VLMUL::LMUL_4:
       VMSLTOpcode =
           IsUnsigned ? RISCV::PseudoVMSLTU_VX_M4 : RISCV::PseudoVMSLT_VX_M4;
       VMSLTMaskOpcode = IsUnsigned ? RISCV::PseudoVMSLTU_VX_M4_MASK
@@ -572,7 +572,7 @@ void RISCVDAGToDAGISel::Select(SDNode *Node) {
       VMXOROpcode = RISCV::PseudoVMXOR_MM_M4;
       VMANDNOTOpcode = RISCV::PseudoVMANDNOT_MM_M4;
       break;
-    case RISCVVLMUL::LMUL_8:
+    case RISCVII::VLMUL::LMUL_8:
       VMSLTOpcode =
           IsUnsigned ? RISCV::PseudoVMSLTU_VX_M8 : RISCV::PseudoVMSLT_VX_M8;
       VMSLTMaskOpcode = IsUnsigned ? RISCV::PseudoVMSLTU_VX_M8_MASK
@@ -630,9 +630,9 @@ void RISCVDAGToDAGISel::Select(SDNode *Node) {
     assert(Node->getNumOperands() == Offset + 2 &&
            "Unexpected number of operands");
 
-    RISCVVSEW VSEW =
-        static_cast<RISCVVSEW>(Node->getConstantOperandVal(Offset) & 0x7);
-    RISCVVLMUL VLMul = static_cast<RISCVVLMUL>(
+    RISCVII::VSEW VSEW =
+        static_cast<RISCVII::VSEW>(Node->getConstantOperandVal(Offset) & 0x7);
+    RISCVII::VLMUL VLMul = static_cast<RISCVII::VLMUL>(
         Node->getConstantOperandVal(Offset + 1) & 0x7);
 
     unsigned VTypeI = RISCVVType::encodeVTYPE(
@@ -785,8 +785,8 @@ void RISCVDAGToDAGISel::Select(SDNode *Node) {
     assert(VT.getVectorElementCount() == IndexVT.getVectorElementCount() &&
            "Element count mismatch");
 
-    RISCVVLMUL LMUL = RISCVTargetLowering::getLMUL(VT);
-    RISCVVLMUL IndexLMUL = RISCVTargetLowering::getLMUL(IndexVT);
+    RISCVII::VLMUL LMUL = RISCVTargetLowering::getLMUL(VT);
+    RISCVII::VLMUL IndexLMUL = RISCVTargetLowering::getLMUL(IndexVT);
     unsigned IndexScalarSize = IndexVT.getScalarSizeInBits();
     const RISCV::VLX_VSXPseudo *P = RISCV::getVLXPseudo(
         IsMasked, IsOrdered, IndexScalarSize, static_cast<unsigned>(LMUL),
@@ -823,7 +823,7 @@ void RISCVDAGToDAGISel::Select(SDNode *Node) {
     addVectorLoadStoreOperands(Node, SEW, DL, CurOp, IsMasked, IsStrided,
                                Operands);
 
-    RISCVVLMUL LMUL = RISCVTargetLowering::getLMUL(VT);
+    RISCVII::VLMUL LMUL = RISCVTargetLowering::getLMUL(VT);
     const RISCV::VLEPseudo *P =
         RISCV::getVLEPseudo(IsMasked, IsStrided, /*FF*/ false, ScalarSize,
                             static_cast<unsigned>(LMUL));
@@ -851,7 +851,7 @@ void RISCVDAGToDAGISel::Select(SDNode *Node) {
     addVectorLoadStoreOperands(Node, ScalarSize, DL, CurOp, IsMasked,
                                /*IsStridedOrIndexed*/ false, Operands);
 
-    RISCVVLMUL LMUL = RISCVTargetLowering::getLMUL(VT);
+    RISCVII::VLMUL LMUL = RISCVTargetLowering::getLMUL(VT);
     const RISCV::VLEPseudo *P =
         RISCV::getVLEPseudo(IsMasked, /*Strided*/ false, /*FF*/ true,
                             ScalarSize, static_cast<unsigned>(LMUL));
@@ -976,8 +976,8 @@ void RISCVDAGToDAGISel::Select(SDNode *Node) {
     assert(VT.getVectorElementCount() == IndexVT.getVectorElementCount() &&
            "Element count mismatch");
 
-    RISCVVLMUL LMUL = RISCVTargetLowering::getLMUL(VT);
-    RISCVVLMUL IndexLMUL = RISCVTargetLowering::getLMUL(IndexVT);
+    RISCVII::VLMUL LMUL = RISCVTargetLowering::getLMUL(VT);
+    RISCVII::VLMUL IndexLMUL = RISCVTargetLowering::getLMUL(IndexVT);
     unsigned IndexScalarSize = IndexVT.getScalarSizeInBits();
     const RISCV::VLX_VSXPseudo *P = RISCV::getVSXPseudo(
         IsMasked, IsOrdered, IndexScalarSize, static_cast<unsigned>(LMUL),
@@ -1013,7 +1013,7 @@ void RISCVDAGToDAGISel::Select(SDNode *Node) {
     addVectorLoadStoreOperands(Node, SEW, DL, CurOp, IsMasked, IsStrided,
                                Operands);
 
-    RISCVVLMUL LMUL = RISCVTargetLowering::getLMUL(VT);
+    RISCVII::VLMUL LMUL = RISCVTargetLowering::getLMUL(VT);
     const RISCV::VSEPseudo *P = RISCV::getVSEPseudo(
         IsMasked, IsStrided, ScalarSize, static_cast<unsigned>(LMUL));
     MachineSDNode *Store =
@@ -1066,10 +1066,10 @@ void RISCVDAGToDAGISel::Select(SDNode *Node) {
     if (Idx != 0)
       break;
 
-    RISCVVLMUL SubVecLMUL = RISCVTargetLowering::getLMUL(SubVecContainerVT);
-    bool IsSubVecPartReg = SubVecLMUL == RISCVVLMUL::LMUL_F2 ||
-                           SubVecLMUL == RISCVVLMUL::LMUL_F4 ||
-                           SubVecLMUL == RISCVVLMUL::LMUL_F8;
+    RISCVII::VLMUL SubVecLMUL = RISCVTargetLowering::getLMUL(SubVecContainerVT);
+    bool IsSubVecPartReg = SubVecLMUL == RISCVII::VLMUL::LMUL_F2 ||
+                           SubVecLMUL == RISCVII::VLMUL::LMUL_F4 ||
+                           SubVecLMUL == RISCVII::VLMUL::LMUL_F8;
     (void)IsSubVecPartReg; // Silence unused variable warning without asserts.
     assert((!IsSubVecPartReg || V.isUndef()) &&
            "Expecting lowering to have created legal INSERT_SUBVECTORs when "
@@ -1162,7 +1162,7 @@ void RISCVDAGToDAGISel::Select(SDNode *Node) {
                      CurDAG->getRegister(RISCV::X0, XLenVT), VL, SEW,
                      Ld->getChain()};
 
-    RISCVVLMUL LMUL = RISCVTargetLowering::getLMUL(VT);
+    RISCVII::VLMUL LMUL = RISCVTargetLowering::getLMUL(VT);
     const RISCV::VLEPseudo *P = RISCV::getVLEPseudo(
         /*IsMasked*/ false, /*IsStrided*/ true, /*FF*/ false, ScalarSize,
         static_cast<unsigned>(LMUL));
@@ -1033,7 +1033,7 @@ static unsigned getBranchOpcodeForIntCondCode(ISD::CondCode CC) {
   }
 }
 
-RISCVVLMUL RISCVTargetLowering::getLMUL(MVT VT) {
+RISCVII::VLMUL RISCVTargetLowering::getLMUL(MVT VT) {
   assert(VT.isScalableVector() && "Expecting a scalable vector type");
   unsigned KnownSize = VT.getSizeInBits().getKnownMinValue();
   if (VT.getVectorElementType() == MVT::i1)
@@ -1043,54 +1043,56 @@ RISCVVLMUL RISCVTargetLowering::getLMUL(MVT VT) {
   default:
     llvm_unreachable("Invalid LMUL.");
   case 8:
-    return RISCVVLMUL::LMUL_F8;
+    return RISCVII::VLMUL::LMUL_F8;
   case 16:
-    return RISCVVLMUL::LMUL_F4;
+    return RISCVII::VLMUL::LMUL_F4;
   case 32:
-    return RISCVVLMUL::LMUL_F2;
+    return RISCVII::VLMUL::LMUL_F2;
   case 64:
-    return RISCVVLMUL::LMUL_1;
+    return RISCVII::VLMUL::LMUL_1;
   case 128:
-    return RISCVVLMUL::LMUL_2;
+    return RISCVII::VLMUL::LMUL_2;
   case 256:
-    return RISCVVLMUL::LMUL_4;
+    return RISCVII::VLMUL::LMUL_4;
   case 512:
-    return RISCVVLMUL::LMUL_8;
+    return RISCVII::VLMUL::LMUL_8;
   }
 }
 
-unsigned RISCVTargetLowering::getRegClassIDForLMUL(RISCVVLMUL LMul) {
+unsigned RISCVTargetLowering::getRegClassIDForLMUL(RISCVII::VLMUL LMul) {
   switch (LMul) {
   default:
     llvm_unreachable("Invalid LMUL.");
-  case RISCVVLMUL::LMUL_F8:
-  case RISCVVLMUL::LMUL_F4:
-  case RISCVVLMUL::LMUL_F2:
-  case RISCVVLMUL::LMUL_1:
+  case RISCVII::VLMUL::LMUL_F8:
+  case RISCVII::VLMUL::LMUL_F4:
+  case RISCVII::VLMUL::LMUL_F2:
+  case RISCVII::VLMUL::LMUL_1:
     return RISCV::VRRegClassID;
-  case RISCVVLMUL::LMUL_2:
+  case RISCVII::VLMUL::LMUL_2:
    return RISCV::VRM2RegClassID;
-  case RISCVVLMUL::LMUL_4:
+  case RISCVII::VLMUL::LMUL_4:
    return RISCV::VRM4RegClassID;
-  case RISCVVLMUL::LMUL_8:
+  case RISCVII::VLMUL::LMUL_8:
    return RISCV::VRM8RegClassID;
   }
 }
 
 unsigned RISCVTargetLowering::getSubregIndexByMVT(MVT VT, unsigned Index) {
-  RISCVVLMUL LMUL = getLMUL(VT);
-  if (LMUL == RISCVVLMUL::LMUL_F8 || LMUL == RISCVVLMUL::LMUL_F4 ||
-      LMUL == RISCVVLMUL::LMUL_F2 || LMUL == RISCVVLMUL::LMUL_1) {
+  RISCVII::VLMUL LMUL = getLMUL(VT);
+  if (LMUL == RISCVII::VLMUL::LMUL_F8 ||
+      LMUL == RISCVII::VLMUL::LMUL_F4 ||
+      LMUL == RISCVII::VLMUL::LMUL_F2 ||
+      LMUL == RISCVII::VLMUL::LMUL_1) {
     static_assert(RISCV::sub_vrm1_7 == RISCV::sub_vrm1_0 + 7,
                   "Unexpected subreg numbering");
     return RISCV::sub_vrm1_0 + Index;
   }
-  if (LMUL == RISCVVLMUL::LMUL_2) {
+  if (LMUL == RISCVII::VLMUL::LMUL_2) {
     static_assert(RISCV::sub_vrm2_3 == RISCV::sub_vrm2_0 + 3,
                   "Unexpected subreg numbering");
     return RISCV::sub_vrm2_0 + Index;
   }
-  if (LMUL == RISCVVLMUL::LMUL_4) {
+  if (LMUL == RISCVII::VLMUL::LMUL_4) {
     static_assert(RISCV::sub_vrm4_1 == RISCV::sub_vrm4_0 + 1,
                   "Unexpected subreg numbering");
     return RISCV::sub_vrm4_0 + Index;
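For context on getLMUL above (again, not part of the patch): the known minimum size of a scalable type is compared against one vector register's 64-bit minimum, so 64 bits corresponds to LMUL=1, smaller sizes become fractional register groups, and larger sizes become multi-register groups. A sketch of that mapping:

enum VLMUL { LMUL_1 = 0, LMUL_2, LMUL_4, LMUL_8,
             LMUL_RESERVED, LMUL_F8, LMUL_F4, LMUL_F2 };

VLMUL lmulForKnownMinSize(unsigned KnownSize) {
  switch (KnownSize) {
  case 8:   return LMUL_F8; // 1/8 of a vector register
  case 16:  return LMUL_F4;
  case 32:  return LMUL_F2;
  case 64:  return LMUL_1;  // exactly one register
  case 128: return LMUL_2;
  case 256: return LMUL_4;
  case 512: return LMUL_8;  // an eight-register group
  default:  return LMUL_RESERVED; // invalid size for this sketch
  }
}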
@@ -3721,10 +3723,10 @@ SDValue RISCVTargetLowering::lowerINSERT_SUBVECTOR(SDValue Op,
       RISCVTargetLowering::decomposeSubvectorInsertExtractToSubRegs(
           VecVT, SubVecVT, OrigIdx, TRI);
 
-  RISCVVLMUL SubVecLMUL = RISCVTargetLowering::getLMUL(SubVecVT);
-  bool IsSubVecPartReg = SubVecLMUL == RISCVVLMUL::LMUL_F2 ||
-                         SubVecLMUL == RISCVVLMUL::LMUL_F4 ||
-                         SubVecLMUL == RISCVVLMUL::LMUL_F8;
+  RISCVII::VLMUL SubVecLMUL = RISCVTargetLowering::getLMUL(SubVecVT);
+  bool IsSubVecPartReg = SubVecLMUL == RISCVII::VLMUL::LMUL_F2 ||
+                         SubVecLMUL == RISCVII::VLMUL::LMUL_F4 ||
+                         SubVecLMUL == RISCVII::VLMUL::LMUL_F8;
 
   // 1. If the Idx has been completely eliminated and this subvector's size is
   // a vector register or a multiple thereof, or the surrounding elements are
@@ -6447,14 +6449,15 @@ static MachineInstr *elideCopies(MachineInstr *MI,
 
 static MachineBasicBlock *addVSetVL(MachineInstr &MI, MachineBasicBlock *BB,
                                     int VLIndex, unsigned SEWIndex,
-                                    RISCVVLMUL VLMul, bool ForceTailAgnostic) {
+                                    RISCVII::VLMUL VLMul,
+                                    bool ForceTailAgnostic) {
   MachineFunction &MF = *BB->getParent();
   DebugLoc DL = MI.getDebugLoc();
   const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();
 
   unsigned Log2SEW = MI.getOperand(SEWIndex).getImm();
   assert(RISCVVType::isValidSEW(1 << Log2SEW) && "Unexpected SEW");
-  RISCVVSEW ElementWidth = static_cast<RISCVVSEW>(Log2SEW - 3);
+  RISCVII::VSEW ElementWidth = static_cast<RISCVII::VSEW>(Log2SEW - 3);
 
   MachineRegisterInfo &MRI = MF.getRegInfo();
 
@@ -6522,14 +6525,13 @@ RISCVTargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI,
                                                  MachineBasicBlock *BB) const {
   uint64_t TSFlags = MI.getDesc().TSFlags;
 
-  if (TSFlags & RISCVII::HasSEWOpMask) {
+  if (RISCVII::hasSEWOp(TSFlags)) {
     unsigned NumOperands = MI.getNumExplicitOperands();
-    int VLIndex = (TSFlags & RISCVII::HasVLOpMask) ? NumOperands - 2 : -1;
+    int VLIndex = RISCVII::hasVLOp(TSFlags) ? NumOperands - 2 : -1;
     unsigned SEWIndex = NumOperands - 1;
-    bool ForceTailAgnostic = TSFlags & RISCVII::ForceTailAgnosticMask;
+    bool ForceTailAgnostic = RISCVII::doesForceTailAgnostic(TSFlags);
 
-    RISCVVLMUL VLMul = static_cast<RISCVVLMUL>((TSFlags & RISCVII::VLMulMask) >>
-                                               RISCVII::VLMulShift);
+    RISCVII::VLMUL VLMul = RISCVII::getLMul(TSFlags);
     return addVSetVL(MI, BB, VLIndex, SEWIndex, VLMul, ForceTailAgnostic);
   }
 
@@ -468,8 +468,8 @@ public:
                              MVT PartVT, EVT ValueVT,
                              Optional<CallingConv::ID> CC) const override;
 
-  static RISCVVLMUL getLMUL(MVT VT);
-  static unsigned getRegClassIDForLMUL(RISCVVLMUL LMul);
+  static RISCVII::VLMUL getLMUL(MVT VT);
+  static unsigned getRegClassIDForLMUL(RISCVII::VLMUL LMul);
   static unsigned getSubregIndexByMVT(MVT VT, unsigned Index);
   static unsigned getRegClassIDForVecVT(MVT VT);
   static std::pair<unsigned, unsigned>
@@ -155,13 +155,13 @@ static bool lowerRISCVVMachineInstrToMCInst(const MachineInstr *MI,
     assert(OpNo >= 0 && "Operand number doesn't fit in an 'int' type");
 
     // Skip VL and SEW operands which are the last two operands if present.
-    if ((TSFlags & RISCVII::HasVLOpMask) && OpNo == (NumOps - 2))
+    if (RISCVII::hasVLOp(TSFlags) && OpNo == (NumOps - 2))
       continue;
-    if ((TSFlags & RISCVII::HasSEWOpMask) && OpNo == (NumOps - 1))
+    if (RISCVII::hasSEWOp(TSFlags) && OpNo == (NumOps - 1))
       continue;
 
     // Skip merge op. It should be the first operand after the result.
-    if ((TSFlags & RISCVII::HasMergeOpMask) && OpNo == 1) {
+    if (RISCVII::hasMergeOp(TSFlags) && OpNo == 1) {
       assert(MI->getNumExplicitDefs() == 1);
       continue;
     }
@@ -198,7 +198,7 @@ static bool lowerRISCVVMachineInstrToMCInst(const MachineInstr *MI,
 
   // Unmasked pseudo instructions need to append dummy mask operand to
   // V instructions. All V instructions are modeled as the masked version.
-  if (TSFlags & RISCVII::HasDummyMaskOpMask)
+  if (RISCVII::hasDummyMaskOp(TSFlags))
     OutMI.addOperand(MCOperand::createReg(RISCV::NoRegister));
 
   return true;