2003-10-06 03:27:59 +08:00
|
|
|
//===- CodeEmitterGen.cpp - Code Emitter Generator ------------------------===//
|
2005-04-22 08:00:37 +08:00
|
|
|
//
|
2019-01-19 16:50:56 +08:00
|
|
|
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
|
|
|
|
// See https://llvm.org/LICENSE.txt for license information.
|
|
|
|
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
|
2005-04-22 08:00:37 +08:00
|
|
|
//
|
2003-10-21 04:20:30 +08:00
|
|
|
//===----------------------------------------------------------------------===//
|
2003-10-06 03:27:59 +08:00
|
|
|
//
|
2004-08-05 06:07:54 +08:00
|
|
|
// CodeEmitterGen uses the descriptions of instructions and their fields to
|
|
|
|
// construct an automated code emitter: a function that, given a MachineInstr,
|
|
|
|
// returns the (currently, 32-bit unsigned) value of the instruction.
|
2003-10-06 03:27:59 +08:00
|
|
|
//
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
|
2016-12-01 01:48:10 +08:00
|
|
|
#include "CodeGenInstruction.h"
|
2004-08-10 03:10:43 +08:00
|
|
|
#include "CodeGenTarget.h"
|
Check that emitted instructions meet their predicates on all targets except ARM, Mips, and X86.
Summary:
* ARM is omitted from this patch because this check appears to expose bugs in this target.
* Mips is omitted from this patch because this check either detects bugs or deliberate
emission of instructions that don't satisfy their predicates. One deliberate
use is the SYNC instruction where the version with an operand is correctly
defined as requiring MIPS32 while the version without an operand is defined
as an alias of 'SYNC 0' and requires MIPS2.
* X86 is omitted from this patch because it doesn't use the tablegen-erated
MCCodeEmitter infrastructure.
Patches for ARM and Mips will follow.
Depends on D25617
Reviewers: tstellarAMD, jmolloy
Subscribers: wdng, jmolloy, aemerson, rengolin, arsenm, jyknight, nemanjai, nhaehnle, tstellarAMD, llvm-commits
Differential Revision: https://reviews.llvm.org/D25618
llvm-svn: 287439
2016-11-19 21:05:44 +08:00
|
|
|
#include "SubtargetFeatureInfo.h"
|
2019-03-12 01:04:35 +08:00
|
|
|
#include "Types.h"
|
[CodeEmitter] Support instruction widths > 64 bits
Some VLIW instruction sets are Very Long Indeed. Using uint64_t constricts the Inst encoding to 64 bits (naturally).
This change switches CodeEmitter to a mode that uses APInts when Inst's bitwidth is > 64 bits (NFC for existing targets).
When Inst.BitWidth > 64 the prototype changes to:
void TargetMCCodeEmitter::getBinaryCodeForInstr(const MCInst &MI,
SmallVectorImpl<MCFixup> &Fixups,
APInt &Inst,
APInt &Scratch,
const MCSubtargetInfo &STI);
The Inst parameter returns the encoded instruction, the Scratch parameter is used internally for manipulating operands and is exposed so that the underlying storage can be reused between calls to getBinaryCodeForInstr. The goal is to elide any APInt constructions that we can.
Similarly the operand encoding prototype changes to:
getMachineOpValue(const MCInst &MI, const MCOperand &MO, APInt &op, SmallVectorImpl<MCFixup> &Fixups, const MCSubtargetInfo &STI);
That is, the operand is passed by reference as APInt rather than returned as uint64_t.
To reiterate, this APInt mode is enabled only when Inst.BitWidth > 64, so this change is NFC for existing targets.
llvm-svn: 371928
2019-09-15 16:35:08 +08:00
|
|
|
#include "llvm/ADT/APInt.h"
|
2016-12-01 01:48:10 +08:00
|
|
|
#include "llvm/ADT/ArrayRef.h"
|
2006-07-14 05:02:53 +08:00
|
|
|
#include "llvm/ADT/StringExtras.h"
|
2016-12-01 01:48:10 +08:00
|
|
|
#include "llvm/Support/Casting.h"
|
|
|
|
#include "llvm/Support/raw_ostream.h"
|
2012-12-04 18:37:14 +08:00
|
|
|
#include "llvm/TableGen/Record.h"
|
2012-06-11 23:37:55 +08:00
|
|
|
#include "llvm/TableGen/TableGenBackend.h"
|
2016-12-01 01:48:10 +08:00
|
|
|
#include <cassert>
|
|
|
|
#include <cstdint>
|
2010-12-13 09:05:54 +08:00
|
|
|
#include <map>
|
2016-12-01 01:48:10 +08:00
|
|
|
#include <set>
|
2012-06-11 23:37:55 +08:00
|
|
|
#include <string>
|
2016-12-01 01:48:10 +08:00
|
|
|
#include <utility>
|
2012-06-11 23:37:55 +08:00
|
|
|
#include <vector>
|
2016-12-01 01:48:10 +08:00
|
|
|
|
2004-08-01 11:55:39 +08:00
|
|
|
using namespace llvm;
|
2003-11-12 06:41:34 +08:00
|
|
|
|
2012-06-11 23:37:55 +08:00
|
|
|
namespace {
|
|
|
|
|
|
|
|
// TableGen backend that generates getBinaryCodeForInstr(): a function that
// encodes an MCInst into its binary instruction value using the 'Inst' bit
// patterns of each instruction record.
class CodeEmitterGen {
  // All parsed TableGen records for the target description.
  RecordKeeper &Records;

public:
  CodeEmitterGen(RecordKeeper &R) : Records(R) {}

  // Entry point: emit the generated code emitter to 'o'.
  void run(raw_ostream &o);

private:
  // If the bit at position 'bit' of 'BI' is sourced from the variable named
  // 'VarName', return that bit's position within the variable; otherwise -1.
  int getVariableBit(const std::string &VarName, BitsInit *BI, int bit);
  // Build the body of the switch case that encodes instruction record 'R'.
  std::string getInstructionCase(Record *R, CodeGenTarget &Target);
  // As above, but for one specific encoding record (used per-HwMode when an
  // instruction has multiple encodings).
  std::string getInstructionCaseForEncoding(Record *R, Record *EncodingDef,
                                            CodeGenTarget &Target);
  // Append to 'Case' the code that fetches operand 'VarName' of 'R' and ORs
  // its bits into the instruction value. 'NumberedOp' tracks the next
  // positionally-assigned operand; 'NamedOpIndices' holds operands already
  // claimed by name so positional assignment can skip them.
  void AddCodeToMergeInOperand(Record *R, BitsInit *BI,
                               const std::string &VarName,
                               unsigned &NumberedOp,
                               std::set<unsigned> &NamedOpIndices,
                               std::string &Case, CodeGenTarget &Target);

  // Emit the table of fixed (non-operand) encoding bits for each instruction.
  // HwMode == -1 means the target does not use per-HwMode encodings.
  void emitInstructionBaseValues(
      raw_ostream &o, ArrayRef<const CodeGenInstruction *> NumberedInstructions,
      CodeGenTarget &Target, int HwMode = -1);
  // Width in bits of the widest 'Inst' field seen.
  unsigned BitWidth;
  // True when BitWidth > 64, switching the generated code to APInt mode.
  bool UseAPInt;
};
|
|
|
|
|
2006-07-14 06:17:08 +08:00
|
|
|
// If the VarBitInit at position 'bit' matches the specified variable then
|
|
|
|
// return the variable bit position. Otherwise return -1.
|
2009-12-16 04:21:44 +08:00
|
|
|
int CodeEmitterGen::getVariableBit(const std::string &VarName,
|
2011-07-30 06:43:06 +08:00
|
|
|
BitsInit *BI, int bit) {
|
2012-10-11 04:24:43 +08:00
|
|
|
if (VarBitInit *VBI = dyn_cast<VarBitInit>(BI->getBit(bit))) {
|
|
|
|
if (VarInit *VI = dyn_cast<VarInit>(VBI->getBitVar()))
|
2010-11-15 14:42:13 +08:00
|
|
|
if (VI->getName() == VarName)
|
|
|
|
return VBI->getBitNum();
|
2012-10-11 04:24:43 +08:00
|
|
|
} else if (VarInit *VI = dyn_cast<VarInit>(BI->getBit(bit))) {
|
2011-04-29 01:51:45 +08:00
|
|
|
if (VI->getName() == VarName)
|
|
|
|
return 0;
|
|
|
|
}
|
2010-10-08 00:56:28 +08:00
|
|
|
|
2006-07-14 05:02:53 +08:00
|
|
|
return -1;
|
2010-10-08 00:56:28 +08:00
|
|
|
}
|
2006-07-14 05:02:53 +08:00
|
|
|
|
2010-11-15 14:59:17 +08:00
|
|
|
// Append to 'Case' the generated C++ that fetches the encoding of operand
// 'VarName' and merges its bits into the instruction value 'Value'.
// Mutates 'NumberedOp' (next positional operand) as a side effect.
void CodeEmitterGen::
AddCodeToMergeInOperand(Record *R, BitsInit *BI, const std::string &VarName,
                        unsigned &NumberedOp,
                        std::set<unsigned> &NamedOpIndices,
                        std::string &Case, CodeGenTarget &Target) {
  CodeGenInstruction &CGI = Target.getInstruction(R);

  // Determine if VarName actually contributes to the Inst encoding.
  int bit = BI->getNumBits()-1;

  // Scan for a bit that this contributed to.
  for (; bit >= 0; ) {
    if (getVariableBit(VarName, BI, bit) != -1)
      break;

    --bit;
  }

  // If we found no bits, ignore this value, otherwise emit the call to get the
  // operand encoding.
  if (bit < 0) return;

  // If the operand matches by name, reference according to that
  // operand number. Non-matching operands are assumed to be in
  // order.
  unsigned OpIdx;
  if (CGI.Operands.hasOperandNamed(VarName, OpIdx)) {
    // Get the machine operand number for the indicated operand.
    OpIdx = CGI.Operands[OpIdx].MIOperandNo;
    assert(!CGI.Operands.isFlatOperandNotEmitted(OpIdx) &&
           "Explicitly used operand also marked as not emitted!");
  } else {
    unsigned NumberOps = CGI.Operands.size();
    /// If this operand is not supposed to be emitted by the
    /// generated emitter, skip it.
    // Also skip operands already claimed by name-based matching
    // (NamedOpIndices), so positional assignment doesn't reuse them.
    while (NumberedOp < NumberOps &&
           (CGI.Operands.isFlatOperandNotEmitted(NumberedOp) ||
            (!NamedOpIndices.empty() && NamedOpIndices.count(
              CGI.Operands.getSubOperandNumber(NumberedOp).first)))) {
      ++NumberedOp;

      // Ran past the last flat machine operand: the record references a
      // variable that no remaining operand can satisfy. Report and bail.
      if (NumberedOp >= CGI.Operands.back().MIOperandNo +
                        CGI.Operands.back().MINumOperands) {
        errs() << "Too few operands in record " << R->getName() <<
                  " (no match for variable " << VarName << "):\n";
        errs() << *R;
        errs() << '\n';

        return;
      }
    }

    OpIdx = NumberedOp++;
  }

  std::pair<unsigned, unsigned> SO = CGI.Operands.getSubOperandNumber(OpIdx);
  std::string &EncoderMethodName = CGI.Operands[SO.first].EncoderMethodName;

  // In APInt mode 'op' is a reusable APInt scratch buffer; clear it since the
  // encoder callbacks OR bits in rather than assigning.
  if (UseAPInt)
    Case += " op.clearAllBits();\n";

  // If the source operand has a custom encoder, use it. This will
  // get the encoding for all of the suboperands.
  if (!EncoderMethodName.empty()) {
    // A custom encoder has all of the information for the
    // sub-operands, if there are more than one, so only
    // query the encoder once per source operand.
    if (SO.second == 0) {
      Case += " // op: " + VarName + "\n";
      if (UseAPInt) {
        // APInt mode passes 'op' by reference as an out-parameter.
        Case += " " + EncoderMethodName + "(MI, " + utostr(OpIdx);
        Case += ", op";
      } else {
        // uint64_t mode receives the encoding as a return value.
        Case += " op = " + EncoderMethodName + "(MI, " + utostr(OpIdx);
      }
      Case += ", Fixups, STI);\n";
    }
  } else {
    // No custom encoder: fall back to the generic getMachineOpValue().
    Case += " // op: " + VarName + "\n";
    if (UseAPInt) {
      Case += " getMachineOpValue(MI, MI.getOperand(" + utostr(OpIdx) + ")";
      Case += ", op, Fixups, STI";
    } else {
      Case += " op = getMachineOpValue(MI, MI.getOperand(" + utostr(OpIdx) + ")";
      Case += ", Fixups, STI";
    }
    Case += ");\n";
  }

  // Precalculate the number of lits this variable contributes to in the
  // operand. If there is a single lit (consecutive range of bits) we can use a
  // destructive sequence on APInt that reduces memory allocations.
  // (A "lit" here appears to mean one maximal run of consecutive variable
  // bits — presumably short for "literal range"; confirm against callers.)
  int numOperandLits = 0;
  for (int tmpBit = bit; tmpBit >= 0;) {
    int varBit = getVariableBit(VarName, BI, tmpBit);

    // If this bit isn't from a variable, skip it.
    if (varBit == -1) {
      --tmpBit;
      continue;
    }

    // Figure out the consecutive range of bits covered by this operand, in
    // order to generate better encoding code.
    int beginVarBit = varBit;
    int N = 1;
    for (--tmpBit; tmpBit >= 0;) {
      varBit = getVariableBit(VarName, BI, tmpBit);
      if (varBit == -1 || varBit != (beginVarBit - N))
        break;
      ++N;
      --tmpBit;
    }
    ++numOperandLits;
  }

  // Second pass: for each consecutive run of variable bits, emit the code
  // that masks/shifts (or extracts/inserts) the run into 'Value'.
  for (; bit >= 0; ) {
    int varBit = getVariableBit(VarName, BI, bit);

    // If this bit isn't from a variable, skip it.
    if (varBit == -1) {
      --bit;
      continue;
    }

    // Figure out the consecutive range of bits covered by this operand, in
    // order to generate better encoding code.
    int beginInstBit = bit;
    int beginVarBit = varBit;
    int N = 1;
    for (--bit; bit >= 0;) {
      varBit = getVariableBit(VarName, BI, bit);
      if (varBit == -1 || varBit != (beginVarBit - N)) break;
      ++N;
      --bit;
    }

    std::string maskStr;
    int opShift;

    // Run occupies [loBit, hiBit) of the operand and starts at loInstBit of
    // the instruction (bits are scanned high-to-low, so subtract N-1).
    unsigned loBit = beginVarBit - N + 1;
    unsigned hiBit = loBit + N;
    unsigned loInstBit = beginInstBit - N + 1;
    if (UseAPInt) {
      std::string extractStr;
      // Runs of >= 64 bits can't round-trip through a uint64_t; use the
      // full-APInt extract/insert pair instead.
      if (N >= 64) {
        extractStr = "op.extractBits(" + itostr(hiBit - loBit) + ", " +
                     itostr(loBit) + ")";
        Case += " Value.insertBits(" + extractStr + ", " +
                itostr(loInstBit) + ");\n";
      } else {
        // Short runs fit in a uint64_t, avoiding an APInt temporary.
        extractStr = "op.extractBitsAsZExtValue(" + itostr(hiBit - loBit) +
                     ", " + itostr(loBit) + ")";
        Case += " Value.insertBits(" + extractStr + ", " +
                itostr(loInstBit) + ", " + itostr(hiBit - loBit) + ");\n";
      }
    } else {
      // uint64_t mode: build a mask for the run, positioned at the run's
      // location within the operand, then shift to the instruction position.
      uint64_t opMask = ~(uint64_t)0 >> (64 - N);
      opShift = beginVarBit - N + 1;
      opMask <<= opShift;
      maskStr = "UINT64_C(" + utostr(opMask) + ")";
      // Relative shift from operand position to instruction position.
      opShift = beginInstBit - beginVarBit;

      if (numOperandLits == 1) {
        // Single run: safe to destructively mask/shift 'op' in place.
        Case += " op &= " + maskStr + ";\n";
        if (opShift > 0) {
          Case += " op <<= " + itostr(opShift) + ";\n";
        } else if (opShift < 0) {
          Case += " op >>= " + itostr(-opShift) + ";\n";
        }
        Case += " Value |= op;\n";
      } else {
        // Multiple runs: 'op' must stay intact for later runs, so mask and
        // shift within the expression instead.
        if (opShift > 0) {
          Case += " Value |= (op & " + maskStr + ") << " +
                  itostr(opShift) + ";\n";
        } else if (opShift < 0) {
          Case += " Value |= (op & " + maskStr + ") >> " +
                  itostr(-opShift) + ";\n";
        } else {
          Case += " Value |= (op & " + maskStr + ");\n";
        }
      }
    }
  }
}
|
|
|
|
|
|
|
|
// Build the encoder case body for instruction record 'R'. If the record
// carries per-HwMode encodings, wrap the per-encoding bodies in a switch on
// HwMode; otherwise 'R' itself is the encoding definition.
std::string CodeEmitterGen::getInstructionCase(Record *R,
                                               CodeGenTarget &Target) {
  const RecordVal *RV = R->getValue("EncodingInfos");
  DefInit *DI = RV ? dyn_cast_or_null<DefInit>(RV->getValue()) : nullptr;

  // Common case: a single encoding, defined directly on the record.
  if (!DI)
    return getInstructionCaseForEncoding(R, R, Target);

  const CodeGenHwModes &HWM = Target.getHwModes();
  EncodingInfoByHwMode EBM(DI->getDef(), HWM);

  std::string Case;
  Case += " switch (HwMode) {\n";
  Case += " default: llvm_unreachable(\"Unhandled HwMode\");\n";
  for (auto &KV : EBM.Map) {
    Case += " case " + itostr(KV.first) + ": {\n";
    Case += getInstructionCaseForEncoding(R, KV.second, Target);
    Case += " break;\n";
    Case += " }\n";
  }
  Case += " }\n";
  return Case;
}
|
|
|
|
|
|
|
|
// Build the body of one encoder-switch 'case' for a single encoding.
//
// R is the instruction record proper (source of operand names and the
// PostEncoderMethod); EncodingDef supplies the "Inst" bit pattern and may
// differ from R when per-HwMode encodings are in use.
std::string CodeEmitterGen::getInstructionCaseForEncoding(Record *R, Record *EncodingDef,
                                              CodeGenTarget &Target) {
  std::string Case;
  BitsInit *BI = EncodingDef->getValueAsBitsInit("Inst");
  // Next positional operand to assign; advanced inside AddCodeToMergeInOperand.
  unsigned NumberedOp = 0;
  // Operand indices already claimed by name-based mapping; the positional
  // scheme must skip these when noNamedPositionallyEncodedOperands is set.
  std::set<unsigned> NamedOpIndices;

  // Collect the set of operand indices that might correspond to named
  // operand, and skip these when assigning operands based on position.
  if (Target.getInstructionSet()->
       getValueAsBit("noNamedPositionallyEncodedOperands")) {
    CodeGenInstruction &CGI = Target.getInstruction(R);
    for (const RecordVal &RV : R->getValues()) {
      unsigned OpIdx;
      if (!CGI.Operands.hasOperandNamed(RV.getName(), OpIdx))
        continue;

      NamedOpIndices.insert(OpIdx);
    }
  }

  // Loop over all of the fields in the instruction, determining which are the
  // operands to the instruction.
  for (const RecordVal &RV : EncodingDef->getValues()) {
    // Ignore fixed fields in the record, we're looking for values like:
    //    bits<5> RST = { ?, ?, ?, ?, ? };
    if (RV.getPrefix() || RV.getValue()->isComplete())
      continue;

    AddCodeToMergeInOperand(R, BI, RV.getName(), NumberedOp,
                            NamedOpIndices, Case, Target);
  }

  // If the instruction names a PostEncoderMethod, emit a call so the target
  // can post-process the fully assembled encoding value.
  StringRef PostEmitter = R->getValueAsString("PostEncoderMethod");
  if (!PostEmitter.empty()) {
    Case += "      Value = ";
    Case += PostEmitter;
    Case += "(MI, Value";
    Case += ", STI";
    Case += ");\n";
  }

  return Case;
}
|
|
|
|
|
2019-03-12 01:04:35 +08:00
|
|
|
static std::string
|
|
|
|
getNameForFeatureBitset(const std::vector<Record *> &FeatureBitset) {
|
|
|
|
std::string Name = "CEFBS";
|
|
|
|
for (const auto &Feature : FeatureBitset)
|
|
|
|
Name += ("_" + Feature->getName()).str();
|
|
|
|
return Name;
|
|
|
|
}
|
|
|
|
|
[CodeEmitter] Support instruction widths > 64 bits
Some VLIW instruction sets are Very Long Indeed. Using uint64_t constricts the Inst encoding to 64 bits (naturally).
This change switches CodeEmitter to a mode that uses APInts when Inst's bitwidth is > 64 bits (NFC for existing targets).
When Inst.BitWidth > 64 the prototype changes to:
void TargetMCCodeEmitter::getBinaryCodeForInstr(const MCInst &MI,
SmallVectorImpl<MCFixup> &Fixups,
APInt &Inst,
APInt &Scratch,
const MCSubtargetInfo &STI);
The Inst parameter returns the encoded instruction, the Scratch parameter is used internally for manipulating operands and is exposed so that the underlying storage can be reused between calls to getBinaryCodeForInstr. The goal is to elide any APInt constructions that we can.
Similarly the operand encoding prototype changes to:
getMachineOpValue(const MCInst &MI, const MCOperand &MO, APInt &op, SmallVectorImpl<MCFixup> &Fixups, const MCSubtargetInfo &STI);
That is, the operand is passed by reference as APInt rather than returned as uint64_t.
To reiterate, this APInt mode is enabled only when Inst.BitWidth > 64, so this change is NFC for existing targets.
llvm-svn: 371928
2019-09-15 16:35:08 +08:00
|
|
|
// Print the raw 64-bit words of Bits as a comma-separated list of
// UINT64_C(...) literals, in APInt word order (least-significant word first).
static void emitInstBits(raw_ostream &OS, const APInt &Bits) {
  const uint64_t *Words = Bits.getRawData();
  for (unsigned Idx = 0, NumWords = Bits.getNumWords(); Idx != NumWords;
       ++Idx) {
    if (Idx != 0)
      OS << ", ";
    OS << "UINT64_C(" << utostr(Words[Idx]) << ")";
  }
}
|
|
|
|
|
2019-09-19 21:39:54 +08:00
|
|
|
// Emit the table of fixed ("base") encoding bits, one entry per instruction,
// in enum order. HwMode == -1 selects the single-table case (array named
// InstBits); any other value emits a per-mode array named InstBits_<Mode>.
void CodeEmitterGen::emitInstructionBaseValues(
    raw_ostream &o, ArrayRef<const CodeGenInstruction *> NumberedInstructions,
    CodeGenTarget &Target, int HwMode) {
  const CodeGenHwModes &HWM = Target.getHwModes();
  if (HwMode == -1)
    o << "  static const uint64_t InstBits[] = {\n";
  else
    o << "  static const uint64_t InstBits_" << HWM.getMode(HwMode).Name
      << "[] = {\n";

  for (const CodeGenInstruction *CGI : NumberedInstructions) {
    Record *R = CGI->TheDef;

    // Pseudo-instructions and generic opcodes have no encoding; emit an
    // all-zero entry so the table stays indexable by opcode.
    if (R->getValueAsString("Namespace") == "TargetOpcode" ||
        R->getValueAsBit("isPseudo")) {
      o << "    "; emitInstBits(o, APInt(BitWidth, 0)); o << ",\n";
      continue;
    }

    // Prefer the encoding record for the requested HwMode when one exists;
    // otherwise the instruction record itself supplies the bits.
    Record *EncodingDef = R;
    if (const RecordVal *RV = R->getValue("EncodingInfos")) {
      if (auto *DI = dyn_cast_or_null<DefInit>(RV->getValue())) {
        EncodingInfoByHwMode EBM(DI->getDef(), HWM);
        if (EBM.hasMode(HwMode))
          EncodingDef = EBM.get(HwMode);
      }
    }
    BitsInit *BI = EncodingDef->getValueAsBitsInit("Inst");

    // Start by filling in fixed values.
    APInt Value(BitWidth, 0);
    for (unsigned i = 0, e = BI->getNumBits(); i != e; ++i) {
      // Unset (?) bits contribute nothing; only concrete BitInits are OR'd in.
      if (BitInit *B = dyn_cast<BitInit>(BI->getBit(e - i - 1)))
        Value |= APInt(BitWidth, (uint64_t)B->getValue()) << (e - i - 1);
    }
    o << "    ";
    emitInstBits(o, Value);
    o << "," << '\t' << "// " << R->getName() << "\n";
  }
  // Trailing sentinel entry keeps the initializer list well-formed.
  o << "    UINT64_C(0)\n  };\n";
}
|
|
|
|
|
2009-07-03 08:10:29 +08:00
|
|
|
void CodeEmitterGen::run(raw_ostream &o) {
|
2010-12-13 08:23:57 +08:00
|
|
|
CodeGenTarget Target(Records);
|
2003-10-06 03:27:59 +08:00
|
|
|
std::vector<Record*> Insts = Records.getAllDerivedDefinitions("Instruction");
|
2010-10-08 00:56:28 +08:00
|
|
|
|
2006-07-14 05:02:53 +08:00
|
|
|
// For little-endian instruction bit encodings, reverse the bit order
|
2013-12-18 06:37:50 +08:00
|
|
|
Target.reverseBitsForLittleEndianEncoding();
|
2010-10-08 00:56:28 +08:00
|
|
|
|
2016-02-01 09:33:42 +08:00
|
|
|
ArrayRef<const CodeGenInstruction*> NumberedInstructions =
|
2010-03-19 08:34:35 +08:00
|
|
|
Target.getInstructionsByEnumValue();
|
2003-10-06 03:27:59 +08:00
|
|
|
|
2019-09-19 21:39:54 +08:00
|
|
|
const CodeGenHwModes &HWM = Target.getHwModes();
|
|
|
|
// The set of HwModes used by instruction encodings.
|
|
|
|
std::set<unsigned> HwModes;
|
|
|
|
BitWidth = 0;
|
[CodeEmitter] Support instruction widths > 64 bits
Some VLIW instruction sets are Very Long Indeed. Using uint64_t constricts the Inst encoding to 64 bits (naturally).
This change switches CodeEmitter to a mode that uses APInts when Inst's bitwidth is > 64 bits (NFC for existing targets).
When Inst.BitWidth > 64 the prototype changes to:
void TargetMCCodeEmitter::getBinaryCodeForInstr(const MCInst &MI,
SmallVectorImpl<MCFixup> &Fixups,
APInt &Inst,
APInt &Scratch,
const MCSubtargetInfo &STI);
The Inst parameter returns the encoded instruction, the Scratch parameter is used internally for manipulating operands and is exposed so that the underlying storage can be reused between calls to getBinaryCodeForInstr. The goal is to elide any APInt constructions that we can.
Similarly the operand encoding prototype changes to:
getMachineOpValue(const MCInst &MI, const MCOperand &MO, APInt &op, SmallVectorImpl<MCFixup> &Fixups, const MCSubtargetInfo &STI);
That is, the operand is passed by reference as APInt rather than returned as uint64_t.
To reiterate, this APInt mode is enabled only when Inst.BitWidth > 64, so this change is NFC for existing targets.
llvm-svn: 371928
2019-09-15 16:35:08 +08:00
|
|
|
for (const CodeGenInstruction *CGI : NumberedInstructions) {
|
|
|
|
Record *R = CGI->TheDef;
|
|
|
|
if (R->getValueAsString("Namespace") == "TargetOpcode" ||
|
|
|
|
R->getValueAsBit("isPseudo"))
|
|
|
|
continue;
|
2004-08-11 04:54:58 +08:00
|
|
|
|
2019-09-19 21:39:54 +08:00
|
|
|
if (const RecordVal *RV = R->getValue("EncodingInfos")) {
|
|
|
|
if (DefInit *DI = dyn_cast_or_null<DefInit>(RV->getValue())) {
|
|
|
|
EncodingInfoByHwMode EBM(DI->getDef(), HWM);
|
|
|
|
for (auto &KV : EBM.Map) {
|
|
|
|
BitsInit *BI = KV.second->getValueAsBitsInit("Inst");
|
|
|
|
BitWidth = std::max(BitWidth, BI->getNumBits());
|
|
|
|
HwModes.insert(KV.first);
|
|
|
|
}
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
}
|
[CodeEmitter] Support instruction widths > 64 bits
Some VLIW instruction sets are Very Long Indeed. Using uint64_t constricts the Inst encoding to 64 bits (naturally).
This change switches CodeEmitter to a mode that uses APInts when Inst's bitwidth is > 64 bits (NFC for existing targets).
When Inst.BitWidth > 64 the prototype changes to:
void TargetMCCodeEmitter::getBinaryCodeForInstr(const MCInst &MI,
SmallVectorImpl<MCFixup> &Fixups,
APInt &Inst,
APInt &Scratch,
const MCSubtargetInfo &STI);
The Inst parameter returns the encoded instruction, the Scratch parameter is used internally for manipulating operands and is exposed so that the underlying storage can be reused between calls to getBinaryCodeForInstr. The goal is to elide any APInt constructions that we can.
Similarly the operand encoding prototype changes to:
getMachineOpValue(const MCInst &MI, const MCOperand &MO, APInt &op, SmallVectorImpl<MCFixup> &Fixups, const MCSubtargetInfo &STI);
That is, the operand is passed by reference as APInt rather than returned as uint64_t.
To reiterate, this APInt mode is enabled only when Inst.BitWidth > 64, so this change is NFC for existing targets.
llvm-svn: 371928
2019-09-15 16:35:08 +08:00
|
|
|
BitsInit *BI = R->getValueAsBitsInit("Inst");
|
2019-09-19 21:39:54 +08:00
|
|
|
BitWidth = std::max(BitWidth, BI->getNumBits());
|
[CodeEmitter] Support instruction widths > 64 bits
Some VLIW instruction sets are Very Long Indeed. Using uint64_t constricts the Inst encoding to 64 bits (naturally).
This change switches CodeEmitter to a mode that uses APInts when Inst's bitwidth is > 64 bits (NFC for existing targets).
When Inst.BitWidth > 64 the prototype changes to:
void TargetMCCodeEmitter::getBinaryCodeForInstr(const MCInst &MI,
SmallVectorImpl<MCFixup> &Fixups,
APInt &Inst,
APInt &Scratch,
const MCSubtargetInfo &STI);
The Inst parameter returns the encoded instruction, the Scratch parameter is used internally for manipulating operands and is exposed so that the underlying storage can be reused between calls to getBinaryCodeForInstr. The goal is to elide any APInt constructions that we can.
Similarly the operand encoding prototype changes to:
getMachineOpValue(const MCInst &MI, const MCOperand &MO, APInt &op, SmallVectorImpl<MCFixup> &Fixups, const MCSubtargetInfo &STI);
That is, the operand is passed by reference as APInt rather than returned as uint64_t.
To reiterate, this APInt mode is enabled only when Inst.BitWidth > 64, so this change is NFC for existing targets.
llvm-svn: 371928
2019-09-15 16:35:08 +08:00
|
|
|
}
|
|
|
|
UseAPInt = BitWidth > 64;
|
|
|
|
|
|
|
|
// Emit function declaration
|
|
|
|
if (UseAPInt) {
|
|
|
|
o << "void " << Target.getName()
|
|
|
|
<< "MCCodeEmitter::getBinaryCodeForInstr(const MCInst &MI,\n"
|
|
|
|
<< " SmallVectorImpl<MCFixup> &Fixups,\n"
|
|
|
|
<< " APInt &Inst,\n"
|
|
|
|
<< " APInt &Scratch,\n"
|
|
|
|
<< " const MCSubtargetInfo &STI) const {\n";
|
|
|
|
} else {
|
|
|
|
o << "uint64_t " << Target.getName();
|
|
|
|
o << "MCCodeEmitter::getBinaryCodeForInstr(const MCInst &MI,\n"
|
|
|
|
<< " SmallVectorImpl<MCFixup> &Fixups,\n"
|
|
|
|
<< " const MCSubtargetInfo &STI) const {\n";
|
|
|
|
}
|
|
|
|
|
2006-07-13 03:15:43 +08:00
|
|
|
// Emit instruction base values
|
2019-09-19 21:39:54 +08:00
|
|
|
if (HwModes.empty()) {
|
|
|
|
emitInstructionBaseValues(o, NumberedInstructions, Target, -1);
|
|
|
|
} else {
|
|
|
|
for (unsigned HwMode : HwModes)
|
|
|
|
emitInstructionBaseValues(o, NumberedInstructions, Target, (int)HwMode);
|
|
|
|
}
|
2003-10-06 03:27:59 +08:00
|
|
|
|
2019-09-19 21:39:54 +08:00
|
|
|
if (!HwModes.empty()) {
|
|
|
|
o << " const uint64_t *InstBits;\n";
|
|
|
|
o << " unsigned HwMode = STI.getHwMode();\n";
|
|
|
|
o << " switch (HwMode) {\n";
|
|
|
|
o << " default: llvm_unreachable(\"Unknown hardware mode!\"); break;\n";
|
|
|
|
for (unsigned I : HwModes) {
|
|
|
|
o << " case " << I << ": InstBits = InstBits_" << HWM.getMode(I).Name
|
|
|
|
<< "; break;\n";
|
2003-10-06 03:27:59 +08:00
|
|
|
}
|
2019-09-19 21:39:54 +08:00
|
|
|
o << " };\n";
|
2006-07-13 03:15:43 +08:00
|
|
|
}
|
2019-09-19 21:39:54 +08:00
|
|
|
|
2006-07-14 05:02:53 +08:00
|
|
|
// Map to accumulate all the cases.
|
2016-12-01 01:48:10 +08:00
|
|
|
std::map<std::string, std::vector<std::string>> CaseMap;
|
2010-10-08 00:56:28 +08:00
|
|
|
|
2006-07-14 05:02:53 +08:00
|
|
|
// Construct all cases statement for each opcode
|
|
|
|
for (std::vector<Record*>::iterator IC = Insts.begin(), EC = Insts.end();
|
|
|
|
IC != EC; ++IC) {
|
|
|
|
Record *R = *IC;
|
2011-07-07 05:33:38 +08:00
|
|
|
if (R->getValueAsString("Namespace") == "TargetOpcode" ||
|
|
|
|
R->getValueAsBit("isPseudo"))
|
2010-07-03 05:44:22 +08:00
|
|
|
continue;
|
2017-06-01 03:01:11 +08:00
|
|
|
std::string InstName =
|
|
|
|
(R->getValueAsString("Namespace") + "::" + R->getName()).str();
|
2010-11-15 14:59:17 +08:00
|
|
|
std::string Case = getInstructionCase(R, Target);
|
2010-11-11 09:19:24 +08:00
|
|
|
|
2017-06-01 03:01:11 +08:00
|
|
|
CaseMap[Case].push_back(std::move(InstName));
|
2006-07-14 05:02:53 +08:00
|
|
|
}
|
2005-04-22 08:00:37 +08:00
|
|
|
|
2006-07-14 05:02:53 +08:00
|
|
|
// Emit initial function code
|
[CodeEmitter] Support instruction widths > 64 bits
Some VLIW instruction sets are Very Long Indeed. Using uint64_t constricts the Inst encoding to 64 bits (naturally).
This change switches CodeEmitter to a mode that uses APInts when Inst's bitwidth is > 64 bits (NFC for existing targets).
When Inst.BitWidth > 64 the prototype changes to:
void TargetMCCodeEmitter::getBinaryCodeForInstr(const MCInst &MI,
SmallVectorImpl<MCFixup> &Fixups,
APInt &Inst,
APInt &Scratch,
const MCSubtargetInfo &STI);
The Inst parameter returns the encoded instruction, the Scratch parameter is used internally for manipulating operands and is exposed so that the underlying storage can be reused between calls to getBinaryCodeForInstr. The goal is to elide any APInt constructions that we can.
Similarly the operand encoding prototype changes to:
getMachineOpValue(const MCInst &MI, const MCOperand &MO, APInt &op, SmallVectorImpl<MCFixup> &Fixups, const MCSubtargetInfo &STI);
That is, the operand is passed by reference as APInt rather than returned as uint64_t.
To reiterate, this APInt mode is enabled only when Inst.BitWidth > 64, so this change is NFC for existing targets.
llvm-svn: 371928
2019-09-15 16:35:08 +08:00
|
|
|
if (UseAPInt) {
|
|
|
|
int NumWords = APInt::getNumWords(BitWidth);
|
|
|
|
int NumBytes = (BitWidth + 7) / 8;
|
|
|
|
o << " const unsigned opcode = MI.getOpcode();\n"
|
|
|
|
<< " if (Inst.getBitWidth() != " << BitWidth << ")\n"
|
|
|
|
<< " Inst = Inst.zext(" << BitWidth << ");\n"
|
|
|
|
<< " if (Scratch.getBitWidth() != " << BitWidth << ")\n"
|
|
|
|
<< " Scratch = Scratch.zext(" << BitWidth << ");\n"
|
|
|
|
<< " LoadIntFromMemory(Inst, (uint8_t*)&InstBits[opcode * " << NumWords
|
|
|
|
<< "], " << NumBytes << ");\n"
|
|
|
|
<< " APInt &Value = Inst;\n"
|
|
|
|
<< " APInt &op = Scratch;\n"
|
|
|
|
<< " switch (opcode) {\n";
|
|
|
|
} else {
|
|
|
|
o << " const unsigned opcode = MI.getOpcode();\n"
|
|
|
|
<< " uint64_t Value = InstBits[opcode];\n"
|
|
|
|
<< " uint64_t op = 0;\n"
|
|
|
|
<< " (void)op; // suppress warning\n"
|
|
|
|
<< " switch (opcode) {\n";
|
|
|
|
}
|
2005-04-22 08:00:37 +08:00
|
|
|
|
2006-07-14 05:02:53 +08:00
|
|
|
// Emit each case statement
|
2016-12-01 01:48:10 +08:00
|
|
|
std::map<std::string, std::vector<std::string>>::iterator IE, EE;
|
2006-07-14 05:02:53 +08:00
|
|
|
for (IE = CaseMap.begin(), EE = CaseMap.end(); IE != EE; ++IE) {
|
|
|
|
const std::string &Case = IE->first;
|
|
|
|
std::vector<std::string> &InstList = IE->second;
|
2005-04-22 08:00:37 +08:00
|
|
|
|
2006-07-14 05:02:53 +08:00
|
|
|
for (int i = 0, N = InstList.size(); i < N; i++) {
|
|
|
|
if (i) o << "\n";
|
2011-02-04 07:26:36 +08:00
|
|
|
o << " case " << InstList[i] << ":";
|
2003-10-06 03:27:59 +08:00
|
|
|
}
|
2006-07-14 05:02:53 +08:00
|
|
|
o << " {\n";
|
|
|
|
o << Case;
|
2003-10-06 03:27:59 +08:00
|
|
|
o << " break;\n"
|
|
|
|
<< " }\n";
|
|
|
|
}
|
|
|
|
|
2004-10-14 13:53:01 +08:00
|
|
|
// Default case: unhandled opcode
|
2003-10-06 03:27:59 +08:00
|
|
|
o << " default:\n"
|
2014-06-27 06:52:05 +08:00
|
|
|
<< " std::string msg;\n"
|
|
|
|
<< " raw_string_ostream Msg(msg);\n"
|
2009-07-09 03:04:27 +08:00
|
|
|
<< " Msg << \"Not supported instr: \" << MI;\n"
|
2010-04-08 06:58:41 +08:00
|
|
|
<< " report_fatal_error(Msg.str());\n"
|
2019-09-19 02:14:42 +08:00
|
|
|
<< " }\n";
|
|
|
|
if (UseAPInt)
|
|
|
|
o << " Inst = Value;\n";
|
|
|
|
else
|
|
|
|
o << " return Value;\n";
|
|
|
|
o << "}\n\n";
|
Check that emitted instructions meet their predicates on all targets except ARM, Mips, and X86.
Summary:
* ARM is omitted from this patch because this check appears to expose bugs in this target.
* Mips is omitted from this patch because this check either detects bugs or deliberate
emission of instructions that don't satisfy their predicates. One deliberate
use is the SYNC instruction where the version with an operand is correctly
defined as requiring MIPS32 while the version without an operand is defined
as an alias of 'SYNC 0' and requires MIPS2.
* X86 is omitted from this patch because it doesn't use the tablegen-erated
MCCodeEmitter infrastructure.
Patches for ARM and Mips will follow.
Depends on D25617
Reviewers: tstellarAMD, jmolloy
Subscribers: wdng, jmolloy, aemerson, rengolin, arsenm, jyknight, nemanjai, nhaehnle, tstellarAMD, llvm-commits
Differential Revision: https://reviews.llvm.org/D25618
llvm-svn: 287439
2016-11-19 21:05:44 +08:00
|
|
|
|
|
|
|
const auto &All = SubtargetFeatureInfo::getAll(Records);
|
|
|
|
std::map<Record *, SubtargetFeatureInfo, LessRecordByID> SubtargetFeatures;
|
|
|
|
SubtargetFeatures.insert(All.begin(), All.end());
|
|
|
|
|
|
|
|
o << "#ifdef ENABLE_INSTR_PREDICATE_VERIFIER\n"
|
|
|
|
<< "#undef ENABLE_INSTR_PREDICATE_VERIFIER\n"
|
|
|
|
<< "#include <sstream>\n\n";
|
|
|
|
|
|
|
|
// Emit the subtarget feature enumeration.
|
2019-03-12 01:04:35 +08:00
|
|
|
SubtargetFeatureInfo::emitSubtargetFeatureBitEnumeration(SubtargetFeatures,
|
|
|
|
o);
|
Check that emitted instructions meet their predicates on all targets except ARM, Mips, and X86.
Summary:
* ARM is omitted from this patch because this check appears to expose bugs in this target.
* Mips is omitted from this patch because this check either detects bugs or deliberate
emission of instructions that don't satisfy their predicates. One deliberate
use is the SYNC instruction where the version with an operand is correctly
defined as requiring MIPS32 while the version without an operand is defined
as an alias of 'SYNC 0' and requires MIPS2.
* X86 is omitted from this patch because it doesn't use the tablegen-erated
MCCodeEmitter infrastructure.
Patches for ARM and Mips will follow.
Depends on D25617
Reviewers: tstellarAMD, jmolloy
Subscribers: wdng, jmolloy, aemerson, rengolin, arsenm, jyknight, nemanjai, nhaehnle, tstellarAMD, llvm-commits
Differential Revision: https://reviews.llvm.org/D25618
llvm-svn: 287439
2016-11-19 21:05:44 +08:00
|
|
|
|
|
|
|
// Emit the name table for error messages.
|
|
|
|
o << "#ifndef NDEBUG\n";
|
|
|
|
SubtargetFeatureInfo::emitNameTable(SubtargetFeatures, o);
|
|
|
|
o << "#endif // NDEBUG\n";
|
|
|
|
|
|
|
|
// Emit the available features compute function.
|
[globalisel][tablegen] Import SelectionDAG's rule predicates and support the equivalent in GIRule.
Summary:
The SelectionDAG importer now imports rules with Predicate's attached via
Requires, PredicateControl, etc. These predicates are implemented as
bitset's to allow multiple predicates to be tested together. However,
unlike the MC layer subtarget features, each target only pays for it's own
predicates (e.g. AArch64 doesn't have 192 feature bits just because X86
needs a lot).
Both AArch64 and X86 derive at least one predicate from the MachineFunction
or Function so they must re-initialize AvailableFeatures before each
function. They also declare locals in <Target>InstructionSelector so that
computeAvailableFeatures() can use the code from SelectionDAG without
modification.
Reviewers: rovka, qcolombet, aditya_nandakumar, t.p.northover, ab
Reviewed By: rovka
Subscribers: aemerson, rengolin, dberris, kristof.beyls, llvm-commits, igorb
Differential Revision: https://reviews.llvm.org/D31418
llvm-svn: 300993
2017-04-21 23:59:56 +08:00
|
|
|
SubtargetFeatureInfo::emitComputeAssemblerAvailableFeatures(
|
Check that emitted instructions meet their predicates on all targets except ARM, Mips, and X86.
Summary:
* ARM is omitted from this patch because this check appears to expose bugs in this target.
* Mips is omitted from this patch because this check either detects bugs or deliberate
emission of instructions that don't satisfy their predicates. One deliberate
use is the SYNC instruction where the version with an operand is correctly
defined as requiring MIPS32 while the version without an operand is defined
as an alias of 'SYNC 0' and requires MIPS2.
* X86 is omitted from this patch because it doesn't use the tablegen-erated
MCCodeEmitter infrastructure.
Patches for ARM and Mips will follow.
Depends on D25617
Reviewers: tstellarAMD, jmolloy
Subscribers: wdng, jmolloy, aemerson, rengolin, arsenm, jyknight, nemanjai, nhaehnle, tstellarAMD, llvm-commits
Differential Revision: https://reviews.llvm.org/D25618
llvm-svn: 287439
2016-11-19 21:05:44 +08:00
|
|
|
Target.getName(), "MCCodeEmitter", "computeAvailableFeatures",
|
|
|
|
SubtargetFeatures, o);
|
|
|
|
|
2019-03-12 01:04:35 +08:00
|
|
|
std::vector<std::vector<Record *>> FeatureBitsets;
|
|
|
|
for (const CodeGenInstruction *Inst : Target.getInstructionsByEnumValue()) {
|
|
|
|
FeatureBitsets.emplace_back();
|
|
|
|
for (Record *Predicate : Inst->TheDef->getValueAsListOfDefs("Predicates")) {
|
|
|
|
const auto &I = SubtargetFeatures.find(Predicate);
|
|
|
|
if (I != SubtargetFeatures.end())
|
|
|
|
FeatureBitsets.back().push_back(I->second.TheDef);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
llvm::sort(FeatureBitsets, [&](const std::vector<Record *> &A,
|
|
|
|
const std::vector<Record *> &B) {
|
|
|
|
if (A.size() < B.size())
|
|
|
|
return true;
|
|
|
|
if (A.size() > B.size())
|
|
|
|
return false;
|
2019-12-23 01:58:32 +08:00
|
|
|
for (auto Pair : zip(A, B)) {
|
2019-03-12 01:04:35 +08:00
|
|
|
if (std::get<0>(Pair)->getName() < std::get<1>(Pair)->getName())
|
|
|
|
return true;
|
|
|
|
if (std::get<0>(Pair)->getName() > std::get<1>(Pair)->getName())
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
return false;
|
|
|
|
});
|
|
|
|
FeatureBitsets.erase(
|
|
|
|
std::unique(FeatureBitsets.begin(), FeatureBitsets.end()),
|
|
|
|
FeatureBitsets.end());
|
|
|
|
o << "#ifndef NDEBUG\n"
|
|
|
|
<< "// Feature bitsets.\n"
|
|
|
|
<< "enum : " << getMinimalTypeForRange(FeatureBitsets.size()) << " {\n"
|
|
|
|
<< " CEFBS_None,\n";
|
|
|
|
for (const auto &FeatureBitset : FeatureBitsets) {
|
|
|
|
if (FeatureBitset.empty())
|
|
|
|
continue;
|
|
|
|
o << " " << getNameForFeatureBitset(FeatureBitset) << ",\n";
|
|
|
|
}
|
|
|
|
o << "};\n\n"
|
2019-08-24 23:02:44 +08:00
|
|
|
<< "static constexpr FeatureBitset FeatureBitsets[] = {\n"
|
|
|
|
<< " {}, // CEFBS_None\n";
|
2019-03-12 01:04:35 +08:00
|
|
|
for (const auto &FeatureBitset : FeatureBitsets) {
|
|
|
|
if (FeatureBitset.empty())
|
|
|
|
continue;
|
|
|
|
o << " {";
|
|
|
|
for (const auto &Feature : FeatureBitset) {
|
|
|
|
const auto &I = SubtargetFeatures.find(Feature);
|
|
|
|
assert(I != SubtargetFeatures.end() && "Didn't import predicate?");
|
|
|
|
o << I->second.getEnumBitName() << ", ";
|
|
|
|
}
|
|
|
|
o << "},\n";
|
|
|
|
}
|
|
|
|
o << "};\n"
|
|
|
|
<< "#endif // NDEBUG\n\n";
|
|
|
|
|
|
|
|
|
Check that emitted instructions meet their predicates on all targets except ARM, Mips, and X86.
Summary:
* ARM is omitted from this patch because this check appears to expose bugs in this target.
* Mips is omitted from this patch because this check either detects bugs or deliberate
emission of instructions that don't satisfy their predicates. One deliberate
use is the SYNC instruction where the version with an operand is correctly
defined as requiring MIPS32 while the version without an operand is defined
as an alias of 'SYNC 0' and requires MIPS2.
* X86 is omitted from this patch because it doesn't use the tablegen-erated
MCCodeEmitter infrastructure.
Patches for ARM and Mips will follow.
Depends on D25617
Reviewers: tstellarAMD, jmolloy
Subscribers: wdng, jmolloy, aemerson, rengolin, arsenm, jyknight, nemanjai, nhaehnle, tstellarAMD, llvm-commits
Differential Revision: https://reviews.llvm.org/D25618
llvm-svn: 287439
2016-11-19 21:05:44 +08:00
|
|
|
// Emit the predicate verifier.
|
|
|
|
o << "void " << Target.getName()
|
|
|
|
<< "MCCodeEmitter::verifyInstructionPredicates(\n"
|
2019-03-12 01:04:35 +08:00
|
|
|
<< " const MCInst &Inst, const FeatureBitset &AvailableFeatures) const {\n"
|
Check that emitted instructions meet their predicates on all targets except ARM, Mips, and X86.
Summary:
* ARM is omitted from this patch because this check appears to expose bugs in this target.
* Mips is omitted from this patch because this check either detects bugs or deliberate
emission of instructions that don't satisfy their predicates. One deliberate
use is the SYNC instruction where the version with an operand is correctly
defined as requiring MIPS32 while the version without an operand is defined
as an alias of 'SYNC 0' and requires MIPS2.
* X86 is omitted from this patch because it doesn't use the tablegen-erated
MCCodeEmitter infrastructure.
Patches for ARM and Mips will follow.
Depends on D25617
Reviewers: tstellarAMD, jmolloy
Subscribers: wdng, jmolloy, aemerson, rengolin, arsenm, jyknight, nemanjai, nhaehnle, tstellarAMD, llvm-commits
Differential Revision: https://reviews.llvm.org/D25618
llvm-svn: 287439
2016-11-19 21:05:44 +08:00
|
|
|
<< "#ifndef NDEBUG\n"
|
2019-03-12 01:04:35 +08:00
|
|
|
<< " static " << getMinimalTypeForRange(FeatureBitsets.size())
|
|
|
|
<< " RequiredFeaturesRefs[] = {\n";
|
Check that emitted instructions meet their predicates on all targets except ARM, Mips, and X86.
Summary:
* ARM is omitted from this patch because this check appears to expose bugs in this target.
* Mips is omitted from this patch because this check either detects bugs or deliberate
emission of instructions that don't satisfy their predicates. One deliberate
use is the SYNC instruction where the version with an operand is correctly
defined as requiring MIPS32 while the version without an operand is defined
as an alias of 'SYNC 0' and requires MIPS2.
* X86 is omitted from this patch because it doesn't use the tablegen-erated
MCCodeEmitter infrastructure.
Patches for ARM and Mips will follow.
Depends on D25617
Reviewers: tstellarAMD, jmolloy
Subscribers: wdng, jmolloy, aemerson, rengolin, arsenm, jyknight, nemanjai, nhaehnle, tstellarAMD, llvm-commits
Differential Revision: https://reviews.llvm.org/D25618
llvm-svn: 287439
2016-11-19 21:05:44 +08:00
|
|
|
unsigned InstIdx = 0;
|
|
|
|
for (const CodeGenInstruction *Inst : Target.getInstructionsByEnumValue()) {
|
2019-03-12 01:04:35 +08:00
|
|
|
o << " CEFBS";
|
|
|
|
unsigned NumPredicates = 0;
|
Check that emitted instructions meet their predicates on all targets except ARM, Mips, and X86.
Summary:
* ARM is omitted from this patch because this check appears to expose bugs in this target.
* Mips is omitted from this patch because this check either detects bugs or deliberate
emission of instructions that don't satisfy their predicates. One deliberate
use is the SYNC instruction where the version with an operand is correctly
defined as requiring MIPS32 while the version without an operand is defined
as an alias of 'SYNC 0' and requires MIPS2.
* X86 is omitted from this patch because it doesn't use the tablegen-erated
MCCodeEmitter infrastructure.
Patches for ARM and Mips will follow.
Depends on D25617
Reviewers: tstellarAMD, jmolloy
Subscribers: wdng, jmolloy, aemerson, rengolin, arsenm, jyknight, nemanjai, nhaehnle, tstellarAMD, llvm-commits
Differential Revision: https://reviews.llvm.org/D25618
llvm-svn: 287439
2016-11-19 21:05:44 +08:00
|
|
|
for (Record *Predicate : Inst->TheDef->getValueAsListOfDefs("Predicates")) {
|
|
|
|
const auto &I = SubtargetFeatures.find(Predicate);
|
2019-03-12 01:04:35 +08:00
|
|
|
if (I != SubtargetFeatures.end()) {
|
|
|
|
o << '_' << I->second.TheDef->getName();
|
|
|
|
NumPredicates++;
|
|
|
|
}
|
Check that emitted instructions meet their predicates on all targets except ARM, Mips, and X86.
Summary:
* ARM is omitted from this patch because this check appears to expose bugs in this target.
* Mips is omitted from this patch because this check either detects bugs or deliberate
emission of instructions that don't satisfy their predicates. One deliberate
use is the SYNC instruction where the version with an operand is correctly
defined as requiring MIPS32 while the version without an operand is defined
as an alias of 'SYNC 0' and requires MIPS2.
* X86 is omitted from this patch because it doesn't use the tablegen-erated
MCCodeEmitter infrastructure.
Patches for ARM and Mips will follow.
Depends on D25617
Reviewers: tstellarAMD, jmolloy
Subscribers: wdng, jmolloy, aemerson, rengolin, arsenm, jyknight, nemanjai, nhaehnle, tstellarAMD, llvm-commits
Differential Revision: https://reviews.llvm.org/D25618
llvm-svn: 287439
2016-11-19 21:05:44 +08:00
|
|
|
}
|
2019-03-12 01:04:35 +08:00
|
|
|
if (!NumPredicates)
|
|
|
|
o << "_None";
|
|
|
|
o << ", // " << Inst->TheDef->getName() << " = " << InstIdx << "\n";
|
Check that emitted instructions meet their predicates on all targets except ARM, Mips, and X86.
Summary:
* ARM is omitted from this patch because this check appears to expose bugs in this target.
* Mips is omitted from this patch because this check either detects bugs or deliberate
emission of instructions that don't satisfy their predicates. One deliberate
use is the SYNC instruction where the version with an operand is correctly
defined as requiring MIPS32 while the version without an operand is defined
as an alias of 'SYNC 0' and requires MIPS2.
* X86 is omitted from this patch because it doesn't use the tablegen-erated
MCCodeEmitter infrastructure.
Patches for ARM and Mips will follow.
Depends on D25617
Reviewers: tstellarAMD, jmolloy
Subscribers: wdng, jmolloy, aemerson, rengolin, arsenm, jyknight, nemanjai, nhaehnle, tstellarAMD, llvm-commits
Differential Revision: https://reviews.llvm.org/D25618
llvm-svn: 287439
2016-11-19 21:05:44 +08:00
|
|
|
InstIdx++;
|
|
|
|
}
|
|
|
|
o << " };\n\n";
|
|
|
|
o << " assert(Inst.getOpcode() < " << InstIdx << ");\n";
|
2019-03-12 01:04:35 +08:00
|
|
|
o << " const FeatureBitset &RequiredFeatures = "
|
|
|
|
"FeatureBitsets[RequiredFeaturesRefs[Inst.getOpcode()]];\n";
|
|
|
|
o << " FeatureBitset MissingFeatures =\n"
|
|
|
|
<< " (AvailableFeatures & RequiredFeatures) ^\n"
|
|
|
|
<< " RequiredFeatures;\n"
|
|
|
|
<< " if (MissingFeatures.any()) {\n"
|
Check that emitted instructions meet their predicates on all targets except ARM, Mips, and X86.
Summary:
* ARM is omitted from this patch because this check appears to expose bugs in this target.
* Mips is omitted from this patch because this check either detects bugs or deliberate
emission of instructions that don't satisfy their predicates. One deliberate
use is the SYNC instruction where the version with an operand is correctly
defined as requiring MIPS32 while the version without an operand is defined
as an alias of 'SYNC 0' and requires MIPS2.
* X86 is omitted from this patch because it doesn't use the tablegen-erated
MCCodeEmitter infrastructure.
Patches for ARM and Mips will follow.
Depends on D25617
Reviewers: tstellarAMD, jmolloy
Subscribers: wdng, jmolloy, aemerson, rengolin, arsenm, jyknight, nemanjai, nhaehnle, tstellarAMD, llvm-commits
Differential Revision: https://reviews.llvm.org/D25618
llvm-svn: 287439
2016-11-19 21:05:44 +08:00
|
|
|
<< " std::ostringstream Msg;\n"
|
2016-11-19 22:47:41 +08:00
|
|
|
<< " Msg << \"Attempting to emit \" << "
|
|
|
|
"MCII.getName(Inst.getOpcode()).str()\n"
|
Check that emitted instructions meet their predicates on all targets except ARM, Mips, and X86.
Summary:
* ARM is omitted from this patch because this check appears to expose bugs in this target.
* Mips is omitted from this patch because this check either detects bugs or deliberate
emission of instructions that don't satisfy their predicates. One deliberate
use is the SYNC instruction where the version with an operand is correctly
defined as requiring MIPS32 while the version without an operand is defined
as an alias of 'SYNC 0' and requires MIPS2.
* X86 is omitted from this patch because it doesn't use the tablegen-erated
MCCodeEmitter infrastructure.
Patches for ARM and Mips will follow.
Depends on D25617
Reviewers: tstellarAMD, jmolloy
Subscribers: wdng, jmolloy, aemerson, rengolin, arsenm, jyknight, nemanjai, nhaehnle, tstellarAMD, llvm-commits
Differential Revision: https://reviews.llvm.org/D25618
llvm-svn: 287439
2016-11-19 21:05:44 +08:00
|
|
|
<< " << \" instruction but the \";\n"
|
2019-03-12 01:04:35 +08:00
|
|
|
<< " for (unsigned i = 0, e = MissingFeatures.size(); i != e; ++i)\n"
|
|
|
|
<< " if (MissingFeatures.test(i))\n"
|
Check that emitted instructions meet their predicates on all targets except ARM, Mips, and X86.
Summary:
* ARM is omitted from this patch because this check appears to expose bugs in this target.
* Mips is omitted from this patch because this check either detects bugs or deliberate
emission of instructions that don't satisfy their predicates. One deliberate
use is the SYNC instruction where the version with an operand is correctly
defined as requiring MIPS32 while the version without an operand is defined
as an alias of 'SYNC 0' and requires MIPS2.
* X86 is omitted from this patch because it doesn't use the tablegen-erated
MCCodeEmitter infrastructure.
Patches for ARM and Mips will follow.
Depends on D25617
Reviewers: tstellarAMD, jmolloy
Subscribers: wdng, jmolloy, aemerson, rengolin, arsenm, jyknight, nemanjai, nhaehnle, tstellarAMD, llvm-commits
Differential Revision: https://reviews.llvm.org/D25618
llvm-svn: 287439
2016-11-19 21:05:44 +08:00
|
|
|
<< " Msg << SubtargetFeatureNames[i] << \" \";\n"
|
|
|
|
<< " Msg << \"predicate(s) are not met\";\n"
|
|
|
|
<< " report_fatal_error(Msg.str());\n"
|
|
|
|
<< " }\n"
|
2016-11-19 22:47:41 +08:00
|
|
|
<< "#else\n"
|
|
|
|
<< "// Silence unused variable warning on targets that don't use MCII for "
|
|
|
|
"other purposes (e.g. BPF).\n"
|
|
|
|
<< "(void)MCII;\n"
|
Check that emitted instructions meet their predicates on all targets except ARM, Mips, and X86.
Summary:
* ARM is omitted from this patch because this check appears to expose bugs in this target.
* Mips is omitted from this patch because this check either detects bugs or deliberate
emission of instructions that don't satisfy their predicates. One deliberate
use is the SYNC instruction where the version with an operand is correctly
defined as requiring MIPS32 while the version without an operand is defined
as an alias of 'SYNC 0' and requires MIPS2.
* X86 is omitted from this patch because it doesn't use the tablegen-erated
MCCodeEmitter infrastructure.
Patches for ARM and Mips will follow.
Depends on D25617
Reviewers: tstellarAMD, jmolloy
Subscribers: wdng, jmolloy, aemerson, rengolin, arsenm, jyknight, nemanjai, nhaehnle, tstellarAMD, llvm-commits
Differential Revision: https://reviews.llvm.org/D25618
llvm-svn: 287439
2016-11-19 21:05:44 +08:00
|
|
|
<< "#endif // NDEBUG\n";
|
|
|
|
o << "}\n";
|
|
|
|
o << "#endif\n";
|
2003-10-06 03:27:59 +08:00
|
|
|
}
|
2012-06-11 23:37:55 +08:00
|
|
|
|
2016-12-01 01:48:10 +08:00
|
|
|
} // end anonymous namespace
|
2012-06-11 23:37:55 +08:00
|
|
|
|
|
|
|
namespace llvm {
|
|
|
|
|
|
|
|
void EmitCodeEmitter(RecordKeeper &RK, raw_ostream &OS) {
  // Write the standard TableGen "autogenerated file" banner first, then
  // drive the code-emitter generator over the parsed record set.
  emitSourceFileHeader("Machine Code Emitter", OS);
  CodeEmitterGen Generator(RK);
  Generator.run(OS);
}
|
|
|
|
|
2016-12-01 01:48:10 +08:00
|
|
|
} // end namespace llvm
|