llvm-project/llvm/utils/TableGen/DAGISelEmitter.cpp

//===- DAGISelEmitter.cpp - Generate an instruction selector --------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This tablegen backend emits a DAG instruction selector.
//
//===----------------------------------------------------------------------===//
#include "CodeGenDAGPatterns.h"
#include "CodeGenInstruction.h"
#include "DAGISelMatcher.h"
#include "llvm/Support/Debug.h"
#include "llvm/TableGen/Record.h"
#include "llvm/TableGen/TableGenBackend.h"
using namespace llvm;
#define DEBUG_TYPE "dag-isel-emitter"
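
// Debug output below is guarded by LLVM_DEBUG; with an assertions-enabled
// build of llvm-tblgen it can typically be turned on by passing
// -debug-only=dag-isel-emitter.
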
namespace {
/// DAGISelEmitter - The top-level class which coordinates construction
/// and emission of the instruction selector.
class DAGISelEmitter {
  RecordKeeper &Records; // Just so we can get at the timing functions.
  CodeGenDAGPatterns CGP;

public:
  explicit DAGISelEmitter(RecordKeeper &R) : Records(R), CGP(R) {}

  void run(raw_ostream &OS);
};
} // End anonymous namespace
//===----------------------------------------------------------------------===//
// DAGISelEmitter Helper methods
//
/// getResultPatternCost - Compute the number of instructions for this pattern.
/// This is a temporary hack. We should really include the instruction
/// latencies in this calculation.
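/// For example, under this heuristic a result pattern that expands to two
/// instructions, one of which requires a custom inserter, would be costed at
/// 1 + (1 + 10) = 12.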
static unsigned getResultPatternCost(TreePatternNode *P,
                                     CodeGenDAGPatterns &CGP) {
  if (P->isLeaf()) return 0;

  unsigned Cost = 0;
  Record *Op = P->getOperator();
  if (Op->isSubClassOf("Instruction")) {
    Cost++;
    CodeGenInstruction &II = CGP.getTargetInfo().getInstruction(Op);
    if (II.usesCustomInserter)
      Cost += 10;
  }
  for (unsigned i = 0, e = P->getNumChildren(); i != e; ++i)
    Cost += getResultPatternCost(P->getChild(i), CGP);
  return Cost;
}

/// getResultPatternSize - Compute the code size of instructions for this
/// pattern.
static unsigned getResultPatternSize(TreePatternNode *P,
                                     CodeGenDAGPatterns &CGP) {
  if (P->isLeaf()) return 0;

  unsigned Cost = 0;
  Record *Op = P->getOperator();
  if (Op->isSubClassOf("Instruction")) {
    Cost += Op->getValueAsInt("CodeSize");
  }
  for (unsigned i = 0, e = P->getNumChildren(); i != e; ++i)
    Cost += getResultPatternSize(P->getChild(i), CGP);
  return Cost;
}

namespace {
// PatternSortingPredicate - return true if we prefer to match LHS before RHS.
// In particular, we want to match maximal patterns first and lowest cost within
// a particular complexity first.
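// For example, a pattern whose source covers (add (mul x, y), z) normally has
// a higher complexity than one covering only (add x, y) (barring an
// AddedComplexity override on the smaller pattern), so the larger pattern is
// tried first.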
struct PatternSortingPredicate {
  PatternSortingPredicate(CodeGenDAGPatterns &cgp) : CGP(cgp) {}
  CodeGenDAGPatterns &CGP;

  bool operator()(const PatternToMatch *LHS, const PatternToMatch *RHS) {
    const TreePatternNode *LT = LHS->getSrcPattern();
    const TreePatternNode *RT = RHS->getSrcPattern();

    MVT LHSVT = LT->getNumTypes() != 0 ? LT->getSimpleType(0) : MVT::Other;
    MVT RHSVT = RT->getNumTypes() != 0 ? RT->getSimpleType(0) : MVT::Other;
    if (LHSVT.isVector() != RHSVT.isVector())
      return RHSVT.isVector();

    if (LHSVT.isFloatingPoint() != RHSVT.isFloatingPoint())
      return RHSVT.isFloatingPoint();

    // Otherwise, if the patterns might both match, sort based on complexity,
    // which means that we prefer to match patterns that cover more nodes in
    // the input over patterns that cover fewer.
    int LHSSize = LHS->getPatternComplexity(CGP);
    int RHSSize = RHS->getPatternComplexity(CGP);
    if (LHSSize > RHSSize) return true;   // LHS -> bigger -> less cost
    if (LHSSize < RHSSize) return false;

    // If the patterns have equal complexity, compare the generated
    // instruction cost.
    unsigned LHSCost = getResultPatternCost(LHS->getDstPattern(), CGP);
    unsigned RHSCost = getResultPatternCost(RHS->getDstPattern(), CGP);
    if (LHSCost < RHSCost) return true;
    if (LHSCost > RHSCost) return false;

    unsigned LHSPatSize = getResultPatternSize(LHS->getDstPattern(), CGP);
    unsigned RHSPatSize = getResultPatternSize(RHS->getDstPattern(), CGP);
    if (LHSPatSize < RHSPatSize) return true;
    if (LHSPatSize > RHSPatSize) return false;

    // Sort based on the UID of the pattern, to reflect source order.
    // Note that this is not guaranteed to be unique, since a single source
    // pattern may have been resolved into multiple match patterns due to
    // alternative fragments. To ensure deterministic output, always use
    // std::stable_sort with this predicate.
    return LHS->getID() < RHS->getID();
  }
};
} // End anonymous namespace

void DAGISelEmitter::run(raw_ostream &OS) {
  emitSourceFileHeader("DAG Instruction Selector for the " +
                           CGP.getTargetInfo().getName().str() + " target",
                       OS);

  OS << "// *** NOTE: This file is #included into the middle of the target\n"
     << "// *** instruction selector class. These functions are really "
     << "methods.\n\n";
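
  // The comment emitted next documents how the generated file is meant to be
  // included.  As a rough sketch (class and file names here are illustrative
  // only, not taken from any particular target), a typical consumer does:
  //
  //   #define GET_DAGISEL_BODY FooDAGToDAGISel
  //   #include "FooGenDAGISel.inc"
  //
  // which emits out-of-line function bodies qualified with the given class
  // name; defining GET_DAGISEL_DECL instead emits only the declarations.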

  OS << "// If GET_DAGISEL_DECL is #defined with any value, only function\n"
        "// declarations will be included when this file is included.\n"
        "// If GET_DAGISEL_BODY is #defined, its value should be the name of\n"
        "// the instruction selector class. Function bodies will be emitted\n"
        "// and each function's name will be qualified with the name of the\n"
        "// class.\n"
        "//\n"
        "// When neither of the GET_DAGISEL* macros is defined, the functions\n"
        "// are emitted inline.\n\n";

  LLVM_DEBUG(errs() << "\n\nALL PATTERNS TO MATCH:\n\n";
             for (CodeGenDAGPatterns::ptm_iterator I = CGP.ptm_begin(),
                                                   E = CGP.ptm_end();
                  I != E; ++I) {
               errs() << "PATTERN: ";
               I->getSrcPattern()->dump();
               errs() << "\nRESULT: ";
               I->getDstPattern()->dump();
               errs() << "\n";
             });

  // Add all the patterns to a temporary list so we can sort them.
  Records.startTimer("Sort patterns");
  std::vector<const PatternToMatch *> Patterns;
  for (const PatternToMatch &PTM : CGP.ptms())
    Patterns.push_back(&PTM);

  // We want to process the matches in order of minimal cost.  Sort the
  // patterns so the least cost one is at the start.
  llvm::stable_sort(Patterns, PatternSortingPredicate(CGP));
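  // Note: a stable sort is deliberate here; as noted in
  // PatternSortingPredicate, pattern IDs are not guaranteed to be unique, so
  // the stable sort is what keeps the output deterministic.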

  // Convert each variant of each pattern into a Matcher.
  Records.startTimer("Convert to matchers");
  std::vector<Matcher *> PatternMatchers;
  for (const PatternToMatch *PTM : Patterns) {
    // Emit a matcher for every variant of this pattern; a null result from
    // ConvertPatternToMatcher means the variants are exhausted.
    for (unsigned Variant = 0; ; ++Variant) {
      if (Matcher *M = ConvertPatternToMatcher(*PTM, Variant, CGP))
        PatternMatchers.push_back(M);
      else
        break;
    }
  }

  std::unique_ptr<Matcher> TheMatcher =
      std::make_unique<ScopeMatcher>(PatternMatchers);

  Records.startTimer("Optimize matchers");
  OptimizeMatcher(TheMatcher, CGP);
  // TheMatcher->dump();

  Records.startTimer("Emit matcher table");
  EmitMatcherTable(TheMatcher.get(), CGP, OS);
}

namespace llvm {
void EmitDAGISel(RecordKeeper &RK, raw_ostream &OS) {
  RK.startTimer("Parse patterns");
  DAGISelEmitter(RK).run(OS);
}
} // End llvm namespace