2019-02-20 09:17:46 +08:00
|
|
|
//===- Pass.cpp - Pass infrastructure implementation ----------------------===//
|
|
|
|
//
|
2020-01-26 11:58:30 +08:00
|
|
|
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
|
2019-12-24 01:35:36 +08:00
|
|
|
// See https://llvm.org/LICENSE.txt for license information.
|
|
|
|
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
|
2019-02-20 09:17:46 +08:00
|
|
|
//
|
2019-12-24 01:35:36 +08:00
|
|
|
//===----------------------------------------------------------------------===//
|
2019-02-20 09:17:46 +08:00
|
|
|
//
|
|
|
|
// This file implements common pass infrastructure.
|
|
|
|
//
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
|
|
|
|
#include "mlir/Pass/Pass.h"
|
2019-03-11 06:44:47 +08:00
|
|
|
#include "PassDetail.h"
|
2019-05-02 02:14:15 +08:00
|
|
|
#include "mlir/IR/Diagnostics.h"
|
2019-09-03 10:24:47 +08:00
|
|
|
#include "mlir/IR/Dialect.h"
|
2021-06-23 09:16:55 +08:00
|
|
|
#include "mlir/IR/Threading.h"
|
2020-05-01 04:09:13 +08:00
|
|
|
#include "mlir/IR/Verifier.h"
|
2019-10-11 10:19:11 +08:00
|
|
|
#include "mlir/Support/FileUtilities.h"
|
2019-09-23 17:33:51 +08:00
|
|
|
#include "llvm/ADT/STLExtras.h"
|
2020-08-27 12:42:38 +08:00
|
|
|
#include "llvm/ADT/ScopeExit.h"
|
2020-04-30 06:08:25 +08:00
|
|
|
#include "llvm/ADT/SetVector.h"
|
2019-03-27 12:15:54 +08:00
|
|
|
#include "llvm/Support/CommandLine.h"
|
2019-10-11 10:19:11 +08:00
|
|
|
#include "llvm/Support/CrashRecoveryContext.h"
|
2019-03-19 02:56:18 +08:00
|
|
|
#include "llvm/Support/Mutex.h"
|
2019-03-27 12:15:54 +08:00
|
|
|
#include "llvm/Support/Parallel.h"
|
2020-04-30 06:08:25 +08:00
|
|
|
#include "llvm/Support/Signals.h"
|
2019-03-27 12:15:54 +08:00
|
|
|
#include "llvm/Support/Threading.h"
|
2019-10-11 10:19:11 +08:00
|
|
|
#include "llvm/Support/ToolOutputFile.h"
|
2019-02-20 09:17:46 +08:00
|
|
|
|
|
|
|
using namespace mlir;
|
2019-02-28 09:49:51 +08:00
|
|
|
using namespace mlir::detail;
|
|
|
|
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
// Pass
|
|
|
|
//===----------------------------------------------------------------------===//
|
2019-02-20 09:17:46 +08:00
|
|
|
|
|
|
|
/// Out of line virtual method to ensure vtables and metadata are emitted to a
/// single .o file.
void Pass::anchor() {}
|
|
|
|
|
2019-12-24 07:54:55 +08:00
|
|
|
/// Attempt to initialize the options of this pass from the given string.
|
|
|
|
LogicalResult Pass::initializeOptions(StringRef options) {
|
|
|
|
return passOptions.parseFromString(options);
|
|
|
|
}
|
|
|
|
|
|
|
|
/// Copy the option values from 'other', which is another instance of this
/// pass. Only the option values are copied; no other pass state is touched.
void Pass::copyOptionValuesFrom(const Pass *other) {
  passOptions.copyOptionValuesFrom(other->passOptions);
}
|
|
|
|
|
2019-10-10 04:48:38 +08:00
|
|
|
/// Prints out the pass in the textual representation of pipelines. If this is
|
|
|
|
/// an adaptor pass, print with the op_name(sub_pass,...) format.
|
2020-11-13 15:45:07 +08:00
|
|
|
void Pass::printAsTextualPipeline(raw_ostream &os) {
|
2019-10-10 04:48:38 +08:00
|
|
|
// Special case for adaptors to use the 'op_name(sub_passes)' format.
|
2020-04-30 06:08:05 +08:00
|
|
|
if (auto *adaptor = dyn_cast<OpToOpPassAdaptor>(this)) {
|
2020-04-15 05:53:28 +08:00
|
|
|
llvm::interleaveComma(adaptor->getPassManagers(), os,
|
|
|
|
[&](OpPassManager &pm) {
|
|
|
|
os << pm.getOpName() << "(";
|
2020-11-13 15:45:07 +08:00
|
|
|
pm.printAsTextualPipeline(os);
|
2020-04-15 05:53:28 +08:00
|
|
|
os << ")";
|
|
|
|
});
|
2019-12-24 07:54:55 +08:00
|
|
|
return;
|
|
|
|
}
|
2020-04-11 13:48:58 +08:00
|
|
|
// Otherwise, print the pass argument followed by its options. If the pass
|
|
|
|
// doesn't have an argument, print the name of the pass to give some indicator
|
|
|
|
// of what pass was run.
|
|
|
|
StringRef argument = getArgument();
|
|
|
|
if (!argument.empty())
|
|
|
|
os << argument;
|
2019-12-24 07:54:55 +08:00
|
|
|
else
|
2020-04-11 13:48:58 +08:00
|
|
|
os << "unknown<" << getName() << ">";
|
2019-12-24 07:54:55 +08:00
|
|
|
passOptions.print(os);
|
2019-10-10 04:48:38 +08:00
|
|
|
}
|
|
|
|
|
2019-09-03 10:24:47 +08:00
|
|
|
//===----------------------------------------------------------------------===//
|
2019-09-10 00:51:59 +08:00
|
|
|
// OpPassManagerImpl
|
2019-09-03 10:24:47 +08:00
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
|
|
|
|
namespace mlir {
namespace detail {
/// Private implementation state for an OpPassManager (pimpl idiom).
struct OpPassManagerImpl {
  OpPassManagerImpl(Identifier identifier, OpPassManager::Nesting nesting)
      : name(identifier.str()), identifier(identifier),
        initializationGeneration(0), nesting(nesting) {}
  OpPassManagerImpl(StringRef name, OpPassManager::Nesting nesting)
      : name(name), initializationGeneration(0), nesting(nesting) {}

  /// Merge the passes of this pass manager into the one provided.
  void mergeInto(OpPassManagerImpl &rhs);

  /// Nest a new operation pass manager for the given operation kind under this
  /// pass manager.
  OpPassManager &nest(Identifier nestedName);
  OpPassManager &nest(StringRef nestedName);

  /// Add the given pass to this pass manager. If this pass has a concrete
  /// operation type, it must be the same type as this pass manager.
  void addPass(std::unique_ptr<Pass> pass);

  /// Coalesce adjacent AdaptorPasses into one large adaptor. This runs
  /// recursively through the pipeline graph.
  void coalesceAdjacentAdaptorPasses();

  /// Return the operation name of this pass manager as an identifier.
  Identifier getOpName(MLIRContext &context) {
    // Lazily internalize the string name the first time a context is
    // available; subsequent calls reuse the cached identifier.
    if (!identifier)
      identifier = Identifier::get(name, &context);
    return *identifier;
  }

  /// The name of the operation that passes of this pass manager operate on.
  std::string name;

  /// The cached identifier (internalized in the context) for the name of the
  /// operation that passes of this pass manager operate on.
  Optional<Identifier> identifier;

  /// The set of passes to run as part of this pass manager.
  std::vector<std::unique_ptr<Pass>> passes;

  /// The current initialization generation of this pass manager. This is used
  /// to indicate when a pass manager should be reinitialized.
  unsigned initializationGeneration;

  /// Control the implicit nesting of passes that mismatch the name set for this
  /// OpPassManager.
  OpPassManager::Nesting nesting;
};
} // end namespace detail
} // end namespace mlir
|
|
|
|
|
2020-04-30 06:08:15 +08:00
|
|
|
/// Move every pass held by this manager to the back of 'rhs', leaving this
/// manager empty. Both managers must operate on the same operation name.
void OpPassManagerImpl::mergeInto(OpPassManagerImpl &rhs) {
  assert(name == rhs.name && "merging unrelated pass managers");
  for (std::unique_ptr<Pass> &donatedPass : passes)
    rhs.passes.push_back(std::move(donatedPass));
  // Drop the moved-from slots so this manager is left in a clean state.
  passes.clear();
}
|
|
|
|
|
2020-09-03 04:09:07 +08:00
|
|
|
/// Nest a new pass manager for operations named 'nestedName' under this one,
/// wrapping it in an OpToOpPassAdaptor pass.
OpPassManager &OpPassManagerImpl::nest(Identifier nestedName) {
  OpPassManager nested(nestedName, nesting);
  std::unique_ptr<OpToOpPassAdaptor> adaptor(
      new OpToOpPassAdaptor(std::move(nested)));
  OpToOpPassAdaptor &adaptorRef = *adaptor;
  addPass(std::move(adaptor));
  return adaptorRef.getPassManagers().front();
}
|
|
|
|
|
|
|
|
/// Nest a new pass manager for operations named 'nestedName' under this one,
/// wrapping it in an OpToOpPassAdaptor pass.
OpPassManager &OpPassManagerImpl::nest(StringRef nestedName) {
  OpPassManager nested(nestedName, nesting);
  std::unique_ptr<OpToOpPassAdaptor> adaptor(
      new OpToOpPassAdaptor(std::move(nested)));
  OpToOpPassAdaptor &adaptorRef = *adaptor;
  addPass(std::move(adaptor));
  return adaptorRef.getPassManagers().front();
}
|
|
|
|
|
|
|
|
/// Append 'pass' to this manager. A pass restricted to a different operation
/// than this manager's is implicitly nested when nesting is enabled, and is a
/// fatal error otherwise.
void OpPassManagerImpl::addPass(std::unique_ptr<Pass> pass) {
  auto passOpName = pass->getOpName();
  if (passOpName && passOpName->str() != name) {
    // report_fatal_error does not return, so the nest below only runs when
    // implicit nesting is enabled.
    if (nesting != OpPassManager::Nesting::Implicit)
      llvm::report_fatal_error(
          llvm::Twine("Can't add pass '") + pass->getName() +
          "' restricted to '" + *passOpName +
          "' on a PassManager intended to run on '" + name +
          "', did you intend to nest?");
    return nest(*passOpName).addPass(std::move(pass));
  }

  passes.emplace_back(std::move(pass));
}
|
|
|
|
|
2019-09-10 00:51:59 +08:00
|
|
|
/// Merge runs of adjacent OpToOpPassAdaptor passes into a single adaptor, and
/// recurse into each surviving adaptor's nested pipelines. Merged-away
/// adaptors are nulled in place and erased in one sweep at the end.
void OpPassManagerImpl::coalesceAdjacentAdaptorPasses() {
  // Bail out early if there are no adaptor passes.
  if (llvm::none_of(passes, [](std::unique_ptr<Pass> &pass) {
        return isa<OpToOpPassAdaptor>(pass.get());
      }))
    return;

  // Walk the pass list and merge adjacent adaptors.
  OpToOpPassAdaptor *lastAdaptor = nullptr;
  for (auto it = passes.begin(), e = passes.end(); it != e; ++it) {
    // Check to see if this pass is an adaptor.
    if (auto *currentAdaptor = dyn_cast<OpToOpPassAdaptor>(it->get())) {
      // If it is the first adaptor in a possible chain, remember it and
      // continue.
      if (!lastAdaptor) {
        lastAdaptor = currentAdaptor;
        continue;
      }

      // Otherwise, merge into the existing adaptor and delete the current one.
      // Resetting the slot (rather than erasing now) keeps the iterators
      // valid; the null slots are removed after the loop.
      currentAdaptor->mergeInto(*lastAdaptor);
      it->reset();
    } else if (lastAdaptor) {
      // If this pass is not an adaptor, then coalesce and forget any existing
      // adaptor.
      for (auto &pm : lastAdaptor->getPassManagers())
        pm.getImpl().coalesceAdjacentAdaptorPasses();
      lastAdaptor = nullptr;
    }
  }

  // If there was an adaptor at the end of the manager, coalesce it as well.
  if (lastAdaptor) {
    for (auto &pm : lastAdaptor->getPassManagers())
      pm.getImpl().coalesceAdjacentAdaptorPasses();
  }

  // Now that the adaptors have been merged, erase the empty slot corresponding
  // to the merged adaptors that were nulled-out in the loop above.
  llvm::erase_if(passes, std::logical_not<std::unique_ptr<Pass>>());
}
|
|
|
|
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
// OpPassManager
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
|
2020-11-03 19:17:39 +08:00
|
|
|
/// Construct a pass manager operating on the given (already internalized)
/// operation name.
OpPassManager::OpPassManager(Identifier name, Nesting nesting)
    : impl(new OpPassManagerImpl(name, nesting)) {}
|
|
|
|
/// Construct a pass manager operating on the given operation name; the name is
/// internalized lazily once a context is available.
OpPassManager::OpPassManager(StringRef name, Nesting nesting)
    : impl(new OpPassManagerImpl(name, nesting)) {}
|
2019-09-10 04:43:51 +08:00
|
|
|
/// Move-construct from 'rhs', stealing its implementation state.
OpPassManager::OpPassManager(OpPassManager &&rhs) : impl(std::move(rhs.impl)) {}
|
2019-09-10 00:51:59 +08:00
|
|
|
/// Copy-construct by delegating to the copy-assignment operator, which clones
/// every contained pass.
OpPassManager::OpPassManager(const OpPassManager &rhs) { *this = rhs; }
|
|
|
|
/// Copy-assign from 'rhs': rebuild the implementation state and clone every
/// pass, recording each original as the clone's threading sibling.
OpPassManager &OpPassManager::operator=(const OpPassManager &rhs) {
  // Guard against self-assignment: resetting 'impl' first would destroy the
  // passes and generation we are about to copy from.
  if (this == &rhs)
    return *this;
  impl.reset(new OpPassManagerImpl(rhs.impl->name, rhs.impl->nesting));
  impl->initializationGeneration = rhs.impl->initializationGeneration;
  // Reserve up front to avoid repeated reallocation while cloning.
  impl->passes.reserve(rhs.impl->passes.size());
  for (auto &pass : rhs.impl->passes) {
    auto newPass = pass->clone();
    newPass->threadingSibling = pass.get();
    impl->passes.push_back(std::move(newPass));
  }
  return *this;
}
|
|
|
|
|
|
|
|
// Out-of-line so the unique_ptr<OpPassManagerImpl> destructor is instantiated
// where OpPassManagerImpl is complete.
OpPassManager::~OpPassManager() = default;
|
2019-02-28 02:57:59 +08:00
|
|
|
|
2019-12-06 03:52:58 +08:00
|
|
|
/// Return an iterator to the first held pass (dereferences through the
/// unique_ptr storage).
OpPassManager::pass_iterator OpPassManager::begin() {
  MutableArrayRef<std::unique_ptr<Pass>> passesRef = impl->passes;
  return passesRef.begin();
}
|
|
|
|
/// Return the past-the-end pass iterator.
OpPassManager::pass_iterator OpPassManager::end() {
  MutableArrayRef<std::unique_ptr<Pass>> passesRef = impl->passes;
  return passesRef.end();
}
|
|
|
|
|
Separate the Registration from Loading dialects in the Context
This changes the behavior of constructing MLIRContext to no longer load globally
registered dialects on construction. Instead Dialects are only loaded explicitly
on demand:
- the Parser is lazily loading Dialects in the context as it encounters them
during parsing. This is the only purpose for registering dialects and not load
them in the context.
- Passes are expected to declare the dialects they will create entity from
(Operations, Attributes, or Types), and the PassManager is loading Dialects into
the Context when starting a pipeline.
This changes simplifies the configuration of the registration: a compiler only
need to load the dialect for the IR it will emit, and the optimizer is
self-contained and load the required Dialects. For example in the Toy tutorial,
the compiler only needs to load the Toy dialect in the Context, all the others
(linalg, affine, std, LLVM, ...) are automatically loaded depending on the
optimization pipeline enabled.
To adjust to this change, stop using the existing dialect registration: the
global registry will be removed soon.
1) For passes, you need to override the method:
virtual void getDependentDialects(DialectRegistry ®istry) const {}
and registery on the provided registry any dialect that this pass can produce.
Passes defined in TableGen can provide this list in the dependentDialects list
field.
2) For dialects, on construction you can register dependent dialects using the
provided MLIRContext: `context.getOrLoadDialect<DialectName>()`
This is useful if a dialect may canonicalize or have interfaces involving
another dialect.
3) For loading IR, dialect that can be in the input file must be explicitly
registered with the context. `MlirOptMain()` is taking an explicit registry for
this purpose. See how the standalone-opt.cpp example is setup:
mlir::DialectRegistry registry;
registry.insert<mlir::standalone::StandaloneDialect>();
registry.insert<mlir::StandardOpsDialect>();
Only operations from these two dialects can be in the input file. To include all
of the dialects in MLIR Core, you can populate the registry this way:
mlir::registerAllDialects(registry);
4) For `mlir-translate` callback, as well as frontend, Dialects can be loaded in
the context before emitting the IR: context.getOrLoadDialect<ToyDialect>()
Differential Revision: https://reviews.llvm.org/D85622
2020-08-19 04:01:19 +08:00
|
|
|
/// Return a const iterator to the first held pass.
OpPassManager::const_pass_iterator OpPassManager::begin() const {
  ArrayRef<std::unique_ptr<Pass>> passesRef = impl->passes;
  return passesRef.begin();
}
|
|
|
|
/// Return the past-the-end const pass iterator.
OpPassManager::const_pass_iterator OpPassManager::end() const {
  ArrayRef<std::unique_ptr<Pass>> passesRef = impl->passes;
  return passesRef.end();
}
|
|
|
|
|
2019-09-03 10:24:47 +08:00
|
|
|
/// Nest a new operation pass manager for the given operation kind under this
/// pass manager.
OpPassManager &OpPassManager::nest(Identifier nestedName) {
  return impl->nest(nestedName);
}
|
|
|
|
/// Nest a new operation pass manager for the given operation kind under this
/// pass manager.
OpPassManager &OpPassManager::nest(StringRef nestedName) {
  return impl->nest(nestedName);
}
|
|
|
|
|
2019-09-15 08:37:03 +08:00
|
|
|
/// Add the given pass to this pass manager. If this pass has a concrete
/// operation type, it must be the same type as this pass manager.
void OpPassManager::addPass(std::unique_ptr<Pass> pass) {
  impl->addPass(std::move(pass));
}
|
|
|
|
|
|
|
|
/// Returns the number of passes held by this manager.
size_t OpPassManager::size() const { return impl->passes.size(); }
|
|
|
|
|
|
|
|
/// Returns the internal implementation instance.
OpPassManagerImpl &OpPassManager::getImpl() { return *impl; }
|
|
|
|
|
2020-09-03 13:57:57 +08:00
|
|
|
/// Return the operation name that this pass manager operates on, as a string.
StringRef OpPassManager::getOpName() const { return impl->name; }
|
2019-09-03 10:24:47 +08:00
|
|
|
|
|
|
|
/// Return the operation name that this pass manager operates on, internalized
/// (and cached) in the given context.
Identifier OpPassManager::getOpName(MLIRContext &context) const {
  return impl->getOpName(context);
}
|
2019-09-03 10:24:47 +08:00
|
|
|
|
2020-04-30 06:08:15 +08:00
|
|
|
/// Prints out the given passes as the textual representation of a pipeline.
|
|
|
|
static void printAsTextualPipeline(ArrayRef<std::unique_ptr<Pass>> passes,
|
2020-11-13 15:45:07 +08:00
|
|
|
raw_ostream &os) {
|
|
|
|
llvm::interleaveComma(passes, os, [&](const std::unique_ptr<Pass> &pass) {
|
|
|
|
pass->printAsTextualPipeline(os);
|
|
|
|
});
|
2019-10-10 04:48:38 +08:00
|
|
|
}
|
|
|
|
|
2020-04-30 06:08:15 +08:00
|
|
|
/// Prints out the passes of the pass manager as the textual representation
/// of pipelines.
void OpPassManager::printAsTextualPipeline(raw_ostream &os) {
  ::printAsTextualPipeline(impl->passes, os);
}
|
|
|
|
|
|
|
|
void OpPassManager::dump() {
|
|
|
|
llvm::errs() << "Pass Manager with " << impl->passes.size() << " passes: ";
|
2020-11-13 15:45:07 +08:00
|
|
|
::printAsTextualPipeline(impl->passes, llvm::errs());
|
2020-09-23 13:50:05 +08:00
|
|
|
llvm::errs() << "\n";
|
2020-04-30 06:08:15 +08:00
|
|
|
}
|
|
|
|
|
Separate the Registration from Loading dialects in the Context
This changes the behavior of constructing MLIRContext to no longer load globally
registered dialects on construction. Instead Dialects are only loaded explicitly
on demand:
- the Parser is lazily loading Dialects in the context as it encounters them
during parsing. This is the only purpose for registering dialects and not load
them in the context.
- Passes are expected to declare the dialects they will create entity from
(Operations, Attributes, or Types), and the PassManager is loading Dialects into
the Context when starting a pipeline.
This changes simplifies the configuration of the registration: a compiler only
need to load the dialect for the IR it will emit, and the optimizer is
self-contained and load the required Dialects. For example in the Toy tutorial,
the compiler only needs to load the Toy dialect in the Context, all the others
(linalg, affine, std, LLVM, ...) are automatically loaded depending on the
optimization pipeline enabled.
To adjust to this change, stop using the existing dialect registration: the
global registry will be removed soon.
1) For passes, you need to override the method:
virtual void getDependentDialects(DialectRegistry ®istry) const {}
and registery on the provided registry any dialect that this pass can produce.
Passes defined in TableGen can provide this list in the dependentDialects list
field.
2) For dialects, on construction you can register dependent dialects using the
provided MLIRContext: `context.getOrLoadDialect<DialectName>()`
This is useful if a dialect may canonicalize or have interfaces involving
another dialect.
3) For loading IR, dialect that can be in the input file must be explicitly
registered with the context. `MlirOptMain()` is taking an explicit registry for
this purpose. See how the standalone-opt.cpp example is setup:
mlir::DialectRegistry registry;
registry.insert<mlir::standalone::StandaloneDialect>();
registry.insert<mlir::StandardOpsDialect>();
Only operations from these two dialects can be in the input file. To include all
of the dialects in MLIR Core, you can populate the registry this way:
mlir::registerAllDialects(registry);
4) For `mlir-translate` callback, as well as frontend, Dialects can be loaded in
the context before emitting the IR: context.getOrLoadDialect<ToyDialect>()
Differential Revision: https://reviews.llvm.org/D85622
2020-08-19 04:01:19 +08:00
|
|
|
static void registerDialectsForPipeline(const OpPassManager &pm,
|
|
|
|
DialectRegistry &dialects) {
|
|
|
|
for (const Pass &pass : pm.getPasses())
|
|
|
|
pass.getDependentDialects(dialects);
|
|
|
|
}
|
|
|
|
|
|
|
|
/// Register the dialects that the passes of this pipeline may produce entities
/// from into the given registry.
void OpPassManager::getDependentDialects(DialectRegistry &dialects) const {
  registerDialectsForPipeline(*this, dialects);
}
|
|
|
|
|
2021-01-09 05:24:07 +08:00
|
|
|
/// Set the implicit-nesting behavior used for subsequently added passes.
void OpPassManager::setNesting(Nesting nesting) { impl->nesting = nesting; }
|
|
|
|
|
2020-11-11 13:33:47 +08:00
|
|
|
/// Return the current implicit-nesting mode of this pass manager.
OpPassManager::Nesting OpPassManager::getNesting() { return impl->nesting; }
|
|
|
|
|
2021-02-11 09:36:40 +08:00
|
|
|
/// (Re)initialize every pass in this pipeline for the given context. The
/// generation number makes the walk idempotent: a pipeline already at
/// 'newInitGeneration' is skipped.
LogicalResult OpPassManager::initialize(MLIRContext *context,
                                        unsigned newInitGeneration) {
  // Nothing to do if this pipeline was already brought to this generation.
  if (impl->initializationGeneration == newInitGeneration)
    return success();
  impl->initializationGeneration = newInitGeneration;

  for (Pass &pass : getPasses()) {
    auto *adaptor = dyn_cast<OpToOpPassAdaptor>(&pass);
    if (!adaptor) {
      // A regular pass is initialized directly.
      if (failed(pass.initialize(context)))
        return failure();
    } else {
      // An adaptor recursively initializes each of its nested pipelines.
      for (OpPassManager &adaptorPM : adaptor->getPassManagers())
        if (failed(adaptorPM.initialize(context, newInitGeneration)))
          return failure();
    }
  }
  return success();
}
|
2020-11-11 13:33:47 +08:00
|
|
|
|
2019-02-28 09:49:51 +08:00
|
|
|
//===----------------------------------------------------------------------===//
|
2019-09-03 10:24:47 +08:00
|
|
|
// OpToOpPassAdaptor
|
2019-02-28 09:49:51 +08:00
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
|
2020-08-27 12:42:38 +08:00
|
|
|
/// Run 'pass' on 'op': set up the pass state (including the dynamic-pipeline
/// callback), fire instrumentation hooks around the run, invalidate
/// non-preserved analyses, and optionally verify the IR afterwards. Returns
/// failure if the pass signaled failure or verification failed.
LogicalResult OpToOpPassAdaptor::run(Pass *pass, Operation *op,
                                     AnalysisManager am, bool verifyPasses,
                                     unsigned parentInitGeneration) {
  // Passes may only be scheduled on registered, isolated-from-above ops.
  if (!op->isRegistered())
    return op->emitOpError()
           << "trying to schedule a pass on an unregistered operation";
  if (!op->hasTrait<OpTrait::IsIsolatedFromAbove>())
    return op->emitOpError() << "trying to schedule a pass on an operation not "
                                "marked as 'IsolatedFromAbove'";

  // Initialize the pass state with a callback for the pass to dynamically
  // execute a pipeline on the currently visited operation.
  PassInstrumentor *pi = am.getPassInstrumentor();
  PassInstrumentation::PipelineParentInfo parentInfo = {llvm::get_threadid(),
                                                        pass};
  auto dynamic_pipeline_callback = [&](OpPassManager &pipeline,
                                       Operation *root) -> LogicalResult {
    // Dynamic pipelines may only target ops nested under the current op.
    if (!op->isAncestor(root))
      return root->emitOpError()
             << "Trying to schedule a dynamic pipeline on an "
                "operation that isn't "
                "nested under the current operation the pass is processing";
    assert(pipeline.getOpName() == root->getName().getStringRef());

    // Before running, make sure to coalesce any adjacent pass adaptors in the
    // pipeline.
    pipeline.getImpl().coalesceAdjacentAdaptorPasses();

    // Initialize the user provided pipeline and execute the pipeline.
    if (failed(pipeline.initialize(root->getContext(), parentInitGeneration)))
      return failure();
    // Reuse the current analysis manager when the target is the current op;
    // otherwise nest a fresh one for the child op.
    AnalysisManager nestedAm = root == op ? am : am.nest(root);
    return OpToOpPassAdaptor::runPipeline(pipeline.getPasses(), root, nestedAm,
                                          verifyPasses, parentInitGeneration,
                                          pi, &parentInfo);
  };
  pass->passState.emplace(op, am, dynamic_pipeline_callback);

  // Instrument before the pass has run.
  if (pi)
    pi->runBeforePass(pass, op);

  // Invoke the virtual runOnOperation method.
  if (auto *adaptor = dyn_cast<OpToOpPassAdaptor>(pass))
    adaptor->runOnOperation(verifyPasses);
  else
    pass->runOnOperation();
  bool passFailed = pass->passState->irAndPassFailed.getInt();

  // Invalidate any non preserved analyses.
  am.invalidate(pass->passState->preservedAnalyses);

  // When verifyPasses is specified, we run the verifier (unless the pass
  // failed).
  if (!passFailed && verifyPasses) {
    bool runVerifierNow = true;
    // Reduce compile time by avoiding running the verifier if the pass didn't
    // change the IR since the last time the verifier was run:
    //
    //  1) If the pass said that it preserved all analyses then it can't have
    //     permuted the IR.
    //  2) If we just ran an OpToOpPassAdaptor (e.g. to run function passes
    //     within a module) then each sub-unit will have been verified on the
    //     subunit (and those passes aren't allowed to modify the parent).
    //
    // We run these checks in EXPENSIVE_CHECKS mode out of caution.
#ifndef EXPENSIVE_CHECKS
    runVerifierNow = !isa<OpToOpPassAdaptor>(pass) &&
                     !pass->passState->preservedAnalyses.isAll();
#endif
    if (runVerifierNow)
      passFailed = failed(verify(op));
  }

  // Instrument after the pass has run.
  if (pi) {
    if (passFailed)
      pi->runAfterPassFailed(pass, op);
    else
      pi->runAfterPass(pass, op);
  }

  // Return if the pass signaled a failure.
  return failure(passFailed);
}
|
|
|
|
|
|
|
|
/// Run the given operation and analysis manager on a provided op pass manager.
/// Fires the pipeline instrumentation hooks (when 'instrumentor' is non-null,
/// 'parentInfo' must also be provided) and stops at the first failing pass.
LogicalResult OpToOpPassAdaptor::runPipeline(
    iterator_range<OpPassManager::pass_iterator> passes, Operation *op,
    AnalysisManager am, bool verifyPasses, unsigned parentInitGeneration,
    PassInstrumentor *instrumentor,
    const PassInstrumentation::PipelineParentInfo *parentInfo) {
  assert((!instrumentor || parentInfo) &&
         "expected parent info if instrumentor is provided");
  auto scope_exit = llvm::make_scope_exit([&] {
    // Clear out any computed operation analyses. These analyses won't be used
    // any more in this pipeline, and this helps reduce the current working set
    // of memory. If preserving these analyses becomes important in the future
    // we can re-evaluate this.
    am.clear();
  });

  // Run the pipeline over the provided operation.
  if (instrumentor)
    instrumentor->runBeforePipeline(op->getName().getIdentifier(), *parentInfo);
  for (Pass &pass : passes)
    if (failed(run(&pass, op, am, verifyPasses, parentInitGeneration)))
      return failure();
  if (instrumentor)
    instrumentor->runAfterPipeline(op->getName().getIdentifier(), *parentInfo);
  return success();
}
|
|
|
|
|
2019-09-10 00:51:59 +08:00
|
|
|
/// Find an operation pass manager that can operate on an operation of the given
|
|
|
|
/// type, or nullptr if one does not exist.
|
|
|
|
static OpPassManager *findPassManagerFor(MutableArrayRef<OpPassManager> mgrs,
|
2020-09-03 13:57:57 +08:00
|
|
|
StringRef name) {
|
2019-09-10 00:51:59 +08:00
|
|
|
auto it = llvm::find_if(
|
|
|
|
mgrs, [&](OpPassManager &mgr) { return mgr.getOpName() == name; });
|
|
|
|
return it == mgrs.end() ? nullptr : &*it;
|
|
|
|
}
|
|
|
|
|
2020-09-03 13:57:57 +08:00
|
|
|
/// Find an operation pass manager that can operate on an operation of the given
|
|
|
|
/// type, or nullptr if one does not exist.
|
|
|
|
static OpPassManager *findPassManagerFor(MutableArrayRef<OpPassManager> mgrs,
|
|
|
|
Identifier name,
|
|
|
|
MLIRContext &context) {
|
|
|
|
auto it = llvm::find_if(
|
|
|
|
mgrs, [&](OpPassManager &mgr) { return mgr.getOpName(context) == name; });
|
|
|
|
return it == mgrs.end() ? nullptr : &*it;
|
|
|
|
}
|
|
|
|
|
2020-04-30 06:08:05 +08:00
|
|
|
OpToOpPassAdaptor::OpToOpPassAdaptor(OpPassManager &&mgr) {
|
2019-09-10 00:51:59 +08:00
|
|
|
mgrs.emplace_back(std::move(mgr));
|
|
|
|
}
|
|
|
|
|
Separate the Registration from Loading dialects in the Context
This changes the behavior of constructing MLIRContext to no longer load globally
registered dialects on construction. Instead Dialects are only loaded explicitly
on demand:
- the Parser is lazily loading Dialects in the context as it encounters them
during parsing. This is the only purpose for registering dialects and not load
them in the context.
- Passes are expected to declare the dialects they will create entity from
(Operations, Attributes, or Types), and the PassManager is loading Dialects into
the Context when starting a pipeline.
This changes simplifies the configuration of the registration: a compiler only
need to load the dialect for the IR it will emit, and the optimizer is
self-contained and load the required Dialects. For example in the Toy tutorial,
the compiler only needs to load the Toy dialect in the Context, all the others
(linalg, affine, std, LLVM, ...) are automatically loaded depending on the
optimization pipeline enabled.
To adjust to this change, stop using the existing dialect registration: the
global registry will be removed soon.
1) For passes, you need to override the method:
virtual void getDependentDialects(DialectRegistry ®istry) const {}
and registery on the provided registry any dialect that this pass can produce.
Passes defined in TableGen can provide this list in the dependentDialects list
field.
2) For dialects, on construction you can register dependent dialects using the
provided MLIRContext: `context.getOrLoadDialect<DialectName>()`
This is useful if a dialect may canonicalize or have interfaces involving
another dialect.
3) For loading IR, dialect that can be in the input file must be explicitly
registered with the context. `MlirOptMain()` is taking an explicit registry for
this purpose. See how the standalone-opt.cpp example is setup:
mlir::DialectRegistry registry;
registry.insert<mlir::standalone::StandaloneDialect>();
registry.insert<mlir::StandardOpsDialect>();
Only operations from these two dialects can be in the input file. To include all
of the dialects in MLIR Core, you can populate the registry this way:
mlir::registerAllDialects(registry);
4) For `mlir-translate` callback, as well as frontend, Dialects can be loaded in
the context before emitting the IR: context.getOrLoadDialect<ToyDialect>()
Differential Revision: https://reviews.llvm.org/D85622
2020-08-19 04:01:19 +08:00
|
|
|
/// Accumulate into `dialects` the dependent dialects of every pipeline this
/// adaptor holds.
void OpToOpPassAdaptor::getDependentDialects(DialectRegistry &dialects) const {
  for (const OpPassManager &pm : mgrs)
    pm.getDependentDialects(dialects);
}
|
|
|
|
|
2019-09-10 00:51:59 +08:00
|
|
|
/// Merge the current pass adaptor into given 'rhs'.
|
2020-04-30 06:08:05 +08:00
|
|
|
void OpToOpPassAdaptor::mergeInto(OpToOpPassAdaptor &rhs) {
|
2019-09-10 00:51:59 +08:00
|
|
|
for (auto &pm : mgrs) {
|
|
|
|
// If an existing pass manager exists, then merge the given pass manager
|
|
|
|
// into it.
|
|
|
|
if (auto *existingPM = findPassManagerFor(rhs.mgrs, pm.getOpName())) {
|
|
|
|
pm.getImpl().mergeInto(existingPM->getImpl());
|
|
|
|
} else {
|
|
|
|
// Otherwise, add the given pass manager to the list.
|
|
|
|
rhs.mgrs.emplace_back(std::move(pm));
|
|
|
|
}
|
|
|
|
}
|
|
|
|
mgrs.clear();
|
|
|
|
|
|
|
|
// After coalescing, sort the pass managers within rhs by name.
|
|
|
|
llvm::array_pod_sort(rhs.mgrs.begin(), rhs.mgrs.end(),
|
|
|
|
[](const OpPassManager *lhs, const OpPassManager *rhs) {
|
2020-09-03 13:57:57 +08:00
|
|
|
return lhs->getOpName().compare(rhs->getOpName());
|
2019-09-10 00:51:59 +08:00
|
|
|
});
|
|
|
|
}
|
|
|
|
|
2019-12-06 03:52:58 +08:00
|
|
|
/// Returns the adaptor pass name.
|
2020-04-30 06:08:05 +08:00
|
|
|
std::string OpToOpPassAdaptor::getAdaptorName() {
|
2019-12-06 03:52:58 +08:00
|
|
|
std::string name = "Pipeline Collection : [";
|
|
|
|
llvm::raw_string_ostream os(name);
|
2020-04-15 05:53:28 +08:00
|
|
|
llvm::interleaveComma(getPassManagers(), os, [&](OpPassManager &pm) {
|
2019-12-06 03:52:58 +08:00
|
|
|
os << '\'' << pm.getOpName() << '\'';
|
|
|
|
});
|
|
|
|
os << ']';
|
|
|
|
return os.str();
|
|
|
|
}
|
|
|
|
|
2019-09-03 10:24:47 +08:00
|
|
|
void OpToOpPassAdaptor::runOnOperation() {
  // An adaptor is only ever driven through the explicit runOnOperation(bool)
  // entry point below, which threads the verifier flag through; hitting this
  // base-class overload indicates a bug in the caller.
  llvm_unreachable(
      "Unexpected call to Pass::runOnOperation() on OpToOpPassAdaptor");
}
|
|
|
|
|
|
|
|
/// Run the held pipeline over all nested operations.
|
|
|
|
void OpToOpPassAdaptor::runOnOperation(bool verifyPasses) {
|
2020-05-03 03:28:57 +08:00
|
|
|
if (getContext().isMultithreadingEnabled())
|
2020-11-03 19:17:01 +08:00
|
|
|
runOnOperationAsyncImpl(verifyPasses);
|
2020-05-03 03:28:57 +08:00
|
|
|
else
|
2020-11-03 19:17:01 +08:00
|
|
|
runOnOperationImpl(verifyPasses);
|
2020-04-30 06:08:05 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
/// Run this pass adaptor synchronously.
|
2020-11-03 19:17:01 +08:00
|
|
|
void OpToOpPassAdaptor::runOnOperationImpl(bool verifyPasses) {
|
2019-09-03 10:24:47 +08:00
|
|
|
auto am = getAnalysisManager();
|
2019-10-01 08:44:31 +08:00
|
|
|
PassInstrumentation::PipelineParentInfo parentInfo = {llvm::get_threadid(),
|
|
|
|
this};
|
2019-09-09 10:57:25 +08:00
|
|
|
auto *instrumentor = am.getPassInstrumentor();
|
2019-09-03 10:24:47 +08:00
|
|
|
for (auto ®ion : getOperation()->getRegions()) {
|
|
|
|
for (auto &block : region) {
|
|
|
|
for (auto &op : block) {
|
2020-09-03 13:57:57 +08:00
|
|
|
auto *mgr = findPassManagerFor(mgrs, op.getName().getIdentifier(),
|
|
|
|
*op.getContext());
|
2019-09-10 00:51:59 +08:00
|
|
|
if (!mgr)
|
2019-09-09 10:57:25 +08:00
|
|
|
continue;
|
|
|
|
|
2019-09-03 10:24:47 +08:00
|
|
|
// Run the held pipeline over the current operation.
|
2021-01-09 05:24:07 +08:00
|
|
|
unsigned initGeneration = mgr->impl->initializationGeneration;
|
2020-12-15 10:07:45 +08:00
|
|
|
if (failed(runPipeline(mgr->getPasses(), &op, am.nest(&op),
|
2021-01-09 05:24:07 +08:00
|
|
|
verifyPasses, initGeneration, instrumentor,
|
|
|
|
&parentInfo)))
|
2019-09-03 10:24:47 +08:00
|
|
|
return signalPassFailure();
|
|
|
|
}
|
|
|
|
}
|
2019-02-28 02:57:59 +08:00
|
|
|
}
|
|
|
|
}
|
2019-02-28 09:49:51 +08:00
|
|
|
|
2019-09-10 00:51:59 +08:00
|
|
|
/// Utility functor that checks if the two ranges of pass managers have a size
|
|
|
|
/// mismatch.
|
|
|
|
static bool hasSizeMismatch(ArrayRef<OpPassManager> lhs,
|
|
|
|
ArrayRef<OpPassManager> rhs) {
|
|
|
|
return lhs.size() != rhs.size() ||
|
|
|
|
llvm::any_of(llvm::seq<size_t>(0, lhs.size()),
|
|
|
|
[&](size_t i) { return lhs[i].size() != rhs[i].size(); });
|
|
|
|
}
|
2019-09-03 10:24:47 +08:00
|
|
|
|
2020-04-30 06:08:05 +08:00
|
|
|
/// Run this pass adaptor synchronously.
|
2020-11-03 19:17:01 +08:00
|
|
|
void OpToOpPassAdaptor::runOnOperationAsyncImpl(bool verifyPasses) {
|
2019-08-29 06:10:37 +08:00
|
|
|
AnalysisManager am = getAnalysisManager();
|
2021-06-23 11:16:10 +08:00
|
|
|
MLIRContext *context = &getContext();
|
2019-03-27 12:15:54 +08:00
|
|
|
|
|
|
|
// Create the async executors if they haven't been created, or if the main
|
2019-09-03 10:24:47 +08:00
|
|
|
// pipeline has changed.
|
2019-09-10 00:51:59 +08:00
|
|
|
if (asyncExecutors.empty() || hasSizeMismatch(asyncExecutors.front(), mgrs))
|
[Support] On Windows, ensure hardware_concurrency() extends to all CPU sockets and all NUMA groups
The goal of this patch is to maximize CPU utilization on multi-socket or high core count systems, so that parallel computations such as LLD/ThinLTO can use all hardware threads in the system. Before this patch, on Windows, a maximum of 64 hardware threads could be used at most, in some cases dispatched only on one CPU socket.
== Background ==
Windows doesn't have a flat cpu_set_t like Linux. Instead, it projects hardware CPUs (or NUMA nodes) to applications through a concept of "processor groups". A "processor" is the smallest unit of execution on a CPU, that is, an hyper-thread if SMT is active; a core otherwise. There's a limit of 32-bit processors on older 32-bit versions of Windows, which later was raised to 64-processors with 64-bit versions of Windows. This limit comes from the affinity mask, which historically is represented by the sizeof(void*). Consequently, the concept of "processor groups" was introduced for dealing with systems with more than 64 hyper-threads.
By default, the Windows OS assigns only one "processor group" to each starting application, in a round-robin manner. If the application wants to use more processors, it needs to programmatically enable it, by assigning threads to other "processor groups". This also means that affinity cannot cross "processor group" boundaries; one can only specify a "preferred" group on start-up, but the application is free to allocate more groups if it wants to.
This creates a peculiar situation, where newer CPUs like the AMD EPYC 7702P (64-cores, 128-hyperthreads) are projected by the OS as two (2) "processor groups". This means that by default, an application can only use half of the cores. This situation could only get worse in the years to come, as dies with more cores will appear on the market.
== The problem ==
The heavyweight_hardware_concurrency() API was introduced so that only *one hardware thread per core* was used. Once that API returns, that original intention is lost, only the number of threads is retained. Consider a situation, on Windows, where the system has 2 CPU sockets, 18 cores each, each core having 2 hyper-threads, for a total of 72 hyper-threads. Both heavyweight_hardware_concurrency() and hardware_concurrency() currently return 36, because on Windows they are simply wrappers over std::thread::hardware_concurrency() -- which can only return processors from the current "processor group".
== The changes in this patch ==
To solve this situation, we capture (and retain) the initial intention until the point of usage, through a new ThreadPoolStrategy class. The number of threads to use is deferred as late as possible, until the moment where the std::threads are created (ThreadPool in the case of ThinLTO).
When using hardware_concurrency(), setting ThreadCount to 0 now means to use all the possible hardware CPU (SMT) threads. Providing a ThreadCount above to the maximum number of threads will have no effect, the maximum will be used instead.
The heavyweight_hardware_concurrency() is similar to hardware_concurrency(), except that only one thread per hardware *core* will be used.
When LLVM_ENABLE_THREADS is OFF, the threading APIs will always return 1, to ensure any caller loops will be exercised at least once.
Differential Revision: https://reviews.llvm.org/D71775
2020-02-14 11:49:57 +08:00
|
|
|
asyncExecutors.assign(llvm::hardware_concurrency().compute_thread_count(),
|
|
|
|
mgrs);
|
2019-09-03 10:24:47 +08:00
|
|
|
|
2020-12-04 07:46:32 +08:00
|
|
|
// Run a prepass over the operation to collect the nested operations to
|
|
|
|
// execute over. This ensures that an analysis manager exists for each
|
|
|
|
// operation, as well as providing a queue of operations to execute over.
|
2019-09-03 10:24:47 +08:00
|
|
|
std::vector<std::pair<Operation *, AnalysisManager>> opAMPairs;
|
|
|
|
for (auto ®ion : getOperation()->getRegions()) {
|
|
|
|
for (auto &block : region) {
|
|
|
|
for (auto &op : block) {
|
2020-09-03 04:09:07 +08:00
|
|
|
// Add this operation iff the name matches any of the pass managers.
|
2021-06-23 11:16:10 +08:00
|
|
|
if (findPassManagerFor(mgrs, op.getName().getIdentifier(), *context))
|
2020-08-29 04:17:38 +08:00
|
|
|
opAMPairs.emplace_back(&op, am.nest(&op));
|
2019-09-03 10:24:47 +08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
2019-03-27 12:15:54 +08:00
|
|
|
|
2019-09-09 10:57:25 +08:00
|
|
|
// Get the current thread for this adaptor.
|
2019-10-01 08:44:31 +08:00
|
|
|
PassInstrumentation::PipelineParentInfo parentInfo = {llvm::get_threadid(),
|
|
|
|
this};
|
2019-09-09 10:57:25 +08:00
|
|
|
auto *instrumentor = am.getPassInstrumentor();
|
|
|
|
|
2019-03-27 12:15:54 +08:00
|
|
|
// An atomic failure variable for the async executors.
|
2021-06-23 09:16:55 +08:00
|
|
|
std::vector<std::atomic<bool>> activePMs(asyncExecutors.size());
|
|
|
|
std::fill(activePMs.begin(), activePMs.end(), false);
|
|
|
|
auto processFn = [&](auto &opPMPair) {
|
|
|
|
// Find a pass manager for this operation.
|
|
|
|
auto it = llvm::find_if(activePMs, [](std::atomic<bool> &isActive) {
|
|
|
|
bool expectedInactive = false;
|
|
|
|
return isActive.compare_exchange_strong(expectedInactive, true);
|
|
|
|
});
|
|
|
|
unsigned pmIndex = it - activePMs.begin();
|
|
|
|
|
|
|
|
// Get the pass manager for this operation and execute it.
|
2021-06-23 11:16:10 +08:00
|
|
|
auto *pm =
|
|
|
|
findPassManagerFor(asyncExecutors[pmIndex],
|
|
|
|
opPMPair.first->getName().getIdentifier(), *context);
|
2021-06-23 09:16:55 +08:00
|
|
|
assert(pm && "expected valid pass manager for operation");
|
|
|
|
|
|
|
|
unsigned initGeneration = pm->impl->initializationGeneration;
|
|
|
|
LogicalResult pipelineResult =
|
|
|
|
runPipeline(pm->getPasses(), opPMPair.first, opPMPair.second,
|
|
|
|
verifyPasses, initGeneration, instrumentor, &parentInfo);
|
|
|
|
|
|
|
|
// Reset the active bit for this pass manager.
|
|
|
|
activePMs[pmIndex].store(false);
|
|
|
|
return pipelineResult;
|
|
|
|
};
|
2019-03-27 12:15:54 +08:00
|
|
|
|
|
|
|
// Signal a failure if any of the executors failed.
|
2021-06-23 11:16:10 +08:00
|
|
|
if (failed(failableParallelForEach(context, opAMPairs, processFn)))
|
2019-03-27 12:15:54 +08:00
|
|
|
signalPassFailure();
|
|
|
|
}
|
|
|
|
|
2019-07-13 02:20:09 +08:00
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
// PassManager
|
|
|
|
//===----------------------------------------------------------------------===//
|
2019-03-05 04:33:13 +08:00
|
|
|
|
2020-12-04 07:46:32 +08:00
|
|
|
// Anchor the top-level pipeline on `operationName` within `ctx`. The
// initialization key starts at the hash_code tombstone so the first run()
// observes a key mismatch and (re)initializes the passes.
PassManager::PassManager(MLIRContext *ctx, Nesting nesting,
                         StringRef operationName)
    : OpPassManager(Identifier::get(operationName, ctx), nesting), context(ctx),
      initializationKey(DenseMapInfo<llvm::hash_code>::getTombstoneKey()),
      passTiming(false), verifyPasses(true) {}
|
2019-02-28 09:49:51 +08:00
|
|
|
|
|
|
|
// Defaulted out-of-line (rather than an empty body) per modern C++ convention;
// behavior is identical.
PassManager::~PassManager() = default;
|
|
|
|
|
2020-11-03 19:17:01 +08:00
|
|
|
/// Enable or disable running the verifier after each pass of this manager.
void PassManager::enableVerifier(bool enabled) {
  verifyPasses = enabled;
}
|
|
|
|
|
2020-12-04 07:46:32 +08:00
|
|
|
/// Run the passes within this manager on the provided operation.
|
|
|
|
LogicalResult PassManager::run(Operation *op) {
|
|
|
|
MLIRContext *context = getContext();
|
|
|
|
assert(op->getName().getIdentifier() == getOpName(*context) &&
|
2021-03-17 04:40:35 +08:00
|
|
|
"operation has a different name than the PassManager or is from a "
|
|
|
|
"different context");
|
2020-12-04 07:46:32 +08:00
|
|
|
|
2019-09-10 00:51:59 +08:00
|
|
|
// Before running, make sure to coalesce any adjacent pass adaptors in the
|
|
|
|
// pipeline.
|
2019-09-14 03:09:50 +08:00
|
|
|
getImpl().coalesceAdjacentAdaptorPasses();
|
2019-09-10 00:51:59 +08:00
|
|
|
|
Separate the Registration from Loading dialects in the Context
This changes the behavior of constructing MLIRContext to no longer load globally
registered dialects on construction. Instead Dialects are only loaded explicitly
on demand:
- the Parser is lazily loading Dialects in the context as it encounters them
during parsing. This is the only purpose for registering dialects and not load
them in the context.
- Passes are expected to declare the dialects they will create entity from
(Operations, Attributes, or Types), and the PassManager is loading Dialects into
the Context when starting a pipeline.
This changes simplifies the configuration of the registration: a compiler only
need to load the dialect for the IR it will emit, and the optimizer is
self-contained and load the required Dialects. For example in the Toy tutorial,
the compiler only needs to load the Toy dialect in the Context, all the others
(linalg, affine, std, LLVM, ...) are automatically loaded depending on the
optimization pipeline enabled.
To adjust to this change, stop using the existing dialect registration: the
global registry will be removed soon.
1) For passes, you need to override the method:
virtual void getDependentDialects(DialectRegistry ®istry) const {}
and registery on the provided registry any dialect that this pass can produce.
Passes defined in TableGen can provide this list in the dependentDialects list
field.
2) For dialects, on construction you can register dependent dialects using the
provided MLIRContext: `context.getOrLoadDialect<DialectName>()`
This is useful if a dialect may canonicalize or have interfaces involving
another dialect.
3) For loading IR, dialect that can be in the input file must be explicitly
registered with the context. `MlirOptMain()` is taking an explicit registry for
this purpose. See how the standalone-opt.cpp example is setup:
mlir::DialectRegistry registry;
registry.insert<mlir::standalone::StandaloneDialect>();
registry.insert<mlir::StandardOpsDialect>();
Only operations from these two dialects can be in the input file. To include all
of the dialects in MLIR Core, you can populate the registry this way:
mlir::registerAllDialects(registry);
4) For `mlir-translate` callback, as well as frontend, Dialects can be loaded in
the context before emitting the IR: context.getOrLoadDialect<ToyDialect>()
Differential Revision: https://reviews.llvm.org/D85622
2020-08-19 04:01:19 +08:00
|
|
|
// Register all dialects for the current pipeline.
|
|
|
|
DialectRegistry dependentDialects;
|
|
|
|
getDependentDialects(dependentDialects);
|
2021-02-10 17:11:50 +08:00
|
|
|
context->appendDialectRegistry(dependentDialects);
|
|
|
|
for (StringRef name : dependentDialects.getDialectNames())
|
|
|
|
context->getOrLoadDialect(name);
|
Separate the Registration from Loading dialects in the Context
This changes the behavior of constructing MLIRContext to no longer load globally
registered dialects on construction. Instead Dialects are only loaded explicitly
on demand:
- the Parser is lazily loading Dialects in the context as it encounters them
during parsing. This is the only purpose for registering dialects and not load
them in the context.
- Passes are expected to declare the dialects they will create entity from
(Operations, Attributes, or Types), and the PassManager is loading Dialects into
the Context when starting a pipeline.
This changes simplifies the configuration of the registration: a compiler only
need to load the dialect for the IR it will emit, and the optimizer is
self-contained and load the required Dialects. For example in the Toy tutorial,
the compiler only needs to load the Toy dialect in the Context, all the others
(linalg, affine, std, LLVM, ...) are automatically loaded depending on the
optimization pipeline enabled.
To adjust to this change, stop using the existing dialect registration: the
global registry will be removed soon.
1) For passes, you need to override the method:
virtual void getDependentDialects(DialectRegistry ®istry) const {}
and registery on the provided registry any dialect that this pass can produce.
Passes defined in TableGen can provide this list in the dependentDialects list
field.
2) For dialects, on construction you can register dependent dialects using the
provided MLIRContext: `context.getOrLoadDialect<DialectName>()`
This is useful if a dialect may canonicalize or have interfaces involving
another dialect.
3) For loading IR, dialect that can be in the input file must be explicitly
registered with the context. `MlirOptMain()` is taking an explicit registry for
this purpose. See how the standalone-opt.cpp example is setup:
mlir::DialectRegistry registry;
registry.insert<mlir::standalone::StandaloneDialect>();
registry.insert<mlir::StandardOpsDialect>();
Only operations from these two dialects can be in the input file. To include all
of the dialects in MLIR Core, you can populate the registry this way:
mlir::registerAllDialects(registry);
4) For `mlir-translate` callback, as well as frontend, Dialects can be loaded in
the context before emitting the IR: context.getOrLoadDialect<ToyDialect>()
Differential Revision: https://reviews.llvm.org/D85622
2020-08-19 04:01:19 +08:00
|
|
|
|
2021-01-09 05:24:07 +08:00
|
|
|
// Initialize all of the passes within the pass manager with a new generation.
|
2021-01-27 08:54:25 +08:00
|
|
|
llvm::hash_code newInitKey = context->getRegistryHash();
|
|
|
|
if (newInitKey != initializationKey) {
|
2021-02-11 09:36:40 +08:00
|
|
|
if (failed(initialize(context, impl->initializationGeneration + 1)))
|
|
|
|
return failure();
|
2021-01-27 08:54:25 +08:00
|
|
|
initializationKey = newInitKey;
|
|
|
|
}
|
2021-01-09 05:24:07 +08:00
|
|
|
|
2020-12-04 07:46:32 +08:00
|
|
|
// Construct a top level analysis manager for the pipeline.
|
|
|
|
ModuleAnalysisManager am(op, instrumentor.get());
|
2019-10-11 10:19:11 +08:00
|
|
|
|
2020-08-24 13:03:59 +08:00
|
|
|
// Notify the context that we start running a pipeline for book keeping.
|
2020-12-04 07:46:32 +08:00
|
|
|
context->enterMultiThreadedExecution();
|
2020-08-24 13:03:59 +08:00
|
|
|
|
2019-10-11 10:19:11 +08:00
|
|
|
// If reproducer generation is enabled, run the pass manager with crash
|
|
|
|
// handling enabled.
|
2020-12-04 07:46:32 +08:00
|
|
|
LogicalResult result =
|
2021-05-20 07:53:35 +08:00
|
|
|
crashReproGenerator ? runWithCrashRecovery(op, am) : runPasses(op, am);
|
2019-12-06 03:52:58 +08:00
|
|
|
|
2020-08-24 13:03:59 +08:00
|
|
|
// Notify the context that the run is done.
|
2020-12-04 07:46:32 +08:00
|
|
|
context->exitMultiThreadedExecution();
|
2020-08-24 13:03:59 +08:00
|
|
|
|
2019-12-06 03:52:58 +08:00
|
|
|
// Dump all of the pass statistics if necessary.
|
|
|
|
if (passStatisticsMode)
|
|
|
|
dumpStatistics();
|
|
|
|
return result;
|
2019-03-13 04:08:48 +08:00
|
|
|
}
|
|
|
|
|
2019-09-15 08:44:10 +08:00
|
|
|
/// Add the provided instrumentation to the pass manager.
|
|
|
|
void PassManager::addInstrumentation(std::unique_ptr<PassInstrumentation> pi) {
|
2019-03-11 05:45:47 +08:00
|
|
|
if (!instrumentor)
|
2019-09-23 17:33:51 +08:00
|
|
|
instrumentor = std::make_unique<PassInstrumentor>();
|
2019-03-11 05:45:47 +08:00
|
|
|
|
2019-09-15 08:44:10 +08:00
|
|
|
instrumentor->addInstrumentation(std::move(pi));
|
2019-03-11 05:45:47 +08:00
|
|
|
}
|
|
|
|
|
2021-05-20 07:53:35 +08:00
|
|
|
/// Drive the top-level pipeline through the shared pipeline runner.
LogicalResult PassManager::runPasses(Operation *op, AnalysisManager am) {
  unsigned initGeneration = impl->initializationGeneration;
  return OpToOpPassAdaptor::runPipeline(getPasses(), op, am, verifyPasses,
                                        initGeneration);
}
|
|
|
|
|
Implement the initial AnalysisManagement infrastructure, with the introduction of the FunctionAnalysisManager and ModuleAnalysisManager classes. These classes provide analysis computation, caching, and invalidation for a specific IR unit. The invalidation is currently limited to either all or none, i.e. you cannot yet preserve specific analyses.
An analysis can be any class, but it must provide the following:
* A constructor for a given IR unit.
struct MyAnalysis {
// Compute this analysis with the provided module.
MyAnalysis(Module *module);
};
Analyses can be accessed from a Pass by calling either the 'getAnalysisResult<AnalysisT>' or 'getCachedAnalysisResult<AnalysisT>' methods. A FunctionPass may query for a cached analysis on the parent module with 'getCachedModuleAnalysisResult'. Similary, a ModulePass may query an analysis, it doesn't need to be cached, on a child function with 'getFunctionAnalysisResult'.
By default, when running a pass all cached analyses are set to be invalidated. If no transformation was performed, a pass can use the method 'markAllAnalysesPreserved' to preserve all analysis results. As noted above, preserving specific analyses is not yet supported.
PiperOrigin-RevId: 236505642
2019-03-03 13:46:58 +08:00
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
// AnalysisManager
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
|
2020-12-15 10:07:45 +08:00
|
|
|
/// Get an analysis manager for the given operation, which must be a proper
|
|
|
|
/// descendant of the current operation represented by this analysis manager.
|
|
|
|
AnalysisManager AnalysisManager::nest(Operation *op) {
|
|
|
|
Operation *currentOp = impl->getOperation();
|
|
|
|
assert(currentOp->isProperAncestor(op) &&
|
|
|
|
"expected valid descendant operation");
|
|
|
|
|
|
|
|
// Check for the base case where the provided operation is immediately nested.
|
|
|
|
if (currentOp == op->getParentOp())
|
|
|
|
return nestImmediate(op);
|
|
|
|
|
|
|
|
// Otherwise, we need to collect all ancestors up to the current operation.
|
|
|
|
SmallVector<Operation *, 4> opAncestors;
|
|
|
|
do {
|
|
|
|
opAncestors.push_back(op);
|
|
|
|
op = op->getParentOp();
|
|
|
|
} while (op != currentOp);
|
|
|
|
|
|
|
|
AnalysisManager result = *this;
|
|
|
|
for (Operation *op : llvm::reverse(opAncestors))
|
|
|
|
result = result.nestImmediate(op);
|
|
|
|
return result;
|
2019-03-11 05:45:47 +08:00
|
|
|
}
|
|
|
|
|
2020-12-15 10:07:45 +08:00
|
|
|
/// Get an analysis manager for the given immediately nested child operation.
|
|
|
|
AnalysisManager AnalysisManager::nestImmediate(Operation *op) {
|
|
|
|
assert(impl->getOperation() == op->getParentOp() &&
|
|
|
|
"expected immediate child operation");
|
|
|
|
|
2019-08-29 06:10:37 +08:00
|
|
|
auto it = impl->childAnalyses.find(op);
|
|
|
|
if (it == impl->childAnalyses.end())
|
|
|
|
it = impl->childAnalyses
|
2020-12-15 10:07:45 +08:00
|
|
|
.try_emplace(op, std::make_unique<NestedAnalysisMap>(op, impl))
|
2019-08-29 06:10:37 +08:00
|
|
|
.first;
|
2020-12-15 10:07:45 +08:00
|
|
|
return {it->second.get()};
|
Implement the initial AnalysisManagement infrastructure, with the introduction of the FunctionAnalysisManager and ModuleAnalysisManager classes. These classes provide analysis computation, caching, and invalidation for a specific IR unit. The invalidation is currently limited to either all or none, i.e. you cannot yet preserve specific analyses.
An analysis can be any class, but it must provide the following:
* A constructor for a given IR unit.
struct MyAnalysis {
// Compute this analysis with the provided module.
MyAnalysis(Module *module);
};
Analyses can be accessed from a Pass by calling either the 'getAnalysisResult<AnalysisT>' or 'getCachedAnalysisResult<AnalysisT>' methods. A FunctionPass may query for a cached analysis on the parent module with 'getCachedModuleAnalysisResult'. Similary, a ModulePass may query an analysis, it doesn't need to be cached, on a child function with 'getFunctionAnalysisResult'.
By default, when running a pass all cached analyses are set to be invalidated. If no transformation was performed, a pass can use the method 'markAllAnalysesPreserved' to preserve all analysis results. As noted above, preserving specific analyses is not yet supported.
PiperOrigin-RevId: 236505642
2019-03-03 13:46:58 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
/// Invalidate any non preserved analyses.
|
2019-08-29 06:10:37 +08:00
|
|
|
void detail::NestedAnalysisMap::invalidate(
|
|
|
|
const detail::PreservedAnalyses &pa) {
|
2019-03-07 03:04:22 +08:00
|
|
|
// If all analyses were preserved, then there is nothing to do here.
|
Implement the initial AnalysisManagement infrastructure, with the introduction of the FunctionAnalysisManager and ModuleAnalysisManager classes. These classes provide analysis computation, caching, and invalidation for a specific IR unit. The invalidation is currently limited to either all or none, i.e. you cannot yet preserve specific analyses.
An analysis can be any class, but it must provide the following:
* A constructor for a given IR unit.
struct MyAnalysis {
// Compute this analysis with the provided module.
MyAnalysis(Module *module);
};
Analyses can be accessed from a Pass by calling either the 'getAnalysisResult<AnalysisT>' or 'getCachedAnalysisResult<AnalysisT>' methods. A FunctionPass may query for a cached analysis on the parent module with 'getCachedModuleAnalysisResult'. Similary, a ModulePass may query an analysis, it doesn't need to be cached, on a child function with 'getFunctionAnalysisResult'.
By default, when running a pass all cached analyses are set to be invalidated. If no transformation was performed, a pass can use the method 'markAllAnalysesPreserved' to preserve all analysis results. As noted above, preserving specific analyses is not yet supported.
PiperOrigin-RevId: 236505642
2019-03-03 13:46:58 +08:00
|
|
|
if (pa.isAll())
|
|
|
|
return;
|
|
|
|
|
2019-08-29 06:10:37 +08:00
|
|
|
// Invalidate the analyses for the current operation directly.
|
|
|
|
analyses.invalidate(pa);
|
2019-03-07 03:04:22 +08:00
|
|
|
|
2019-08-29 06:10:37 +08:00
|
|
|
// If no analyses were preserved, then just simply clear out the child
|
2019-03-07 03:04:22 +08:00
|
|
|
// analysis results.
|
|
|
|
if (pa.isNone()) {
|
2019-08-29 06:10:37 +08:00
|
|
|
childAnalyses.clear();
|
2019-03-07 03:04:22 +08:00
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
2019-08-29 06:10:37 +08:00
|
|
|
// Otherwise, invalidate each child analysis map.
|
|
|
|
SmallVector<NestedAnalysisMap *, 8> mapsToInvalidate(1, this);
|
|
|
|
while (!mapsToInvalidate.empty()) {
|
|
|
|
auto *map = mapsToInvalidate.pop_back_val();
|
|
|
|
for (auto &analysisPair : map->childAnalyses) {
|
|
|
|
analysisPair.second->invalidate(pa);
|
|
|
|
if (!analysisPair.second->childAnalyses.empty())
|
|
|
|
mapsToInvalidate.push_back(analysisPair.second.get());
|
|
|
|
}
|
|
|
|
}
|
Implement the initial AnalysisManagement infrastructure, with the introduction of the FunctionAnalysisManager and ModuleAnalysisManager classes. These classes provide analysis computation, caching, and invalidation for a specific IR unit. The invalidation is currently limited to either all or none, i.e. you cannot yet preserve specific analyses.
An analysis can be any class, but it must provide the following:
* A constructor for a given IR unit.
struct MyAnalysis {
// Compute this analysis with the provided module.
MyAnalysis(Module *module);
};
Analyses can be accessed from a Pass by calling either the 'getAnalysisResult<AnalysisT>' or 'getCachedAnalysisResult<AnalysisT>' methods. A FunctionPass may query for a cached analysis on the parent module with 'getCachedModuleAnalysisResult'. Similary, a ModulePass may query an analysis, it doesn't need to be cached, on a child function with 'getFunctionAnalysisResult'.
By default, when running a pass all cached analyses are set to be invalidated. If no transformation was performed, a pass can use the method 'markAllAnalysesPreserved' to preserve all analysis results. As noted above, preserving specific analyses is not yet supported.
PiperOrigin-RevId: 236505642
2019-03-03 13:46:58 +08:00
|
|
|
}
|
2019-03-11 06:44:47 +08:00
|
|
|
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
// PassInstrumentation
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
|
|
|
|
// Out-of-line to anchor the vtable; `= default` is the idiomatic spelling for
// an empty destructor body.
PassInstrumentation::~PassInstrumentation() = default;
|
2019-03-19 02:56:18 +08:00
|
|
|
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
// PassInstrumentor
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
|
|
|
|
namespace mlir {
namespace detail {
/// Private implementation state for PassInstrumentor (pimpl idiom), keeping
/// the instrumentation list and its lock out of the public header.
struct PassInstrumentorImpl {
  /// Mutex to keep instrumentation access thread-safe.
  llvm::sys::SmartMutex<true> mutex;

  /// Set of registered instrumentations, owned by this struct and notified in
  /// registration order (reverse order for the "after" callbacks).
  std::vector<std::unique_ptr<PassInstrumentation>> instrumentations;
};
} // end namespace detail
} // end namespace mlir
|
|
|
|
|
|
|
|
/// Construct the instrumentor with an empty set of instrumentations.
// make_unique avoids a raw `new` and is exception-safe.
PassInstrumentor::PassInstrumentor()
    : impl(std::make_unique<PassInstrumentorImpl>()) {}
|
|
|
|
// Defined out-of-line so PassInstrumentorImpl is complete when the
// unique_ptr member is destroyed; `= default` replaces the empty body.
PassInstrumentor::~PassInstrumentor() = default;
|
|
|
|
|
2019-09-09 10:57:25 +08:00
|
|
|
/// See PassInstrumentation::runBeforePipeline for details.
|
2019-10-01 08:44:31 +08:00
|
|
|
void PassInstrumentor::runBeforePipeline(
|
2020-09-03 04:09:07 +08:00
|
|
|
Identifier name,
|
2019-10-01 08:44:31 +08:00
|
|
|
const PassInstrumentation::PipelineParentInfo &parentInfo) {
|
2019-09-09 10:57:25 +08:00
|
|
|
llvm::sys::SmartScopedLock<true> instrumentationLock(impl->mutex);
|
|
|
|
for (auto &instr : impl->instrumentations)
|
2019-10-01 08:44:31 +08:00
|
|
|
instr->runBeforePipeline(name, parentInfo);
|
2019-09-09 10:57:25 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
/// See PassInstrumentation::runAfterPipeline for details.
|
2019-10-01 08:44:31 +08:00
|
|
|
void PassInstrumentor::runAfterPipeline(
|
2020-09-03 04:09:07 +08:00
|
|
|
Identifier name,
|
2019-10-01 08:44:31 +08:00
|
|
|
const PassInstrumentation::PipelineParentInfo &parentInfo) {
|
2019-09-09 10:57:25 +08:00
|
|
|
llvm::sys::SmartScopedLock<true> instrumentationLock(impl->mutex);
|
2019-10-01 08:44:31 +08:00
|
|
|
for (auto &instr : llvm::reverse(impl->instrumentations))
|
|
|
|
instr->runAfterPipeline(name, parentInfo);
|
2019-09-09 10:57:25 +08:00
|
|
|
}
|
|
|
|
|
2019-03-19 02:56:18 +08:00
|
|
|
/// See PassInstrumentation::runBeforePass for details.
|
2019-08-17 08:59:03 +08:00
|
|
|
void PassInstrumentor::runBeforePass(Pass *pass, Operation *op) {
|
2019-03-19 02:56:18 +08:00
|
|
|
llvm::sys::SmartScopedLock<true> instrumentationLock(impl->mutex);
|
|
|
|
for (auto &instr : impl->instrumentations)
|
2019-08-17 08:59:03 +08:00
|
|
|
instr->runBeforePass(pass, op);
|
2019-03-19 02:56:18 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
/// See PassInstrumentation::runAfterPass for details.
|
2019-08-17 08:59:03 +08:00
|
|
|
void PassInstrumentor::runAfterPass(Pass *pass, Operation *op) {
|
2019-03-19 02:56:18 +08:00
|
|
|
llvm::sys::SmartScopedLock<true> instrumentationLock(impl->mutex);
|
|
|
|
for (auto &instr : llvm::reverse(impl->instrumentations))
|
2019-08-17 08:59:03 +08:00
|
|
|
instr->runAfterPass(pass, op);
|
2019-03-19 02:56:18 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
/// See PassInstrumentation::runAfterPassFailed for details.
|
2019-08-17 08:59:03 +08:00
|
|
|
void PassInstrumentor::runAfterPassFailed(Pass *pass, Operation *op) {
|
2019-03-19 02:56:18 +08:00
|
|
|
llvm::sys::SmartScopedLock<true> instrumentationLock(impl->mutex);
|
|
|
|
for (auto &instr : llvm::reverse(impl->instrumentations))
|
2019-08-17 08:59:03 +08:00
|
|
|
instr->runAfterPassFailed(pass, op);
|
2019-03-19 02:56:18 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
/// See PassInstrumentation::runBeforeAnalysis for details.
|
2020-04-11 14:46:52 +08:00
|
|
|
void PassInstrumentor::runBeforeAnalysis(StringRef name, TypeID id,
|
2019-08-17 08:59:03 +08:00
|
|
|
Operation *op) {
|
2019-03-19 02:56:18 +08:00
|
|
|
llvm::sys::SmartScopedLock<true> instrumentationLock(impl->mutex);
|
|
|
|
for (auto &instr : impl->instrumentations)
|
2019-08-17 08:59:03 +08:00
|
|
|
instr->runBeforeAnalysis(name, id, op);
|
2019-03-19 02:56:18 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
/// See PassInstrumentation::runAfterAnalysis for details.
|
2020-04-11 14:46:52 +08:00
|
|
|
void PassInstrumentor::runAfterAnalysis(StringRef name, TypeID id,
|
2019-08-17 08:59:03 +08:00
|
|
|
Operation *op) {
|
2019-03-19 02:56:18 +08:00
|
|
|
llvm::sys::SmartScopedLock<true> instrumentationLock(impl->mutex);
|
|
|
|
for (auto &instr : llvm::reverse(impl->instrumentations))
|
2019-08-17 08:59:03 +08:00
|
|
|
instr->runAfterAnalysis(name, id, op);
|
2019-03-19 02:56:18 +08:00
|
|
|
}
|
|
|
|
|
2019-09-15 08:44:10 +08:00
|
|
|
/// Add the given instrumentation to the collection.
|
|
|
|
void PassInstrumentor::addInstrumentation(
|
|
|
|
std::unique_ptr<PassInstrumentation> pi) {
|
2019-03-19 02:56:18 +08:00
|
|
|
llvm::sys::SmartScopedLock<true> instrumentationLock(impl->mutex);
|
2019-09-15 08:44:10 +08:00
|
|
|
impl->instrumentations.emplace_back(std::move(pi));
|
2019-03-19 02:56:18 +08:00
|
|
|
}
|