//===- Block.cpp - MLIR Block Class ---------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "mlir/IR/Block.h"
#include "mlir/IR/Builders.h"
#include "mlir/IR/Operation.h"
#include "llvm/ADT/BitVector.h"
using namespace mlir;

//===----------------------------------------------------------------------===//
// Block
//===----------------------------------------------------------------------===//

Block::~Block() {
  assert(!verifyOpOrder() && "Expected valid operation ordering.");
  clear();
  for (BlockArgument arg : arguments)
    arg.destroy();
}

Region *Block::getParent() const { return parentValidOpOrderPair.getPointer(); }

/// Returns the closest surrounding operation that contains this block, or
/// nullptr if this block is unlinked.
Operation *Block::getParentOp() {
  return getParent() ? getParent()->getParentOp() : nullptr;
}

/// Returns true if this block is the entry block of its parent region.
bool Block::isEntryBlock() { return this == &getParent()->front(); }

/// Insert this block (which must not already be in a region) right before the
/// specified block.
void Block::insertBefore(Block *block) {
  assert(!getParent() && "already inserted into a region!");
  assert(block->getParent() && "cannot insert before a block without a parent");
  block->getParent()->getBlocks().insert(block->getIterator(), this);
}

/// Unlink this block from its current region and insert it right before the
/// specified block.
void Block::moveBefore(Block *block) {
  assert(block->getParent() && "cannot insert before a block without a parent");
  block->getParent()->getBlocks().splice(
      block->getIterator(), getParent()->getBlocks(), getIterator());
}
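
// Example (illustrative sketch, not part of the API): a caller that places a
// block 'succ' immediately before 'block'. The variable names below are
// hypothetical.
//
//   if (succ->getParent())
//     succ->moveBefore(block);   // unlink from its current region and relink
//   else
//     succ->insertBefore(block); // a detached block must use insertBefore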

/// Unlink this Block from its parent Region and delete it.
void Block::erase() {
  assert(getParent() && "Block has no parent");
  getParent()->getBlocks().erase(this);
}

/// Returns 'op' if 'op' lies in this block, or otherwise finds the
/// ancestor operation of 'op' that lies in this block. Returns nullptr if
/// the latter fails.
Operation *Block::findAncestorOpInBlock(Operation &op) {
  // Traverse up the operation hierarchy starting from 'op' to find the
  // ancestor operation that resides directly in this block.
  auto *currOp = &op;
  while (currOp->getBlock() != this) {
    currOp = currOp->getParentOp();
    if (!currOp)
      return nullptr;
  }
  return currOp;
}
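
// Example (illustrative sketch): given an operation 'nestedOp' buried inside
// nested regions, find the op in 'block' that transitively contains it. The
// names below are hypothetical.
//
//   if (Operation *ancestor = block->findAncestorOpInBlock(*nestedOp)) {
//     // 'ancestor' is either 'nestedOp' itself or the op in 'block' whose
//     // regions (transitively) contain 'nestedOp'.
//   }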

/// This drops all operand uses from operations within this block, which is
/// an essential step in breaking cyclic dependences between references when
/// they are to be deleted.
void Block::dropAllReferences() {
  for (Operation &i : *this)
    i.dropAllReferences();
}
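
// Example (illustrative sketch): before erasing a block whose operations may
// reference one another (or values the block defines), drop the references
// first so destruction order does not matter. 'block' is a hypothetical
// Block* that is attached to a region.
//
//   block->dropAllReferences();
//   block->dropAllDefinedValueUses();
//   block->erase();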

void Block::dropAllDefinedValueUses() {
  for (auto arg : getArguments())
    arg.dropAllUses();
  for (auto &op : *this)
    op.dropAllDefinedValueUses();
  dropAllUses();
}

/// Returns true if the ordering of the child operations is valid, false
/// otherwise.
bool Block::isOpOrderValid() { return parentValidOpOrderPair.getInt(); }

/// Invalidates the current ordering of operations.
void Block::invalidateOpOrder() {
  // Validate the current ordering.
  assert(!verifyOpOrder());
  parentValidOpOrderPair.setInt(false);
}

/// Verifies the current ordering of child operations. Returns false if the
/// order is valid, true otherwise.
bool Block::verifyOpOrder() {
  // The order is already known to be invalid.
  if (!isOpOrderValid())
    return false;
  // The order is trivially valid if there are fewer than two operations.
  if (operations.empty() || std::next(operations.begin()) == operations.end())
    return false;

  Operation *prev = nullptr;
  for (auto &i : *this) {
    // The previous operation must have a smaller order index than the next as
    // it appears earlier in the list.
    if (prev && prev->orderIndex != Operation::kInvalidOrderIdx &&
        prev->orderIndex >= i.orderIndex)
      return true;
    prev = &i;
  }
  return false;
}

/// Recomputes the ordering of child operations within the block.
void Block::recomputeOpOrder() {
  parentValidOpOrderPair.setInt(true);

  unsigned orderIndex = 0;
  for (auto &op : *this)
    op.orderIndex = (orderIndex += Operation::kOrderStride);
}
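
// Note (illustrative sketch): the order indices maintained above make
// relative position queries cheap. A typical consumer is
// Operation::isBeforeInBlock, which compares the cached indices and only
// triggers a recompute when the ordering has been invalidated by insertions:
//
//   if (opA->isBeforeInBlock(opB)) {
//     // 'opA' appears before 'opB' in their (shared) block.
//   }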

//===----------------------------------------------------------------------===//
// Argument list management.
//===----------------------------------------------------------------------===//

/// Return a range containing the types of the arguments for this block.
auto Block::getArgumentTypes() -> ValueTypeRange<BlockArgListType> {
  return ValueTypeRange<BlockArgListType>(getArguments());
}

BlockArgument Block::addArgument(Type type, Optional<Location> loc) {
  // TODO: Require locations for BlockArguments.
  if (!loc.hasValue()) {
    // Use the location of the parent operation if the block is attached.
    if (Operation *parentOp = getParentOp())
      loc = parentOp->getLoc();
    else
      loc = UnknownLoc::get(type.getContext());
  }

  BlockArgument arg = BlockArgument::create(type, this, arguments.size(), *loc);
  arguments.push_back(arg);
  return arg;
}

/// Add one argument to the argument list for each type specified in the list.
auto Block::addArguments(TypeRange types, ArrayRef<Location> locs)
    -> iterator_range<args_iterator> {
  // TODO: Require locations for BlockArguments.
  assert((locs.empty() || types.size() == locs.size()) &&
         "incorrect number of block argument locations");
  size_t initialSize = arguments.size();
  arguments.reserve(initialSize + types.size());

  if (locs.empty()) {
    for (auto type : types)
      addArgument(type);
  } else {
    for (auto typeAndLoc : llvm::zip(types, locs))
      addArgument(std::get<0>(typeAndLoc), std::get<1>(typeAndLoc));
  }
  return {arguments.data() + initialSize, arguments.data() + arguments.size()};
}
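
// Example (illustrative sketch): appending arguments to an entry block, e.g.
// when materializing a function body. 'builder', 'entry', and 'loc' are
// hypothetical.
//
//   Type f32 = builder.getF32Type();
//   BlockArgument x = entry->addArgument(f32, loc);
//   auto newArgs = entry->addArguments({f32, f32}, {loc, loc});
//   // 'x' and the two values in 'newArgs' are now usable as SSA operands.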

BlockArgument Block::insertArgument(unsigned index, Type type,
                                    Optional<Location> loc) {
  // TODO: Require locations for BlockArguments.
  if (!loc.hasValue()) {
    // Use the location of the parent operation if the block is attached.
    if (Operation *parentOp = getParentOp())
      loc = parentOp->getLoc();
    else
      loc = UnknownLoc::get(type.getContext());
  }

  auto arg = BlockArgument::create(type, this, index, *loc);
  assert(index <= arguments.size());
  arguments.insert(arguments.begin() + index, arg);
  // Update the cached position for all the arguments after the newly inserted
  // one.
  ++index;
  for (BlockArgument arg : llvm::drop_begin(arguments, index))
    arg.setArgNumber(index++);
  return arg;
}
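
// Example (illustrative sketch): inserting a new i1 argument at the front of
// a block, e.g. while rewriting its signature. 'block', 'builder', and 'loc'
// are hypothetical.
//
//   BlockArgument flag =
//       block->insertArgument(/*index=*/0u, builder.getI1Type(), loc);
//   // Existing arguments are shifted and their cached numbers updated.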

/// Insert one value at the given position in the argument list. The existing
/// arguments are shifted. The block is expected not to have predecessors.
BlockArgument Block::insertArgument(args_iterator it, Type type,
                                    Optional<Location> loc) {
  assert(llvm::empty(getPredecessors()) &&
         "cannot insert arguments to blocks with predecessors");
  return insertArgument(it->getArgNumber(), type, loc);
}

void Block::eraseArgument(unsigned index) {
  assert(index < arguments.size());
  arguments[index].destroy();
  arguments.erase(arguments.begin() + index);
  for (BlockArgument arg : llvm::drop_begin(arguments, index))
    arg.setArgNumber(index++);
}

void Block::eraseArguments(ArrayRef<unsigned> argIndices) {
  llvm::BitVector eraseIndices(getNumArguments());
  for (unsigned i : argIndices)
    eraseIndices.set(i);
  eraseArguments(eraseIndices);
}

void Block::eraseArguments(const llvm::BitVector &eraseIndices) {
  eraseArguments(
      [&](BlockArgument arg) { return eraseIndices.test(arg.getArgNumber()); });
}

void Block::eraseArguments(function_ref<bool(BlockArgument)> shouldEraseFn) {
  auto firstDead = llvm::find_if(arguments, shouldEraseFn);
  if (firstDead == arguments.end())
    return;

  // Destroy the first dead argument; this avoids reapplying the predicate to
  // it.
  unsigned index = firstDead->getArgNumber();
  firstDead->destroy();

  // Iterate the remaining arguments to remove any that are now dead.
  for (auto it = std::next(firstDead), e = arguments.end(); it != e; ++it) {
    // Destroy dead arguments, and shift those that are still live.
    if (shouldEraseFn(*it)) {
      it->destroy();
    } else {
      it->setArgNumber(index++);
      *firstDead++ = *it;
    }
  }
  arguments.erase(firstDead, arguments.end());
}
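
// Example (illustrative sketch): dropping all block arguments that no longer
// have uses, e.g. after rewriting the predecessors' branch operands. 'block'
// is a hypothetical Block*.
//
//   block->eraseArguments(
//       [](BlockArgument arg) { return arg.use_empty(); });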

//===----------------------------------------------------------------------===//
// Terminator management
//===----------------------------------------------------------------------===//

/// Get the terminator operation of this block. This function asserts that
/// the block has a valid terminator operation.
Operation *Block::getTerminator() {
  assert(!empty() && back().mightHaveTrait<OpTrait::IsTerminator>());
  return &back();
}

// Indexed successor access.
unsigned Block::getNumSuccessors() {
  return empty() ? 0 : back().getNumSuccessors();
}

Block *Block::getSuccessor(unsigned i) {
  assert(i < getNumSuccessors());
  return getTerminator()->getSuccessor(i);
}
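
// Example (illustrative sketch): walking the CFG edges leaving a block via
// the indexed accessors above. 'block' is a hypothetical Block*.
//
//   for (unsigned i = 0, e = block->getNumSuccessors(); i != e; ++i) {
//     Block *succ = block->getSuccessor(i);
//     // ... visit 'succ' ...
//   }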

/// If this block has exactly one predecessor, return it. Otherwise, return
/// null.
///
/// Note that multiple edges from a single block (e.g. if you have a cond
/// branch with the same block as the true/false destinations) are not
/// considered to be a single predecessor.
Block *Block::getSinglePredecessor() {
  auto it = pred_begin();
  if (it == pred_end())
    return nullptr;
  auto *firstPred = *it;
  ++it;
  return it == pred_end() ? firstPred : nullptr;
}

/// If this block has a unique predecessor, i.e., all incoming edges originate
/// from one block, return it. Otherwise, return null.
Block *Block::getUniquePredecessor() {
  auto it = pred_begin(), e = pred_end();
  if (it == e)
    return nullptr;

  // Check for any conflicting predecessors.
  auto *firstPred = *it;
  for (++it; it != e; ++it)
    if (*it != firstPred)
      return nullptr;
  return firstPred;
}
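
// Example (illustrative sketch): the two queries above differ when a single
// predecessor reaches this block along several edges (e.g. both arms of a
// conditional branch target it). 'block' is a hypothetical Block*.
//
//   Block *single = block->getSinglePredecessor(); // null in the multi-edge case
//   Block *unique = block->getUniquePredecessor(); // the branching block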

//===----------------------------------------------------------------------===//
// Other
//===----------------------------------------------------------------------===//

/// Split the block into two blocks before the specified operation or
/// iterator.
///
/// Note that all operations BEFORE the specified iterator stay as part of
/// the original basic block, and the rest of the operations in the original
/// block are moved to the new block, including the old terminator. The
/// original block is left without a terminator.
///
/// The newly formed Block is returned, and the specified iterator is
/// invalidated.
Block *Block::splitBlock(iterator splitBefore) {
  // Start by creating a new basic block, and insert it immediately after this
  // one in the containing region.
  auto newBB = new Block();
  getParent()->getBlocks().insert(std::next(Region::iterator(this)), newBB);

  // Move all of the operations from the split point to the end of this block
  // into the new block.
  newBB->getOperations().splice(newBB->end(), getOperations(), splitBefore,
                                end());
  return newBB;
}
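
// Example (illustrative sketch): splitting a block at a given operation so a
// new edge can be introduced between the two halves. 'op' is a hypothetical
// Operation* that lives in 'block'.
//
//   Block *tail = block->splitBlock(Block::iterator(op));
//   // 'op' and everything after it now live in 'tail'; 'block' currently has
//   // no terminator, so the caller must add a branch (or other terminator).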

//===----------------------------------------------------------------------===//
// Predecessors
//===----------------------------------------------------------------------===//

Block *PredecessorIterator::unwrap(BlockOperand &value) {
  return value.getOwner()->getBlock();
}

/// Get the successor number in the predecessor terminator.
unsigned PredecessorIterator::getSuccessorIndex() const {
  return I->getOperandNumber();
}

//===----------------------------------------------------------------------===//
// SuccessorRange
//===----------------------------------------------------------------------===//

SuccessorRange::SuccessorRange() : SuccessorRange(nullptr, 0) {}

SuccessorRange::SuccessorRange(Block *block) : SuccessorRange() {
  if (block->empty() || llvm::hasSingleElement(*block->getParent()))
    return;
  Operation *term = &block->back();
  if ((count = term->getNumSuccessors()))
    base = term->getBlockOperands().data();
}

SuccessorRange::SuccessorRange(Operation *term) : SuccessorRange() {
  if ((count = term->getNumSuccessors()))
    base = term->getBlockOperands().data();
}

//===----------------------------------------------------------------------===//
// BlockRange
//===----------------------------------------------------------------------===//

BlockRange::BlockRange(ArrayRef<Block *> blocks) : BlockRange(nullptr, 0) {
  if ((count = blocks.size()))
    base = blocks.data();
}

BlockRange::BlockRange(SuccessorRange successors)
    : BlockRange(successors.begin().getBase(), successors.size()) {}

/// See `llvm::detail::indexed_accessor_range_base` for details.
BlockRange::OwnerT BlockRange::offset_base(OwnerT object, ptrdiff_t index) {
  if (auto *operand = object.dyn_cast<BlockOperand *>())
    return {operand + index};
  return {object.dyn_cast<Block *const *>() + index};
}

/// See `llvm::detail::indexed_accessor_range_base` for details.
Block *BlockRange::dereference_iterator(OwnerT object, ptrdiff_t index) {
  if (const auto *operand = object.dyn_cast<BlockOperand *>())
    return operand[index].get();
  return object.dyn_cast<Block *const *>()[index];
}