2002-09-09 02:59:35 +08:00
|
|
|
//===-- BasicBlock.cpp - Implement BasicBlock related methods -------------===//
|
2005-04-22 07:48:37 +08:00
|
|
|
//
|
2019-01-19 16:50:56 +08:00
|
|
|
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
|
|
|
|
// See https://llvm.org/LICENSE.txt for license information.
|
|
|
|
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
|
2005-04-22 07:48:37 +08:00
|
|
|
//
|
2003-10-21 03:43:21 +08:00
|
|
|
//===----------------------------------------------------------------------===//
|
2001-06-07 04:29:01 +08:00
|
|
|
//
|
2013-01-02 17:10:48 +08:00
|
|
|
// This file implements the BasicBlock class for the IR library.
|
2001-06-07 04:29:01 +08:00
|
|
|
//
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
|
2013-01-02 19:36:10 +08:00
|
|
|
#include "llvm/IR/BasicBlock.h"
|
2012-12-04 00:50:05 +08:00
|
|
|
#include "SymbolTableListTraitsImpl.h"
|
|
|
|
#include "llvm/ADT/STLExtras.h"
|
2014-03-04 19:45:46 +08:00
|
|
|
#include "llvm/IR/CFG.h"
|
2013-01-02 19:36:10 +08:00
|
|
|
#include "llvm/IR/Constants.h"
|
|
|
|
#include "llvm/IR/Instructions.h"
|
|
|
|
#include "llvm/IR/IntrinsicInst.h"
|
|
|
|
#include "llvm/IR/LLVMContext.h"
|
|
|
|
#include "llvm/IR/Type.h"
|
2002-06-26 00:13:24 +08:00
|
|
|
#include <algorithm>
|
2015-10-07 07:24:35 +08:00
|
|
|
|
2003-11-22 00:52:05 +08:00
|
|
|
using namespace llvm;
|
|
|
|
|
2009-03-07 20:33:24 +08:00
|
|
|
/// Return the symbol table of the enclosing function, or null when this block
/// is not attached to any function.
ValueSymbolTable *BasicBlock::getValueSymbolTable() {
  Function *F = getParent();
  return F ? F->getValueSymbolTable() : nullptr;
}
|
|
|
|
|
2009-07-22 08:24:57 +08:00
|
|
|
/// The owning LLVMContext is recovered through this block's label type.
LLVMContext &BasicBlock::getContext() const {
  Type *LabelTy = getType();
  return LabelTy->getContext();
}
|
|
|
|
|
[IR] Lazily number instructions for local dominance queries
Essentially, fold OrderedBasicBlock into BasicBlock, and make it
auto-invalidate the instruction ordering when new instructions are
added. Notably, we don't need to invalidate it when removing
instructions, which is helpful when a pass mostly delete dead
instructions rather than transforming them.
The downside is that Instruction grows from 56 bytes to 64 bytes. The
resulting LLVM code is substantially simpler and automatically handles
invalidation, which makes me think that this is the right speed and size
tradeoff.
The important change is in SymbolTableTraitsImpl.h, where the numbering
is invalidated. Everything else should be straightforward.
We probably want to implement a fancier re-numbering scheme so that
local updates don't invalidate the ordering, but I plan for that to be
future work, maybe for someone else.
Reviewed By: lattner, vsk, fhahn, dexonsmith
Differential Revision: https://reviews.llvm.org/D51664
2020-02-19 06:33:54 +08:00
|
|
|
// Specialization hook called by the symbol-table ilist traits whenever an
// instruction is added to (or moved within) a block; any cached instruction
// ordering in that block is no longer valid, so drop it.
template <> void llvm::invalidateParentIListOrdering(BasicBlock *BB) {
  BB->invalidateOrders();
}
|
|
|
|
|
2002-06-26 00:13:24 +08:00
|
|
|
// Explicit instantiation of SymbolTableListTraits since some of the methods
|
|
|
|
// are not in the public header file...
|
2015-10-08 04:05:10 +08:00
|
|
|
template class llvm::SymbolTableListTraits<Instruction>;
|
2002-06-26 00:13:24 +08:00
|
|
|
|
2009-08-14 05:58:54 +08:00
|
|
|
// Construct a basic block with the given name.  When \p NewParent is non-null
// the block is linked into that function, before \p InsertBefore if provided
// (otherwise appended).  Passing \p InsertBefore without a parent is invalid.
BasicBlock::BasicBlock(LLVMContext &C, const Twine &Name, Function *NewParent,
                       BasicBlock *InsertBefore)
  : Value(Type::getLabelTy(C), Value::BasicBlockVal), Parent(nullptr) {

  if (NewParent)
    insertInto(NewParent, InsertBefore);
  else
    assert(!InsertBefore &&
           "Cannot insert block before another block with no function!");

  // Set the name only after insertion so the parent's symbol table can
  // resolve name collisions.
  setName(Name);
}
|
|
|
|
|
2014-08-02 05:22:04 +08:00
|
|
|
/// Attach this (currently unparented) block to \p NewParent, placing it
/// before \p InsertBefore, or at the end of the function when no insertion
/// point is given.
void BasicBlock::insertInto(Function *NewParent, BasicBlock *InsertBefore) {
  assert(NewParent && "Expected a parent");
  assert(!Parent && "Already has a parent");

  auto &BBList = NewParent->getBasicBlockList();
  if (!InsertBefore)
    BBList.push_back(this);
  else
    BBList.insert(InsertBefore->getIterator(), this);
}
|
2011-04-11 07:18:04 +08:00
|
|
|
|
2007-12-10 10:14:30 +08:00
|
|
|
BasicBlock::~BasicBlock() {
  // Debug-build check that any cached instruction ordering is still
  // consistent before the block is torn down.
  validateInstrOrdering();

  // If the address of the block is taken and it is being deleted (e.g. because
  // it is dead), this means that there is either a dangling constant expr
  // hanging off the block, or an undefined use of the block (source code
  // expecting the address of a label to keep the block alive even though there
  // is no indirect branch).  Handle these cases by zapping the BlockAddress
  // nodes.  There are no other possible uses at this point.
  if (hasAddressTaken()) {
    assert(!use_empty() && "There should be at least one blockaddress!");
    // Replace each blockaddress with inttoptr(i32 1) — an arbitrary non-null
    // constant — before destroying it.
    Constant *Replacement =
      ConstantInt::get(llvm::Type::getInt32Ty(getContext()), 1);
    while (!use_empty()) {
      BlockAddress *BA = cast<BlockAddress>(user_back());
      BA->replaceAllUsesWith(ConstantExpr::getIntToPtr(Replacement,
                                                       BA->getType()));
      BA->destroyConstant();
    }
  }

  assert(getParent() == nullptr && "BasicBlock still linked into the program!");
  // Drop operand references first so instructions that (transitively)
  // reference each other can be deleted safely by clear().
  dropAllReferences();
  InstList.clear();
}
|
|
|
|
|
2002-09-07 05:33:15 +08:00
|
|
|
/// Update this block's parent pointer.  Goes through the instruction list so
/// that the instructions' symbol-table entries move between the old and new
/// function symbol tables as appropriate.
void BasicBlock::setParent(Function *parent) {
  // Set Parent=parent, updating instruction symtab entries as appropriate.
  InstList.setSymTabObject(&Parent, parent);
}
|
|
|
|
|
2018-04-19 17:48:07 +08:00
|
|
|
/// Return a const range over this block's instructions that skips debug
/// intrinsics.
iterator_range<filter_iterator<BasicBlock::const_iterator,
                               std::function<bool(const Instruction &)>>>
BasicBlock::instructionsWithoutDebug() const {
  // Keep every instruction that is not a debug intrinsic.
  std::function<bool(const Instruction &)> NotDbg =
      [](const Instruction &Inst) { return !isa<DbgInfoIntrinsic>(Inst); };
  return make_filter_range(*this, NotDbg);
}
|
|
|
|
|
|
|
|
/// Return a mutable range over this block's instructions that skips debug
/// intrinsics.
iterator_range<filter_iterator<BasicBlock::iterator,
                               std::function<bool(Instruction &)>>>
BasicBlock::instructionsWithoutDebug() {
  // Keep every instruction that is not a debug intrinsic.
  std::function<bool(Instruction &)> NotDbg =
      [](Instruction &Inst) { return !isa<DbgInfoIntrinsic>(Inst); };
  return make_filter_range(*this, NotDbg);
}
|
|
|
|
|
2019-09-20 21:22:59 +08:00
|
|
|
/// Count the instructions in this block, excluding debug intrinsics.
filter_iterator<BasicBlock::const_iterator,
                std::function<bool(const Instruction &)>>::difference_type
BasicBlock::sizeWithoutDebug() const {
  // Build the filtered range once.  The previous code called
  // instructionsWithoutDebug() twice, constructing two std::function
  // predicates and taking begin() and end() from two distinct range objects.
  auto R = instructionsWithoutDebug();
  return std::distance(R.begin(), R.end());
}
|
|
|
|
|
2004-10-12 06:21:39 +08:00
|
|
|
void BasicBlock::removeFromParent() {
|
2015-10-09 07:49:46 +08:00
|
|
|
getParent()->getBasicBlockList().remove(getIterator());
|
2004-10-12 06:21:39 +08:00
|
|
|
}
|
|
|
|
|
2015-04-03 09:20:33 +08:00
|
|
|
/// Unlink this block from its parent function and delete it, returning the
/// iterator to the block that followed it.
iplist<BasicBlock>::iterator BasicBlock::eraseFromParent() {
  Function *F = getParent();
  return F->getBasicBlockList().erase(getIterator());
}
|
|
|
|
|
2015-02-28 02:07:41 +08:00
|
|
|
/// Unlink this basic block from its current function and
|
2006-09-23 12:03:45 +08:00
|
|
|
/// insert it into the function that MovePos lives in, right before MovePos.
|
2005-08-13 06:14:06 +08:00
|
|
|
void BasicBlock::moveBefore(BasicBlock *MovePos) {
|
2015-10-09 07:49:46 +08:00
|
|
|
MovePos->getParent()->getBasicBlockList().splice(
|
|
|
|
MovePos->getIterator(), getParent()->getBasicBlockList(), getIterator());
|
2005-08-13 06:14:06 +08:00
|
|
|
}
|
|
|
|
|
2015-02-28 02:07:41 +08:00
|
|
|
/// Unlink this basic block from its current function and
|
2006-09-23 12:03:45 +08:00
|
|
|
/// insert it into the function that MovePos lives in, right after MovePos.
|
|
|
|
void BasicBlock::moveAfter(BasicBlock *MovePos) {
|
2015-10-09 07:49:46 +08:00
|
|
|
MovePos->getParent()->getBasicBlockList().splice(
|
|
|
|
++MovePos->getIterator(), getParent()->getBasicBlockList(),
|
|
|
|
getIterator());
|
2006-09-23 12:03:45 +08:00
|
|
|
}
|
|
|
|
|
2017-03-27 10:38:17 +08:00
|
|
|
/// The module is the parent of this block's enclosing function.
const Module *BasicBlock::getModule() const {
  const Function *F = getParent();
  return F->getParent();
}
|
|
|
|
|
2018-10-15 18:42:50 +08:00
|
|
|
/// Return the block's terminator, or null if the block is empty or its last
/// instruction is not a terminator.
const Instruction *BasicBlock::getTerminator() const {
  if (InstList.empty())
    return nullptr;
  const Instruction &Last = InstList.back();
  return Last.isTerminator() ? &Last : nullptr;
}
|
|
|
|
|
2017-03-27 10:38:17 +08:00
|
|
|
/// Return the musttail call whose result this block returns, or null.  The
/// recognized pattern is: musttail call, an optional bitcast of its result,
/// then the ret (optionally returning that value).
const CallInst *BasicBlock::getTerminatingMustTailCall() const {
  if (InstList.empty())
    return nullptr;
  const ReturnInst *RI = dyn_cast<ReturnInst>(&InstList.back());
  // Need a ret with at least one instruction before it.
  if (!RI || RI == &InstList.front())
    return nullptr;

  const Instruction *Prev = RI->getPrevNode();
  if (!Prev)
    return nullptr;

  if (Value *RV = RI->getReturnValue()) {
    // A value-returning ret must return the immediately preceding
    // instruction's result (possibly through a bitcast).
    if (RV != Prev)
      return nullptr;

    // Look through the optional bitcast.
    if (auto *BI = dyn_cast<BitCastInst>(Prev)) {
      RV = BI->getOperand(0);
      Prev = BI->getPrevNode();
      if (!Prev || RV != Prev)
        return nullptr;
    }
  }

  // Finally, the preceding instruction must be a musttail call.
  if (auto *CI = dyn_cast<CallInst>(Prev)) {
    if (CI->isMustTailCall())
      return CI;
  }
  return nullptr;
}
|
|
|
|
|
2017-03-27 10:38:17 +08:00
|
|
|
/// Return the call to @llvm.experimental.deoptimize that immediately precedes
/// this block's ret, or null if the block does not match that pattern.
const CallInst *BasicBlock::getTerminatingDeoptimizeCall() const {
  if (InstList.empty())
    return nullptr;
  auto *RI = dyn_cast<ReturnInst>(&InstList.back());
  // Need a ret with at least one instruction before it.
  if (!RI || RI == &InstList.front())
    return nullptr;

  if (auto *CI = dyn_cast_or_null<CallInst>(RI->getPrevNode()))
    if (Function *F = CI->getCalledFunction())
      if (F->getIntrinsicID() == Intrinsic::experimental_deoptimize)
        return CI;

  return nullptr;
}
|
|
|
|
|
2020-01-16 05:57:34 +08:00
|
|
|
/// Walk the chain of unique successors starting at this block and return the
/// terminating deoptimize call of the final block reached, or null.  The
/// Visited set terminates the walk when the unique-successor chain cycles.
const CallInst *BasicBlock::getPostdominatingDeoptimizeCall() const {
  const BasicBlock* BB = this;
  SmallPtrSet<const BasicBlock *, 8> Visited;
  Visited.insert(BB);
  while (auto *Succ = BB->getUniqueSuccessor()) {
    if (!Visited.insert(Succ).second)
      return nullptr; // Cycle: no post-dominating deoptimize call.
    BB = Succ;
  }
  return BB->getTerminatingDeoptimizeCall();
}
|
|
|
|
|
2017-03-27 10:38:17 +08:00
|
|
|
/// Return the first instruction that is not a PHI node, or null when the
/// block contains only PHI nodes (or nothing at all).
const Instruction *BasicBlock::getFirstNonPHI() const {
  for (const_iterator It = begin(), E = end(); It != E; ++It)
    if (!isa<PHINode>(*It))
      return &*It;
  return nullptr;
}
|
|
|
|
|
2017-03-27 10:38:17 +08:00
|
|
|
/// Return the first instruction that is neither a PHI node nor a debug
/// intrinsic, or null when no such instruction exists.
const Instruction *BasicBlock::getFirstNonPHIOrDbg() const {
  for (const_iterator It = begin(), E = end(); It != E; ++It)
    if (!isa<PHINode>(*It) && !isa<DbgInfoIntrinsic>(*It))
      return &*It;
  return nullptr;
}
|
|
|
|
|
2017-03-27 10:38:17 +08:00
|
|
|
/// Return the first instruction that is not a PHI node, a debug intrinsic, or
/// a lifetime.start/lifetime.end marker; null when no such instruction exists.
const Instruction *BasicBlock::getFirstNonPHIOrDbgOrLifetime() const {
  for (const Instruction &Inst : *this)
    if (!isa<PHINode>(Inst) && !isa<DbgInfoIntrinsic>(Inst) &&
        !Inst.isLifetimeStartOrEnd())
      return &Inst;
  return nullptr;
}
|
|
|
|
|
2017-03-27 10:38:17 +08:00
|
|
|
/// Return the first position at which an ordinary instruction may be
/// inserted: after the PHI nodes, and after an EH pad if the block has one.
BasicBlock::const_iterator BasicBlock::getFirstInsertionPt() const {
  const Instruction *First = getFirstNonPHI();
  if (!First)
    return end(); // Block is all-PHI (or empty): insert at the end.

  const_iterator It = First->getIterator();
  if (It->isEHPad())
    ++It; // Nothing may be inserted before an EH pad.
  return It;
}
|
|
|
|
|
2001-06-07 04:29:01 +08:00
|
|
|
void BasicBlock::dropAllReferences() {
|
2016-06-26 22:10:56 +08:00
|
|
|
for (Instruction &I : *this)
|
|
|
|
I.dropAllReferences();
|
2001-06-07 04:29:01 +08:00
|
|
|
}
|
|
|
|
|
2015-02-28 02:07:41 +08:00
|
|
|
/// If this basic block has a single predecessor block,
|
2005-02-24 10:37:26 +08:00
|
|
|
/// return the block, otherwise return a null pointer.
|
2017-03-27 10:38:17 +08:00
|
|
|
const BasicBlock *BasicBlock::getSinglePredecessor() const {
|
|
|
|
const_pred_iterator PI = pred_begin(this), E = pred_end(this);
|
2014-04-09 14:08:46 +08:00
|
|
|
if (PI == E) return nullptr; // No preds.
|
2017-03-27 10:38:17 +08:00
|
|
|
const BasicBlock *ThePred = *PI;
|
2005-02-24 10:37:26 +08:00
|
|
|
++PI;
|
2014-04-09 14:08:46 +08:00
|
|
|
return (PI == E) ? ThePred : nullptr /*multiple preds*/;
|
2005-02-24 10:37:26 +08:00
|
|
|
}
|
|
|
|
|
2015-02-28 02:07:41 +08:00
|
|
|
/// If this basic block has a unique predecessor block,
|
2008-12-11 18:36:07 +08:00
|
|
|
/// return the block, otherwise return a null pointer.
|
2011-08-10 07:12:56 +08:00
|
|
|
/// Note that unique predecessor doesn't mean single edge, there can be
|
|
|
|
/// multiple edges from the unique predecessor to this block (for example
|
2008-12-11 19:44:49 +08:00
|
|
|
/// a switch statement with multiple cases having the same destination).
|
2017-03-27 10:38:17 +08:00
|
|
|
const BasicBlock *BasicBlock::getUniquePredecessor() const {
|
|
|
|
const_pred_iterator PI = pred_begin(this), E = pred_end(this);
|
2014-04-09 14:08:46 +08:00
|
|
|
if (PI == E) return nullptr; // No preds.
|
2017-03-27 10:38:17 +08:00
|
|
|
const BasicBlock *PredBB = *PI;
|
2008-12-11 18:36:07 +08:00
|
|
|
++PI;
|
|
|
|
for (;PI != E; ++PI) {
|
|
|
|
if (*PI != PredBB)
|
2014-04-09 14:08:46 +08:00
|
|
|
return nullptr;
|
2008-12-11 19:44:49 +08:00
|
|
|
// The same predecessor appears multiple times in the predecessor list.
|
|
|
|
// This is OK.
|
2008-12-11 18:36:07 +08:00
|
|
|
}
|
|
|
|
return PredBB;
|
|
|
|
}
|
|
|
|
|
2018-11-20 03:54:27 +08:00
|
|
|
/// Return true when this block has exactly \p N predecessor edges.  Uses
/// hasNItems so iteration stops as soon as the answer is known.
bool BasicBlock::hasNPredecessors(unsigned N) const {
  return hasNItems(pred_begin(this), pred_end(this), N);
}
|
|
|
|
|
|
|
|
/// Return true when this block has at least \p N predecessor edges.  Uses
/// hasNItemsOrMore so iteration stops after seeing N edges.
bool BasicBlock::hasNPredecessorsOrMore(unsigned N) const {
  return hasNItemsOrMore(pred_begin(this), pred_end(this), N);
}
|
|
|
|
|
2017-03-27 10:38:17 +08:00
|
|
|
/// Return the sole successor of this block, or null when there are zero
/// successors or more than one successor edge.
const BasicBlock *BasicBlock::getSingleSuccessor() const {
  const_succ_iterator SI = succ_begin(this), E = succ_end(this);
  if (SI == E)
    return nullptr; // No successors at all.
  const BasicBlock *OnlySucc = *SI;
  ++SI;
  return SI == E ? OnlySucc : nullptr; // Null when more than one edge.
}
|
|
|
|
|
2017-03-27 10:38:17 +08:00
|
|
|
const BasicBlock *BasicBlock::getUniqueSuccessor() const {
|
2020-03-11 02:33:02 +08:00
|
|
|
const_succ_iterator SI = succ_begin(this), E = succ_end(this);
|
2015-10-07 07:24:35 +08:00
|
|
|
if (SI == E) return nullptr; // No successors
|
2017-03-27 10:38:17 +08:00
|
|
|
const BasicBlock *SuccBB = *SI;
|
Add a pass for inserting safepoints into (nearly) arbitrary IR
This pass is responsible for figuring out where to place call safepoints and safepoint polls. It doesn't actually make the relocations explicit; that's the job of the RewriteStatepointsForGC pass (http://reviews.llvm.org/D6975).
Note that this code is not yet finalized. Its moving in tree for incremental development, but further cleanup is needed and will happen over the next few days. It is not yet part of the standard pass order.
Planned changes in the near future:
- I plan on restructuring the statepoint rewrite to use the functions add to the IRBuilder a while back.
- In the current pass, the function "gc.safepoint_poll" is treated specially but is not an intrinsic. I plan to make identifying the poll function a property of the GCStrategy at some point in the near future.
- As follow on patches, I will be separating a collection of test cases we have out of tree and submitting them upstream.
- It's not explicit in the code, but these two patches are introducing a new state for a statepoint which looks a lot like a patchpoint. There's no a transient form which doesn't yet have the relocations explicitly represented, but does prevent reordering of memory operations. Once this is in, I need to update actually make this explicit by reserving the 'unused' argument of the statepoint as a flag, updating the docs, and making the code explicitly check for such a thing. This wasn't really planned, but once I split the two passes - which was done for other reasons - the intermediate state fell out. Just reminds us once again that we need to merge statepoints and patchpoints at some point in the not that distant future.
Future directions planned:
- Identifying more cases where a backedge safepoint isn't required to ensure timely execution of a safepoint poll.
- Tweaking the insertion process to generate easier to optimize IR. (For example, investigating making SplitBackedge) the default.
- Adding opt-in flags for a GCStrategy to use this pass. Once done, add this pass to the actual pass ordering.
Differential Revision: http://reviews.llvm.org/D6981
llvm-svn: 228090
2015-02-04 08:37:33 +08:00
|
|
|
++SI;
|
|
|
|
for (;SI != E; ++SI) {
|
|
|
|
if (*SI != SuccBB)
|
2015-10-07 07:24:35 +08:00
|
|
|
return nullptr;
|
Add a pass for inserting safepoints into (nearly) arbitrary IR
This pass is responsible for figuring out where to place call safepoints and safepoint polls. It doesn't actually make the relocations explicit; that's the job of the RewriteStatepointsForGC pass (http://reviews.llvm.org/D6975).
Note that this code is not yet finalized. Its moving in tree for incremental development, but further cleanup is needed and will happen over the next few days. It is not yet part of the standard pass order.
Planned changes in the near future:
- I plan on restructuring the statepoint rewrite to use the functions add to the IRBuilder a while back.
- In the current pass, the function "gc.safepoint_poll" is treated specially but is not an intrinsic. I plan to make identifying the poll function a property of the GCStrategy at some point in the near future.
- As follow on patches, I will be separating a collection of test cases we have out of tree and submitting them upstream.
- It's not explicit in the code, but these two patches are introducing a new state for a statepoint which looks a lot like a patchpoint. There's no a transient form which doesn't yet have the relocations explicitly represented, but does prevent reordering of memory operations. Once this is in, I need to update actually make this explicit by reserving the 'unused' argument of the statepoint as a flag, updating the docs, and making the code explicitly check for such a thing. This wasn't really planned, but once I split the two passes - which was done for other reasons - the intermediate state fell out. Just reminds us once again that we need to merge statepoints and patchpoints at some point in the not that distant future.
Future directions planned:
- Identifying more cases where a backedge safepoint isn't required to ensure timely execution of a safepoint poll.
- Tweaking the insertion process to generate easier to optimize IR. (For example, investigating making SplitBackedge) the default.
- Adding opt-in flags for a GCStrategy to use this pass. Once done, add this pass to the actual pass ordering.
Differential Revision: http://reviews.llvm.org/D6981
llvm-svn: 228090
2015-02-04 08:37:33 +08:00
|
|
|
// The same successor appears multiple times in the successor list.
|
|
|
|
// This is OK.
|
|
|
|
}
|
|
|
|
return SuccBB;
|
|
|
|
}
|
|
|
|
|
2017-05-26 11:10:00 +08:00
|
|
|
/// Return an iterator range over the PHI nodes at the top of this block.
/// The range is empty when the block is empty or begins with a non-PHI.
iterator_range<BasicBlock::phi_iterator> BasicBlock::phis() {
  PHINode *First = nullptr;
  if (!empty())
    First = dyn_cast<PHINode>(&*begin());
  return make_range<phi_iterator>(First, nullptr);
}
|
|
|
|
|
2015-02-28 02:07:41 +08:00
|
|
|
/// This method is used to notify a BasicBlock that the
/// specified Predecessor of the block is no longer able to reach it.  This is
/// actually not used to update the Predecessor list, but is actually used to
/// update the PHI nodes that reside in the block.  Note that this should be
/// called while the predecessor still refers to this block.
///
/// \param Pred the predecessor edge being removed.
/// \param KeepOneInputPHIs when true, single-input PHIs are kept instead of
///        being folded into their remaining value.
void BasicBlock::removePredecessor(BasicBlock *Pred,
                                   bool KeepOneInputPHIs) {
  assert((hasNUsesOrMore(16)||// Reduce cost of this assertion for complex CFGs.
          find(pred_begin(this), pred_end(this), Pred) != pred_end(this)) &&
         "removePredecessor: BB is not a predecessor!");

  if (InstList.empty()) return;
  PHINode *APN = dyn_cast<PHINode>(&front());
  if (!APN) return;   // Quick exit: no PHI nodes to update.

  // If there are exactly two predecessors, then we want to nuke the PHI nodes
  // altogether.  However, we cannot do this, if this in this case:
  //
  //  Loop:
  //    %x = phi [X, Loop]
  //    %x2 = add %x, 1         ;; This would become %x2 = add %x2, 1
  //    br Loop                 ;; %x2 does not dominate all uses
  //
  // This is because the PHI node input is actually taken from the predecessor
  // basic block.  The only case this can happen is with a self loop, so we
  // check for this case explicitly now.
  //
  unsigned max_idx = APN->getNumIncomingValues();
  assert(max_idx != 0 && "PHI Node in block with 0 predecessors!?!?!");
  if (max_idx == 2) {
    BasicBlock *Other = APN->getIncomingBlock(APN->getIncomingBlock(0) == Pred);

    // Disable PHI elimination!
    if (this == Other) max_idx = 3;
  }

  // <= Two predecessors BEFORE I remove one?
  if (max_idx <= 2 && !KeepOneInputPHIs) {
    // Yup, loop through and nuke the PHI nodes
    while (PHINode *PN = dyn_cast<PHINode>(&front())) {
      // Remove the predecessor first.
      PN->removeIncomingValue(Pred, !KeepOneInputPHIs);

      // If the PHI _HAD_ two uses, replace PHI node with its now *single* value
      if (max_idx == 2) {
        if (PN->getIncomingValue(0) != PN)
          PN->replaceAllUsesWith(PN->getIncomingValue(0));
        else
          // We are left with an infinite loop with no entries: kill the PHI.
          PN->replaceAllUsesWith(UndefValue::get(PN->getType()));
        getInstList().pop_front();    // Remove the PHI node
      }

      // If the PHI node already only had one entry, it got deleted by
      // removeIncomingValue.
    }
  } else {
    // Okay, now we know that we need to remove predecessor #pred_idx from all
    // PHI nodes.  Iterate over each PHI node fixing them up
    PHINode *PN;
    for (iterator II = begin(); (PN = dyn_cast<PHINode>(II)); ) {
      // Advance before mutating, since eraseFromParent below invalidates PN.
      ++II;
      PN->removeIncomingValue(Pred, false);
      // If all incoming values to the Phi are the same, we can replace the Phi
      // with that value.
      Value* PNV = nullptr;
      if (!KeepOneInputPHIs && (PNV = PN->hasConstantValue()))
        if (PNV != PN) {
          PN->replaceAllUsesWith(PNV);
          PN->eraseFromParent();
        }
    }
  }
}
|
|
|
|
|
2015-08-01 01:58:14 +08:00
|
|
|
bool BasicBlock::canSplitPredecessors() const {
|
|
|
|
const Instruction *FirstNonPHI = getFirstNonPHI();
|
|
|
|
if (isa<LandingPadInst>(FirstNonPHI))
|
|
|
|
return true;
|
|
|
|
// This is perhaps a little conservative because constructs like
|
|
|
|
// CleanupBlockInst are pretty easy to split. However, SplitBlockPredecessors
|
|
|
|
// cannot handle such things just yet.
|
|
|
|
if (FirstNonPHI->isEHPad())
|
|
|
|
return false;
|
|
|
|
return true;
|
|
|
|
}
|
2001-06-07 04:29:01 +08:00
|
|
|
|
2017-06-23 07:27:16 +08:00
|
|
|
/// Return true when instructions may legally be hoisted into this block,
/// i.e. its terminator is not an exceptional one.
bool BasicBlock::isLegalToHoistInto() const {
  auto *Term = getTerminator();
  // No terminator means the block is under construction.
  if (!Term)
    return true;

  // If the block has no successors, there can be no instructions to hoist.
  assert(Term->getNumSuccessors() > 0);

  // Instructions should not be hoisted across exception handling boundaries.
  return !Term->isExceptionalTerminator();
}
|
|
|
|
|
2015-02-28 02:07:41 +08:00
|
|
|
/// This splits a basic block into two at the specified
/// instruction.  Note that all instructions BEFORE the specified iterator stay
/// as part of the original basic block, an unconditional branch is added to
/// the new BB, and the rest of the instructions in the BB are moved to the new
/// BB, including the old terminator.  This invalidates the iterator.
///
/// Note that this only works on well formed basic blocks (must have a
/// terminator), and 'I' must not be the end of instruction list (which would
/// cause a degenerate basic block to be formed, having a terminator inside of
/// the basic block).
///
BasicBlock *BasicBlock::splitBasicBlock(iterator I, const Twine &BBName) {
  assert(getTerminator() && "Can't use splitBasicBlock on degenerate BB!");
  assert(I != InstList.end() &&
         "Trying to get me to create degenerate basic block!");

  // The new block is placed immediately after this one in the function.
  BasicBlock *New = BasicBlock::Create(getContext(), BBName, getParent(),
                                       this->getNextNode());

  // Save DebugLoc of split point before invalidating iterator.
  DebugLoc Loc = I->getDebugLoc();
  // Move all of the specified instructions from the original basic block into
  // the new basic block.
  New->getInstList().splice(New->end(), this->getInstList(), I, end());

  // Add a branch instruction to the newly formed basic block.
  BranchInst *BI = BranchInst::Create(New, this);
  // The branch inherits the debug location of the split point.
  BI->setDebugLoc(Loc);

  // Now we must loop through all of the successors of the New block (which
  // _were_ the successors of the 'this' block), and update any PHI nodes in
  // successors.  If there were PHI nodes in the successors, then they need to
  // know that incoming branches will be from New, not from Old (this).
  //
  New->replaceSuccessorsPhiUsesWith(this, New);
  return New;
}
|
2009-10-29 08:09:08 +08:00
|
|
|
|
2019-05-06 02:59:39 +08:00
|
|
|
/// Rewrite every PHI node at the top of this block so incoming edges from
/// \p Old are attributed to \p New instead.
void BasicBlock::replacePhiUsesWith(BasicBlock *Old, BasicBlock *New) {
  // N.B. This might not be a complete BasicBlock, so don't assume
  // that it ends with a non-phi instruction.
  for (Instruction &Inst : *this) {
    PHINode *PN = dyn_cast<PHINode>(&Inst);
    if (!PN)
      break; // PHIs are contiguous at the top; stop at the first non-PHI.
    PN->replaceIncomingBlockWith(Old, New);
  }
}
|
|
|
|
|
2019-05-06 02:59:45 +08:00
|
|
|
/// In every successor of this block, rewrite PHI entries so edges recorded
/// as coming from \p Old are attributed to \p New instead.
void BasicBlock::replaceSuccessorsPhiUsesWith(BasicBlock *Old,
                                              BasicBlock *New) {
  Instruction *TI = getTerminator();
  if (!TI)
    // Cope with being called on a BasicBlock that doesn't have a terminator
    // yet. Clang's CodeGenFunction::EmitReturnBlock() likes to do this.
    return;
  for (BasicBlock *Succ : successors(TI))
    Succ->replacePhiUsesWith(Old, New);
}
|
2011-08-13 04:24:12 +08:00
|
|
|
|
2019-05-06 02:59:45 +08:00
|
|
|
/// Update all phi nodes in this basic block's successors to refer to basic
/// block \p New instead of to this block.
void BasicBlock::replaceSuccessorsPhiUsesWith(BasicBlock *New) {
  // Delegate to the two-argument form with this block as the old value.
  replaceSuccessorsPhiUsesWith(this, New);
}
|
|
|
|
|
2015-02-28 02:07:41 +08:00
|
|
|
/// Return true if this basic block is a landing pad. I.e., it's
|
2011-08-13 04:24:12 +08:00
|
|
|
/// the destination of the 'unwind' edge of an invoke instruction.
|
|
|
|
bool BasicBlock::isLandingPad() const {
|
|
|
|
return isa<LandingPadInst>(getFirstNonPHI());
|
|
|
|
}
|
|
|
|
|
2015-02-28 02:07:41 +08:00
|
|
|
/// Return the landingpad instruction associated with the landing pad.
|
2012-01-31 08:26:24 +08:00
|
|
|
const LandingPadInst *BasicBlock::getLandingPadInst() const {
|
|
|
|
return dyn_cast<LandingPadInst>(getFirstNonPHI());
|
|
|
|
}
|
2017-11-03 06:26:51 +08:00
|
|
|
|
|
|
|
/// If this block's terminator carries irreducible-loop-header profile
/// metadata (!irr_loop of the form !{!"loop_header_weight", i64 W}), return
/// the weight W; otherwise return an empty Optional.
Optional<uint64_t> BasicBlock::getIrrLoopHeaderWeight() const {
  const Instruction *TI = getTerminator();
  // Cope with being called on a BasicBlock that doesn't have a terminator
  // yet (see replaceSuccessorsPhiUsesWith above); such a block can carry no
  // terminator metadata. Previously this dereferenced TI unconditionally.
  if (!TI)
    return Optional<uint64_t>();
  MDNode *MDIrrLoopHeader = TI->getMetadata(LLVMContext::MD_irr_loop);
  if (!MDIrrLoopHeader)
    return Optional<uint64_t>();
  // Operand 0 names the metadata kind; only "loop_header_weight" is
  // recognized here.
  MDString *MDName = cast<MDString>(MDIrrLoopHeader->getOperand(0));
  if (!MDName->getString().equals("loop_header_weight"))
    return Optional<uint64_t>();
  // Operand 1 is the weight as a ConstantInt.
  auto *CI = mdconst::extract<ConstantInt>(MDIrrLoopHeader->getOperand(1));
  return Optional<uint64_t>(CI->getValue().getZExtValue());
}
|
2018-06-20 07:42:17 +08:00
|
|
|
|
2018-06-27 05:16:59 +08:00
|
|
|
/// Advance \p It past any debug info intrinsics and return the resulting
/// iterator.
BasicBlock::iterator llvm::skipDebugIntrinsics(BasicBlock::iterator It) {
  for (; isa<DbgInfoIntrinsic>(It); ++It)
    ; // Step over each debug intrinsic in turn.
  return It;
}
|
[IR] Lazily number instructions for local dominance queries
Essentially, fold OrderedBasicBlock into BasicBlock, and make it
auto-invalidate the instruction ordering when new instructions are
added. Notably, we don't need to invalidate it when removing
instructions, which is helpful when a pass mostly delete dead
instructions rather than transforming them.
The downside is that Instruction grows from 56 bytes to 64 bytes. The
resulting LLVM code is substantially simpler and automatically handles
invalidation, which makes me think that this is the right speed and size
tradeoff.
The important change is in SymbolTableTraitsImpl.h, where the numbering
is invalidated. Everything else should be straightforward.
We probably want to implement a fancier re-numbering scheme so that
local updates don't invalidate the ordering, but I plan for that to be
future work, maybe for someone else.
Reviewed By: lattner, vsk, fhahn, dexonsmith
Differential Revision: https://reviews.llvm.org/D51664
2020-02-19 06:33:54 +08:00
|
|
|
|
|
|
|
void BasicBlock::renumberInstructions() {
|
|
|
|
unsigned Order = 0;
|
|
|
|
for (Instruction &I : *this)
|
|
|
|
I.Order = Order++;
|
|
|
|
|
|
|
|
// Set the bit to indicate that the instruction order valid and cached.
|
|
|
|
BasicBlockBits Bits = getBasicBlockBits();
|
|
|
|
Bits.InstrOrderValid = true;
|
|
|
|
setBasicBlockBits(Bits);
|
|
|
|
}
|
|
|
|
|
|
|
|
#ifndef NDEBUG
|
|
|
|
/// In asserts builds, this checks the numbering. In non-asserts builds, it
|
2020-02-22 04:31:00 +08:00
|
|
|
/// is defined as a no-op inline function in BasicBlock.h.
|
[IR] Lazily number instructions for local dominance queries
Essentially, fold OrderedBasicBlock into BasicBlock, and make it
auto-invalidate the instruction ordering when new instructions are
added. Notably, we don't need to invalidate it when removing
instructions, which is helpful when a pass mostly delete dead
instructions rather than transforming them.
The downside is that Instruction grows from 56 bytes to 64 bytes. The
resulting LLVM code is substantially simpler and automatically handles
invalidation, which makes me think that this is the right speed and size
tradeoff.
The important change is in SymbolTableTraitsImpl.h, where the numbering
is invalidated. Everything else should be straightforward.
We probably want to implement a fancier re-numbering scheme so that
local updates don't invalidate the ordering, but I plan for that to be
future work, maybe for someone else.
Reviewed By: lattner, vsk, fhahn, dexonsmith
Differential Revision: https://reviews.llvm.org/D51664
2020-02-19 06:33:54 +08:00
|
|
|
void BasicBlock::validateInstrOrdering() const {
|
|
|
|
if (!isInstrOrderValid())
|
|
|
|
return;
|
|
|
|
const Instruction *Prev = nullptr;
|
|
|
|
for (const Instruction &I : *this) {
|
|
|
|
assert((!Prev || Prev->comesBefore(&I)) &&
|
|
|
|
"cached instruction ordering is incorrect");
|
|
|
|
Prev = &I;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
#endif
|