//===-- Local.cpp - Functions to perform local transformations ------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This family of functions perform various local transformations to the
// program.
//
//===----------------------------------------------------------------------===//
|
|
|
|
|
|
|
|
#include "llvm/Transforms/Utils/Local.h"
|
2012-12-04 00:50:05 +08:00
|
|
|
#include "llvm/ADT/DenseMap.h"
|
2015-06-19 00:01:00 +08:00
|
|
|
#include "llvm/ADT/DenseSet.h"
|
|
|
|
#include "llvm/ADT/Hashing.h"
|
2012-12-21 19:18:49 +08:00
|
|
|
#include "llvm/ADT/STLExtras.h"
|
2015-09-29 02:56:07 +08:00
|
|
|
#include "llvm/ADT/SetVector.h"
|
2013-01-02 18:22:59 +08:00
|
|
|
#include "llvm/ADT/SmallPtrSet.h"
|
2013-08-13 06:38:43 +08:00
|
|
|
#include "llvm/ADT/Statistic.h"
|
2015-12-03 07:06:39 +08:00
|
|
|
#include "llvm/Analysis/EHPersonalities.h"
|
2012-12-04 00:50:05 +08:00
|
|
|
#include "llvm/Analysis/InstructionSimplify.h"
|
|
|
|
#include "llvm/Analysis/MemoryBuiltins.h"
|
2016-01-10 15:13:04 +08:00
|
|
|
#include "llvm/Analysis/LazyValueInfo.h"
|
2012-12-04 00:50:05 +08:00
|
|
|
#include "llvm/Analysis/ValueTracking.h"
|
2014-03-04 19:45:46 +08:00
|
|
|
#include "llvm/IR/CFG.h"
|
2013-01-02 19:36:10 +08:00
|
|
|
#include "llvm/IR/Constants.h"
|
2014-03-06 08:22:06 +08:00
|
|
|
#include "llvm/IR/DIBuilder.h"
|
2013-01-02 19:36:10 +08:00
|
|
|
#include "llvm/IR/DataLayout.h"
|
2014-03-06 08:46:21 +08:00
|
|
|
#include "llvm/IR/DebugInfo.h"
|
2013-01-02 19:36:10 +08:00
|
|
|
#include "llvm/IR/DerivedTypes.h"
|
2014-01-13 17:26:24 +08:00
|
|
|
#include "llvm/IR/Dominators.h"
|
2014-03-04 18:40:04 +08:00
|
|
|
#include "llvm/IR/GetElementPtrTypeIterator.h"
|
2013-01-02 19:36:10 +08:00
|
|
|
#include "llvm/IR/GlobalAlias.h"
|
|
|
|
#include "llvm/IR/GlobalVariable.h"
|
|
|
|
#include "llvm/IR/IRBuilder.h"
|
|
|
|
#include "llvm/IR/Instructions.h"
|
|
|
|
#include "llvm/IR/IntrinsicInst.h"
|
|
|
|
#include "llvm/IR/Intrinsics.h"
|
|
|
|
#include "llvm/IR/MDBuilder.h"
|
|
|
|
#include "llvm/IR/Metadata.h"
|
|
|
|
#include "llvm/IR/Operator.h"
|
2016-06-25 16:34:38 +08:00
|
|
|
#include "llvm/IR/PatternMatch.h"
|
2014-03-04 19:17:44 +08:00
|
|
|
#include "llvm/IR/ValueHandle.h"
|
2009-11-10 13:59:26 +08:00
|
|
|
#include "llvm/Support/Debug.h"
|
2005-09-26 13:27:10 +08:00
|
|
|
#include "llvm/Support/MathExtras.h"
|
2009-11-10 13:59:26 +08:00
|
|
|
#include "llvm/Support/raw_ostream.h"
|
2003-12-19 13:56:28 +08:00
|
|
|
using namespace llvm;
|
2016-06-25 16:34:38 +08:00
|
|
|
using namespace llvm::PatternMatch;
|
2003-11-12 06:41:34 +08:00
|
|
|
|
2014-04-22 06:55:11 +08:00
|
|
|
#define DEBUG_TYPE "local"
|
|
|
|
|
2013-08-13 06:38:43 +08:00
|
|
|
STATISTIC(NumRemoved, "Number of unreachable basic blocks removed");
|
|
|
|
|
2002-05-08 02:07:59 +08:00
|
|
|
//===----------------------------------------------------------------------===//
//  Local constant propagation.
//
|
|
|
|
|
2011-05-23 00:24:18 +08:00
|
|
|
/// ConstantFoldTerminator - If a terminator instruction is predicated on a
/// constant value, convert it into an unconditional branch to the constant
/// destination.  This is a nontrivial operation because the successors of this
/// basic block must have their PHI nodes updated.
/// Also calls RecursivelyDeleteTriviallyDeadInstructions() on any branch/switch
/// conditions and indirectbr addresses this might make dead if
/// DeleteDeadConditions is true.
///
/// Handles three terminator kinds: BranchInst, SwitchInst and IndirectBrInst.
/// Returns true if the terminator was replaced, false if nothing was done.
bool llvm::ConstantFoldTerminator(BasicBlock *BB, bool DeleteDeadConditions,
                                  const TargetLibraryInfo *TLI) {
  TerminatorInst *T = BB->getTerminator();
  IRBuilder<> Builder(T);

  // Branch - See if we are conditional jumping on constant
  if (BranchInst *BI = dyn_cast<BranchInst>(T)) {
    if (BI->isUnconditional()) return false;  // Can't optimize uncond branch
    BasicBlock *Dest1 = BI->getSuccessor(0);
    BasicBlock *Dest2 = BI->getSuccessor(1);

    if (ConstantInt *Cond = dyn_cast<ConstantInt>(BI->getCondition())) {
      // Are we branching on constant?
      // YES.  Change to unconditional branch...
      BasicBlock *Destination = Cond->getZExtValue() ? Dest1 : Dest2;
      BasicBlock *OldDest     = Cond->getZExtValue() ? Dest2 : Dest1;

      //cerr << "Function: " << T->getParent()->getParent()
      //     << "\nRemoving branch from " << T->getParent()
      //     << "\n\nTo: " << OldDest << endl;

      // Let the basic block know that we are letting go of it.  Based on this,
      // it will adjust it's PHI nodes.
      OldDest->removePredecessor(BB);

      // Replace the conditional branch with an unconditional one.
      Builder.CreateBr(Destination);
      BI->eraseFromParent();
      return true;
    }

    if (Dest2 == Dest1) {       // Conditional branch to same location?
      // This branch matches something like this:
      //     br bool %cond, label %Dest, label %Dest
      // and changes it into:  br label %Dest

      // Let the basic block know that we are letting go of one copy of it.
      assert(BI->getParent() && "Terminator not inserted in block!");
      Dest1->removePredecessor(BI->getParent());

      // Replace the conditional branch with an unconditional one.
      Builder.CreateBr(Dest1);
      Value *Cond = BI->getCondition();
      BI->eraseFromParent();
      if (DeleteDeadConditions)
        RecursivelyDeleteTriviallyDeadInstructions(Cond, TLI);
      return true;
    }
    return false;
  }

  if (SwitchInst *SI = dyn_cast<SwitchInst>(T)) {
    // If we are switching on a constant, we can convert the switch to an
    // unconditional branch.
    ConstantInt *CI = dyn_cast<ConstantInt>(SI->getCondition());
    BasicBlock *DefaultDest = SI->getDefaultDest();
    BasicBlock *TheOnlyDest = DefaultDest;

    // If the default is unreachable, ignore it when searching for TheOnlyDest.
    if (isa<UnreachableInst>(DefaultDest->getFirstNonPHIOrDbg()) &&
        SI->getNumCases() > 0) {
      TheOnlyDest = SI->case_begin().getCaseSuccessor();
    }

    // Figure out which case it goes to.
    for (SwitchInst::CaseIt i = SI->case_begin(), e = SI->case_end();
         i != e; ++i) {
      // Found case matching a constant operand?
      // NOTE: CI may be null here; getCaseValue() is never null, so the
      // comparison simply fails for a non-constant switch condition.
      if (i.getCaseValue() == CI) {
        TheOnlyDest = i.getCaseSuccessor();
        break;
      }

      // Check to see if this branch is going to the same place as the default
      // dest.  If so, eliminate it as an explicit compare.
      if (i.getCaseSuccessor() == DefaultDest) {
        MDNode *MD = SI->getMetadata(LLVMContext::MD_prof);
        unsigned NCases = SI->getNumCases();
        // Fold the case metadata into the default if there will be any branches
        // left, unless the metadata doesn't match the switch.
        if (NCases > 1 && MD && MD->getNumOperands() == 2 + NCases) {
          // Collect branch weights into a vector.
          SmallVector<uint32_t, 8> Weights;
          for (unsigned MD_i = 1, MD_e = MD->getNumOperands(); MD_i < MD_e;
               ++MD_i) {
            auto *CI = mdconst::extract<ConstantInt>(MD->getOperand(MD_i));
            Weights.push_back(CI->getValue().getZExtValue());
          }
          // Merge weight of this case to the default weight.
          unsigned idx = i.getCaseIndex();
          Weights[0] += Weights[idx+1];
          // Remove weight for this case.
          std::swap(Weights[idx+1], Weights.back());
          Weights.pop_back();
          SI->setMetadata(LLVMContext::MD_prof,
                          MDBuilder(BB->getContext()).
                          createBranchWeights(Weights));
        }
        // Remove this entry.
        DefaultDest->removePredecessor(SI->getParent());
        SI->removeCase(i);
        // removeCase shifted the remaining cases down; step the iterator and
        // the end marker back so the loop revisits the swapped-in case.
        --i; --e;
        continue;
      }

      // Otherwise, check to see if the switch only branches to one destination.
      // We do this by reseting "TheOnlyDest" to null when we find two non-equal
      // destinations.
      if (i.getCaseSuccessor() != TheOnlyDest) TheOnlyDest = nullptr;
    }

    if (CI && !TheOnlyDest) {
      // Branching on a constant, but not any of the cases, go to the default
      // successor.
      TheOnlyDest = SI->getDefaultDest();
    }

    // If we found a single destination that we can fold the switch into, do so
    // now.
    if (TheOnlyDest) {
      // Insert the new branch.
      Builder.CreateBr(TheOnlyDest);
      BasicBlock *BB = SI->getParent();

      // Remove entries from PHI nodes which we no longer branch to...
      for (BasicBlock *Succ : SI->successors()) {
        // Found case matching a constant operand?
        if (Succ == TheOnlyDest)
          TheOnlyDest = nullptr;  // Don't modify the first branch to TheOnlyDest
        else
          Succ->removePredecessor(BB);
      }

      // Delete the old switch.
      Value *Cond = SI->getCondition();
      SI->eraseFromParent();
      if (DeleteDeadConditions)
        RecursivelyDeleteTriviallyDeadInstructions(Cond, TLI);
      return true;
    }

    if (SI->getNumCases() == 1) {
      // Otherwise, we can fold this switch into a conditional branch
      // instruction if it has only one non-default destination.
      SwitchInst::CaseIt FirstCase = SI->case_begin();
      Value *Cond = Builder.CreateICmpEQ(SI->getCondition(),
                                         FirstCase.getCaseValue(), "cond");

      // Insert the new branch.
      BranchInst *NewBr = Builder.CreateCondBr(Cond,
                                               FirstCase.getCaseSuccessor(),
                                               SI->getDefaultDest());
      MDNode *MD = SI->getMetadata(LLVMContext::MD_prof);
      if (MD && MD->getNumOperands() == 3) {
        ConstantInt *SICase =
            mdconst::dyn_extract<ConstantInt>(MD->getOperand(2));
        ConstantInt *SIDef =
            mdconst::dyn_extract<ConstantInt>(MD->getOperand(1));
        assert(SICase && SIDef);
        // The TrueWeight should be the weight for the single case of SI.
        NewBr->setMetadata(LLVMContext::MD_prof,
                           MDBuilder(BB->getContext()).
                           createBranchWeights(SICase->getValue().getZExtValue(),
                                               SIDef->getValue().getZExtValue()));
      }

      // Update make.implicit metadata to the newly-created conditional branch.
      MDNode *MakeImplicitMD = SI->getMetadata(LLVMContext::MD_make_implicit);
      if (MakeImplicitMD)
        NewBr->setMetadata(LLVMContext::MD_make_implicit, MakeImplicitMD);

      // Delete the old switch.
      SI->eraseFromParent();
      return true;
    }
    return false;
  }

  if (IndirectBrInst *IBI = dyn_cast<IndirectBrInst>(T)) {
    // indirectbr blockaddress(@F, @BB) -> br label @BB
    if (BlockAddress *BA =
          dyn_cast<BlockAddress>(IBI->getAddress()->stripPointerCasts())) {
      BasicBlock *TheOnlyDest = BA->getBasicBlock();
      // Insert the new branch.
      Builder.CreateBr(TheOnlyDest);

      for (unsigned i = 0, e = IBI->getNumDestinations(); i != e; ++i) {
        if (IBI->getDestination(i) == TheOnlyDest)
          TheOnlyDest = nullptr;
        else
          IBI->getDestination(i)->removePredecessor(IBI->getParent());
      }
      Value *Address = IBI->getAddress();
      IBI->eraseFromParent();
      if (DeleteDeadConditions)
        RecursivelyDeleteTriviallyDeadInstructions(Address, TLI);

      // If we didn't find our destination in the IBI successor list, then we
      // have undefined behavior.  Replace the unconditional branch with an
      // 'unreachable' instruction.
      if (TheOnlyDest) {
        BB->getTerminator()->eraseFromParent();
        new UnreachableInst(BB->getContext(), BB);
      }

      return true;
    }
  }

  return false;
}
|
|
|
|
|
|
|
|
|
|
|
|
//===----------------------------------------------------------------------===//
//  Local dead code elimination.
//
|
|
|
|
|
2008-11-28 06:57:53 +08:00
|
|
|
/// isInstructionTriviallyDead - Return true if the result produced by the
|
|
|
|
/// instruction is not used, and the instruction has no side effects.
|
|
|
|
///
|
2012-08-29 23:32:21 +08:00
|
|
|
bool llvm::isInstructionTriviallyDead(Instruction *I,
|
|
|
|
const TargetLibraryInfo *TLI) {
|
2005-05-06 13:27:34 +08:00
|
|
|
if (!I->use_empty() || isa<TerminatorInst>(I)) return false;
|
2005-07-27 14:12:32 +08:00
|
|
|
|
2015-08-01 01:58:14 +08:00
|
|
|
// We don't want the landingpad-like instructions removed by anything this
|
|
|
|
// general.
|
|
|
|
if (I->isEHPad())
|
2011-08-16 04:10:51 +08:00
|
|
|
return false;
|
|
|
|
|
2011-03-19 07:28:02 +08:00
|
|
|
// We don't want debug info removed by anything this general, unless
|
|
|
|
// debug info is empty.
|
|
|
|
if (DbgDeclareInst *DDI = dyn_cast<DbgDeclareInst>(I)) {
|
2011-08-03 05:19:27 +08:00
|
|
|
if (DDI->getAddress())
|
2011-03-19 07:28:02 +08:00
|
|
|
return false;
|
2011-03-22 06:04:45 +08:00
|
|
|
return true;
|
2011-08-03 05:19:27 +08:00
|
|
|
}
|
2011-03-22 06:04:45 +08:00
|
|
|
if (DbgValueInst *DVI = dyn_cast<DbgValueInst>(I)) {
|
2011-03-19 07:28:02 +08:00
|
|
|
if (DVI->getValue())
|
|
|
|
return false;
|
2011-03-22 06:04:45 +08:00
|
|
|
return true;
|
2011-03-19 07:28:02 +08:00
|
|
|
}
|
|
|
|
|
2009-05-06 14:49:50 +08:00
|
|
|
if (!I->mayHaveSideEffects()) return true;
|
|
|
|
|
|
|
|
// Special case intrinsics that "may have side effects" but can be deleted
|
|
|
|
// when dead.
|
2011-08-03 05:19:27 +08:00
|
|
|
if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
|
2007-12-29 08:59:12 +08:00
|
|
|
// Safe to delete llvm.stacksave if dead.
|
|
|
|
if (II->getIntrinsicID() == Intrinsic::stacksave)
|
|
|
|
return true;
|
2011-08-03 05:19:27 +08:00
|
|
|
|
|
|
|
// Lifetime intrinsics are dead when their right-hand is undef.
|
|
|
|
if (II->getIntrinsicID() == Intrinsic::lifetime_start ||
|
|
|
|
II->getIntrinsicID() == Intrinsic::lifetime_end)
|
|
|
|
return isa<UndefValue>(II->getArgOperand(1));
|
2014-07-26 05:13:35 +08:00
|
|
|
|
2016-04-30 06:23:16 +08:00
|
|
|
// Assumptions are dead if their condition is trivially true. Guards on
|
|
|
|
// true are operationally no-ops. In the future we can consider more
|
|
|
|
// sophisticated tradeoffs for guards considering potential for check
|
|
|
|
// widening, but for now we keep things simple.
|
|
|
|
if (II->getIntrinsicID() == Intrinsic::assume ||
|
|
|
|
II->getIntrinsicID() == Intrinsic::experimental_guard) {
|
2014-07-26 05:13:35 +08:00
|
|
|
if (ConstantInt *Cond = dyn_cast<ConstantInt>(II->getArgOperand(0)))
|
|
|
|
return !Cond->isZero();
|
|
|
|
|
|
|
|
return false;
|
|
|
|
}
|
2011-08-03 05:19:27 +08:00
|
|
|
}
|
2011-10-24 12:35:36 +08:00
|
|
|
|
2012-08-29 23:32:21 +08:00
|
|
|
if (isAllocLikeFn(I, TLI)) return true;
|
2011-10-24 12:35:36 +08:00
|
|
|
|
2012-08-29 23:32:21 +08:00
|
|
|
if (CallInst *CI = isFreeCall(I, TLI))
|
2011-10-24 12:35:36 +08:00
|
|
|
if (Constant *C = dyn_cast<Constant>(CI->getArgOperand(0)))
|
|
|
|
return C->isNullValue() || isa<UndefValue>(C);
|
|
|
|
|
2016-11-03 04:48:11 +08:00
|
|
|
if (CallSite CS = CallSite(I))
|
|
|
|
if (isMathLibCallNoop(CS, TLI))
|
|
|
|
return true;
|
|
|
|
|
2005-05-06 13:27:34 +08:00
|
|
|
return false;
|
2002-05-08 02:07:59 +08:00
|
|
|
}
|
|
|
|
|
2008-11-28 06:57:53 +08:00
|
|
|
/// RecursivelyDeleteTriviallyDeadInstructions - If the specified value is a
|
|
|
|
/// trivially dead instruction, delete it. If that makes any of its operands
|
2010-01-05 23:45:31 +08:00
|
|
|
/// trivially dead, delete them too, recursively. Return true if any
|
|
|
|
/// instructions were deleted.
|
2012-08-29 23:32:21 +08:00
|
|
|
bool
|
|
|
|
llvm::RecursivelyDeleteTriviallyDeadInstructions(Value *V,
|
|
|
|
const TargetLibraryInfo *TLI) {
|
2008-11-28 06:57:53 +08:00
|
|
|
Instruction *I = dyn_cast<Instruction>(V);
|
2012-08-29 23:32:21 +08:00
|
|
|
if (!I || !I->use_empty() || !isInstructionTriviallyDead(I, TLI))
|
2010-01-05 23:45:31 +08:00
|
|
|
return false;
|
2013-07-23 07:16:36 +08:00
|
|
|
|
2008-11-28 09:20:46 +08:00
|
|
|
SmallVector<Instruction*, 16> DeadInsts;
|
|
|
|
DeadInsts.push_back(I);
|
2013-07-23 07:16:36 +08:00
|
|
|
|
2010-01-06 00:27:25 +08:00
|
|
|
do {
|
2009-05-07 01:22:41 +08:00
|
|
|
I = DeadInsts.pop_back_val();
|
2008-11-28 08:58:15 +08:00
|
|
|
|
2008-11-28 09:20:46 +08:00
|
|
|
// Null out all of the instruction's operands to see if any operand becomes
|
|
|
|
// dead as we go.
|
|
|
|
for (unsigned i = 0, e = I->getNumOperands(); i != e; ++i) {
|
|
|
|
Value *OpV = I->getOperand(i);
|
2014-04-25 13:29:35 +08:00
|
|
|
I->setOperand(i, nullptr);
|
2013-07-23 07:16:36 +08:00
|
|
|
|
2008-11-28 09:20:46 +08:00
|
|
|
if (!OpV->use_empty()) continue;
|
2013-07-23 07:16:36 +08:00
|
|
|
|
2008-11-28 09:20:46 +08:00
|
|
|
// If the operand is an instruction that became dead as we nulled out the
|
|
|
|
// operand, and if it is 'trivially' dead, delete it in a future loop
|
|
|
|
// iteration.
|
|
|
|
if (Instruction *OpI = dyn_cast<Instruction>(OpV))
|
2012-08-29 23:32:21 +08:00
|
|
|
if (isInstructionTriviallyDead(OpI, TLI))
|
2008-11-28 09:20:46 +08:00
|
|
|
DeadInsts.push_back(OpI);
|
|
|
|
}
|
2013-07-23 07:16:36 +08:00
|
|
|
|
2008-11-28 09:20:46 +08:00
|
|
|
I->eraseFromParent();
|
2010-01-06 00:27:25 +08:00
|
|
|
} while (!DeadInsts.empty());
|
2010-01-05 23:45:31 +08:00
|
|
|
|
|
|
|
return true;
|
2002-05-08 02:07:59 +08:00
|
|
|
}
|
2008-11-27 15:43:12 +08:00
|
|
|
|
2011-02-20 16:38:20 +08:00
|
|
|
/// areAllUsesEqual - Check whether the uses of a value are all the same.
|
|
|
|
/// This is similar to Instruction::hasOneUse() except this will also return
|
2011-02-22 00:27:36 +08:00
|
|
|
/// true when there are no uses or multiple uses that all refer to the same
|
|
|
|
/// value.
|
2011-02-20 16:38:20 +08:00
|
|
|
static bool areAllUsesEqual(Instruction *I) {
|
2014-03-09 11:16:01 +08:00
|
|
|
Value::user_iterator UI = I->user_begin();
|
|
|
|
Value::user_iterator UE = I->user_end();
|
2011-02-20 16:38:20 +08:00
|
|
|
if (UI == UE)
|
2011-02-22 00:27:36 +08:00
|
|
|
return true;
|
2011-02-20 16:38:20 +08:00
|
|
|
|
|
|
|
User *TheUse = *UI;
|
|
|
|
for (++UI; UI != UE; ++UI) {
|
|
|
|
if (*UI != TheUse)
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
2009-05-03 02:29:22 +08:00
|
|
|
/// RecursivelyDeleteDeadPHINode - If the specified value is an effectively
/// dead PHI node, due to being a def-use chain of single-use nodes that
/// either forms a cycle or is terminated by a trivially dead instruction,
/// delete it.  If that makes any of its operands trivially dead, delete them
/// too, recursively.  Return true if a change was made.
bool llvm::RecursivelyDeleteDeadPHINode(PHINode *PN,
                                        const TargetLibraryInfo *TLI) {
  SmallPtrSet<Instruction*, 4> Visited;
  // Walk down the chain of single-user, side-effect-free instructions
  // starting at PN.  areAllUsesEqual() guarantees *user_begin() is the one
  // (possibly repeated) user, so the step expression is well-defined.
  for (Instruction *I = PN; areAllUsesEqual(I) && !I->mayHaveSideEffects();
       I = cast<Instruction>(*I->user_begin())) {
    // Chain terminated in an unused instruction: let the trivially-dead
    // deleter clean up I and, transitively, the rest of the chain.
    if (I->use_empty())
      return RecursivelyDeleteTriviallyDeadInstructions(I, TLI);

    // If we find an instruction more than once, we're on a cycle that
    // won't prove fruitful.
    if (!Visited.insert(I).second) {
      // Break the cycle and delete the instruction and its operands.
      // Replacing uses with undef makes I unused so the deleter can fire.
      I->replaceAllUsesWith(UndefValue::get(I->getType()));
      (void)RecursivelyDeleteTriviallyDeadInstructions(I, TLI);
      return true;
    }
  }
  // Chain escaped (multiple distinct users or a side-effecting instruction);
  // nothing was deleted.
  return false;
}
|
2008-11-28 06:57:53 +08:00
|
|
|
|
2015-09-29 02:56:07 +08:00
|
|
|
/// Try to delete \p I if it is trivially dead, or otherwise replace it with
/// a simplified value.  Instructions that become dead or re-simplifiable as
/// a result are added to \p WorkList rather than processed recursively.
/// Returns true if the IR was changed.
static bool
simplifyAndDCEInstruction(Instruction *I,
                          SmallSetVector<Instruction *, 16> &WorkList,
                          const DataLayout &DL,
                          const TargetLibraryInfo *TLI) {
  if (isInstructionTriviallyDead(I, TLI)) {
    // Null out all of the instruction's operands to see if any operand becomes
    // dead as we go.
    for (unsigned i = 0, e = I->getNumOperands(); i != e; ++i) {
      Value *OpV = I->getOperand(i);
      I->setOperand(i, nullptr);

      // Skip operands that still have uses, and skip I itself (an
      // instruction such as a phi can use itself); I is erased below.
      if (!OpV->use_empty() || I == OpV)
        continue;

      // If the operand is an instruction that became dead as we nulled out the
      // operand, and if it is 'trivially' dead, delete it in a future loop
      // iteration.
      if (Instruction *OpI = dyn_cast<Instruction>(OpV))
        if (isInstructionTriviallyDead(OpI, TLI))
          WorkList.insert(OpI);
    }

    I->eraseFromParent();

    return true;
  }

  if (Value *SimpleV = SimplifyInstruction(I, DL)) {
    // Add the users to the worklist. CAREFUL: an instruction can use itself,
    // in the case of a phi node.
    for (User *U : I->users()) {
      if (U != I) {
        WorkList.insert(cast<Instruction>(U));
      }
    }

    // Replace the instruction with its simplified value.
    bool Changed = false;
    if (!I->use_empty()) {
      I->replaceAllUsesWith(SimpleV);
      Changed = true;
    }
    // After RAUW the instruction may have become unused; erase it if it is
    // now trivially dead.
    if (isInstructionTriviallyDead(I, TLI)) {
      I->eraseFromParent();
      Changed = true;
    }
    return Changed;
  }
  return false;
}
|
|
|
|
|
2010-01-13 03:40:54 +08:00
|
|
|
/// SimplifyInstructionsInBlock - Scan the specified basic block and try to
/// simplify any instructions in it and recursively delete dead instructions.
///
/// This returns true if it changed the code, note that it can delete
/// instructions in other blocks as well in this block.
bool llvm::SimplifyInstructionsInBlock(BasicBlock *BB,
                                       const TargetLibraryInfo *TLI) {
  bool MadeChange = false;
  const DataLayout &DL = BB->getModule()->getDataLayout();

#ifndef NDEBUG
  // In debug builds, ensure that the terminator of the block is never replaced
  // or deleted by these simplifications. The idea of simplification is that it
  // cannot introduce new instructions, and there is no way to replace the
  // terminator of a block without introducing a new instruction.
  AssertingVH<Instruction> TerminatorVH(&BB->back());
#endif

  SmallSetVector<Instruction *, 16> WorkList;
  // Iterate over the original function, only adding insts to the worklist
  // if they actually need to be revisited. This avoids having to pre-init
  // the worklist with the entire function's worth of instructions.
  // The range stops at std::prev(BB->end()) so the terminator is never
  // visited.
  for (BasicBlock::iterator BI = BB->begin(), E = std::prev(BB->end());
       BI != E;) {
    assert(!BI->isTerminator());
    Instruction *I = &*BI;
    // Advance the iterator before simplifying, since I may be erased.
    ++BI;

    // We're visiting this instruction now, so make sure it's not in the
    // worklist from an earlier visit.
    if (!WorkList.count(I))
      MadeChange |= simplifyAndDCEInstruction(I, WorkList, DL, TLI);
  }

  // Drain the worklist; processing an entry may push further entries.
  while (!WorkList.empty()) {
    Instruction *I = WorkList.pop_back_val();
    MadeChange |= simplifyAndDCEInstruction(I, WorkList, DL, TLI);
  }
  return MadeChange;
}
|
|
|
|
|
2008-11-27 15:43:12 +08:00
|
|
|
//===----------------------------------------------------------------------===//
|
2009-11-11 06:26:15 +08:00
|
|
|
// Control Flow Graph Restructuring.
|
2008-11-27 15:43:12 +08:00
|
|
|
//
|
|
|
|
|
2009-11-11 06:26:15 +08:00
|
|
|
|
|
|
|
/// RemovePredecessorAndSimplify - Like BasicBlock::removePredecessor, this
/// method is called when we're about to delete Pred as a predecessor of BB.  If
/// BB contains any PHI nodes, this drops the entries in the PHI nodes for Pred.
///
/// Unlike the removePredecessor method, this attempts to simplify uses of PHI
/// nodes that collapse into identity values.  For example, if we have:
///   x = phi(1, 0, 0, 0)
///   y = and x, z
///
/// .. and delete the predecessor corresponding to the '1', this will attempt to
/// recursively fold the and to 0.
void llvm::RemovePredecessorAndSimplify(BasicBlock *BB, BasicBlock *Pred) {
  // This only adjusts blocks with PHI nodes.
  if (!isa<PHINode>(BB->begin()))
    return;

  // Remove the entries for Pred from the PHI nodes in BB, but do not simplify
  // them down.  This will leave us with single entry phi nodes and other phis
  // that can be removed.
  BB->removePredecessor(Pred, true);

  // PhiIt is held in a WeakVH so we can detect when simplification deletes
  // the instruction it points at (the handle then changes out from under us).
  WeakVH PhiIt = &BB->front();
  while (PHINode *PN = dyn_cast<PHINode>(PhiIt)) {
    // Step to the next instruction before simplifying PN, since PN (or its
    // neighbors) may be deleted by the simplification below.
    PhiIt = &*++BasicBlock::iterator(cast<Instruction>(PhiIt));
    Value *OldPhiIt = PhiIt;

    if (!recursivelySimplifyInstruction(PN))
      continue;

    // If recursive simplification ended up deleting the next PHI node we would
    // iterate to, then our iterator is invalid, restart scanning from the top
    // of the block.
    if (PhiIt != OldPhiIt) PhiIt = &BB->front();
  }
}
|
|
|
|
|
|
|
|
|
2008-11-27 15:43:12 +08:00
|
|
|
/// MergeBasicBlockIntoOnlyPred - DestBB is a block with one predecessor and its
/// predecessor is known to have one successor (DestBB!).  Eliminate the edge
/// between them, moving the instructions in the predecessor into DestBB and
/// deleting the predecessor block.  If \p DT is non-null it is updated to
/// reflect the change.
///
void llvm::MergeBasicBlockIntoOnlyPred(BasicBlock *DestBB, DominatorTree *DT) {
  // If BB has single-entry PHI nodes, fold them.
  while (PHINode *PN = dyn_cast<PHINode>(DestBB->begin())) {
    Value *NewVal = PN->getIncomingValue(0);
    // Replace self referencing PHI with undef, it must be dead.
    if (NewVal == PN) NewVal = UndefValue::get(PN->getType());
    PN->replaceAllUsesWith(NewVal);
    PN->eraseFromParent();
  }

  BasicBlock *PredBB = DestBB->getSinglePredecessor();
  assert(PredBB && "Block doesn't have a single predecessor!");

  // Zap anything that took the address of DestBB.  Not doing this will give the
  // address an invalid value.  The blockaddress is rewritten to the arbitrary
  // non-null constant 1, since the block it named is going to be merged away.
  if (DestBB->hasAddressTaken()) {
    BlockAddress *BA = BlockAddress::get(DestBB);
    Constant *Replacement =
      ConstantInt::get(llvm::Type::getInt32Ty(BA->getContext()), 1);
    BA->replaceAllUsesWith(ConstantExpr::getIntToPtr(Replacement,
                                                     BA->getType()));
    BA->destroyConstant();
  }

  // Anything that branched to PredBB now branches to DestBB.
  PredBB->replaceAllUsesWith(DestBB);

  // Splice all the instructions from PredBB to DestBB.  The terminator is
  // erased first; DestBB already has its own terminator.
  PredBB->getTerminator()->eraseFromParent();
  DestBB->getInstList().splice(DestBB->begin(), PredBB->getInstList());

  // If the PredBB is the entry block of the function, move DestBB up to
  // become the entry block after we erase PredBB.
  if (PredBB == &DestBB->getParent()->getEntryBlock())
    DestBB->moveAfter(PredBB);

  // Keep the dominator tree consistent: DestBB inherits PredBB's immediate
  // dominator, and PredBB's node is removed.
  if (DT) {
    BasicBlock *PredBBIDom = DT->getNode(PredBB)->getIDom()->getBlock();
    DT->changeImmediateDominator(DestBB, PredBBIDom);
    DT->eraseNode(PredBB);
  }
  // Nuke BB.
  PredBB->eraseFromParent();
}
|
2009-02-10 15:00:59 +08:00
|
|
|
|
2013-07-11 16:28:20 +08:00
|
|
|
/// CanMergeValues - Return true if we can choose one of these values to use
|
|
|
|
/// in place of the other. Note that we will always choose the non-undef
|
|
|
|
/// value to keep.
|
|
|
|
static bool CanMergeValues(Value *First, Value *Second) {
|
|
|
|
return First == Second || isa<UndefValue>(First) || isa<UndefValue>(Second);
|
|
|
|
}
|
|
|
|
|
2009-11-10 13:59:26 +08:00
|
|
|
/// CanPropagatePredecessorsForPHIs - Return true if we can fold BB, an
/// almost-empty BB ending in an unconditional branch to Succ, into Succ.
///
/// Assumption: Succ is the single successor for BB.
///
static bool CanPropagatePredecessorsForPHIs(BasicBlock *BB, BasicBlock *Succ) {
  assert(*succ_begin(BB) == Succ && "Succ is not successor of BB!");

  DEBUG(dbgs() << "Looking to fold " << BB->getName() << " into "
        << Succ->getName() << "\n");
  // Shortcut, if there is only a single predecessor it must be BB and merging
  // is always safe
  if (Succ->getSinglePredecessor()) return true;

  // Make a list of the predecessors of BB
  SmallPtrSet<BasicBlock*, 16> BBPreds(pred_begin(BB), pred_end(BB));

  // Look at all the phi nodes in Succ, to see if they present a conflict when
  // merging these blocks.  A conflict arises when a block is a predecessor of
  // both BB and Succ and the two paths would feed different, non-mergeable
  // values into the same phi.
  for (BasicBlock::iterator I = Succ->begin(); isa<PHINode>(I); ++I) {
    PHINode *PN = cast<PHINode>(I);

    // If the incoming value from BB is again a PHINode in
    // BB which has the same incoming value for *PI as PN does, we can
    // merge the phi nodes and then the blocks can still be merged
    PHINode *BBPN = dyn_cast<PHINode>(PN->getIncomingValueForBlock(BB));
    if (BBPN && BBPN->getParent() == BB) {
      // Compare BBPN's incoming value from each common predecessor against
      // PN's direct incoming value from that predecessor.
      for (unsigned PI = 0, PE = PN->getNumIncomingValues(); PI != PE; ++PI) {
        BasicBlock *IBB = PN->getIncomingBlock(PI);
        if (BBPreds.count(IBB) &&
            !CanMergeValues(BBPN->getIncomingValueForBlock(IBB),
                            PN->getIncomingValue(PI))) {
          DEBUG(dbgs() << "Can't fold, phi node " << PN->getName() << " in "
                << Succ->getName() << " is conflicting with "
                << BBPN->getName() << " with regard to common predecessor "
                << IBB->getName() << "\n");
          return false;
        }
      }
    } else {
      Value* Val = PN->getIncomingValueForBlock(BB);
      for (unsigned PI = 0, PE = PN->getNumIncomingValues(); PI != PE; ++PI) {
        // See if the incoming value for the common predecessor is equal to the
        // one for BB, in which case this phi node will not prevent the merging
        // of the block.
        BasicBlock *IBB = PN->getIncomingBlock(PI);
        if (BBPreds.count(IBB) &&
            !CanMergeValues(Val, PN->getIncomingValue(PI))) {
          DEBUG(dbgs() << "Can't fold, phi node " << PN->getName() << " in "
                << Succ->getName() << " is conflicting with regard to common "
                << "predecessor " << IBB->getName() << "\n");
          return false;
        }
      }
    }
  }

  return true;
}
|
|
|
|
|
2013-07-11 16:28:20 +08:00
|
|
|
typedef SmallVector<BasicBlock *, 16> PredBlockVector;
|
|
|
|
typedef DenseMap<BasicBlock *, Value *> IncomingValueMap;
|
|
|
|
|
|
|
|
/// \brief Determines the value to use as the phi node input for a block.
|
|
|
|
///
|
|
|
|
/// Select between \p OldVal any value that we know flows from \p BB
|
|
|
|
/// to a particular phi on the basis of which one (if either) is not
|
|
|
|
/// undef. Update IncomingValues based on the selected value.
|
|
|
|
///
|
|
|
|
/// \param OldVal The value we are considering selecting.
|
|
|
|
/// \param BB The block that the value flows in from.
|
|
|
|
/// \param IncomingValues A map from block-to-value for other phi inputs
|
|
|
|
/// that we have examined.
|
|
|
|
///
|
|
|
|
/// \returns the selected value.
|
|
|
|
static Value *selectIncomingValueForBlock(Value *OldVal, BasicBlock *BB,
|
|
|
|
IncomingValueMap &IncomingValues) {
|
|
|
|
if (!isa<UndefValue>(OldVal)) {
|
|
|
|
assert((!IncomingValues.count(BB) ||
|
|
|
|
IncomingValues.find(BB)->second == OldVal) &&
|
|
|
|
"Expected OldVal to match incoming value from BB!");
|
|
|
|
|
|
|
|
IncomingValues.insert(std::make_pair(BB, OldVal));
|
|
|
|
return OldVal;
|
|
|
|
}
|
|
|
|
|
|
|
|
IncomingValueMap::const_iterator It = IncomingValues.find(BB);
|
|
|
|
if (It != IncomingValues.end()) return It->second;
|
|
|
|
|
|
|
|
return OldVal;
|
|
|
|
}
|
|
|
|
|
|
|
|
/// \brief Create a map from block to value for the operands of a
|
|
|
|
/// given phi.
|
|
|
|
///
|
|
|
|
/// Create a map from block to value for each non-undef value flowing
|
|
|
|
/// into \p PN.
|
|
|
|
///
|
|
|
|
/// \param PN The phi we are collecting the map for.
|
|
|
|
/// \param IncomingValues [out] The map from block to value for this phi.
|
|
|
|
static void gatherIncomingValuesToPhi(PHINode *PN,
|
|
|
|
IncomingValueMap &IncomingValues) {
|
|
|
|
for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
|
|
|
|
BasicBlock *BB = PN->getIncomingBlock(i);
|
|
|
|
Value *V = PN->getIncomingValue(i);
|
|
|
|
|
|
|
|
if (!isa<UndefValue>(V))
|
|
|
|
IncomingValues.insert(std::make_pair(BB, V));
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/// \brief Replace the incoming undef values to a phi with the values
|
|
|
|
/// from a block-to-value map.
|
|
|
|
///
|
|
|
|
/// \param PN The phi we are replacing the undefs in.
|
|
|
|
/// \param IncomingValues A map from block to value.
|
|
|
|
static void replaceUndefValuesInPhi(PHINode *PN,
|
|
|
|
const IncomingValueMap &IncomingValues) {
|
|
|
|
for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
|
|
|
|
Value *V = PN->getIncomingValue(i);
|
|
|
|
|
|
|
|
if (!isa<UndefValue>(V)) continue;
|
|
|
|
|
|
|
|
BasicBlock *BB = PN->getIncomingBlock(i);
|
|
|
|
IncomingValueMap::const_iterator It = IncomingValues.find(BB);
|
|
|
|
if (It == IncomingValues.end()) continue;
|
|
|
|
|
|
|
|
PN->setIncomingValue(i, It->second);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/// \brief Replace a value flowing from a block to a phi with
/// potentially multiple instances of that value flowing from the
/// block's predecessors to the phi.
///
/// \param BB The block with the value flowing into the phi.
/// \param BBPreds The predecessors of BB.
/// \param PN The phi that we are updating.
static void redirectValuesFromPredecessorsToPhi(BasicBlock *BB,
                                                const PredBlockVector &BBPreds,
                                                PHINode *PN) {
  // Drop BB's entry in PN; its value will be re-routed from BB's
  // predecessors below.  The 'false' keeps PN alive even if this removes
  // its last entry.
  Value *OldVal = PN->removeIncomingValue(BB, false);
  assert(OldVal && "No entry in PHI for Pred BB!");

  IncomingValueMap IncomingValues;

  // We are merging two blocks - BB, and the block containing PN - and
  // as a result we need to redirect edges from the predecessors of BB
  // to go to the block containing PN, and update PN
  // accordingly. Since we allow merging blocks in the case where the
  // predecessor and successor blocks both share some predecessors,
  // and where some of those common predecessors might have undef
  // values flowing into PN, we want to rewrite those values to be
  // consistent with the non-undef values.

  gatherIncomingValuesToPhi(PN, IncomingValues);

  // If this incoming value is one of the PHI nodes in BB, the new entries
  // in the PHI node are the entries from the old PHI.
  if (isa<PHINode>(OldVal) && cast<PHINode>(OldVal)->getParent() == BB) {
    PHINode *OldValPN = cast<PHINode>(OldVal);
    for (unsigned i = 0, e = OldValPN->getNumIncomingValues(); i != e; ++i) {
      // Note that, since we are merging phi nodes and BB and Succ might
      // have common predecessors, we could end up with a phi node with
      // identical incoming branches. This will be cleaned up later (and
      // will trigger asserts if we try to clean it up now, without also
      // simplifying the corresponding conditional branch).
      BasicBlock *PredBB = OldValPN->getIncomingBlock(i);
      Value *PredVal = OldValPN->getIncomingValue(i);
      Value *Selected = selectIncomingValueForBlock(PredVal, PredBB,
                                                    IncomingValues);

      // And add a new incoming value for this predecessor for the
      // newly retargeted branch.
      PN->addIncoming(Selected, PredBB);
    }
  } else {
    // OldVal is a plain value: every predecessor of BB contributes it
    // (or a recorded non-undef replacement for it).
    for (unsigned i = 0, e = BBPreds.size(); i != e; ++i) {
      // Update existing incoming values in PN for this
      // predecessor of BB.
      BasicBlock *PredBB = BBPreds[i];
      Value *Selected = selectIncomingValueForBlock(OldVal, PredBB,
                                                    IncomingValues);

      // And add a new incoming value for this predecessor for the
      // newly retargeted branch.
      PN->addIncoming(Selected, PredBB);
    }
  }

  // Finally, rewrite any remaining undef inputs using the values gathered
  // above so the phi is internally consistent.
  replaceUndefValuesInPhi(PN, IncomingValues);
}
|
|
|
|
|
2009-11-10 13:59:26 +08:00
|
|
|
/// TryToSimplifyUncondBranchFromEmptyBlock - BB is known to contain an
/// unconditional branch, and contains no instructions other than PHI nodes,
/// potential side-effect free intrinsics and the branch.  If possible,
/// eliminate BB by rewriting all the predecessors to branch to the successor
/// block and return true.  If we can't transform, return false.
bool llvm::TryToSimplifyUncondBranchFromEmptyBlock(BasicBlock *BB) {
  assert(BB != &BB->getParent()->getEntryBlock() &&
         "TryToSimplifyUncondBranchFromEmptyBlock called on entry block!");

  // We can't eliminate infinite loops.
  BasicBlock *Succ = cast<BranchInst>(BB->getTerminator())->getSuccessor(0);
  if (BB == Succ) return false;

  // Check to see if merging these blocks would cause conflicts for any of the
  // phi nodes in BB or Succ. If not, we can safely merge.
  if (!CanPropagatePredecessorsForPHIs(BB, Succ)) return false;

  // Check for cases where Succ has multiple predecessors and a PHI node in BB
  // has uses which will not disappear when the PHI nodes are merged.  It is
  // possible to handle such cases, but difficult: it requires checking whether
  // BB dominates Succ, which is non-trivial to calculate in the case where
  // Succ has multiple predecessors.  Also, it requires checking whether
  // constructing the necessary self-referential PHI node doesn't introduce any
  // conflicts; this isn't too difficult, but the previous code for doing this
  // was incorrect.
  //
  // Note that if this check finds a live use, BB dominates Succ, so BB is
  // something like a loop pre-header (or rarely, a part of an irreducible CFG);
  // folding the branch isn't profitable in that case anyway.
  if (!Succ->getSinglePredecessor()) {
    BasicBlock::iterator BBI = BB->begin();
    while (isa<PHINode>(*BBI)) {
      // Every use of a phi in BB must itself be a phi in Succ-style position
      // whose incoming edge is BB; anything else is a live use that blocks
      // the fold.
      for (Use &U : BBI->uses()) {
        if (PHINode* PN = dyn_cast<PHINode>(U.getUser())) {
          if (PN->getIncomingBlock(U) != BB)
            return false;
        } else {
          return false;
        }
      }
      ++BBI;
    }
  }

  DEBUG(dbgs() << "Killing Trivial BB: \n" << *BB);

  if (isa<PHINode>(Succ->begin())) {
    // If there is more than one pred of succ, and there are PHI nodes in
    // the successor, then we need to add incoming edges for the PHI nodes
    //
    const PredBlockVector BBPreds(pred_begin(BB), pred_end(BB));

    // Loop over all of the PHI nodes in the successor of BB.
    for (BasicBlock::iterator I = Succ->begin(); isa<PHINode>(I); ++I) {
      PHINode *PN = cast<PHINode>(I);

      redirectValuesFromPredecessorsToPhi(BB, BBPreds, PN);
    }
  }

  if (Succ->getSinglePredecessor()) {
    // BB is the only predecessor of Succ, so Succ will end up with exactly
    // the same predecessors BB had.

    // Copy over any phi, debug or lifetime instruction.
    BB->getTerminator()->eraseFromParent();
    Succ->getInstList().splice(Succ->getFirstNonPHI()->getIterator(),
                               BB->getInstList());
  } else {
    // The phis in BB are no longer needed: their entries were redirected
    // into Succ's phis above, and they have no other uses.
    while (PHINode *PN = dyn_cast<PHINode>(&BB->front())) {
      // We explicitly check for such uses in CanPropagatePredecessorsForPHIs.
      assert(PN->use_empty() && "There shouldn't be any uses here!");
      PN->eraseFromParent();
    }
  }

  // If the unconditional branch we replaced contains llvm.loop metadata, we
  // add the metadata to the branch instructions in the predecessors.
  // Note: in the single-predecessor path above the terminator was erased,
  // so getTerminator() may return null here; guard against that.
  unsigned LoopMDKind = BB->getContext().getMDKindID("llvm.loop");
  Instruction *TI = BB->getTerminator();
  if (TI)
    if (MDNode *LoopMD = TI->getMetadata(LoopMDKind))
      for (pred_iterator PI = pred_begin(BB), E = pred_end(BB); PI != E; ++PI) {
        BasicBlock *Pred = *PI;
        Pred->getTerminator()->setMetadata(LoopMDKind, LoopMD);
      }

  // Everything that jumped to BB now goes to Succ.
  BB->replaceAllUsesWith(Succ);
  if (!Succ->hasName()) Succ->takeName(BB);
  BB->eraseFromParent();              // Delete the old basic block.
  return true;
}
|
|
|
|
|
2009-12-03 01:06:45 +08:00
|
|
|
/// EliminateDuplicatePHINodes - Check for and eliminate duplicate PHI
/// nodes in this block. This doesn't try to be clever about PHI nodes
/// which differ only in the order of the incoming values, but instcombine
/// orders them so it usually won't matter.
///
bool llvm::EliminateDuplicatePHINodes(BasicBlock *BB) {
  // This implementation doesn't currently consider undef operands
  // specially. Theoretically, two phis which are identical except for
  // one having an undef where the other doesn't could be collapsed.

  // DenseSet traits that hash a PHI by its operands and compare PHIs by
  // isIdenticalTo, so structurally identical phis collide.
  struct PHIDenseMapInfo {
    static PHINode *getEmptyKey() {
      return DenseMapInfo<PHINode *>::getEmptyKey();
    }
    static PHINode *getTombstoneKey() {
      return DenseMapInfo<PHINode *>::getTombstoneKey();
    }
    static unsigned getHashValue(PHINode *PN) {
      // Compute a hash value on the operands. Instcombine will likely have
      // sorted them, which helps expose duplicates, but we have to check all
      // the operands to be safe in case instcombine hasn't run.
      return static_cast<unsigned>(hash_combine(
          hash_combine_range(PN->value_op_begin(), PN->value_op_end()),
          hash_combine_range(PN->block_begin(), PN->block_end())));
    }
    static bool isEqual(PHINode *LHS, PHINode *RHS) {
      // The sentinel keys must only compare equal to themselves; calling
      // isIdenticalTo on them would dereference invalid pointers.
      if (LHS == getEmptyKey() || LHS == getTombstoneKey() ||
          RHS == getEmptyKey() || RHS == getTombstoneKey())
        return LHS == RHS;
      return LHS->isIdenticalTo(RHS);
    }
  };

  // Set of unique PHINodes.
  DenseSet<PHINode *, PHIDenseMapInfo> PHISet;

  // Examine each PHI.
  bool Changed = false;
  for (auto I = BB->begin(); PHINode *PN = dyn_cast<PHINode>(I++);) {
    auto Inserted = PHISet.insert(PN);
    if (!Inserted.second) {
      // A duplicate. Replace this PHI with its duplicate.
      PN->replaceAllUsesWith(*Inserted.first);
      PN->eraseFromParent();
      Changed = true;

      // The RAUW can change PHIs that we already visited. Start over from the
      // beginning.  (Keeping the set would leave stale hash values for the
      // rewritten phis and could miss duplicates exposed by the RAUW.)
      PHISet.clear();
      I = BB->begin();
    }
  }

  return Changed;
}
|
2010-12-26 04:37:57 +08:00
|
|
|
|
|
|
|
/// enforceKnownAlignment - If the specified pointer points to an object that
|
|
|
|
/// we control, modify the object's alignment to PrefAlign. This isn't
|
|
|
|
/// often possible though. If alignment is important, a more reliable approach
|
|
|
|
/// is to simply align all global variables and allocation instructions to
|
|
|
|
/// their preferred alignment from the beginning.
|
|
|
|
///
|
2010-12-31 06:34:44 +08:00
|
|
|
static unsigned enforceKnownAlignment(Value *V, unsigned Align,
|
2015-03-10 10:37:25 +08:00
|
|
|
unsigned PrefAlign,
|
|
|
|
const DataLayout &DL) {
|
2016-01-16 00:33:06 +08:00
|
|
|
assert(PrefAlign > Align);
|
|
|
|
|
2011-06-16 05:08:25 +08:00
|
|
|
V = V->stripPointerCasts();
|
2010-12-26 04:37:57 +08:00
|
|
|
|
2011-06-16 05:08:25 +08:00
|
|
|
if (AllocaInst *AI = dyn_cast<AllocaInst>(V)) {
|
2016-01-16 00:33:06 +08:00
|
|
|
// TODO: ideally, computeKnownBits ought to have used
|
|
|
|
// AllocaInst::getAlignment() in its computation already, making
|
|
|
|
// the below max redundant. But, as it turns out,
|
|
|
|
// stripPointerCasts recurses through infinite layers of bitcasts,
|
|
|
|
// while computeKnownBits is not allowed to traverse more than 6
|
|
|
|
// levels.
|
|
|
|
Align = std::max(AI->getAlignment(), Align);
|
|
|
|
if (PrefAlign <= Align)
|
|
|
|
return Align;
|
|
|
|
|
2011-10-11 07:42:08 +08:00
|
|
|
// If the preferred alignment is greater than the natural stack alignment
|
|
|
|
// then don't round up. This avoids dynamic stack realignment.
|
2015-03-10 10:37:25 +08:00
|
|
|
if (DL.exceedsNaturalStackAlignment(PrefAlign))
|
2011-10-11 07:42:08 +08:00
|
|
|
return Align;
|
2010-12-26 04:37:57 +08:00
|
|
|
AI->setAlignment(PrefAlign);
|
|
|
|
return PrefAlign;
|
|
|
|
}
|
|
|
|
|
2014-05-14 02:45:48 +08:00
|
|
|
if (auto *GO = dyn_cast<GlobalObject>(V)) {
|
2016-01-16 00:33:06 +08:00
|
|
|
// TODO: as above, this shouldn't be necessary.
|
|
|
|
Align = std::max(GO->getAlignment(), Align);
|
|
|
|
if (PrefAlign <= Align)
|
|
|
|
return Align;
|
|
|
|
|
2010-12-26 04:37:57 +08:00
|
|
|
// If there is a large requested alignment and we can, bump up the alignment
|
2015-07-14 08:11:08 +08:00
|
|
|
// of the global. If the memory we set aside for the global may not be the
|
|
|
|
// memory used by the final program then it is impossible for us to reliably
|
|
|
|
// enforce the preferred alignment.
|
2016-01-16 00:33:06 +08:00
|
|
|
if (!GO->canIncreaseAlignment())
|
2014-05-10 00:01:06 +08:00
|
|
|
return Align;
|
2013-07-23 07:16:36 +08:00
|
|
|
|
2016-01-16 00:33:06 +08:00
|
|
|
GO->setAlignment(PrefAlign);
|
|
|
|
return PrefAlign;
|
2010-12-26 04:37:57 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
return Align;
|
|
|
|
}
|
|
|
|
|
|
|
|
unsigned llvm::getOrEnforceKnownAlignment(Value *V, unsigned PrefAlign,
|
2015-03-10 10:37:25 +08:00
|
|
|
const DataLayout &DL,
|
Make use of @llvm.assume in ValueTracking (computeKnownBits, etc.)
This change, which allows @llvm.assume to be used from within computeKnownBits
(and other associated functions in ValueTracking), adds some (optional)
parameters to computeKnownBits and friends. These functions now (optionally)
take a "context" instruction pointer, an AssumptionTracker pointer, and also a
DomTree pointer, and most of the changes are just to pass this new information
when it is easily available from InstSimplify, InstCombine, etc.
As explained below, the significant conceptual change is that known properties
of a value might depend on the control-flow location of the use (because we
care that the @llvm.assume dominates the use because assumptions have
control-flow dependencies). This means that, when we ask if bits are known in a
value, we might get different answers for different uses.
The significant changes are all in ValueTracking. Two main changes: First, as
with the rest of the code, new parameters need to be passed around. To make
this easier, I grouped them into a structure, and I made internal static
versions of the relevant functions that take this structure as a parameter. The
new code does as you might expect, it looks for @llvm.assume calls that make
use of the value we're trying to learn something about (often indirectly),
attempts to pattern match that expression, and uses the result if successful.
By making use of the AssumptionTracker, the process of finding @llvm.assume
calls is not expensive.
Part of the structure being passed around inside ValueTracking is a set of
already-considered @llvm.assume calls. This is to prevent a query using, for
example, the assume(a == b), to recurse on itself. The context and DT params
are used to find applicable assumptions. An assumption needs to dominate the
context instruction, or come after it deterministically. In this latter case we
only handle the specific case where both the assumption and the context
instruction are in the same block, and we need to exclude assumptions from
being used to simplify their own ephemeral values (those which contribute only
to the assumption) because otherwise the assumption would prove its feeding
comparison trivial and would be removed.
This commit adds the plumbing and the logic for a simple masked-bit propagation
(just enough to write a regression test). Future commits add more patterns
(and, correspondingly, more regression tests).
llvm-svn: 217342
2014-09-08 02:57:58 +08:00
|
|
|
const Instruction *CxtI,
|
2016-12-19 16:22:17 +08:00
|
|
|
AssumptionCache *AC,
|
Make use of @llvm.assume in ValueTracking (computeKnownBits, etc.)
This change, which allows @llvm.assume to be used from within computeKnownBits
(and other associated functions in ValueTracking), adds some (optional)
parameters to computeKnownBits and friends. These functions now (optionally)
take a "context" instruction pointer, an AssumptionTracker pointer, and also a
DomTree pointer, and most of the changes are just to pass this new information
when it is easily available from InstSimplify, InstCombine, etc.
As explained below, the significant conceptual change is that known properties
of a value might depend on the control-flow location of the use (because we
care that the @llvm.assume dominates the use because assumptions have
control-flow dependencies). This means that, when we ask if bits are known in a
value, we might get different answers for different uses.
The significant changes are all in ValueTracking. Two main changes: First, as
with the rest of the code, new parameters need to be passed around. To make
this easier, I grouped them into a structure, and I made internal static
versions of the relevant functions that take this structure as a parameter. The
new code does as you might expect, it looks for @llvm.assume calls that make
use of the value we're trying to learn something about (often indirectly),
attempts to pattern match that expression, and uses the result if successful.
By making use of the AssumptionTracker, the process of finding @llvm.assume
calls is not expensive.
Part of the structure being passed around inside ValueTracking is a set of
already-considered @llvm.assume calls. This is to prevent a query using, for
example, the assume(a == b), to recurse on itself. The context and DT params
are used to find applicable assumptions. An assumption needs to dominate the
context instruction, or come after it deterministically. In this latter case we
only handle the specific case where both the assumption and the context
instruction are in the same block, and we need to exclude assumptions from
being used to simplify their own ephemeral values (those which contribute only
to the assumption) because otherwise the assumption would prove its feeding
comparison trivial and would be removed.
This commit adds the plumbing and the logic for a simple masked-bit propagation
(just enough to write a regression test). Future commits add more patterns
(and, correspondingly, more regression tests).
llvm-svn: 217342
2014-09-08 02:57:58 +08:00
|
|
|
const DominatorTree *DT) {
|
2010-12-26 04:37:57 +08:00
|
|
|
assert(V->getType()->isPointerTy() &&
|
|
|
|
"getOrEnforceKnownAlignment expects a pointer!");
|
2015-03-10 10:37:25 +08:00
|
|
|
unsigned BitWidth = DL.getPointerTypeSizeInBits(V->getType());
|
2013-08-02 06:42:18 +08:00
|
|
|
|
2010-12-26 04:37:57 +08:00
|
|
|
APInt KnownZero(BitWidth, 0), KnownOne(BitWidth, 0);
|
2016-12-19 16:22:17 +08:00
|
|
|
computeKnownBits(V, KnownZero, KnownOne, DL, 0, AC, CxtI, DT);
|
2010-12-26 04:37:57 +08:00
|
|
|
unsigned TrailZ = KnownZero.countTrailingOnes();
|
2013-07-23 07:16:36 +08:00
|
|
|
|
2013-07-24 06:20:57 +08:00
|
|
|
// Avoid trouble with ridiculously large TrailZ values, such as
|
2010-12-26 04:37:57 +08:00
|
|
|
// those computed from a null pointer.
|
|
|
|
TrailZ = std::min(TrailZ, unsigned(sizeof(unsigned) * CHAR_BIT - 1));
|
2013-07-23 07:16:36 +08:00
|
|
|
|
2010-12-26 04:37:57 +08:00
|
|
|
unsigned Align = 1u << std::min(BitWidth - 1, TrailZ);
|
2013-07-23 07:16:36 +08:00
|
|
|
|
2010-12-26 04:37:57 +08:00
|
|
|
// LLVM doesn't support alignments larger than this currently.
|
|
|
|
Align = std::min(Align, +Value::MaximumAlignment);
|
2013-07-23 07:16:36 +08:00
|
|
|
|
2010-12-26 04:37:57 +08:00
|
|
|
if (PrefAlign > Align)
|
2013-08-02 06:42:18 +08:00
|
|
|
Align = enforceKnownAlignment(V, Align, PrefAlign, DL);
|
2013-07-23 07:16:36 +08:00
|
|
|
|
2010-12-26 04:37:57 +08:00
|
|
|
// We don't need to make any adjustment.
|
|
|
|
return Align;
|
|
|
|
}
|
|
|
|
|
2011-03-18 05:58:19 +08:00
|
|
|
///===---------------------------------------------------------------------===//
|
|
|
|
/// Dbg Intrinsic utilities
|
|
|
|
///
|
|
|
|
|
2013-04-27 01:48:33 +08:00
|
|
|
/// See if there is a dbg.value intrinsic for DIVar before I.
|
2016-02-18 04:02:25 +08:00
|
|
|
static bool LdStHasDebugValue(DILocalVariable *DIVar, DIExpression *DIExpr,
|
|
|
|
Instruction *I) {
|
2013-04-27 01:48:33 +08:00
|
|
|
// Since we can't guarantee that the original dbg.declare instrinsic
|
|
|
|
// is removed by LowerDbgDeclare(), we need to make sure that we are
|
|
|
|
// not inserting the same dbg.value intrinsic over and over.
|
|
|
|
llvm::BasicBlock::InstListType::iterator PrevI(I);
|
|
|
|
if (PrevI != I->getParent()->getInstList().begin()) {
|
|
|
|
--PrevI;
|
|
|
|
if (DbgValueInst *DVI = dyn_cast<DbgValueInst>(PrevI))
|
|
|
|
if (DVI->getValue() == I->getOperand(0) &&
|
|
|
|
DVI->getOffset() == 0 &&
|
2016-02-18 04:02:25 +08:00
|
|
|
DVI->getVariable() == DIVar &&
|
|
|
|
DVI->getExpression() == DIExpr)
|
2013-04-27 01:48:33 +08:00
|
|
|
return true;
|
|
|
|
}
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
2016-09-22 22:13:25 +08:00
|
|
|
/// See if there is a dbg.value intrinsic for DIVar for the PHI node.
|
|
|
|
static bool PhiHasDebugValue(DILocalVariable *DIVar,
|
|
|
|
DIExpression *DIExpr,
|
|
|
|
PHINode *APN) {
|
|
|
|
// Since we can't guarantee that the original dbg.declare instrinsic
|
|
|
|
// is removed by LowerDbgDeclare(), we need to make sure that we are
|
|
|
|
// not inserting the same dbg.value intrinsic over and over.
|
|
|
|
DbgValueList DbgValues;
|
|
|
|
FindAllocaDbgValues(DbgValues, APN);
|
|
|
|
for (auto DVI : DbgValues) {
|
|
|
|
assert (DVI->getValue() == APN);
|
|
|
|
assert (DVI->getOffset() == 0);
|
|
|
|
if ((DVI->getVariable() == DIVar) && (DVI->getExpression() == DIExpr))
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
2013-04-27 02:10:50 +08:00
|
|
|
/// Inserts a llvm.dbg.value intrinsic before a store to an alloca'd value
|
2011-03-18 05:58:19 +08:00
|
|
|
/// that has an associated llvm.dbg.decl intrinsic.
|
2016-09-22 22:13:25 +08:00
|
|
|
void llvm::ConvertDebugDeclareToDebugValue(DbgDeclareInst *DDI,
|
2011-03-18 05:58:19 +08:00
|
|
|
StoreInst *SI, DIBuilder &Builder) {
|
2015-04-22 02:44:06 +08:00
|
|
|
auto *DIVar = DDI->getVariable();
|
|
|
|
auto *DIExpr = DDI->getExpression();
|
2015-04-22 02:24:23 +08:00
|
|
|
assert(DIVar && "Missing variable");
|
2011-03-18 05:58:19 +08:00
|
|
|
|
2011-05-17 05:24:05 +08:00
|
|
|
// If an argument is zero extended then use argument directly. The ZExt
|
|
|
|
// may be zapped by an optimization pass in future.
|
2014-04-25 13:29:35 +08:00
|
|
|
Argument *ExtendedArg = nullptr;
|
2011-05-17 05:24:05 +08:00
|
|
|
if (ZExtInst *ZExt = dyn_cast<ZExtInst>(SI->getOperand(0)))
|
|
|
|
ExtendedArg = dyn_cast<Argument>(ZExt->getOperand(0));
|
|
|
|
if (SExtInst *SExt = dyn_cast<SExtInst>(SI->getOperand(0)))
|
|
|
|
ExtendedArg = dyn_cast<Argument>(SExt->getOperand(0));
|
2016-01-13 06:46:09 +08:00
|
|
|
if (ExtendedArg) {
|
2016-12-06 02:04:47 +08:00
|
|
|
// We're now only describing a subset of the variable. The fragment we're
|
2016-01-13 06:46:09 +08:00
|
|
|
// describing will always be smaller than the variable size, because
|
|
|
|
// VariableSize == Size of Alloca described by DDI. Since SI stores
|
|
|
|
// to the alloca described by DDI, if it's first operand is an extend,
|
|
|
|
// we're guaranteed that before extension, the value was narrower than
|
|
|
|
// the size of the alloca, hence the size of the described variable.
|
2016-02-18 04:02:25 +08:00
|
|
|
SmallVector<uint64_t, 3> Ops;
|
2016-12-06 02:04:47 +08:00
|
|
|
unsigned FragmentOffset = 0;
|
|
|
|
// If this already is a bit fragment, we drop the bit fragment from the
|
|
|
|
// expression and record the offset.
|
|
|
|
if (DIExpr->isFragment()) {
|
2016-02-18 04:02:25 +08:00
|
|
|
Ops.append(DIExpr->elements_begin(), DIExpr->elements_end()-3);
|
2016-12-06 02:04:47 +08:00
|
|
|
FragmentOffset = DIExpr->getFragmentOffsetInBits();
|
2016-01-13 06:46:09 +08:00
|
|
|
} else {
|
2016-02-18 04:02:25 +08:00
|
|
|
Ops.append(DIExpr->elements_begin(), DIExpr->elements_end());
|
2016-01-13 06:46:09 +08:00
|
|
|
}
|
2016-12-06 02:04:47 +08:00
|
|
|
Ops.push_back(dwarf::DW_OP_LLVM_fragment);
|
|
|
|
Ops.push_back(FragmentOffset);
|
2016-01-13 06:46:09 +08:00
|
|
|
const DataLayout &DL = DDI->getModule()->getDataLayout();
|
2016-12-06 02:04:47 +08:00
|
|
|
Ops.push_back(DL.getTypeSizeInBits(ExtendedArg->getType()));
|
2016-02-18 04:02:25 +08:00
|
|
|
auto NewDIExpr = Builder.createExpression(Ops);
|
|
|
|
if (!LdStHasDebugValue(DIVar, NewDIExpr, SI))
|
|
|
|
Builder.insertDbgValueIntrinsic(ExtendedArg, 0, DIVar, NewDIExpr,
|
|
|
|
DDI->getDebugLoc(), SI);
|
|
|
|
} else if (!LdStHasDebugValue(DIVar, DIExpr, SI))
|
2015-04-16 21:29:36 +08:00
|
|
|
Builder.insertDbgValueIntrinsic(SI->getOperand(0), 0, DIVar, DIExpr,
|
|
|
|
DDI->getDebugLoc(), SI);
|
2011-03-18 05:58:19 +08:00
|
|
|
}
|
|
|
|
|
2013-04-27 02:10:50 +08:00
|
|
|
/// Inserts a llvm.dbg.value intrinsic before a load of an alloca'd value
|
2011-03-19 07:45:43 +08:00
|
|
|
/// that has an associated llvm.dbg.decl intrinsic.
|
2016-09-22 22:13:25 +08:00
|
|
|
void llvm::ConvertDebugDeclareToDebugValue(DbgDeclareInst *DDI,
|
2011-03-19 07:45:43 +08:00
|
|
|
LoadInst *LI, DIBuilder &Builder) {
|
2015-04-22 02:44:06 +08:00
|
|
|
auto *DIVar = DDI->getVariable();
|
|
|
|
auto *DIExpr = DDI->getExpression();
|
2015-04-22 02:24:23 +08:00
|
|
|
assert(DIVar && "Missing variable");
|
2011-03-19 07:45:43 +08:00
|
|
|
|
2016-02-18 04:02:25 +08:00
|
|
|
if (LdStHasDebugValue(DIVar, DIExpr, LI))
|
2016-09-22 22:13:25 +08:00
|
|
|
return;
|
2013-04-27 01:48:33 +08:00
|
|
|
|
Clean up the processing of dbg.value in various places
Summary:
First up is instcombine, where in the dbg.declare -> dbg.value conversion,
the llvm.dbg.value needs to be called on the actual loaded value, rather
than the address (since the whole point of this transformation is to be
able to get rid of the alloca). Further, now that that's cleaned up, we
can remove a hack in the backend, that would add an implicit OP_deref if
the argument to dbg.value was an alloca. This stems from before the
existence of DIExpression and is no longer necessary since the deref can
be expressed explicitly.
Now, in order to make sure that the tests pass with this change, we need to
correct the printing of DEBUG_VALUE comments to take into account the
expression, which wasn't taken into account before.
Unfortunately, for both these changes, there were a number of incorrect
test cases (mostly the wrong number of DW_OP_derefs, but also a couple
where the test itself was broken more badly). aprantl and I have gone
through and adjusted these test case in order to make them pass with
these fixes and in some cases to make sure they're actually testing
what they are meant to test.
Reviewers: aprantl
Subscribers: dsanders
Differential Revision: http://reviews.llvm.org/D14186
llvm-svn: 256077
2015-12-19 10:02:44 +08:00
|
|
|
// We are now tracking the loaded value instead of the address. In the
|
|
|
|
// future if multi-location support is added to the IR, it might be
|
|
|
|
// preferable to keep tracking both the loaded value and the original
|
|
|
|
// address in case the alloca can not be elided.
|
|
|
|
Instruction *DbgValue = Builder.insertDbgValueIntrinsic(
|
|
|
|
LI, 0, DIVar, DIExpr, DDI->getDebugLoc(), (Instruction *)nullptr);
|
|
|
|
DbgValue->insertAfter(LI);
|
2016-09-22 22:13:25 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
/// Inserts a llvm.dbg.value intrinsic after a phi
|
|
|
|
/// that has an associated llvm.dbg.decl intrinsic.
|
|
|
|
void llvm::ConvertDebugDeclareToDebugValue(DbgDeclareInst *DDI,
|
|
|
|
PHINode *APN, DIBuilder &Builder) {
|
|
|
|
auto *DIVar = DDI->getVariable();
|
|
|
|
auto *DIExpr = DDI->getExpression();
|
|
|
|
assert(DIVar && "Missing variable");
|
|
|
|
|
|
|
|
if (PhiHasDebugValue(DIVar, DIExpr, APN))
|
|
|
|
return;
|
|
|
|
|
2016-09-28 02:45:31 +08:00
|
|
|
BasicBlock *BB = APN->getParent();
|
2016-09-22 22:13:25 +08:00
|
|
|
auto InsertionPt = BB->getFirstInsertionPt();
|
2016-09-28 02:45:31 +08:00
|
|
|
|
|
|
|
// The block may be a catchswitch block, which does not have a valid
|
|
|
|
// insertion point.
|
|
|
|
// FIXME: Insert dbg.value markers in the successors when appropriate.
|
|
|
|
if (InsertionPt != BB->end())
|
|
|
|
Builder.insertDbgValueIntrinsic(APN, 0, DIVar, DIExpr, DDI->getDebugLoc(),
|
|
|
|
&*InsertionPt);
|
2016-09-19 17:49:30 +08:00
|
|
|
}
|
|
|
|
|
2014-04-26 07:00:25 +08:00
|
|
|
/// Determine whether this alloca is either a VLA or an array.
|
|
|
|
static bool isArray(AllocaInst *AI) {
|
|
|
|
return AI->isArrayAllocation() ||
|
|
|
|
AI->getType()->getElementType()->isArrayTy();
|
|
|
|
}
|
|
|
|
|
2011-03-18 06:18:16 +08:00
|
|
|
/// LowerDbgDeclare - Lowers llvm.dbg.declare intrinsics into appropriate set
|
|
|
|
/// of llvm.dbg.value intrinsics.
|
|
|
|
bool llvm::LowerDbgDeclare(Function &F) {
|
IR: Split Metadata from Value
Split `Metadata` away from the `Value` class hierarchy, as part of
PR21532. Assembly and bitcode changes are in the wings, but this is the
bulk of the change for the IR C++ API.
I have a follow-up patch prepared for `clang`. If this breaks other
sub-projects, I apologize in advance :(. Help me compile it on Darwin
I'll try to fix it. FWIW, the errors should be easy to fix, so it may
be simpler to just fix it yourself.
This breaks the build for all metadata-related code that's out-of-tree.
Rest assured the transition is mechanical and the compiler should catch
almost all of the problems.
Here's a quick guide for updating your code:
- `Metadata` is the root of a class hierarchy with three main classes:
`MDNode`, `MDString`, and `ValueAsMetadata`. It is distinct from
the `Value` class hierarchy. It is typeless -- i.e., instances do
*not* have a `Type`.
- `MDNode`'s operands are all `Metadata *` (instead of `Value *`).
- `TrackingVH<MDNode>` and `WeakVH` referring to metadata can be
replaced with `TrackingMDNodeRef` and `TrackingMDRef`, respectively.
If you're referring solely to resolved `MDNode`s -- post graph
construction -- just use `MDNode*`.
- `MDNode` (and the rest of `Metadata`) have only limited support for
`replaceAllUsesWith()`.
As long as an `MDNode` is pointing at a forward declaration -- the
result of `MDNode::getTemporary()` -- it maintains a side map of its
uses and can RAUW itself. Once the forward declarations are fully
resolved RAUW support is dropped on the ground. This means that
uniquing collisions on changing operands cause nodes to become
"distinct". (This already happened fairly commonly, whenever an
operand went to null.)
If you're constructing complex (non self-reference) `MDNode` cycles,
you need to call `MDNode::resolveCycles()` on each node (or on a
top-level node that somehow references all of the nodes). Also,
don't do that. Metadata cycles (and the RAUW machinery needed to
construct them) are expensive.
- An `MDNode` can only refer to a `Constant` through a bridge called
`ConstantAsMetadata` (one of the subclasses of `ValueAsMetadata`).
As a side effect, accessing an operand of an `MDNode` that is known
to be, e.g., `ConstantInt`, takes three steps: first, cast from
`Metadata` to `ConstantAsMetadata`; second, extract the `Constant`;
third, cast down to `ConstantInt`.
The eventual goal is to introduce `MDInt`/`MDFloat`/etc. and have
metadata schema owners transition away from using `Constant`s when
the type isn't important (and they don't care about referring to
`GlobalValue`s).
In the meantime, I've added transitional API to the `mdconst`
namespace that matches semantics with the old code, in order to
avoid adding the error-prone three-step equivalent to every call
site. If your old code was:
MDNode *N = foo();
bar(isa <ConstantInt>(N->getOperand(0)));
baz(cast <ConstantInt>(N->getOperand(1)));
bak(cast_or_null <ConstantInt>(N->getOperand(2)));
bat(dyn_cast <ConstantInt>(N->getOperand(3)));
bay(dyn_cast_or_null<ConstantInt>(N->getOperand(4)));
you can trivially match its semantics with:
MDNode *N = foo();
bar(mdconst::hasa <ConstantInt>(N->getOperand(0)));
baz(mdconst::extract <ConstantInt>(N->getOperand(1)));
bak(mdconst::extract_or_null <ConstantInt>(N->getOperand(2)));
bat(mdconst::dyn_extract <ConstantInt>(N->getOperand(3)));
bay(mdconst::dyn_extract_or_null<ConstantInt>(N->getOperand(4)));
and when you transition your metadata schema to `MDInt`:
MDNode *N = foo();
bar(isa <MDInt>(N->getOperand(0)));
baz(cast <MDInt>(N->getOperand(1)));
bak(cast_or_null <MDInt>(N->getOperand(2)));
bat(dyn_cast <MDInt>(N->getOperand(3)));
bay(dyn_cast_or_null<MDInt>(N->getOperand(4)));
- A `CallInst` -- specifically, intrinsic instructions -- can refer to
metadata through a bridge called `MetadataAsValue`. This is a
subclass of `Value` where `getType()->isMetadataTy()`.
`MetadataAsValue` is the *only* class that can legally refer to a
`LocalAsMetadata`, which is a bridged form of non-`Constant` values
like `Argument` and `Instruction`. It can also refer to any other
`Metadata` subclass.
(I'll break all your testcases in a follow-up commit, when I propagate
this change to assembly.)
llvm-svn: 223802
2014-12-10 02:38:53 +08:00
|
|
|
DIBuilder DIB(*F.getParent(), /*AllowUnresolved*/ false);
|
2011-03-18 06:18:16 +08:00
|
|
|
SmallVector<DbgDeclareInst *, 4> Dbgs;
|
2014-03-28 07:30:04 +08:00
|
|
|
for (auto &FI : F)
|
2015-10-13 10:39:05 +08:00
|
|
|
for (Instruction &BI : FI)
|
|
|
|
if (auto DDI = dyn_cast<DbgDeclareInst>(&BI))
|
2011-03-18 06:18:16 +08:00
|
|
|
Dbgs.push_back(DDI);
|
2014-03-28 07:30:04 +08:00
|
|
|
|
2011-03-18 06:18:16 +08:00
|
|
|
if (Dbgs.empty())
|
|
|
|
return false;
|
|
|
|
|
2014-03-28 07:30:04 +08:00
|
|
|
for (auto &I : Dbgs) {
|
|
|
|
DbgDeclareInst *DDI = I;
|
2013-11-19 07:04:38 +08:00
|
|
|
AllocaInst *AI = dyn_cast_or_null<AllocaInst>(DDI->getAddress());
|
|
|
|
// If this is an alloca for a scalar variable, insert a dbg.value
|
|
|
|
// at each load and store to the alloca and erase the dbg.declare.
|
2014-04-26 04:49:25 +08:00
|
|
|
// The dbg.values allow tracking a variable even if it is not
|
|
|
|
// stored on the stack, while the dbg.declare can only describe
|
|
|
|
// the stack slot (and at a lexical-scope granularity). Later
|
|
|
|
// passes will attempt to elide the stack slot.
|
2014-04-26 07:00:25 +08:00
|
|
|
if (AI && !isArray(AI)) {
|
2016-01-15 03:12:27 +08:00
|
|
|
for (auto &AIUse : AI->uses()) {
|
|
|
|
User *U = AIUse.getUser();
|
|
|
|
if (StoreInst *SI = dyn_cast<StoreInst>(U)) {
|
|
|
|
if (AIUse.getOperandNo() == 1)
|
|
|
|
ConvertDebugDeclareToDebugValue(DDI, SI, DIB);
|
|
|
|
} else if (LoadInst *LI = dyn_cast<LoadInst>(U)) {
|
2011-03-19 07:45:43 +08:00
|
|
|
ConvertDebugDeclareToDebugValue(DDI, LI, DIB);
|
2016-01-15 03:12:27 +08:00
|
|
|
} else if (CallInst *CI = dyn_cast<CallInst>(U)) {
|
2014-10-28 19:53:30 +08:00
|
|
|
// This is a call by-value or some other instruction that
|
|
|
|
// takes a pointer to the variable. Insert a *value*
|
|
|
|
// intrinsic that describes the alloca.
|
Clean up the processing of dbg.value in various places
Summary:
First up is instcombine, where in the dbg.declare -> dbg.value conversion,
the llvm.dbg.value needs to be called on the actual loaded value, rather
than the address (since the whole point of this transformation is to be
able to get rid of the alloca). Further, now that that's cleaned up, we
can remove a hack in the backend, that would add an implicit OP_deref if
the argument to dbg.value was an alloca. This stems from before the
existence of DIExpression and is no longer necessary since the deref can
be expressed explicitly.
Now, in order to make sure that the tests pass with this change, we need to
correct the printing of DEBUG_VALUE comments to take into account the
expression, which wasn't taken into account before.
Unfortunately, for both these changes, there were a number of incorrect
test cases (mostly the wrong number of DW_OP_derefs, but also a couple
where the test itself was broken more badly). aprantl and I have gone
through and adjusted these test case in order to make them pass with
these fixes and in some cases to make sure they're actually testing
what they are meant to test.
Reviewers: aprantl
Subscribers: dsanders
Differential Revision: http://reviews.llvm.org/D14186
llvm-svn: 256077
2015-12-19 10:02:44 +08:00
|
|
|
SmallVector<uint64_t, 1> NewDIExpr;
|
|
|
|
auto *DIExpr = DDI->getExpression();
|
|
|
|
NewDIExpr.push_back(dwarf::DW_OP_deref);
|
|
|
|
NewDIExpr.append(DIExpr->elements_begin(), DIExpr->elements_end());
|
2015-04-22 02:44:06 +08:00
|
|
|
DIB.insertDbgValueIntrinsic(AI, 0, DDI->getVariable(),
|
Clean up the processing of dbg.value in various places
Summary:
First up is instcombine, where in the dbg.declare -> dbg.value conversion,
the llvm.dbg.value needs to be called on the actual loaded value, rather
than the address (since the whole point of this transformation is to be
able to get rid of the alloca). Further, now that that's cleaned up, we
can remove a hack in the backend, that would add an implicit OP_deref if
the argument to dbg.value was an alloca. This stems from before the
existence of DIExpression and is no longer necessary since the deref can
be expressed explicitly.
Now, in order to make sure that the tests pass with this change, we need to
correct the printing of DEBUG_VALUE comments to take into account the
expression, which wasn't taken into account before.
Unfortunately, for both these changes, there were a number of incorrect
test cases (mostly the wrong number of DW_OP_derefs, but also a couple
where the test itself was broken more badly). aprantl and I have gone
through and adjusted these test case in order to make them pass with
these fixes and in some cases to make sure they're actually testing
what they are meant to test.
Reviewers: aprantl
Subscribers: dsanders
Differential Revision: http://reviews.llvm.org/D14186
llvm-svn: 256077
2015-12-19 10:02:44 +08:00
|
|
|
DIB.createExpression(NewDIExpr),
|
|
|
|
DDI->getDebugLoc(), CI);
|
Move the complex address expression out of DIVariable and into an extra
argument of the llvm.dbg.declare/llvm.dbg.value intrinsics.
Previously, DIVariable was a variable-length field that has an optional
reference to a Metadata array consisting of a variable number of
complex address expressions. In the case of OpPiece expressions this is
wasting a lot of storage in IR, because when an aggregate type is, e.g.,
SROA'd into all of its n individual members, the IR will contain n copies
of the DIVariable, all alike, only differing in the complex address
reference at the end.
By making the complex address into an extra argument of the
dbg.value/dbg.declare intrinsics, all of the pieces can reference the
same variable and the complex address expressions can be uniqued across
the CU, too.
Down the road, this will allow us to move other flags, such as
"indirection" out of the DIVariable, too.
The new intrinsics look like this:
declare void @llvm.dbg.declare(metadata %storage, metadata %var, metadata %expr)
declare void @llvm.dbg.value(metadata %storage, i64 %offset, metadata %var, metadata %expr)
This patch adds a new LLVM-local tag to DIExpressions, so we can detect
and pretty-print DIExpression metadata nodes.
What this patch doesn't do:
This patch does not touch the "Indirect" field in DIVariable; but moving
that into the expression would be a natural next step.
http://reviews.llvm.org/D4919
rdar://problem/17994491
Thanks to dblaikie and dexonsmith for reviewing this patch!
Note: I accidentally committed a bogus older version of this patch previously.
llvm-svn: 218787
2014-10-02 02:55:02 +08:00
|
|
|
}
|
2016-01-15 03:12:27 +08:00
|
|
|
}
|
2014-04-26 04:49:25 +08:00
|
|
|
DDI->eraseFromParent();
|
2011-03-18 06:18:16 +08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
return true;
|
|
|
|
}
|
2011-05-24 11:10:43 +08:00
|
|
|
|
|
|
|
/// FindAllocaDbgDeclare - Finds the llvm.dbg.declare intrinsic describing the
|
|
|
|
/// alloca 'V', if any.
|
|
|
|
DbgDeclareInst *llvm::FindAllocaDbgDeclare(Value *V) {
|
IR: Split Metadata from Value
Split `Metadata` away from the `Value` class hierarchy, as part of
PR21532. Assembly and bitcode changes are in the wings, but this is the
bulk of the change for the IR C++ API.
I have a follow-up patch prepared for `clang`. If this breaks other
sub-projects, I apologize in advance :(. Help me compile it on Darwin
I'll try to fix it. FWIW, the errors should be easy to fix, so it may
be simpler to just fix it yourself.
This breaks the build for all metadata-related code that's out-of-tree.
Rest assured the transition is mechanical and the compiler should catch
almost all of the problems.
Here's a quick guide for updating your code:
- `Metadata` is the root of a class hierarchy with three main classes:
`MDNode`, `MDString`, and `ValueAsMetadata`. It is distinct from
the `Value` class hierarchy. It is typeless -- i.e., instances do
*not* have a `Type`.
- `MDNode`'s operands are all `Metadata *` (instead of `Value *`).
- `TrackingVH<MDNode>` and `WeakVH` referring to metadata can be
replaced with `TrackingMDNodeRef` and `TrackingMDRef`, respectively.
If you're referring solely to resolved `MDNode`s -- post graph
construction -- just use `MDNode*`.
- `MDNode` (and the rest of `Metadata`) have only limited support for
`replaceAllUsesWith()`.
As long as an `MDNode` is pointing at a forward declaration -- the
result of `MDNode::getTemporary()` -- it maintains a side map of its
uses and can RAUW itself. Once the forward declarations are fully
resolved RAUW support is dropped on the ground. This means that
uniquing collisions on changing operands cause nodes to become
"distinct". (This already happened fairly commonly, whenever an
operand went to null.)
If you're constructing complex (non self-reference) `MDNode` cycles,
you need to call `MDNode::resolveCycles()` on each node (or on a
top-level node that somehow references all of the nodes). Also,
don't do that. Metadata cycles (and the RAUW machinery needed to
construct them) are expensive.
- An `MDNode` can only refer to a `Constant` through a bridge called
`ConstantAsMetadata` (one of the subclasses of `ValueAsMetadata`).
As a side effect, accessing an operand of an `MDNode` that is known
to be, e.g., `ConstantInt`, takes three steps: first, cast from
`Metadata` to `ConstantAsMetadata`; second, extract the `Constant`;
third, cast down to `ConstantInt`.
The eventual goal is to introduce `MDInt`/`MDFloat`/etc. and have
metadata schema owners transition away from using `Constant`s when
the type isn't important (and they don't care about referring to
`GlobalValue`s).
In the meantime, I've added transitional API to the `mdconst`
namespace that matches semantics with the old code, in order to
avoid adding the error-prone three-step equivalent to every call
site. If your old code was:
MDNode *N = foo();
bar(isa <ConstantInt>(N->getOperand(0)));
baz(cast <ConstantInt>(N->getOperand(1)));
bak(cast_or_null <ConstantInt>(N->getOperand(2)));
bat(dyn_cast <ConstantInt>(N->getOperand(3)));
bay(dyn_cast_or_null<ConstantInt>(N->getOperand(4)));
you can trivially match its semantics with:
MDNode *N = foo();
bar(mdconst::hasa <ConstantInt>(N->getOperand(0)));
baz(mdconst::extract <ConstantInt>(N->getOperand(1)));
bak(mdconst::extract_or_null <ConstantInt>(N->getOperand(2)));
bat(mdconst::dyn_extract <ConstantInt>(N->getOperand(3)));
bay(mdconst::dyn_extract_or_null<ConstantInt>(N->getOperand(4)));
and when you transition your metadata schema to `MDInt`:
MDNode *N = foo();
bar(isa <MDInt>(N->getOperand(0)));
baz(cast <MDInt>(N->getOperand(1)));
bak(cast_or_null <MDInt>(N->getOperand(2)));
bat(dyn_cast <MDInt>(N->getOperand(3)));
bay(dyn_cast_or_null<MDInt>(N->getOperand(4)));
- A `CallInst` -- specifically, intrinsic instructions -- can refer to
metadata through a bridge called `MetadataAsValue`. This is a
subclass of `Value` where `getType()->isMetadataTy()`.
`MetadataAsValue` is the *only* class that can legally refer to a
`LocalAsMetadata`, which is a bridged form of non-`Constant` values
like `Argument` and `Instruction`. It can also refer to any other
`Metadata` subclass.
(I'll break all your testcases in a follow-up commit, when I propagate
this change to assembly.)
llvm-svn: 223802
2014-12-10 02:38:53 +08:00
|
|
|
if (auto *L = LocalAsMetadata::getIfExists(V))
|
|
|
|
if (auto *MDV = MetadataAsValue::getIfExists(V->getContext(), L))
|
|
|
|
for (User *U : MDV->users())
|
|
|
|
if (DbgDeclareInst *DDI = dyn_cast<DbgDeclareInst>(U))
|
|
|
|
return DDI;
|
2011-05-24 11:10:43 +08:00
|
|
|
|
2014-04-25 13:29:35 +08:00
|
|
|
return nullptr;
|
2011-05-24 11:10:43 +08:00
|
|
|
}
|
2012-12-12 22:31:53 +08:00
|
|
|
|
2016-09-22 22:13:25 +08:00
|
|
|
/// FindAllocaDbgValues - Finds the llvm.dbg.value intrinsics describing the
|
|
|
|
/// alloca 'V', if any.
|
|
|
|
void llvm::FindAllocaDbgValues(DbgValueList &DbgValues, Value *V) {
|
|
|
|
if (auto *L = LocalAsMetadata::getIfExists(V))
|
|
|
|
if (auto *MDV = MetadataAsValue::getIfExists(V->getContext(), L))
|
|
|
|
for (User *U : MDV->users())
|
|
|
|
if (DbgValueInst *DVI = dyn_cast<DbgValueInst>(U))
|
|
|
|
DbgValues.push_back(DVI);
|
|
|
|
}
|
|
|
|
|
2016-06-17 06:34:00 +08:00
|
|
|
static void DIExprAddDeref(SmallVectorImpl<uint64_t> &Expr) {
|
|
|
|
Expr.push_back(dwarf::DW_OP_deref);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void DIExprAddOffset(SmallVectorImpl<uint64_t> &Expr, int Offset) {
|
|
|
|
if (Offset > 0) {
|
|
|
|
Expr.push_back(dwarf::DW_OP_plus);
|
|
|
|
Expr.push_back(Offset);
|
|
|
|
} else if (Offset < 0) {
|
|
|
|
Expr.push_back(dwarf::DW_OP_minus);
|
|
|
|
Expr.push_back(-Offset);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
static DIExpression *BuildReplacementDIExpr(DIBuilder &Builder,
|
|
|
|
DIExpression *DIExpr, bool Deref,
|
|
|
|
int Offset) {
|
|
|
|
if (!Deref && !Offset)
|
|
|
|
return DIExpr;
|
|
|
|
// Create a copy of the original DIDescriptor for user variable, prepending
|
|
|
|
// "deref" operation to a list of address elements, as new llvm.dbg.declare
|
|
|
|
// will take a value storing address of the memory for variable, not
|
|
|
|
// alloca itself.
|
|
|
|
SmallVector<uint64_t, 4> NewDIExpr;
|
|
|
|
if (Deref)
|
|
|
|
DIExprAddDeref(NewDIExpr);
|
|
|
|
DIExprAddOffset(NewDIExpr, Offset);
|
|
|
|
if (DIExpr)
|
|
|
|
NewDIExpr.append(DIExpr->elements_begin(), DIExpr->elements_end());
|
|
|
|
return Builder.createExpression(NewDIExpr);
|
|
|
|
}
|
|
|
|
|
2015-12-01 08:40:05 +08:00
|
|
|
bool llvm::replaceDbgDeclare(Value *Address, Value *NewAddress,
|
|
|
|
Instruction *InsertBefore, DIBuilder &Builder,
|
|
|
|
bool Deref, int Offset) {
|
|
|
|
DbgDeclareInst *DDI = FindAllocaDbgDeclare(Address);
|
2012-12-12 22:31:53 +08:00
|
|
|
if (!DDI)
|
|
|
|
return false;
|
2015-01-31 03:37:48 +08:00
|
|
|
DebugLoc Loc = DDI->getDebugLoc();
|
2015-04-22 02:44:06 +08:00
|
|
|
auto *DIVar = DDI->getVariable();
|
|
|
|
auto *DIExpr = DDI->getExpression();
|
2015-04-22 02:24:23 +08:00
|
|
|
assert(DIVar && "Missing variable");
|
2012-12-12 22:31:53 +08:00
|
|
|
|
2016-06-17 06:34:00 +08:00
|
|
|
DIExpr = BuildReplacementDIExpr(Builder, DIExpr, Deref, Offset);
|
2012-12-12 22:31:53 +08:00
|
|
|
|
2015-09-29 08:30:19 +08:00
|
|
|
// Insert llvm.dbg.declare immediately after the original alloca, and remove
|
|
|
|
// old llvm.dbg.declare.
|
2015-12-01 08:40:05 +08:00
|
|
|
Builder.insertDeclare(NewAddress, DIVar, DIExpr, Loc, InsertBefore);
|
2012-12-12 22:31:53 +08:00
|
|
|
DDI->eraseFromParent();
|
|
|
|
return true;
|
|
|
|
}
|
2012-12-21 19:18:49 +08:00
|
|
|
|
2015-12-01 08:40:05 +08:00
|
|
|
bool llvm::replaceDbgDeclareForAlloca(AllocaInst *AI, Value *NewAllocaAddress,
|
|
|
|
DIBuilder &Builder, bool Deref, int Offset) {
|
|
|
|
return replaceDbgDeclare(AI, NewAllocaAddress, AI->getNextNode(), Builder,
|
|
|
|
Deref, Offset);
|
|
|
|
}
|
|
|
|
|
2016-06-17 06:34:00 +08:00
|
|
|
static void replaceOneDbgValueForAlloca(DbgValueInst *DVI, Value *NewAddress,
|
|
|
|
DIBuilder &Builder, int Offset) {
|
|
|
|
DebugLoc Loc = DVI->getDebugLoc();
|
|
|
|
auto *DIVar = DVI->getVariable();
|
|
|
|
auto *DIExpr = DVI->getExpression();
|
|
|
|
assert(DIVar && "Missing variable");
|
|
|
|
|
|
|
|
// This is an alloca-based llvm.dbg.value. The first thing it should do with
|
|
|
|
// the alloca pointer is dereference it. Otherwise we don't know how to handle
|
|
|
|
// it and give up.
|
|
|
|
if (!DIExpr || DIExpr->getNumElements() < 1 ||
|
|
|
|
DIExpr->getElement(0) != dwarf::DW_OP_deref)
|
|
|
|
return;
|
|
|
|
|
|
|
|
// Insert the offset immediately after the first deref.
|
|
|
|
// We could just change the offset argument of dbg.value, but it's unsigned...
|
|
|
|
if (Offset) {
|
|
|
|
SmallVector<uint64_t, 4> NewDIExpr;
|
|
|
|
DIExprAddDeref(NewDIExpr);
|
|
|
|
DIExprAddOffset(NewDIExpr, Offset);
|
|
|
|
NewDIExpr.append(DIExpr->elements_begin() + 1, DIExpr->elements_end());
|
|
|
|
DIExpr = Builder.createExpression(NewDIExpr);
|
|
|
|
}
|
|
|
|
|
|
|
|
Builder.insertDbgValueIntrinsic(NewAddress, DVI->getOffset(), DIVar, DIExpr,
|
|
|
|
Loc, DVI);
|
|
|
|
DVI->eraseFromParent();
|
|
|
|
}
|
|
|
|
|
|
|
|
void llvm::replaceDbgValueForAlloca(AllocaInst *AI, Value *NewAllocaAddress,
|
|
|
|
DIBuilder &Builder, int Offset) {
|
|
|
|
if (auto *L = LocalAsMetadata::getIfExists(AI))
|
|
|
|
if (auto *MDV = MetadataAsValue::getIfExists(AI->getContext(), L))
|
|
|
|
for (auto UI = MDV->use_begin(), UE = MDV->use_end(); UI != UE;) {
|
|
|
|
Use &U = *UI++;
|
|
|
|
if (auto *DVI = dyn_cast<DbgValueInst>(U.getUser()))
|
|
|
|
replaceOneDbgValueForAlloca(DVI, NewAllocaAddress, Builder, Offset);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2016-01-24 13:26:18 +08:00
|
|
|
unsigned llvm::removeAllNonTerminatorAndEHPadInstructions(BasicBlock *BB) {
|
|
|
|
unsigned NumDeadInst = 0;
|
|
|
|
// Delete the instructions backwards, as it has a reduced likelihood of
|
|
|
|
// having to update as many def-use and use-def chains.
|
|
|
|
Instruction *EndInst = BB->getTerminator(); // Last not to be deleted.
|
2016-02-22 04:39:50 +08:00
|
|
|
while (EndInst != &BB->front()) {
|
2016-01-24 13:26:18 +08:00
|
|
|
// Delete the next to last instruction.
|
|
|
|
Instruction *Inst = &*--EndInst->getIterator();
|
|
|
|
if (!Inst->use_empty() && !Inst->getType()->isTokenTy())
|
|
|
|
Inst->replaceAllUsesWith(UndefValue::get(Inst->getType()));
|
|
|
|
if (Inst->isEHPad() || Inst->getType()->isTokenTy()) {
|
|
|
|
EndInst = Inst;
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
if (!isa<DbgInfoIntrinsic>(Inst))
|
|
|
|
++NumDeadInst;
|
|
|
|
Inst->eraseFromParent();
|
|
|
|
}
|
|
|
|
return NumDeadInst;
|
|
|
|
}
|
|
|
|
|
2016-11-19 05:01:12 +08:00
|
|
|
unsigned llvm::changeToUnreachable(Instruction *I, bool UseLLVMTrap,
|
|
|
|
bool PreserveLCSSA) {
|
2013-08-13 06:38:43 +08:00
|
|
|
BasicBlock *BB = I->getParent();
|
|
|
|
// Loop over all of the successors, removing BB's entry from any PHI
|
|
|
|
// nodes.
|
2016-06-25 16:34:38 +08:00
|
|
|
for (BasicBlock *Successor : successors(BB))
|
2016-11-19 05:01:12 +08:00
|
|
|
Successor->removePredecessor(BB, PreserveLCSSA);
|
2016-06-25 16:19:55 +08:00
|
|
|
|
|
|
|
// Insert a call to llvm.trap right before this. This turns the undefined
|
|
|
|
// behavior into a hard fail instead of falling through into random code.
|
|
|
|
if (UseLLVMTrap) {
|
|
|
|
Function *TrapFn =
|
|
|
|
Intrinsic::getDeclaration(BB->getParent()->getParent(), Intrinsic::trap);
|
|
|
|
CallInst *CallTrap = CallInst::Create(TrapFn, "", I);
|
|
|
|
CallTrap->setDebugLoc(I->getDebugLoc());
|
|
|
|
}
|
2013-08-13 06:38:43 +08:00
|
|
|
new UnreachableInst(I->getContext(), I);
|
|
|
|
|
|
|
|
// All instructions after this are dead.
|
2016-01-24 14:26:47 +08:00
|
|
|
unsigned NumInstrsRemoved = 0;
|
2015-10-13 10:39:05 +08:00
|
|
|
BasicBlock::iterator BBI = I->getIterator(), BBE = BB->end();
|
2013-08-13 06:38:43 +08:00
|
|
|
while (BBI != BBE) {
|
|
|
|
if (!BBI->use_empty())
|
|
|
|
BBI->replaceAllUsesWith(UndefValue::get(BBI->getType()));
|
|
|
|
BB->getInstList().erase(BBI++);
|
2016-01-24 14:26:47 +08:00
|
|
|
++NumInstrsRemoved;
|
2013-08-13 06:38:43 +08:00
|
|
|
}
|
2016-01-24 14:26:47 +08:00
|
|
|
return NumInstrsRemoved;
|
2013-08-13 06:38:43 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
/// changeToCall - Convert the specified invoke into a normal call.
|
|
|
|
static void changeToCall(InvokeInst *II) {
|
2015-12-10 14:39:02 +08:00
|
|
|
SmallVector<Value*, 8> Args(II->arg_begin(), II->arg_end());
|
2015-12-09 06:26:08 +08:00
|
|
|
SmallVector<OperandBundleDef, 1> OpBundles;
|
|
|
|
II->getOperandBundlesAsDefs(OpBundles);
|
|
|
|
CallInst *NewCall = CallInst::Create(II->getCalledValue(), Args, OpBundles,
|
|
|
|
"", II);
|
2013-08-13 06:38:43 +08:00
|
|
|
NewCall->takeName(II);
|
|
|
|
NewCall->setCallingConv(II->getCallingConv());
|
|
|
|
NewCall->setAttributes(II->getAttributes());
|
|
|
|
NewCall->setDebugLoc(II->getDebugLoc());
|
|
|
|
II->replaceAllUsesWith(NewCall);
|
|
|
|
|
|
|
|
// Follow the call by a branch to the normal destination.
|
|
|
|
BranchInst::Create(II->getNormalDest(), II);
|
|
|
|
|
|
|
|
// Update PHI nodes in the unwind destination
|
|
|
|
II->getUnwindDest()->removePredecessor(II->getParent());
|
|
|
|
II->eraseFromParent();
|
|
|
|
}
|
|
|
|
|
2016-11-15 05:41:13 +08:00
|
|
|
BasicBlock *llvm::changeToInvokeAndSplitBasicBlock(CallInst *CI,
|
|
|
|
BasicBlock *UnwindEdge) {
|
|
|
|
BasicBlock *BB = CI->getParent();
|
|
|
|
|
|
|
|
// Convert this function call into an invoke instruction. First, split the
|
|
|
|
// basic block.
|
|
|
|
BasicBlock *Split =
|
|
|
|
BB->splitBasicBlock(CI->getIterator(), CI->getName() + ".noexc");
|
|
|
|
|
|
|
|
// Delete the unconditional branch inserted by splitBasicBlock
|
|
|
|
BB->getInstList().pop_back();
|
|
|
|
|
|
|
|
// Create the new invoke instruction.
|
|
|
|
SmallVector<Value *, 8> InvokeArgs(CI->arg_begin(), CI->arg_end());
|
|
|
|
SmallVector<OperandBundleDef, 1> OpBundles;
|
|
|
|
|
|
|
|
CI->getOperandBundlesAsDefs(OpBundles);
|
|
|
|
|
|
|
|
// Note: we're round tripping operand bundles through memory here, and that
|
|
|
|
// can potentially be avoided with a cleverer API design that we do not have
|
|
|
|
// as of this time.
|
|
|
|
|
|
|
|
InvokeInst *II = InvokeInst::Create(CI->getCalledValue(), Split, UnwindEdge,
|
|
|
|
InvokeArgs, OpBundles, CI->getName(), BB);
|
|
|
|
II->setDebugLoc(CI->getDebugLoc());
|
|
|
|
II->setCallingConv(CI->getCallingConv());
|
|
|
|
II->setAttributes(CI->getAttributes());
|
|
|
|
|
|
|
|
// Make sure that anything using the call now uses the invoke! This also
|
|
|
|
// updates the CallGraph if present, because it uses a WeakVH.
|
|
|
|
CI->replaceAllUsesWith(II);
|
|
|
|
|
|
|
|
// Delete the original call
|
|
|
|
Split->getInstList().pop_front();
|
|
|
|
return Split;
|
|
|
|
}
|
|
|
|
|
2015-06-18 04:52:32 +08:00
|
|
|
static bool markAliveBlocks(Function &F,
|
2014-08-21 13:55:13 +08:00
|
|
|
SmallPtrSetImpl<BasicBlock*> &Reachable) {
|
2013-08-13 06:38:43 +08:00
|
|
|
|
2012-12-21 19:18:49 +08:00
|
|
|
SmallVector<BasicBlock*, 128> Worklist;
|
2015-10-13 10:39:05 +08:00
|
|
|
BasicBlock *BB = &F.front();
|
2013-08-13 06:38:43 +08:00
|
|
|
Worklist.push_back(BB);
|
|
|
|
Reachable.insert(BB);
|
|
|
|
bool Changed = false;
|
2012-12-21 19:18:49 +08:00
|
|
|
do {
|
2013-08-13 06:38:43 +08:00
|
|
|
BB = Worklist.pop_back_val();
|
|
|
|
|
|
|
|
// Do a quick scan of the basic block, turning any obviously unreachable
|
|
|
|
// instructions into LLVM unreachable insts. The instruction combining pass
|
|
|
|
// canonicalizes unreachable insts into stores to null or undef.
|
2016-06-25 16:34:38 +08:00
|
|
|
for (Instruction &I : *BB) {
|
2014-07-26 05:13:35 +08:00
|
|
|
// Assumptions that are known to be false are equivalent to unreachable.
|
|
|
|
// Also, if the condition is undefined, then we make the choice most
|
|
|
|
// beneficial to the optimizer, and choose that to also be unreachable.
|
2016-06-25 16:34:38 +08:00
|
|
|
if (auto *II = dyn_cast<IntrinsicInst>(&I)) {
|
2014-07-26 05:13:35 +08:00
|
|
|
if (II->getIntrinsicID() == Intrinsic::assume) {
|
2016-06-25 16:34:38 +08:00
|
|
|
if (match(II->getArgOperand(0), m_CombineOr(m_Zero(), m_Undef()))) {
|
2016-06-25 16:19:55 +08:00
|
|
|
// Don't insert a call to llvm.trap right before the unreachable.
|
2016-06-25 16:34:38 +08:00
|
|
|
changeToUnreachable(II, false);
|
2014-07-26 05:13:35 +08:00
|
|
|
Changed = true;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2016-04-21 13:09:12 +08:00
|
|
|
if (II->getIntrinsicID() == Intrinsic::experimental_guard) {
|
|
|
|
// A call to the guard intrinsic bails out of the current compilation
|
|
|
|
// unit if the predicate passed to it is false. If the predicate is a
|
|
|
|
// constant false, then we know the guard will bail out of the current
|
|
|
|
// compile unconditionally, so all code following it is dead.
|
|
|
|
//
|
|
|
|
// Note: unlike in llvm.assume, it is not "obviously profitable" for
|
|
|
|
// guards to treat `undef` as `false` since a guard on `undef` can
|
|
|
|
// still be useful for widening.
|
2016-06-25 16:34:38 +08:00
|
|
|
if (match(II->getArgOperand(0), m_Zero()))
|
|
|
|
if (!isa<UnreachableInst>(II->getNextNode())) {
|
2016-06-25 16:19:55 +08:00
|
|
|
changeToUnreachable(II->getNextNode(), /*UseLLVMTrap=*/ false);
|
2016-04-21 13:09:12 +08:00
|
|
|
Changed = true;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2016-06-25 16:34:38 +08:00
|
|
|
if (auto *CI = dyn_cast<CallInst>(&I)) {
|
2016-06-25 15:37:27 +08:00
|
|
|
Value *Callee = CI->getCalledValue();
|
|
|
|
if (isa<ConstantPointerNull>(Callee) || isa<UndefValue>(Callee)) {
|
2016-06-25 16:19:55 +08:00
|
|
|
changeToUnreachable(CI, /*UseLLVMTrap=*/false);
|
2016-06-25 15:37:27 +08:00
|
|
|
Changed = true;
|
|
|
|
break;
|
|
|
|
}
|
2013-08-13 06:38:43 +08:00
|
|
|
if (CI->doesNotReturn()) {
|
|
|
|
// If we found a call to a no-return function, insert an unreachable
|
|
|
|
// instruction after it. Make sure there isn't *already* one there
|
|
|
|
// though.
|
2016-06-25 16:34:38 +08:00
|
|
|
if (!isa<UnreachableInst>(CI->getNextNode())) {
|
2016-06-25 16:19:55 +08:00
|
|
|
// Don't insert a call to llvm.trap right before the unreachable.
|
2016-06-25 16:34:38 +08:00
|
|
|
changeToUnreachable(CI->getNextNode(), false);
|
2013-08-13 06:38:43 +08:00
|
|
|
Changed = true;
|
|
|
|
}
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// Store to undef and store to null are undefined and used to signal that
|
|
|
|
// they should be changed to unreachable by passes that can't modify the
|
|
|
|
// CFG.
|
2016-06-25 16:34:38 +08:00
|
|
|
if (auto *SI = dyn_cast<StoreInst>(&I)) {
|
2013-08-13 06:38:43 +08:00
|
|
|
// Don't touch volatile stores.
|
|
|
|
if (SI->isVolatile()) continue;
|
|
|
|
|
|
|
|
Value *Ptr = SI->getOperand(1);
|
|
|
|
|
|
|
|
if (isa<UndefValue>(Ptr) ||
|
|
|
|
(isa<ConstantPointerNull>(Ptr) &&
|
|
|
|
SI->getPointerAddressSpace() == 0)) {
|
2016-06-25 16:19:55 +08:00
|
|
|
changeToUnreachable(SI, true);
|
2013-08-13 06:38:43 +08:00
|
|
|
Changed = true;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2016-01-05 14:27:50 +08:00
|
|
|
TerminatorInst *Terminator = BB->getTerminator();
|
|
|
|
if (auto *II = dyn_cast<InvokeInst>(Terminator)) {
|
|
|
|
// Turn invokes that call 'nounwind' functions into ordinary calls.
|
2013-08-13 06:38:43 +08:00
|
|
|
Value *Callee = II->getCalledValue();
|
|
|
|
if (isa<ConstantPointerNull>(Callee) || isa<UndefValue>(Callee)) {
|
2016-06-25 16:19:55 +08:00
|
|
|
changeToUnreachable(II, true);
|
2013-08-13 06:38:43 +08:00
|
|
|
Changed = true;
|
2015-06-18 04:52:32 +08:00
|
|
|
} else if (II->doesNotThrow() && canSimplifyInvokeNoUnwind(&F)) {
|
2013-08-13 06:38:43 +08:00
|
|
|
if (II->use_empty() && II->onlyReadsMemory()) {
|
|
|
|
// jump to the normal destination branch.
|
|
|
|
BranchInst::Create(II->getNormalDest(), II);
|
|
|
|
II->getUnwindDest()->removePredecessor(II->getParent());
|
|
|
|
II->eraseFromParent();
|
|
|
|
} else
|
|
|
|
changeToCall(II);
|
|
|
|
Changed = true;
|
|
|
|
}
|
2016-01-05 14:27:50 +08:00
|
|
|
} else if (auto *CatchSwitch = dyn_cast<CatchSwitchInst>(Terminator)) {
|
|
|
|
// Remove catchpads which cannot be reached.
|
2016-01-05 15:42:17 +08:00
|
|
|
struct CatchPadDenseMapInfo {
|
|
|
|
static CatchPadInst *getEmptyKey() {
|
|
|
|
return DenseMapInfo<CatchPadInst *>::getEmptyKey();
|
|
|
|
}
|
|
|
|
static CatchPadInst *getTombstoneKey() {
|
|
|
|
return DenseMapInfo<CatchPadInst *>::getTombstoneKey();
|
|
|
|
}
|
|
|
|
static unsigned getHashValue(CatchPadInst *CatchPad) {
|
|
|
|
return static_cast<unsigned>(hash_combine_range(
|
|
|
|
CatchPad->value_op_begin(), CatchPad->value_op_end()));
|
|
|
|
}
|
|
|
|
static bool isEqual(CatchPadInst *LHS, CatchPadInst *RHS) {
|
|
|
|
if (LHS == getEmptyKey() || LHS == getTombstoneKey() ||
|
|
|
|
RHS == getEmptyKey() || RHS == getTombstoneKey())
|
|
|
|
return LHS == RHS;
|
|
|
|
return LHS->isIdenticalTo(RHS);
|
|
|
|
}
|
|
|
|
};
|
|
|
|
|
|
|
|
// Set of unique CatchPads.
|
|
|
|
SmallDenseMap<CatchPadInst *, detail::DenseSetEmpty, 4,
|
|
|
|
CatchPadDenseMapInfo, detail::DenseSetPair<CatchPadInst *>>
|
|
|
|
HandlerSet;
|
|
|
|
detail::DenseSetEmpty Empty;
|
2016-01-05 14:27:50 +08:00
|
|
|
for (CatchSwitchInst::handler_iterator I = CatchSwitch->handler_begin(),
|
|
|
|
E = CatchSwitch->handler_end();
|
|
|
|
I != E; ++I) {
|
|
|
|
BasicBlock *HandlerBB = *I;
|
2016-01-05 15:42:17 +08:00
|
|
|
auto *CatchPad = cast<CatchPadInst>(HandlerBB->getFirstNonPHI());
|
|
|
|
if (!HandlerSet.insert({CatchPad, Empty}).second) {
|
2016-01-05 14:27:50 +08:00
|
|
|
CatchSwitch->removeHandler(I);
|
|
|
|
--I;
|
|
|
|
--E;
|
|
|
|
Changed = true;
|
|
|
|
}
|
|
|
|
}
|
2013-08-13 06:38:43 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
Changed |= ConstantFoldTerminator(BB, true);
|
2016-06-25 16:34:38 +08:00
|
|
|
for (BasicBlock *Successor : successors(BB))
|
|
|
|
if (Reachable.insert(Successor).second)
|
|
|
|
Worklist.push_back(Successor);
|
2012-12-21 19:18:49 +08:00
|
|
|
} while (!Worklist.empty());
|
2013-08-13 06:38:43 +08:00
|
|
|
return Changed;
|
|
|
|
}
|
|
|
|
|
2015-09-27 09:47:46 +08:00
|
|
|
void llvm::removeUnwindEdge(BasicBlock *BB) {
|
|
|
|
TerminatorInst *TI = BB->getTerminator();
|
|
|
|
|
|
|
|
if (auto *II = dyn_cast<InvokeInst>(TI)) {
|
|
|
|
changeToCall(II);
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
TerminatorInst *NewTI;
|
|
|
|
BasicBlock *UnwindDest;
|
|
|
|
|
|
|
|
if (auto *CRI = dyn_cast<CleanupReturnInst>(TI)) {
|
|
|
|
NewTI = CleanupReturnInst::Create(CRI->getCleanupPad(), nullptr, CRI);
|
|
|
|
UnwindDest = CRI->getUnwindDest();
|
[IR] Reformulate LLVM's EH funclet IR
While we have successfully implemented a funclet-oriented EH scheme on
top of LLVM IR, our scheme has some notable deficiencies:
- catchendpad and cleanupendpad are necessary in the current design
but they are difficult to explain to others, even to seasoned LLVM
experts.
- catchendpad and cleanupendpad are optimization barriers. They cannot
be split and force all potentially throwing call-sites to be invokes.
This has a noticeable effect on the quality of our code generation.
- catchpad, while similar in some aspects to invoke, is fairly awkward.
It is unsplittable, starts a funclet, and has control flow to other
funclets.
- The nesting relationship between funclets is currently a property of
control flow edges. Because of this, we are forced to carefully
analyze the flow graph to see if there might potentially exist illegal
nesting among funclets. While we have logic to clone funclets when
they are illegally nested, it would be nicer if we had a
representation which forbade them upfront.
Let's clean this up a bit by doing the following:
- Instead, make catchpad more like cleanuppad and landingpad: no control
flow, just a bunch of simple operands; catchpad would be splittable.
- Introduce catchswitch, a control flow instruction designed to model
the constraints of funclet oriented EH.
- Make funclet scoping explicit by having funclet instructions consume
the token produced by the funclet which contains them.
- Remove catchendpad and cleanupendpad. Their presence can be inferred
implicitly using coloring information.
N.B. The state numbering code for the CLR has been updated but the
veracity of its output cannot be spoken for. An expert should take a
look to make sure the results are reasonable.
Reviewers: rnk, JosephTremoulet, andrew.w.kaylor
Differential Revision: http://reviews.llvm.org/D15139
llvm-svn: 255422
2015-12-12 13:38:55 +08:00
|
|
|
} else if (auto *CatchSwitch = dyn_cast<CatchSwitchInst>(TI)) {
|
|
|
|
auto *NewCatchSwitch = CatchSwitchInst::Create(
|
|
|
|
CatchSwitch->getParentPad(), nullptr, CatchSwitch->getNumHandlers(),
|
|
|
|
CatchSwitch->getName(), CatchSwitch);
|
|
|
|
for (BasicBlock *PadBB : CatchSwitch->handlers())
|
|
|
|
NewCatchSwitch->addHandler(PadBB);
|
|
|
|
|
|
|
|
NewTI = NewCatchSwitch;
|
|
|
|
UnwindDest = CatchSwitch->getUnwindDest();
|
2015-09-27 09:47:46 +08:00
|
|
|
} else {
|
|
|
|
llvm_unreachable("Could not find unwind successor");
|
|
|
|
}
|
|
|
|
|
|
|
|
NewTI->takeName(TI);
|
|
|
|
NewTI->setDebugLoc(TI->getDebugLoc());
|
|
|
|
UnwindDest->removePredecessor(BB);
|
[IR] Reformulate LLVM's EH funclet IR
While we have successfully implemented a funclet-oriented EH scheme on
top of LLVM IR, our scheme has some notable deficiencies:
- catchendpad and cleanupendpad are necessary in the current design
but they are difficult to explain to others, even to seasoned LLVM
experts.
- catchendpad and cleanupendpad are optimization barriers. They cannot
be split and force all potentially throwing call-sites to be invokes.
This has a noticeable effect on the quality of our code generation.
- catchpad, while similar in some aspects to invoke, is fairly awkward.
It is unsplittable, starts a funclet, and has control flow to other
funclets.
- The nesting relationship between funclets is currently a property of
control flow edges. Because of this, we are forced to carefully
analyze the flow graph to see if there might potentially exist illegal
nesting among funclets. While we have logic to clone funclets when
they are illegally nested, it would be nicer if we had a
representation which forbade them upfront.
Let's clean this up a bit by doing the following:
- Instead, make catchpad more like cleanuppad and landingpad: no control
flow, just a bunch of simple operands; catchpad would be splittable.
- Introduce catchswitch, a control flow instruction designed to model
the constraints of funclet oriented EH.
- Make funclet scoping explicit by having funclet instructions consume
the token produced by the funclet which contains them.
- Remove catchendpad and cleanupendpad. Their presence can be inferred
implicitly using coloring information.
N.B. The state numbering code for the CLR has been updated but the
veracity of its output cannot be spoken for. An expert should take a
look to make sure the results are reasonable.
Reviewers: rnk, JosephTremoulet, andrew.w.kaylor
Differential Revision: http://reviews.llvm.org/D15139
llvm-svn: 255422
2015-12-12 13:38:55 +08:00
|
|
|
TI->replaceAllUsesWith(NewTI);
|
2015-09-27 09:47:46 +08:00
|
|
|
TI->eraseFromParent();
|
|
|
|
}
|
|
|
|
|
2013-08-13 06:38:43 +08:00
|
|
|
/// removeUnreachableBlocksFromFn - Remove blocks that are not reachable, even
|
|
|
|
/// if they are in a dead cycle. Return true if a change was made, false
|
|
|
|
/// otherwise.
|
2016-06-17 00:25:53 +08:00
|
|
|
bool llvm::removeUnreachableBlocks(Function &F, LazyValueInfo *LVI) {
|
2016-01-30 09:24:31 +08:00
|
|
|
SmallPtrSet<BasicBlock*, 16> Reachable;
|
2015-06-18 04:52:32 +08:00
|
|
|
bool Changed = markAliveBlocks(F, Reachable);
|
2012-12-21 19:18:49 +08:00
|
|
|
|
2013-08-13 06:38:43 +08:00
|
|
|
// If there are unreachable blocks in the CFG...
|
2012-12-21 19:18:49 +08:00
|
|
|
if (Reachable.size() == F.size())
|
2013-08-13 06:38:43 +08:00
|
|
|
return Changed;
|
2012-12-21 19:18:49 +08:00
|
|
|
|
|
|
|
assert(Reachable.size() < F.size());
|
2013-08-13 06:38:43 +08:00
|
|
|
NumRemoved += F.size()-Reachable.size();
|
|
|
|
|
|
|
|
// Loop over all of the basic blocks that are not reachable, dropping all of
|
|
|
|
// their internal references...
|
|
|
|
for (Function::iterator BB = ++F.begin(), E = F.end(); BB != E; ++BB) {
|
2015-10-13 10:39:05 +08:00
|
|
|
if (Reachable.count(&*BB))
|
2012-12-21 19:18:49 +08:00
|
|
|
continue;
|
|
|
|
|
2016-06-25 16:34:38 +08:00
|
|
|
for (BasicBlock *Successor : successors(&*BB))
|
|
|
|
if (Reachable.count(Successor))
|
|
|
|
Successor->removePredecessor(&*BB);
|
2016-01-10 15:13:04 +08:00
|
|
|
if (LVI)
|
|
|
|
LVI->eraseBlock(&*BB);
|
2013-08-13 06:38:43 +08:00
|
|
|
BB->dropAllReferences();
|
2013-03-22 16:43:04 +08:00
|
|
|
}
|
2012-12-21 19:18:49 +08:00
|
|
|
|
2013-08-13 06:38:43 +08:00
|
|
|
for (Function::iterator I = ++F.begin(); I != F.end();)
|
2015-10-13 10:39:05 +08:00
|
|
|
if (!Reachable.count(&*I))
|
2013-03-22 16:43:04 +08:00
|
|
|
I = F.getBasicBlockList().erase(I);
|
|
|
|
else
|
|
|
|
++I;
|
2012-12-21 19:18:49 +08:00
|
|
|
|
|
|
|
return true;
|
|
|
|
}
|
2014-08-15 23:46:38 +08:00
|
|
|
|
2015-10-03 06:12:22 +08:00
|
|
|
void llvm::combineMetadata(Instruction *K, const Instruction *J,
|
|
|
|
ArrayRef<unsigned> KnownIDs) {
|
2014-11-12 05:30:22 +08:00
|
|
|
SmallVector<std::pair<unsigned, MDNode *>, 4> Metadata;
|
2015-08-21 06:00:30 +08:00
|
|
|
K->dropUnknownNonDebugMetadata(KnownIDs);
|
2014-08-15 23:46:38 +08:00
|
|
|
K->getAllMetadataOtherThanDebugLoc(Metadata);
|
2016-07-25 10:21:19 +08:00
|
|
|
for (const auto &MD : Metadata) {
|
|
|
|
unsigned Kind = MD.first;
|
2014-11-12 05:30:22 +08:00
|
|
|
MDNode *JMD = J->getMetadata(Kind);
|
2016-07-25 10:21:19 +08:00
|
|
|
MDNode *KMD = MD.second;
|
2014-08-15 23:46:38 +08:00
|
|
|
|
|
|
|
switch (Kind) {
|
|
|
|
default:
|
|
|
|
K->setMetadata(Kind, nullptr); // Remove unknown metadata
|
|
|
|
break;
|
|
|
|
case LLVMContext::MD_dbg:
|
|
|
|
llvm_unreachable("getAllMetadataOtherThanDebugLoc returned a MD_dbg");
|
|
|
|
case LLVMContext::MD_tbaa:
|
|
|
|
K->setMetadata(Kind, MDNode::getMostGenericTBAA(JMD, KMD));
|
|
|
|
break;
|
|
|
|
case LLVMContext::MD_alias_scope:
|
2015-02-09 01:07:14 +08:00
|
|
|
K->setMetadata(Kind, MDNode::getMostGenericAliasScope(JMD, KMD));
|
|
|
|
break;
|
2014-08-15 23:46:38 +08:00
|
|
|
case LLVMContext::MD_noalias:
|
2016-04-26 10:06:06 +08:00
|
|
|
case LLVMContext::MD_mem_parallel_loop_access:
|
2014-08-15 23:46:38 +08:00
|
|
|
K->setMetadata(Kind, MDNode::intersect(JMD, KMD));
|
|
|
|
break;
|
|
|
|
case LLVMContext::MD_range:
|
|
|
|
K->setMetadata(Kind, MDNode::getMostGenericRange(JMD, KMD));
|
|
|
|
break;
|
|
|
|
case LLVMContext::MD_fpmath:
|
|
|
|
K->setMetadata(Kind, MDNode::getMostGenericFPMath(JMD, KMD));
|
|
|
|
break;
|
|
|
|
case LLVMContext::MD_invariant_load:
|
|
|
|
// Only set the !invariant.load if it is present in both instructions.
|
|
|
|
K->setMetadata(Kind, JMD);
|
|
|
|
break;
|
2014-10-22 05:02:19 +08:00
|
|
|
case LLVMContext::MD_nonnull:
|
|
|
|
// Only set the !nonnull if it is present in both instructions.
|
|
|
|
K->setMetadata(Kind, JMD);
|
|
|
|
break;
|
2015-10-03 06:12:22 +08:00
|
|
|
case LLVMContext::MD_invariant_group:
|
|
|
|
// Preserve !invariant.group in K.
|
|
|
|
break;
|
2015-11-03 01:53:51 +08:00
|
|
|
case LLVMContext::MD_align:
|
|
|
|
K->setMetadata(Kind,
|
|
|
|
MDNode::getMostGenericAlignmentOrDereferenceable(JMD, KMD));
|
|
|
|
break;
|
|
|
|
case LLVMContext::MD_dereferenceable:
|
|
|
|
case LLVMContext::MD_dereferenceable_or_null:
|
|
|
|
K->setMetadata(Kind,
|
|
|
|
MDNode::getMostGenericAlignmentOrDereferenceable(JMD, KMD));
|
|
|
|
break;
|
2014-08-15 23:46:38 +08:00
|
|
|
}
|
|
|
|
}
|
2015-10-03 06:12:22 +08:00
|
|
|
// Set !invariant.group from J if J has it. If both instructions have it
|
|
|
|
// then we will just pick it from J - even when they are different.
|
|
|
|
// Also make sure that K is load or store - f.e. combining bitcast with load
|
|
|
|
// could produce bitcast with invariant.group metadata, which is invalid.
|
|
|
|
// FIXME: we should try to preserve both invariant.group md if they are
|
|
|
|
// different, but right now instruction can only have one invariant.group.
|
|
|
|
if (auto *JMD = J->getMetadata(LLVMContext::MD_invariant_group))
|
|
|
|
if (isa<LoadInst>(K) || isa<StoreInst>(K))
|
|
|
|
K->setMetadata(LLVMContext::MD_invariant_group, JMD);
|
2014-08-15 23:46:38 +08:00
|
|
|
}
|
2015-05-23 07:53:24 +08:00
|
|
|
|
2016-08-08 12:10:22 +08:00
|
|
|
void llvm::combineMetadataForCSE(Instruction *K, const Instruction *J) {
|
|
|
|
unsigned KnownIDs[] = {
|
|
|
|
LLVMContext::MD_tbaa, LLVMContext::MD_alias_scope,
|
|
|
|
LLVMContext::MD_noalias, LLVMContext::MD_range,
|
|
|
|
LLVMContext::MD_invariant_load, LLVMContext::MD_nonnull,
|
|
|
|
LLVMContext::MD_invariant_group, LLVMContext::MD_align,
|
|
|
|
LLVMContext::MD_dereferenceable,
|
|
|
|
LLVMContext::MD_dereferenceable_or_null};
|
|
|
|
combineMetadata(K, J, KnownIDs);
|
|
|
|
}
|
|
|
|
|
2015-05-23 07:53:24 +08:00
|
|
|
unsigned llvm::replaceDominatedUsesWith(Value *From, Value *To,
|
|
|
|
DominatorTree &DT,
|
|
|
|
const BasicBlockEdge &Root) {
|
|
|
|
assert(From->getType() == To->getType());
|
|
|
|
|
|
|
|
unsigned Count = 0;
|
|
|
|
for (Value::use_iterator UI = From->use_begin(), UE = From->use_end();
|
|
|
|
UI != UE; ) {
|
|
|
|
Use &U = *UI++;
|
|
|
|
if (DT.dominates(Root, U)) {
|
|
|
|
U.set(To);
|
|
|
|
DEBUG(dbgs() << "Replace dominated use of '"
|
|
|
|
<< From->getName() << "' as "
|
|
|
|
<< *To << " in " << *U << "\n");
|
|
|
|
++Count;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return Count;
|
|
|
|
}
|
2015-09-03 03:59:59 +08:00
|
|
|
|
|
|
|
unsigned llvm::replaceDominatedUsesWith(Value *From, Value *To,
|
|
|
|
DominatorTree &DT,
|
2016-09-08 23:25:12 +08:00
|
|
|
const BasicBlock *BB) {
|
2015-09-03 03:59:59 +08:00
|
|
|
assert(From->getType() == To->getType());
|
|
|
|
|
|
|
|
unsigned Count = 0;
|
|
|
|
for (Value::use_iterator UI = From->use_begin(), UE = From->use_end();
|
|
|
|
UI != UE;) {
|
|
|
|
Use &U = *UI++;
|
|
|
|
auto *I = cast<Instruction>(U.getUser());
|
2016-09-08 23:25:12 +08:00
|
|
|
if (DT.properlyDominates(BB, I->getParent())) {
|
2015-09-03 03:59:59 +08:00
|
|
|
U.set(To);
|
|
|
|
DEBUG(dbgs() << "Replace dominated use of '" << From->getName() << "' as "
|
|
|
|
<< *To << " in " << *U << "\n");
|
|
|
|
++Count;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return Count;
|
|
|
|
}
|
2015-10-09 07:18:30 +08:00
|
|
|
|
|
|
|
bool llvm::callsGCLeafFunction(ImmutableCallSite CS) {
  // Check if the call site itself is specifically marked as a gc leaf.
  if (CS.hasFnAttr("gc-leaf-function"))
    return true;

  const Function *Callee = CS.getCalledFunction();
  if (!Callee)
    return false;

  // The attribute may also live on the callee's declaration.
  if (Callee->hasFnAttribute("gc-leaf-function"))
    return true;

  // Most LLVM intrinsics do not take safepoints, so any recognized intrinsic
  // other than the statepoint/deoptimize ones counts as a leaf.
  Intrinsic::ID IID = Callee->getIntrinsicID();
  if (IID != Intrinsic::not_intrinsic)
    return IID != Intrinsic::experimental_gc_statepoint &&
           IID != Intrinsic::experimental_deoptimize;

  return false;
}
|
2016-01-15 17:20:19 +08:00
|
|
|
|
2016-08-06 19:13:10 +08:00
|
|
|
namespace {
|
2016-01-15 17:20:19 +08:00
|
|
|
/// A potential constituent of a bitreverse or bswap expression. See
|
|
|
|
/// collectBitParts for a fuller explanation.
|
|
|
|
struct BitPart {
|
|
|
|
BitPart(Value *P, unsigned BW) : Provider(P) {
|
|
|
|
Provenance.resize(BW);
|
|
|
|
}
|
|
|
|
|
|
|
|
/// The Value that this is a bitreverse/bswap of.
|
|
|
|
Value *Provider;
|
|
|
|
/// The "provenance" of each bit. Provenance[A] = B means that bit A
|
|
|
|
/// in Provider becomes bit B in the result of this expression.
|
|
|
|
SmallVector<int8_t, 32> Provenance; // int8_t means max size is i128.
|
|
|
|
|
|
|
|
enum { Unset = -1 };
|
|
|
|
};
|
2016-08-06 19:13:10 +08:00
|
|
|
} // end anonymous namespace
|
2016-01-15 17:20:19 +08:00
|
|
|
|
|
|
|
/// Analyze the specified subexpression and see if it is capable of providing
|
|
|
|
/// pieces of a bswap or bitreverse. The subexpression provides a potential
|
|
|
|
/// piece of a bswap or bitreverse if it can be proven that each non-zero bit in
|
|
|
|
/// the output of the expression came from a corresponding bit in some other
|
|
|
|
/// value. This function is recursive, and the end result is a mapping of
|
|
|
|
/// bitnumber to bitnumber. It is the caller's responsibility to validate that
|
|
|
|
/// the bitnumber to bitnumber mapping is correct for a bswap or bitreverse.
|
|
|
|
///
|
|
|
|
/// For example, if the current subexpression if "(shl i32 %X, 24)" then we know
|
|
|
|
/// that the expression deposits the low byte of %X into the high byte of the
|
|
|
|
/// result and that all other bits are zero. This expression is accepted and a
|
|
|
|
/// BitPart is returned with Provider set to %X and Provenance[24-31] set to
|
|
|
|
/// [0-7].
|
|
|
|
///
|
|
|
|
/// To avoid revisiting values, the BitPart results are memoized into the
|
|
|
|
/// provided map. To avoid unnecessary copying of BitParts, BitParts are
|
|
|
|
/// constructed in-place in the \c BPS map. Because of this \c BPS needs to
|
|
|
|
/// store BitParts objects, not pointers. As we need the concept of a nullptr
|
|
|
|
/// BitParts (Value has been analyzed and the analysis failed), we use an
/// Optional type instead to provide the same functionality.
|
|
|
|
///
|
|
|
|
/// Because we pass around references into \c BPS, we must use a container that
|
|
|
|
/// does not invalidate internal references (std::map instead of DenseMap).
|
|
|
|
///
|
|
|
|
static const Optional<BitPart> &
collectBitParts(Value *V, bool MatchBSwaps, bool MatchBitReversals,
                std::map<Value *, Optional<BitPart>> &BPS) {
  // Reuse a previously computed result (including a memoized failure).
  auto I = BPS.find(V);
  if (I != BPS.end())
    return I->second;

  // Insert a "failed" placeholder up front; every early 'return Result'
  // below therefore memoizes the failure for V.
  auto &Result = BPS[V] = None;
  auto BitWidth = cast<IntegerType>(V->getType())->getBitWidth();

  if (Instruction *I = dyn_cast<Instruction>(V)) {
    // If this is an or instruction, it may be an inner node of the bswap.
    if (I->getOpcode() == Instruction::Or) {
      auto &A = collectBitParts(I->getOperand(0), MatchBSwaps,
                                MatchBitReversals, BPS);
      auto &B = collectBitParts(I->getOperand(1), MatchBSwaps,
                                MatchBitReversals, BPS);
      if (!A || !B)
        return Result;

      // Try and merge the two together.
      if (!A->Provider || A->Provider != B->Provider)
        return Result;

      Result = BitPart(A->Provider, BitWidth);
      for (unsigned i = 0; i < A->Provenance.size(); ++i) {
        // Each result bit may come from at most one operand; a conflict
        // between two known provenances means this is not a pure shuffle.
        if (A->Provenance[i] != BitPart::Unset &&
            B->Provenance[i] != BitPart::Unset &&
            A->Provenance[i] != B->Provenance[i])
          return Result = None;

        if (A->Provenance[i] == BitPart::Unset)
          Result->Provenance[i] = B->Provenance[i];
        else
          Result->Provenance[i] = A->Provenance[i];
      }

      return Result;
    }

    // If this is a logical shift by a constant, recurse then shift the result.
    if (I->isLogicalShift() && isa<ConstantInt>(I->getOperand(1))) {
      unsigned BitShift =
          cast<ConstantInt>(I->getOperand(1))->getLimitedValue(~0U);
      // Ensure the shift amount is defined: in LLVM IR a shift by an amount
      // greater than or equal to the bit width is undefined, so reject
      // BitShift == BitWidth too (the previous '>' comparison let it
      // through and would also erase the entire Provenance vector below).
      if (BitShift >= BitWidth)
        return Result;

      auto &Res = collectBitParts(I->getOperand(0), MatchBSwaps,
                                  MatchBitReversals, BPS);
      if (!Res)
        return Result;
      Result = Res;

      // Perform the "shift" on BitProvenance.
      auto &P = Result->Provenance;
      if (I->getOpcode() == Instruction::Shl) {
        // Shift left: drop the high provenance entries, pad low with Unset.
        P.erase(std::prev(P.end(), BitShift), P.end());
        P.insert(P.begin(), BitShift, BitPart::Unset);
      } else {
        // Logical shift right: drop low entries, pad high with Unset.
        P.erase(P.begin(), std::next(P.begin(), BitShift));
        P.insert(P.end(), BitShift, BitPart::Unset);
      }

      return Result;
    }

    // If this is a logical 'and' with a mask that clears bits, recurse then
    // unset the appropriate bits.
    if (I->getOpcode() == Instruction::And &&
        isa<ConstantInt>(I->getOperand(1))) {
      APInt Bit(I->getType()->getPrimitiveSizeInBits(), 1);
      const APInt &AndMask = cast<ConstantInt>(I->getOperand(1))->getValue();

      // Check that the mask allows a multiple of 8 bits for a bswap, for an
      // early exit.
      unsigned NumMaskedBits = AndMask.countPopulation();
      if (!MatchBitReversals && NumMaskedBits % 8 != 0)
        return Result;

      auto &Res = collectBitParts(I->getOperand(0), MatchBSwaps,
                                  MatchBitReversals, BPS);
      if (!Res)
        return Result;
      Result = Res;

      for (unsigned i = 0; i < BitWidth; ++i, Bit <<= 1)
        // If the AndMask is zero for this bit, clear the bit.
        if ((AndMask & Bit) == 0)
          Result->Provenance[i] = BitPart::Unset;
      return Result;
    }

    // If this is a zext instruction zero extend the result.
    if (I->getOpcode() == Instruction::ZExt) {
      auto &Res = collectBitParts(I->getOperand(0), MatchBSwaps,
                                  MatchBitReversals, BPS);
      if (!Res)
        return Result;

      // Copy the narrow provenance and mark the zero-extended high bits as
      // having no provider.
      Result = BitPart(Res->Provider, BitWidth);
      auto NarrowBitWidth =
          cast<IntegerType>(cast<ZExtInst>(I)->getSrcTy())->getBitWidth();
      for (unsigned i = 0; i < NarrowBitWidth; ++i)
        Result->Provenance[i] = Res->Provenance[i];
      for (unsigned i = NarrowBitWidth; i < BitWidth; ++i)
        Result->Provenance[i] = BitPart::Unset;
      return Result;
    }
  }

  // Okay, we got to something that isn't a shift, 'or' or 'and'. This must be
  // the input value to the bswap/bitreverse.
  Result = BitPart(V, BitWidth);
  for (unsigned i = 0; i < BitWidth; ++i)
    Result->Provenance[i] = i;
  return Result;
}
|
|
|
|
|
|
|
|
static bool bitTransformIsCorrectForBSwap(unsigned From, unsigned To,
                                          unsigned BitWidth) {
  // A bswap keeps every bit at the same offset within its byte, so the
  // intra-byte positions must agree.
  if ((From & 7) != (To & 7))
    return false;
  // Compare byte indices: the source byte must mirror the destination byte.
  unsigned FromByte = From / 8;
  unsigned ToByte = To / 8;
  unsigned NumBytes = BitWidth / 8;
  return FromByte == NumBytes - ToByte - 1;
}
|
|
|
|
|
|
|
|
static bool bitTransformIsCorrectForBitReverse(unsigned From, unsigned To,
                                               unsigned BitWidth) {
  // A bit reversal maps bit i to bit (BitWidth - 1 - i); equivalently the
  // two indices must sum to BitWidth - 1.
  return To == BitWidth - From - 1;
}
|
|
|
|
|
|
|
|
/// Given an OR instruction, check to see if this is a bitreverse
|
|
|
|
/// idiom. If so, insert the new intrinsic and return true.
|
2016-05-26 00:22:14 +08:00
|
|
|
bool llvm::recognizeBSwapOrBitReverseIdiom(
    Instruction *I, bool MatchBSwaps, bool MatchBitReversals,
    SmallVectorImpl<Instruction *> &InsertedInsts) {
  // Only the root 'or' of a candidate bswap/bitreverse tree is handled here;
  // collectBitParts walks the operand tree below it.
  if (Operator::getOpcode(I) != Instruction::Or)
    return false;
  if (!MatchBSwaps && !MatchBitReversals)
    return false;
  IntegerType *ITy = dyn_cast<IntegerType>(I->getType());
  if (!ITy || ITy->getBitWidth() > 128)
    return false;  // Can't do vectors or integers > 128 bits.
  unsigned BW = ITy->getBitWidth();

  // If the only user of I is a trunc, only the truncated bits matter: match
  // the narrower width and emit the intrinsic at that type (then zext back).
  unsigned DemandedBW = BW;
  IntegerType *DemandedTy = ITy;
  if (I->hasOneUse()) {
    if (TruncInst *Trunc = dyn_cast<TruncInst>(I->user_back())) {
      DemandedTy = cast<IntegerType>(Trunc->getType());
      DemandedBW = DemandedTy->getBitWidth();
    }
  }

  // Try to find all the pieces corresponding to the bswap.
  std::map<Value *, Optional<BitPart>> BPS;
  auto Res = collectBitParts(I, MatchBSwaps, MatchBitReversals, BPS);
  if (!Res)
    return false;
  auto &BitProvenance = Res->Provenance;

  // Now, is the bit permutation correct for a bswap or a bitreverse? We can
  // only byteswap values with an even number of bytes.
  // NOTE: an Unset (-1) provenance entry converts to a huge unsigned value
  // here, which fails both checks below — so any unknown bit in the demanded
  // range correctly disqualifies the match.
  bool OKForBSwap = DemandedBW % 16 == 0, OKForBitReverse = true;
  for (unsigned i = 0; i < DemandedBW; ++i) {
    OKForBSwap &=
        bitTransformIsCorrectForBSwap(BitProvenance[i], i, DemandedBW);
    OKForBitReverse &=
        bitTransformIsCorrectForBitReverse(BitProvenance[i], i, DemandedBW);
  }

  // Pick the intrinsic to emit; bswap is preferred when both would match.
  Intrinsic::ID Intrin;
  if (OKForBSwap && MatchBSwaps)
    Intrin = Intrinsic::bswap;
  else if (OKForBitReverse && MatchBitReversals)
    Intrin = Intrinsic::bitreverse;
  else
    return false;

  if (ITy != DemandedTy) {
    // Narrow case: emit trunc (if needed) -> intrinsic at DemandedTy -> zext
    // back to ITy so I's users can be rewritten by the caller.
    Function *F = Intrinsic::getDeclaration(I->getModule(), Intrin, DemandedTy);
    Value *Provider = Res->Provider;
    IntegerType *ProviderTy = cast<IntegerType>(Provider->getType());
    // We may need to truncate the provider.
    if (DemandedTy != ProviderTy) {
      auto *Trunc = CastInst::Create(Instruction::Trunc, Provider, DemandedTy,
                                     "trunc", I);
      InsertedInsts.push_back(Trunc);
      Provider = Trunc;
    }
    auto *CI = CallInst::Create(F, Provider, "rev", I);
    InsertedInsts.push_back(CI);
    auto *ExtInst = CastInst::Create(Instruction::ZExt, CI, ITy, "zext", I);
    InsertedInsts.push_back(ExtInst);
    return true;
  }

  // Full-width case: a single intrinsic call on the provider suffices.
  Function *F = Intrinsic::getDeclaration(I->getModule(), Intrin, ITy);
  InsertedInsts.push_back(CallInst::Create(F, Res->Provider, "rev", I));
  return true;
}
|
2016-06-18 18:10:37 +08:00
|
|
|
|
|
|
|
// CodeGen has special handling for some string functions that may replace
|
|
|
|
// them with target-specific intrinsics. Since that'd skip our interceptors
|
|
|
|
// in ASan/MSan/TSan/DFSan, and thus make us miss some memory accesses,
|
|
|
|
// we mark affected calls as NoBuiltin, which will disable optimization
|
|
|
|
// in CodeGen.
|
2016-07-29 07:45:15 +08:00
|
|
|
void llvm::maybeMarkSanitizerLibraryCallNoBuiltin(
    CallInst *CI, const TargetLibraryInfo *TLI) {
  // Only direct calls to named, externally-linked functions can map to a
  // recognized library function.
  Function *Callee = CI->getCalledFunction();
  if (!Callee || Callee->hasLocalLinkage() || !Callee->hasName())
    return;

  // Skip functions TLI doesn't recognize or has no special codegen for.
  LibFunc::Func KnownFunc;
  if (!TLI->getLibFunc(Callee->getName(), KnownFunc) ||
      !TLI->hasOptimizedCodeGen(KnownFunc))
    return;

  // Calls that don't touch memory are of no interest to the sanitizers.
  if (Callee->doesNotAccessMemory())
    return;

  CI->addAttribute(AttributeSet::FunctionIndex, Attribute::NoBuiltin);
}
|