//===- InstructionCombining.cpp - Combine multiple instructions ----------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// InstructionCombining - Combine instructions to form fewer, simple
// instructions.  This pass does not modify the CFG.  This pass is where
// algebraic simplification happens.
//
// This pass combines things like:
//    %Y = add i32 %X, 1
//    %Z = add i32 %Y, 1
// into:
//    %Z = add i32 %X, 2
//
// This is a simple worklist driven algorithm.
//
// This pass guarantees that the following canonicalizations are performed on
// the program:
//    1. If a binary operator has a constant operand, it is moved to the RHS
//    2. Bitwise operators with constant operands are always grouped so that
//       shifts are performed first, then or's, then and's, then xor's.
//    3. Compare instructions are converted from <,>,<=,>= to ==,!= if possible
//    4. All cmp instructions on boolean values are replaced with logical ops
//    5. add X, X is represented as (X*2) => (X << 1)
//    6. Multiplies with a power-of-two constant argument are transformed into
//       shifts.
//   ... etc.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "instcombine"
#include "llvm/Transforms/Scalar.h"
#include "InstCombine.h"
#include "llvm/IntrinsicInst.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Support/CFG.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/GetElementPtrTypeIterator.h"
#include "llvm/Support/PatternMatch.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/Statistic.h"
#include <algorithm>
#include <climits>
using namespace llvm;
using namespace llvm::PatternMatch;

STATISTIC(NumCombined , "Number of insts combined");
STATISTIC(NumConstProp, "Number of constant folds");
STATISTIC(NumDeadInst , "Number of dead inst eliminated");
STATISTIC(NumSunkInst , "Number of instructions sunk");


char InstCombiner::ID = 0;
static RegisterPass<InstCombiner>
X("instcombine", "Combine redundant instructions");

void InstCombiner::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.addPreservedID(LCSSAID);
  AU.setPreservesCFG();
}


/// ShouldChangeType - Return true if it is desirable to convert a computation
/// from 'From' to 'To'.  We don't want to convert from a legal to an illegal
/// type for example, or from a smaller to a larger illegal type.
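///
/// For example (an illustrative sketch, assuming a TargetData where i32 and
/// i64 are the only legal integer widths):
///   ShouldChangeType(i160, i64)  --> true   (shrink an illegal type to legal)
///   ShouldChangeType(i64, i160)  --> false  (legal to illegal)
///   ShouldChangeType(i160, i256) --> false  (illegal type would grow)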
bool InstCombiner::ShouldChangeType(const Type *From, const Type *To) const {
  assert(From->isIntegerTy() && To->isIntegerTy());

  // If we don't have TD, we don't know if the source/dest are legal.
  if (!TD) return false;

  unsigned FromWidth = From->getPrimitiveSizeInBits();
  unsigned ToWidth = To->getPrimitiveSizeInBits();
  bool FromLegal = TD->isLegalInteger(FromWidth);
  bool ToLegal = TD->isLegalInteger(ToWidth);

  // If this is a legal integer from type, and the result would be an illegal
  // type, don't do the transformation.
  if (FromLegal && !ToLegal)
    return false;

  // Otherwise, if both are illegal, do not increase the size of the result. We
  // do allow things like i160 -> i64, but not i64 -> i160.
  if (!FromLegal && !ToLegal && ToWidth > FromWidth)
    return false;

  return true;
}


// SimplifyCommutative - This performs a few simplifications for commutative
// operators:
//
//  1. Order operands such that they are listed from right (least complex) to
//     left (most complex).  This puts constants before unary operators before
//     binary operators.
//
//  2. Transform: (op (op V, C1), C2) ==> (op V, (op C1, C2))
//  3. Transform: (op (op V1, C1), (op V2, C2)) ==> (op (op V1, V2), (op C1,C2))
//
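// For example (an illustrative sketch using integer adds):
//   add (add %V, 1), 2              ==>  add %V, 3                  [rule 2]
//   add (add %V1, 1), (add %V2, 2)  ==>  add (add %V1, %V2), 3      [rule 3]
//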
bool InstCombiner::SimplifyCommutative(BinaryOperator &I) {
  bool Changed = false;
  if (getComplexity(I.getOperand(0)) < getComplexity(I.getOperand(1)))
    Changed = !I.swapOperands();

  if (!I.isAssociative()) return Changed;

  Instruction::BinaryOps Opcode = I.getOpcode();
  if (BinaryOperator *Op = dyn_cast<BinaryOperator>(I.getOperand(0)))
    if (Op->getOpcode() == Opcode && isa<Constant>(Op->getOperand(1))) {
      if (isa<Constant>(I.getOperand(1))) {
        Constant *Folded = ConstantExpr::get(I.getOpcode(),
                                             cast<Constant>(I.getOperand(1)),
                                             cast<Constant>(Op->getOperand(1)));
        I.setOperand(0, Op->getOperand(0));
        I.setOperand(1, Folded);
        return true;
      }

      if (BinaryOperator *Op1 = dyn_cast<BinaryOperator>(I.getOperand(1)))
        if (Op1->getOpcode() == Opcode && isa<Constant>(Op1->getOperand(1)) &&
            Op->hasOneUse() && Op1->hasOneUse()) {
          Constant *C1 = cast<Constant>(Op->getOperand(1));
          Constant *C2 = cast<Constant>(Op1->getOperand(1));

          // Fold (op (op V1, C1), (op V2, C2)) ==> (op (op V1, V2), (op C1,C2))
          Constant *Folded = ConstantExpr::get(I.getOpcode(), C1, C2);
          Instruction *New = BinaryOperator::Create(Opcode, Op->getOperand(0),
                                                    Op1->getOperand(0),
                                                    Op1->getName(), &I);
          Worklist.Add(New);
          I.setOperand(0, New);
          I.setOperand(1, Folded);
          return true;
        }
    }
  return Changed;
}

// dyn_castNegVal - Given a 'sub' instruction, return the RHS of the instruction
// if the LHS is a constant zero (which is the 'negate' form).
//
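// For example (a sketch): given (sub i32 0, %X) this returns %X; given the
// constant i32 5 it returns the folded constant i32 -5.
//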
Value *InstCombiner::dyn_castNegVal(Value *V) const {
  if (BinaryOperator::isNeg(V))
    return BinaryOperator::getNegArgument(V);

  // Constants can be considered to be negated values if they can be folded.
  if (ConstantInt *C = dyn_cast<ConstantInt>(V))
    return ConstantExpr::getNeg(C);

  if (ConstantVector *C = dyn_cast<ConstantVector>(V))
    if (C->getType()->getElementType()->isIntegerTy())
      return ConstantExpr::getNeg(C);

  return 0;
}

// dyn_castFNegVal - Given an 'fsub' instruction, return the RHS of the
// instruction if the LHS is a constant negative zero (which is the 'negate'
// form).
//
Value *InstCombiner::dyn_castFNegVal(Value *V) const {
  if (BinaryOperator::isFNeg(V))
    return BinaryOperator::getFNegArgument(V);

  // Constants can be considered to be negated values if they can be folded.
  if (ConstantFP *C = dyn_cast<ConstantFP>(V))
    return ConstantExpr::getFNeg(C);

  if (ConstantVector *C = dyn_cast<ConstantVector>(V))
    if (C->getType()->getElementType()->isFloatingPointTy())
      return ConstantExpr::getFNeg(C);

  return 0;
}

static Value *FoldOperationIntoSelectOperand(Instruction &I, Value *SO,
                                             InstCombiner *IC) {
  if (CastInst *CI = dyn_cast<CastInst>(&I))
    return IC->Builder->CreateCast(CI->getOpcode(), SO, I.getType());

  // Figure out if the constant is the left or the right argument.
  bool ConstIsRHS = isa<Constant>(I.getOperand(1));
  Constant *ConstOperand = cast<Constant>(I.getOperand(ConstIsRHS));

  if (Constant *SOC = dyn_cast<Constant>(SO)) {
    if (ConstIsRHS)
      return ConstantExpr::get(I.getOpcode(), SOC, ConstOperand);
    return ConstantExpr::get(I.getOpcode(), ConstOperand, SOC);
  }

  Value *Op0 = SO, *Op1 = ConstOperand;
  if (!ConstIsRHS)
    std::swap(Op0, Op1);

  if (BinaryOperator *BO = dyn_cast<BinaryOperator>(&I))
    return IC->Builder->CreateBinOp(BO->getOpcode(), Op0, Op1,
                                    SO->getName()+".op");
  if (ICmpInst *CI = dyn_cast<ICmpInst>(&I))
    return IC->Builder->CreateICmp(CI->getPredicate(), Op0, Op1,
                                   SO->getName()+".cmp");
  if (FCmpInst *CI = dyn_cast<FCmpInst>(&I))
    return IC->Builder->CreateFCmp(CI->getPredicate(), Op0, Op1,
                                   SO->getName()+".cmp");
  llvm_unreachable("Unknown binary instruction type!");
}

// FoldOpIntoSelect - Given an instruction with a select as one operand and a
// constant as the other operand, try to fold the binary operator into the
// select arguments.  This also works for Cast instructions, which obviously do
// not have a second operand.
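//
// For example (a sketch):
//   add (select i1 %C, i32 1, i32 2), i32 4  ==>  select i1 %C, i32 5, i32 6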
Instruction *InstCombiner::FoldOpIntoSelect(Instruction &Op, SelectInst *SI) {
  // Don't modify shared select instructions.
  if (!SI->hasOneUse()) return 0;
  Value *TV = SI->getOperand(1);
  Value *FV = SI->getOperand(2);

  if (isa<Constant>(TV) || isa<Constant>(FV)) {
    // Bool selects with constant operands can be folded to logical ops.
    if (SI->getType()->isIntegerTy(1)) return 0;

    Value *SelectTrueVal = FoldOperationIntoSelectOperand(Op, TV, this);
    Value *SelectFalseVal = FoldOperationIntoSelectOperand(Op, FV, this);

    return SelectInst::Create(SI->getCondition(), SelectTrueVal,
                              SelectFalseVal);
  }
  return 0;
}


/// FoldOpIntoPhi - Given a binary operator, cast instruction, or select which
/// has a PHI node as operand #0, see if we can fold the instruction into the
/// PHI (which is only possible if all operands to the PHI are constants).
///
/// If AllowAggressive is true, FoldOpIntoPhi will allow certain transforms
/// that would normally be unprofitable because they strongly encourage jump
/// threading.
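///
/// For example (a sketch), when every incoming value to the PHI is constant:
///   %p = phi i32 [ 1, %bb0 ], [ 2, %bb1 ]
///   %r = add i32 %p, 10
/// can become:
///   %r = phi i32 [ 11, %bb0 ], [ 12, %bb1 ]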
Instruction *InstCombiner::FoldOpIntoPhi(Instruction &I,
                                         bool AllowAggressive) {
  AllowAggressive = false;
  PHINode *PN = cast<PHINode>(I.getOperand(0));
  unsigned NumPHIValues = PN->getNumIncomingValues();
  if (NumPHIValues == 0 ||
      // We normally only transform phis with a single use, unless we're trying
      // hard to make jump threading happen.
      (!PN->hasOneUse() && !AllowAggressive))
    return 0;


  // Check to see if all of the operands of the PHI are simple constants
  // (constantint/constantfp/undef).  If there is one non-constant value,
  // remember the BB it is in.  If there is more than one or if *it* is a PHI,
  // bail out.  We don't do arbitrary constant expressions here because moving
  // their computation can be expensive without a cost model.
  BasicBlock *NonConstBB = 0;
  for (unsigned i = 0; i != NumPHIValues; ++i)
    if (!isa<Constant>(PN->getIncomingValue(i)) ||
        isa<ConstantExpr>(PN->getIncomingValue(i))) {
      if (NonConstBB) return 0;  // More than one non-const value.
      if (isa<PHINode>(PN->getIncomingValue(i))) return 0;  // Itself a phi.
      NonConstBB = PN->getIncomingBlock(i);

      // If the incoming non-constant value is in I's block, we have an
      // infinite loop.
      if (NonConstBB == I.getParent())
        return 0;
    }

  // If there is exactly one non-constant value, we can insert a copy of the
  // operation in that block.  However, if this is a critical edge, we would be
  // inserting the computation on some other paths (e.g. inside a loop).  Only
  // do this if the pred block is unconditionally branching into the phi block.
  if (NonConstBB != 0 && !AllowAggressive) {
    BranchInst *BI = dyn_cast<BranchInst>(NonConstBB->getTerminator());
    if (!BI || !BI->isUnconditional()) return 0;
  }

  // Okay, we can do the transformation: create the new PHI node.
  PHINode *NewPN = PHINode::Create(I.getType(), "");
  NewPN->reserveOperandSpace(PN->getNumOperands()/2);
  InsertNewInstBefore(NewPN, *PN);
  NewPN->takeName(PN);

  // Next, add all of the operands to the PHI.
  if (SelectInst *SI = dyn_cast<SelectInst>(&I)) {
    // We only currently try to fold the condition of a select when it is a
    // phi, not the true/false values.
    Value *TrueV = SI->getTrueValue();
    Value *FalseV = SI->getFalseValue();
    BasicBlock *PhiTransBB = PN->getParent();
    for (unsigned i = 0; i != NumPHIValues; ++i) {
      BasicBlock *ThisBB = PN->getIncomingBlock(i);
      Value *TrueVInPred = TrueV->DoPHITranslation(PhiTransBB, ThisBB);
      Value *FalseVInPred = FalseV->DoPHITranslation(PhiTransBB, ThisBB);
      Value *InV = 0;
      if (Constant *InC = dyn_cast<Constant>(PN->getIncomingValue(i))) {
        InV = InC->isNullValue() ? FalseVInPred : TrueVInPred;
      } else {
        assert(PN->getIncomingBlock(i) == NonConstBB);
        InV = SelectInst::Create(PN->getIncomingValue(i), TrueVInPred,
                                 FalseVInPred,
                                 "phitmp", NonConstBB->getTerminator());
        Worklist.Add(cast<Instruction>(InV));
      }
      NewPN->addIncoming(InV, ThisBB);
    }
  } else if (I.getNumOperands() == 2) {
    Constant *C = cast<Constant>(I.getOperand(1));
    for (unsigned i = 0; i != NumPHIValues; ++i) {
      Value *InV = 0;
      if (Constant *InC = dyn_cast<Constant>(PN->getIncomingValue(i))) {
        if (CmpInst *CI = dyn_cast<CmpInst>(&I))
          InV = ConstantExpr::getCompare(CI->getPredicate(), InC, C);
        else
          InV = ConstantExpr::get(I.getOpcode(), InC, C);
      } else {
        assert(PN->getIncomingBlock(i) == NonConstBB);
        if (BinaryOperator *BO = dyn_cast<BinaryOperator>(&I))
          InV = BinaryOperator::Create(BO->getOpcode(),
                                       PN->getIncomingValue(i), C, "phitmp",
                                       NonConstBB->getTerminator());
        else if (CmpInst *CI = dyn_cast<CmpInst>(&I))
          InV = CmpInst::Create(CI->getOpcode(),
                                CI->getPredicate(),
                                PN->getIncomingValue(i), C, "phitmp",
                                NonConstBB->getTerminator());
        else
          llvm_unreachable("Unknown binop!");

        Worklist.Add(cast<Instruction>(InV));
      }
      NewPN->addIncoming(InV, PN->getIncomingBlock(i));
    }
  } else {
    CastInst *CI = cast<CastInst>(&I);
    const Type *RetTy = CI->getType();
    for (unsigned i = 0; i != NumPHIValues; ++i) {
      Value *InV;
      if (Constant *InC = dyn_cast<Constant>(PN->getIncomingValue(i))) {
        InV = ConstantExpr::getCast(CI->getOpcode(), InC, RetTy);
      } else {
        assert(PN->getIncomingBlock(i) == NonConstBB);
        InV = CastInst::Create(CI->getOpcode(), PN->getIncomingValue(i),
                               I.getType(), "phitmp",
                               NonConstBB->getTerminator());
        Worklist.Add(cast<Instruction>(InV));
      }
      NewPN->addIncoming(InV, PN->getIncomingBlock(i));
    }
  }
  return ReplaceInstUsesWith(I, NewPN);
}

/// FindElementAtOffset - Given a type and a constant offset, determine whether
/// or not there is a sequence of GEP indices into the type that will land us at
/// the specified offset.  If so, fill them into NewIndices and return the
/// resultant element type, otherwise return null.
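///
/// For example (a sketch, assuming a 64-bit pointer size): for the type
/// { i32, i32 } and Offset = 4, this fills NewIndices with [ i64 0, i32 1 ]
/// and returns i32.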
const Type *InstCombiner::FindElementAtOffset(const Type *Ty, int64_t Offset,
                                          SmallVectorImpl<Value*> &NewIndices) {
  if (!TD) return 0;
  if (!Ty->isSized()) return 0;

  // Start with the index over the outer type.  Note that the type size
  // might be zero (even if the offset isn't zero) if the indexed type
  // is something like [0 x {int, int}]
  const Type *IntPtrTy = TD->getIntPtrType(Ty->getContext());
  int64_t FirstIdx = 0;
  if (int64_t TySize = TD->getTypeAllocSize(Ty)) {
    FirstIdx = Offset/TySize;
    Offset -= FirstIdx*TySize;

    // Handle hosts where % returns negative instead of values [0..TySize).
    if (Offset < 0) {
      --FirstIdx;
      Offset += TySize;
      assert(Offset >= 0);
    }
    assert((uint64_t)Offset < (uint64_t)TySize && "Out of range offset");
  }

  NewIndices.push_back(ConstantInt::get(IntPtrTy, FirstIdx));

  // Index into the types.  If we fail, set OrigBase to null.
  while (Offset) {
    // Indexing into tail padding between struct/array elements.
    if (uint64_t(Offset*8) >= TD->getTypeSizeInBits(Ty))
      return 0;

    if (const StructType *STy = dyn_cast<StructType>(Ty)) {
      const StructLayout *SL = TD->getStructLayout(STy);
      assert(Offset < (int64_t)SL->getSizeInBytes() &&
             "Offset must stay within the indexed type");

      unsigned Elt = SL->getElementContainingOffset(Offset);
      NewIndices.push_back(ConstantInt::get(Type::getInt32Ty(Ty->getContext()),
                                            Elt));

      Offset -= SL->getElementOffset(Elt);
      Ty = STy->getElementType(Elt);
    } else if (const ArrayType *AT = dyn_cast<ArrayType>(Ty)) {
      uint64_t EltSize = TD->getTypeAllocSize(AT->getElementType());
      assert(EltSize && "Cannot index into a zero-sized array");
      NewIndices.push_back(ConstantInt::get(IntPtrTy, Offset/EltSize));
      Offset %= EltSize;
      Ty = AT->getElementType();
    } else {
      // Otherwise, we can't index into the middle of this atomic type, bail.
      return 0;
    }
  }

  return Ty;
}


Instruction *InstCombiner::visitGetElementPtrInst(GetElementPtrInst &GEP) {
  SmallVector<Value*, 8> Ops(GEP.op_begin(), GEP.op_end());

  if (Value *V = SimplifyGEPInst(&Ops[0], Ops.size(), TD))
    return ReplaceInstUsesWith(GEP, V);

  Value *PtrOp = GEP.getOperand(0);

  if (isa<UndefValue>(GEP.getOperand(0)))
    return ReplaceInstUsesWith(GEP, UndefValue::get(GEP.getType()));

  // Eliminate unneeded casts for indices.
  if (TD) {
    bool MadeChange = false;
    unsigned PtrSize = TD->getPointerSizeInBits();

    gep_type_iterator GTI = gep_type_begin(GEP);
    for (User::op_iterator I = GEP.op_begin() + 1, E = GEP.op_end();
         I != E; ++I, ++GTI) {
      if (!isa<SequentialType>(*GTI)) continue;

      // If we are using a wider index than needed for this platform, shrink it
      // to what we need.  If narrower, sign-extend it to what we need.  This
      // explicit cast can make subsequent optimizations more obvious.
      unsigned OpBits = cast<IntegerType>((*I)->getType())->getBitWidth();
      if (OpBits == PtrSize)
        continue;

      *I = Builder->CreateIntCast(*I, TD->getIntPtrType(GEP.getContext()),true);
      MadeChange = true;
    }
    if (MadeChange) return &GEP;
  }

  // Combine Indices - If the source pointer to this getelementptr instruction
  // is a getelementptr instruction, combine the indices of the two
  // getelementptr instructions into a single instruction.
  //
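  // For example (a sketch):
  //   %t = getelementptr i32* %P, i64 %a
  //   %r = getelementptr i32* %t, i64 %b
  // becomes
  //   %sum = add i64 %a, %b
  //   %r   = getelementptr i32* %P, i64 %sum
  //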
  if (GEPOperator *Src = dyn_cast<GEPOperator>(PtrOp)) {
    // Note that if our source is a gep chain itself that we wait for that
    // chain to be resolved before we perform this transformation.  This
    // avoids us creating a TON of code in some cases.
    //
    if (GetElementPtrInst *SrcGEP =
          dyn_cast<GetElementPtrInst>(Src->getOperand(0)))
      if (SrcGEP->getNumOperands() == 2)
        return 0;   // Wait until our source is folded to completion.

    SmallVector<Value*, 8> Indices;

    // Find out whether the last index in the source GEP is a sequential idx.
    bool EndsWithSequential = false;
    for (gep_type_iterator I = gep_type_begin(*Src), E = gep_type_end(*Src);
         I != E; ++I)
      EndsWithSequential = !(*I)->isStructTy();

    // Can we combine the two pointer arithmetics offsets?
    if (EndsWithSequential) {
      // Replace: gep (gep %P, long B), long A, ...
      // With:    T = long A+B; gep %P, T, ...
      //
      Value *Sum;
      Value *SO1 = Src->getOperand(Src->getNumOperands()-1);
      Value *GO1 = GEP.getOperand(1);
      if (SO1 == Constant::getNullValue(SO1->getType())) {
        Sum = GO1;
      } else if (GO1 == Constant::getNullValue(GO1->getType())) {
        Sum = SO1;
      } else {
        // If they aren't the same type, then the input hasn't been processed
        // by the loop above yet (which canonicalizes sequential index types to
        // intptr_t).  Just avoid transforming this until the input has been
        // normalized.
        if (SO1->getType() != GO1->getType())
          return 0;
        Sum = Builder->CreateAdd(SO1, GO1, PtrOp->getName()+".sum");
      }

      // Update the GEP in place if possible.
      if (Src->getNumOperands() == 2) {
        GEP.setOperand(0, Src->getOperand(0));
        GEP.setOperand(1, Sum);
        return &GEP;
      }
      Indices.append(Src->op_begin()+1, Src->op_end()-1);
      Indices.push_back(Sum);
      Indices.append(GEP.op_begin()+2, GEP.op_end());
    } else if (isa<Constant>(*GEP.idx_begin()) &&
               cast<Constant>(*GEP.idx_begin())->isNullValue() &&
               Src->getNumOperands() != 1) {
      // Otherwise we can do the fold if the first index of the GEP is a zero.
      Indices.append(Src->op_begin()+1, Src->op_end());
      Indices.append(GEP.idx_begin()+1, GEP.idx_end());
    }

    if (!Indices.empty())
      return (GEP.isInBounds() && Src->isInBounds()) ?
        GetElementPtrInst::CreateInBounds(Src->getOperand(0), Indices.begin(),
                                          Indices.end(), GEP.getName()) :
        GetElementPtrInst::Create(Src->getOperand(0), Indices.begin(),
                                  Indices.end(), GEP.getName());
  }

  // Handle gep(bitcast x) and gep(gep x, 0, 0, 0).
  Value *StrippedPtr = PtrOp->stripPointerCasts();
  if (StrippedPtr != PtrOp) {
    const PointerType *StrippedPtrTy =cast<PointerType>(StrippedPtr->getType());

    bool HasZeroPointerIndex = false;
    if (ConstantInt *C = dyn_cast<ConstantInt>(GEP.getOperand(1)))
      HasZeroPointerIndex = C->isZero();

    // Transform: GEP (bitcast [10 x i8]* X to [0 x i8]*), i32 0, ...
    // into     : GEP [10 x i8]* X, i32 0, ...
    //
    // Likewise, transform: GEP (bitcast i8* X to [0 x i8]*), i32 0, ...
    //           into     : GEP i8* X, ...
    //
    // This occurs when the program declares an array extern like "int X[];"
    if (HasZeroPointerIndex) {
      const PointerType *CPTy = cast<PointerType>(PtrOp->getType());
      if (const ArrayType *CATy =
          dyn_cast<ArrayType>(CPTy->getElementType())) {
        // GEP (bitcast i8* X to [0 x i8]*), i32 0, ... ?
        if (CATy->getElementType() == StrippedPtrTy->getElementType()) {
          // -> GEP i8* X, ...
          SmallVector<Value*, 8> Idx(GEP.idx_begin()+1, GEP.idx_end());
          GetElementPtrInst *Res =
            GetElementPtrInst::Create(StrippedPtr, Idx.begin(),
                                      Idx.end(), GEP.getName());
          Res->setIsInBounds(GEP.isInBounds());
          return Res;
        }

        if (const ArrayType *XATy =
              dyn_cast<ArrayType>(StrippedPtrTy->getElementType())){
          // GEP (bitcast [10 x i8]* X to [0 x i8]*), i32 0, ... ?
          if (CATy->getElementType() == XATy->getElementType()) {
            // -> GEP [10 x i8]* X, i32 0, ...
            // At this point, we know that the cast source type is a pointer
            // to an array of the same type as the destination pointer
            // array.  Because the array type is never stepped over (there
            // is a leading zero) we can fold the cast into this GEP.
            GEP.setOperand(0, StrippedPtr);
            return &GEP;
          }
        }
      }
    } else if (GEP.getNumOperands() == 2) {
      // Transform things like:
      // %t = getelementptr i32* bitcast ([2 x i32]* %str to i32*), i32 %V
      // into:  %t1 = getelementptr [2 x i32]* %str, i32 0, i32 %V; bitcast
      const Type *SrcElTy = StrippedPtrTy->getElementType();
      const Type *ResElTy=cast<PointerType>(PtrOp->getType())->getElementType();
      if (TD && SrcElTy->isArrayTy() &&
          TD->getTypeAllocSize(cast<ArrayType>(SrcElTy)->getElementType()) ==
          TD->getTypeAllocSize(ResElTy)) {
        Value *Idx[2];
        Idx[0] = Constant::getNullValue(Type::getInt32Ty(GEP.getContext()));
        Idx[1] = GEP.getOperand(1);
        Value *NewGEP = GEP.isInBounds() ?
          Builder->CreateInBoundsGEP(StrippedPtr, Idx, Idx + 2, GEP.getName()) :
          Builder->CreateGEP(StrippedPtr, Idx, Idx + 2, GEP.getName());
        // V and GEP are both pointer types --> BitCast
        return new BitCastInst(NewGEP, GEP.getType());
      }

      // Transform things like:
      // getelementptr i8* bitcast ([100 x double]* X to i8*), i32 %tmp
      //   (where tmp = 8*tmp2) into:
      // getelementptr [100 x double]* %arr, i32 0, i32 %tmp2; bitcast

      if (TD && SrcElTy->isArrayTy() && ResElTy->isIntegerTy(8)) {
        uint64_t ArrayEltSize =
            TD->getTypeAllocSize(cast<ArrayType>(SrcElTy)->getElementType());

        // Check to see if "tmp" is a scale by a multiple of ArrayEltSize.  We
        // allow either a mul, shift, or constant here.
        Value *NewIdx = 0;
        ConstantInt *Scale = 0;
        if (ArrayEltSize == 1) {
          NewIdx = GEP.getOperand(1);
          Scale = ConstantInt::get(cast<IntegerType>(NewIdx->getType()), 1);
        } else if (ConstantInt *CI = dyn_cast<ConstantInt>(GEP.getOperand(1))) {
          NewIdx = ConstantInt::get(CI->getType(), 1);
          Scale = CI;
        } else if (Instruction *Inst =dyn_cast<Instruction>(GEP.getOperand(1))){
          if (Inst->getOpcode() == Instruction::Shl &&
              isa<ConstantInt>(Inst->getOperand(1))) {
            ConstantInt *ShAmt = cast<ConstantInt>(Inst->getOperand(1));
            uint32_t ShAmtVal = ShAmt->getLimitedValue(64);
            Scale = ConstantInt::get(cast<IntegerType>(Inst->getType()),
                                     1ULL << ShAmtVal);
            NewIdx = Inst->getOperand(0);
          } else if (Inst->getOpcode() == Instruction::Mul &&
                     isa<ConstantInt>(Inst->getOperand(1))) {
            Scale = cast<ConstantInt>(Inst->getOperand(1));
            NewIdx = Inst->getOperand(0);
          }
        }

        // If the index will be to exactly the right offset with the scale taken
        // out, perform the transformation. Note, we don't know whether Scale is
        // signed or not. We'll use unsigned version of division/modulo
        // operation after making sure Scale doesn't have the sign bit set.
        if (ArrayEltSize && Scale && Scale->getSExtValue() >= 0LL &&
            Scale->getZExtValue() % ArrayEltSize == 0) {
          Scale = ConstantInt::get(Scale->getType(),
                                   Scale->getZExtValue() / ArrayEltSize);
          if (Scale->getZExtValue() != 1) {
            Constant *C = ConstantExpr::getIntegerCast(Scale, NewIdx->getType(),
                                                       false /*ZExt*/);
            NewIdx = Builder->CreateMul(NewIdx, C, "idxscale");
          }

          // Insert the new GEP instruction.
          Value *Idx[2];
          Idx[0] = Constant::getNullValue(Type::getInt32Ty(GEP.getContext()));
          Idx[1] = NewIdx;
          Value *NewGEP = GEP.isInBounds() ?
            Builder->CreateInBoundsGEP(StrippedPtr, Idx, Idx + 2,GEP.getName()):
            Builder->CreateGEP(StrippedPtr, Idx, Idx + 2, GEP.getName());
          // The NewGEP must be pointer typed, so must the old one -> BitCast
          return new BitCastInst(NewGEP, GEP.getType());
        }
      }
    }
  }

  /// See if we can simplify:
  ///   X = bitcast A* to B*
  ///   Y = gep X, <...constant indices...>
  /// into a gep of the original struct.  This is important for SROA and alias
  /// analysis of unions.  If "A" is also a bitcast, wait for A/X to be merged.
  if (BitCastInst *BCI = dyn_cast<BitCastInst>(PtrOp)) {
    if (TD &&
        !isa<BitCastInst>(BCI->getOperand(0)) && GEP.hasAllConstantIndices()) {
      // Determine how much the GEP moves the pointer.  We are guaranteed to get
      // a constant back from EmitGEPOffset.
      ConstantInt *OffsetV = cast<ConstantInt>(EmitGEPOffset(&GEP));
      int64_t Offset = OffsetV->getSExtValue();

      // If this GEP instruction doesn't move the pointer, just replace the GEP
      // with a bitcast of the real input to the dest type.
      if (Offset == 0) {
        // If the bitcast is of an allocation, and the allocation will be
        // converted to match the type of the cast, don't touch this.
        if (isa<AllocaInst>(BCI->getOperand(0)) ||
            isMalloc(BCI->getOperand(0))) {
          // See if the bitcast simplifies, if so, don't nuke this GEP yet.
          if (Instruction *I = visitBitCast(*BCI)) {
            if (I != BCI) {
              I->takeName(BCI);
              BCI->getParent()->getInstList().insert(BCI, I);
              ReplaceInstUsesWith(*BCI, I);
            }
            return &GEP;
          }
        }
        return new BitCastInst(BCI->getOperand(0), GEP.getType());
      }

      // Otherwise, if the offset is non-zero, we need to find out if there is a
      // field at Offset in 'A's type.  If so, we can pull the cast through the
      // GEP.
      SmallVector<Value*, 8> NewIndices;
      const Type *InTy =
        cast<PointerType>(BCI->getOperand(0)->getType())->getElementType();
      if (FindElementAtOffset(InTy, Offset, NewIndices)) {
        Value *NGEP = GEP.isInBounds() ?
          Builder->CreateInBoundsGEP(BCI->getOperand(0), NewIndices.begin(),
                                     NewIndices.end()) :
          Builder->CreateGEP(BCI->getOperand(0), NewIndices.begin(),
                             NewIndices.end());

        if (NGEP->getType() == GEP.getType())
          return ReplaceInstUsesWith(GEP, NGEP);
        NGEP->takeName(&GEP);
        return new BitCastInst(NGEP, GEP.getType());
      }
    }
  }

  return 0;
}

Instruction *InstCombiner::visitFree(Instruction &FI) {
  Value *Op = FI.getOperand(1);

  // free undef -> unreachable.
  if (isa<UndefValue>(Op)) {
    // Insert a new store to null because we cannot modify the CFG here.
    new StoreInst(ConstantInt::getTrue(FI.getContext()),
                  UndefValue::get(Type::getInt1PtrTy(FI.getContext())), &FI);
    return EraseInstFromFunction(FI);
  }

  // If we have 'free null' delete the instruction.  This can happen in stl
  // code when lots of inlining happens.
  if (isa<ConstantPointerNull>(Op))
    return EraseInstFromFunction(FI);

  // If we have a malloc call whose only use is a free call, delete both.
  if (isMalloc(Op)) {
    if (CallInst* CI = extractMallocCallFromBitCast(Op)) {
      if (Op->hasOneUse() && CI->hasOneUse()) {
        EraseInstFromFunction(FI);
        EraseInstFromFunction(*CI);
        return EraseInstFromFunction(*cast<Instruction>(Op));
      }
    } else {
      // Op is a call to malloc.
      if (Op->hasOneUse()) {
        EraseInstFromFunction(FI);
        return EraseInstFromFunction(*cast<Instruction>(Op));
      }
    }
  }

  return 0;
}


Instruction *InstCombiner::visitBranchInst(BranchInst &BI) {
  // Change br (not X), label True, label False to: br X, label False, True
  Value *X = 0;
  BasicBlock *TrueDest;
  BasicBlock *FalseDest;
  if (match(&BI, m_Br(m_Not(m_Value(X)), TrueDest, FalseDest)) &&
      !isa<Constant>(X)) {
    // Swap Destinations and condition...
    BI.setCondition(X);
    BI.setSuccessor(0, FalseDest);
    BI.setSuccessor(1, TrueDest);
    return &BI;
  }

  // Canonicalize fcmp_one -> fcmp_ueq by inverting the predicate and
  // swapping the branch destinations.
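  //
  // For example (a sketch):
  //   br (fcmp one %A, %B), label %T, label %F
  //     ==>  br (fcmp ueq %A, %B), label %F, label %T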
  FCmpInst::Predicate FPred; Value *Y;
  if (match(&BI, m_Br(m_FCmp(FPred, m_Value(X), m_Value(Y)),
                      TrueDest, FalseDest)) &&
      BI.getCondition()->hasOneUse())
    if (FPred == FCmpInst::FCMP_ONE || FPred == FCmpInst::FCMP_OLE ||
        FPred == FCmpInst::FCMP_OGE) {
      FCmpInst *Cond = cast<FCmpInst>(BI.getCondition());
      Cond->setPredicate(FCmpInst::getInversePredicate(FPred));

      // Swap Destinations and condition.
      BI.setSuccessor(0, FalseDest);
      BI.setSuccessor(1, TrueDest);
      Worklist.Add(Cond);
      return &BI;
    }

  // Canonicalize icmp_ne -> icmp_eq by inverting the predicate and
  // swapping the branch destinations.
  ICmpInst::Predicate IPred;
  if (match(&BI, m_Br(m_ICmp(IPred, m_Value(X), m_Value(Y)),
                      TrueDest, FalseDest)) &&
      BI.getCondition()->hasOneUse())
    if (IPred == ICmpInst::ICMP_NE  || IPred == ICmpInst::ICMP_ULE ||
        IPred == ICmpInst::ICMP_SLE || IPred == ICmpInst::ICMP_UGE ||
        IPred == ICmpInst::ICMP_SGE) {
      ICmpInst *Cond = cast<ICmpInst>(BI.getCondition());
      Cond->setPredicate(ICmpInst::getInversePredicate(IPred));
      // Swap Destinations and condition.
      BI.setSuccessor(0, FalseDest);
      BI.setSuccessor(1, TrueDest);
      Worklist.Add(Cond);
      return &BI;
    }

  return 0;
}

Instruction *InstCombiner::visitSwitchInst(SwitchInst &SI) {
  Value *Cond = SI.getCondition();
  if (Instruction *I = dyn_cast<Instruction>(Cond)) {
    if (I->getOpcode() == Instruction::Add)
      if (ConstantInt *AddRHS = dyn_cast<ConstantInt>(I->getOperand(1))) {
        // Change 'switch (X+4) case 1:' into 'switch (X) case -3'.
        for (unsigned i = 2, e = SI.getNumOperands(); i != e; i += 2)
          SI.setOperand(i,
                        ConstantExpr::getSub(cast<Constant>(SI.getOperand(i)),
                                             AddRHS));
        SI.setOperand(0, I->getOperand(0));
        Worklist.Add(I);
        return &SI;
      }
  }
  return 0;
}

Instruction *InstCombiner::visitExtractValueInst(ExtractValueInst &EV) {
  Value *Agg = EV.getAggregateOperand();

  if (!EV.hasIndices())
    return ReplaceInstUsesWith(EV, Agg);

  if (Constant *C = dyn_cast<Constant>(Agg)) {
    if (isa<UndefValue>(C))
      return ReplaceInstUsesWith(EV, UndefValue::get(EV.getType()));

    if (isa<ConstantAggregateZero>(C))
      return ReplaceInstUsesWith(EV, Constant::getNullValue(EV.getType()));

    if (isa<ConstantArray>(C) || isa<ConstantStruct>(C)) {
      // Extract the element indexed by the first index out of the constant.
      Value *V = C->getOperand(*EV.idx_begin());
      if (EV.getNumIndices() > 1)
        // Extract the remaining indices out of the constant indexed by the
        // first index.
        return ExtractValueInst::Create(V, EV.idx_begin() + 1, EV.idx_end());
      else
        return ReplaceInstUsesWith(EV, V);
    }
    return 0; // Can't handle other constants.
  }
  if (InsertValueInst *IV = dyn_cast<InsertValueInst>(Agg)) {
    // We're extracting from an insertvalue instruction, compare the indices.
    const unsigned *exti, *exte, *insi, *inse;
    for (exti = EV.idx_begin(), insi = IV->idx_begin(),
         exte = EV.idx_end(), inse = IV->idx_end();
         exti != exte && insi != inse;
         ++exti, ++insi) {
      if (*insi != *exti)
        // The insert and extract both reference distinctly different elements.
        // This means the extract is not influenced by the insert, and we can
        // replace the aggregate operand of the extract with the aggregate
        // operand of the insert. i.e., replace
        // %I = insertvalue { i32, { i32 } } %A, { i32 } { i32 42 }, 1
        // %E = extractvalue { i32, { i32 } } %I, 0
        // with
        // %E = extractvalue { i32, { i32 } } %A, 0
        return ExtractValueInst::Create(IV->getAggregateOperand(),
                                        EV.idx_begin(), EV.idx_end());
    }
    if (exti == exte && insi == inse)
      // Both iterators are at the end: Index lists are identical. Replace
      // %B = insertvalue { i32, { i32 } } %A, i32 42, 1, 0
      // %C = extractvalue { i32, { i32 } } %B, 1, 0
      // with "i32 42"
      return ReplaceInstUsesWith(EV, IV->getInsertedValueOperand());
    if (exti == exte) {
      // The extract list is a prefix of the insert list. i.e. replace
      // %I = insertvalue { i32, { i32 } } %A, i32 42, 1, 0
      // %E = extractvalue { i32, { i32 } } %I, 1
      // with
      // %X = extractvalue { i32, { i32 } } %A, 1
      // %E = insertvalue { i32 } %X, i32 42, 0
      // by switching the order of the insert and extract (though the
      // insertvalue should be left in, since it may have other uses).
      Value *NewEV = Builder->CreateExtractValue(IV->getAggregateOperand(),
                                                 EV.idx_begin(), EV.idx_end());
      return InsertValueInst::Create(NewEV, IV->getInsertedValueOperand(),
                                     insi, inse);
    }
    if (insi == inse)
      // The insert list is a prefix of the extract list.  We can simply remove
      // the common indices from the extract and make it operate on the
      // inserted value instead of the insertvalue result. i.e., replace
      // %I = insertvalue { i32, { i32 } } %A, { i32 } { i32 42 }, 1
      // %E = extractvalue { i32, { i32 } } %I, 1, 0
      // with
      // %E = extractvalue { i32 } { i32 42 }, 0
      return ExtractValueInst::Create(IV->getInsertedValueOperand(),
                                      exti, exte);
  }
  if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(Agg)) {
    // We're extracting from an intrinsic, see if we're the only user, which
    // allows us to simplify multiple-result intrinsics to simpler things that
    // just get one value.
    if (II->hasOneUse()) {
      // Check if we're grabbing the overflow bit or the result of a 'with
      // overflow' intrinsic.  If it's the latter we can remove the intrinsic
      // and replace it with a traditional binary instruction.
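      //
      // For example (a sketch):
      //   %s = call {i32, i1} @llvm.sadd.with.overflow.i32(i32 %a, i32 %b)
      //   %v = extractvalue {i32, i1} %s, 0
      // becomes "%v = add i32 %a, %b" when %s has no other users.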
      switch (II->getIntrinsicID()) {
      case Intrinsic::uadd_with_overflow:
      case Intrinsic::sadd_with_overflow:
        if (*EV.idx_begin() == 0) {  // Normal result.
          Value *LHS = II->getOperand(1), *RHS = II->getOperand(2);
          II->replaceAllUsesWith(UndefValue::get(II->getType()));
          EraseInstFromFunction(*II);
          return BinaryOperator::CreateAdd(LHS, RHS);
        }
        break;
      case Intrinsic::usub_with_overflow:
      case Intrinsic::ssub_with_overflow:
        if (*EV.idx_begin() == 0) {  // Normal result.
          Value *LHS = II->getOperand(1), *RHS = II->getOperand(2);
          II->replaceAllUsesWith(UndefValue::get(II->getType()));
          EraseInstFromFunction(*II);
          return BinaryOperator::CreateSub(LHS, RHS);
        }
        break;
      case Intrinsic::umul_with_overflow:
      case Intrinsic::smul_with_overflow:
        if (*EV.idx_begin() == 0) {  // Normal result.
          Value *LHS = II->getOperand(1), *RHS = II->getOperand(2);
          II->replaceAllUsesWith(UndefValue::get(II->getType()));
          EraseInstFromFunction(*II);
          return BinaryOperator::CreateMul(LHS, RHS);
        }
        break;
      default:
        break;
      }
    }
  }
  // Can't simplify extracts from other values.  Note that nested extracts are
  // already simplified implicitly by the above: extract (extract (insert))
  // is translated into extract (insert (extract)) first, and then just the
  // value inserted, if appropriate.
  return 0;
}




/// TryToSinkInstruction - Try to move the specified instruction from its
/// current block into the beginning of DestBlock, which can only happen if it's
/// safe to move the instruction past all of the instructions between it and the
/// end of its block.
static bool TryToSinkInstruction(Instruction *I, BasicBlock *DestBlock) {
  assert(I->hasOneUse() && "Invariants didn't hold!");

  // Cannot move control-flow-involving, volatile loads, vaarg, etc.
  if (isa<PHINode>(I) || I->mayHaveSideEffects() || isa<TerminatorInst>(I))
    return false;

  // Do not sink alloca instructions out of the entry block.
  if (isa<AllocaInst>(I) && I->getParent() ==
        &DestBlock->getParent()->getEntryBlock())
    return false;

  // We can only sink load instructions if there is nothing between the load and
  // the end of block that could change the value.
  if (I->mayReadFromMemory()) {
    for (BasicBlock::iterator Scan = I, E = I->getParent()->end();
         Scan != E; ++Scan)
      if (Scan->mayWriteToMemory())
        return false;
  }

  BasicBlock::iterator InsertPos = DestBlock->getFirstNonPHI();

  I->moveBefore(InsertPos);
  ++NumSunkInst;
  return true;
}


/// AddReachableCodeToWorklist - Walk the function in depth-first order, adding
/// all reachable code to the worklist.
///
/// This has a couple of tricks to make the code faster and more powerful.  In
/// particular, we constant fold and DCE instructions as we go, to avoid adding
/// them to the worklist (this significantly speeds up instcombine on code where
/// many instructions are dead or constant).  Additionally, if we find a branch
/// whose condition is a known constant, we only visit the reachable successors.
///
static bool AddReachableCodeToWorklist(BasicBlock *BB,
                                       SmallPtrSet<BasicBlock*, 64> &Visited,
                                       InstCombiner &IC,
                                       const TargetData *TD) {
  bool MadeIRChange = false;
  SmallVector<BasicBlock*, 256> Worklist;
  Worklist.push_back(BB);

  std::vector<Instruction*> InstrsForInstCombineWorklist;
  InstrsForInstCombineWorklist.reserve(128);

  SmallPtrSet<ConstantExpr*, 64> FoldedConstants;

  do {
    BB = Worklist.pop_back_val();

    // We have now visited this block!  If we've already been here, ignore it.
    if (!Visited.insert(BB)) continue;

    for (BasicBlock::iterator BBI = BB->begin(), E = BB->end(); BBI != E; ) {
      Instruction *Inst = BBI++;

      // DCE instruction if trivially dead.
      if (isInstructionTriviallyDead(Inst)) {
        ++NumDeadInst;
        DEBUG(errs() << "IC: DCE: " << *Inst << '\n');
        Inst->eraseFromParent();
        continue;
      }

      // ConstantProp instruction if trivially constant.
      if (!Inst->use_empty() && isa<Constant>(Inst->getOperand(0)))
        if (Constant *C = ConstantFoldInstruction(Inst, TD)) {
          DEBUG(errs() << "IC: ConstFold to: " << *C << " from: "
                       << *Inst << '\n');
          Inst->replaceAllUsesWith(C);
          ++NumConstProp;
          Inst->eraseFromParent();
          continue;
        }

      if (TD) {
        // See if we can constant fold its operands.
        for (User::op_iterator i = Inst->op_begin(), e = Inst->op_end();
             i != e; ++i) {
          ConstantExpr *CE = dyn_cast<ConstantExpr>(i);
          if (CE == 0) continue;

          // If we already folded this constant, don't try again.
          if (!FoldedConstants.insert(CE))
            continue;

          Constant *NewC = ConstantFoldConstantExpression(CE, TD);
          if (NewC && NewC != CE) {
            *i = NewC;
            MadeIRChange = true;
          }
        }
      }

      InstrsForInstCombineWorklist.push_back(Inst);
    }

    // Recursively visit successors.  If this is a branch or switch on a
    // constant, only visit the reachable successor.
    TerminatorInst *TI = BB->getTerminator();
    if (BranchInst *BI = dyn_cast<BranchInst>(TI)) {
      if (BI->isConditional() && isa<ConstantInt>(BI->getCondition())) {
        bool CondVal = cast<ConstantInt>(BI->getCondition())->getZExtValue();
        BasicBlock *ReachableBB = BI->getSuccessor(!CondVal);
        Worklist.push_back(ReachableBB);
        continue;
      }
    } else if (SwitchInst *SI = dyn_cast<SwitchInst>(TI)) {
      if (ConstantInt *Cond = dyn_cast<ConstantInt>(SI->getCondition())) {
        // See if this is an explicit destination.
        for (unsigned i = 1, e = SI->getNumSuccessors(); i != e; ++i)
          if (SI->getCaseValue(i) == Cond) {
            BasicBlock *ReachableBB = SI->getSuccessor(i);
            Worklist.push_back(ReachableBB);
            continue;
          }

        // Otherwise it is the default destination.
        Worklist.push_back(SI->getSuccessor(0));
        continue;
      }
    }

    for (unsigned i = 0, e = TI->getNumSuccessors(); i != e; ++i)
      Worklist.push_back(TI->getSuccessor(i));
  } while (!Worklist.empty());

  // Once we've found all of the instructions to add to instcombine's worklist,
  // add them in reverse order.  This way instcombine will visit from the top
  // of the function down.  This jives well with the way that it adds all uses
  // of instructions to the worklist after doing a transformation, thus avoiding
  // some N^2 behavior in pathological cases.
  IC.Worklist.AddInitialGroup(&InstrsForInstCombineWorklist[0],
                              InstrsForInstCombineWorklist.size());

  return MadeIRChange;
}

bool InstCombiner::DoOneIteration(Function &F, unsigned Iteration) {
  MadeIRChange = false;

  DEBUG(errs() << "\n\nINSTCOMBINE ITERATION #" << Iteration << " on "
               << F.getNameStr() << "\n");

  {
    // Do a depth-first traversal of the function, populate the worklist with
    // the reachable instructions.  Ignore blocks that are not reachable.  Keep
    // track of which blocks we visit.
    SmallPtrSet<BasicBlock*, 64> Visited;
    MadeIRChange |= AddReachableCodeToWorklist(F.begin(), Visited, *this, TD);

    // Do a quick scan over the function.  If we find any blocks that are
    // unreachable, remove any instructions inside of them.  This prevents
    // the instcombine code from having to deal with some bad special cases.
    for (Function::iterator BB = F.begin(), E = F.end(); BB != E; ++BB)
      if (!Visited.count(BB)) {
        Instruction *Term = BB->getTerminator();
        while (Term != BB->begin()) {   // Remove instrs bottom-up.
          BasicBlock::iterator I = Term; --I;

          DEBUG(errs() << "IC: DCE: " << *I << '\n');
          // A debug intrinsic shouldn't force another iteration if we weren't
          // going to do one without it.
          if (!isa<DbgInfoIntrinsic>(I)) {
            ++NumDeadInst;
            MadeIRChange = true;
          }

          // If I is not void type then replaceAllUsesWith undef.
          // This allows ValueHandlers and custom metadata to adjust themselves.
          if (!I->getType()->isVoidTy())
            I->replaceAllUsesWith(UndefValue::get(I->getType()));
          I->eraseFromParent();
        }
      }
  }

  while (!Worklist.isEmpty()) {
    Instruction *I = Worklist.RemoveOne();
    if (I == 0) continue;  // skip null values.

    // Check to see if we can DCE the instruction.
    if (isInstructionTriviallyDead(I)) {
      DEBUG(errs() << "IC: DCE: " << *I << '\n');
      EraseInstFromFunction(*I);
      ++NumDeadInst;
      MadeIRChange = true;
      continue;
    }

    // Instruction isn't dead, see if we can constant propagate it.
    if (!I->use_empty() && isa<Constant>(I->getOperand(0)))
      if (Constant *C = ConstantFoldInstruction(I, TD)) {
        DEBUG(errs() << "IC: ConstFold to: " << *C << " from: " << *I << '\n');

        // Add operands to the worklist.
        ReplaceInstUsesWith(*I, C);
        ++NumConstProp;
        EraseInstFromFunction(*I);
        MadeIRChange = true;
        continue;
      }

    // See if we can trivially sink this instruction to a successor basic block.
    if (I->hasOneUse()) {
      BasicBlock *BB = I->getParent();
      Instruction *UserInst = cast<Instruction>(I->use_back());
      BasicBlock *UserParent;

      // Get the block the use occurs in.
      if (PHINode *PN = dyn_cast<PHINode>(UserInst))
        UserParent = PN->getIncomingBlock(I->use_begin().getUse());
      else
        UserParent = UserInst->getParent();

      if (UserParent != BB) {
        bool UserIsSuccessor = false;
        // See if the user is one of our successors.
        for (succ_iterator SI = succ_begin(BB), E = succ_end(BB); SI != E; ++SI)
          if (*SI == UserParent) {
            UserIsSuccessor = true;
            break;
          }

        // If the user is one of our immediate successors, and if that successor
        // only has us as a predecessor (we'd have to split the critical edge
        // otherwise), we can keep going.
        if (UserIsSuccessor && UserParent->getSinglePredecessor())
          // Okay, the CFG is simple enough, try to sink this instruction.
          MadeIRChange |= TryToSinkInstruction(I, UserParent);
      }
    }

    // Now that we have an instruction, try combining it to simplify it.
    Builder->SetInsertPoint(I->getParent(), I);

#ifndef NDEBUG
    std::string OrigI;
#endif
    DEBUG(raw_string_ostream SS(OrigI); I->print(SS); OrigI = SS.str(););
    DEBUG(errs() << "IC: Visiting: " << OrigI << '\n');

    if (Instruction *Result = visit(*I)) {
      ++NumCombined;
      // Should we replace the old instruction with a new one?
      if (Result != I) {
        DEBUG(errs() << "IC: Old = " << *I << '\n'
                     << "    New = " << *Result << '\n');

        // Everything uses the new instruction now.
        I->replaceAllUsesWith(Result);

        // Push the new instruction and any users onto the worklist.
        Worklist.Add(Result);
        Worklist.AddUsersToWorkList(*Result);

        // Move the name to the new instruction first.
        Result->takeName(I);

        // Insert the new instruction into the basic block...
        BasicBlock *InstParent = I->getParent();
        BasicBlock::iterator InsertPos = I;

        if (!isa<PHINode>(Result))        // If combining a PHI, don't insert
          while (isa<PHINode>(InsertPos)) // middle of a block of PHIs.
            ++InsertPos;

        InstParent->getInstList().insert(InsertPos, Result);

        EraseInstFromFunction(*I);
      } else {
#ifndef NDEBUG
        DEBUG(errs() << "IC: Mod = " << OrigI << '\n'
                     << "    New = " << *I << '\n');
#endif

        // If the instruction was modified, it's possible that it is now dead.
        // If so, remove it.
        if (isInstructionTriviallyDead(I)) {
          EraseInstFromFunction(*I);
        } else {
          Worklist.Add(I);
          Worklist.AddUsersToWorkList(*I);
        }
      }
      MadeIRChange = true;
    }
  }

  Worklist.Zap();
  return MadeIRChange;
}


bool InstCombiner::runOnFunction(Function &F) {
  MustPreserveLCSSA = mustPreserveAnalysisID(LCSSAID);
  TD = getAnalysisIfAvailable<TargetData>();


  /// Builder - This is an IRBuilder that automatically inserts new
  /// instructions into the worklist when they are created.
  IRBuilder<true, TargetFolder, InstCombineIRInserter>
    TheBuilder(F.getContext(), TargetFolder(TD),
               InstCombineIRInserter(Worklist));
  Builder = &TheBuilder;

  bool EverMadeChange = false;

  // Iterate while there is work to do.
  unsigned Iteration = 0;
  while (DoOneIteration(F, Iteration++))
    EverMadeChange = true;

  Builder = 0;
  return EverMadeChange;
}

FunctionPass *llvm::createInstructionCombiningPass() {
  return new InstCombiner();
}