2010-01-05 13:57:49 +08:00
|
|
|
//===- InstCombineLoadStoreAlloca.cpp -------------------------------------===//
|
|
|
|
//
|
|
|
|
// The LLVM Compiler Infrastructure
|
|
|
|
//
|
|
|
|
// This file is distributed under the University of Illinois Open Source
|
|
|
|
// License. See LICENSE.TXT for details.
|
|
|
|
//
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
//
|
|
|
|
// This file implements the visit functions for load, store and alloca.
|
|
|
|
//
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
|
|
|
|
#include "InstCombine.h"
|
|
|
|
#include "llvm/IntrinsicInst.h"
|
2010-05-29 00:19:17 +08:00
|
|
|
#include "llvm/Analysis/Loads.h"
|
2010-01-05 13:57:49 +08:00
|
|
|
#include "llvm/Target/TargetData.h"
|
|
|
|
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
|
|
|
|
#include "llvm/Transforms/Utils/Local.h"
|
|
|
|
#include "llvm/ADT/Statistic.h"
|
|
|
|
using namespace llvm;
|
|
|
|
|
|
|
|
STATISTIC(NumDeadStore, "Number of dead stores eliminated");
|
|
|
|
|
|
|
|
/// visitAllocaInst - Canonicalize and simplify alloca instructions:
/// canonicalize the array-size operand to intptr_t, turn constant array
/// allocations into allocations of an array type, default the alignment,
/// merge zero-sized allocas in the entry block, and finally defer to the
/// generic allocation-site handler for dead allocas.
Instruction *InstCombiner::visitAllocaInst(AllocaInst &AI) {
  // Ensure that the alloca array size argument has type intptr_t, so that
  // any casting is exposed early.
  if (TD) {
    Type *IntPtrTy = TD->getIntPtrType(AI.getContext());
    if (AI.getArraySize()->getType() != IntPtrTy) {
      Value *V = Builder->CreateIntCast(AI.getArraySize(),
                                        IntPtrTy, false);
      AI.setOperand(0, V);
      // Returning &AI (with an operand changed) tells the driver the
      // instruction was modified in place and should be revisited.
      return &AI;
    }
  }

  // Convert: alloca Ty, C - where C is a constant != 1 into: alloca [C x Ty], 1
  if (AI.isArrayAllocation()) {  // Check C != 1
    if (const ConstantInt *C = dyn_cast<ConstantInt>(AI.getArraySize())) {
      Type *NewTy =
        ArrayType::get(AI.getAllocatedType(), C->getZExtValue());
      AllocaInst *New = Builder->CreateAlloca(NewTy, 0, AI.getName());
      New->setAlignment(AI.getAlignment());

      // Scan to the end of the allocation instructions, to skip over a block of
      // allocas if possible...also skip interleaved debug info
      //
      BasicBlock::iterator It = New;
      while (isa<AllocaInst>(*It) || isa<DbgInfoIntrinsic>(*It)) ++It;

      // Now that It is pointing to the first non-allocation-inst in the block,
      // insert our getelementptr instruction...
      //
      Value *NullIdx = Constant::getNullValue(Type::getInt32Ty(AI.getContext()));
      Value *Idx[2];
      Idx[0] = NullIdx;
      Idx[1] = NullIdx;
      Instruction *GEP =
        GetElementPtrInst::CreateInBounds(New, Idx, New->getName()+".sub");
      InsertNewInstBefore(GEP, *It);

      // Now make everything use the getelementptr instead of the original
      // allocation.
      return ReplaceInstUsesWith(AI, GEP);
    } else if (isa<UndefValue>(AI.getArraySize())) {
      // An undef array size means the alloca is effectively unspecified;
      // replace its value with null.
      return ReplaceInstUsesWith(AI, Constant::getNullValue(AI.getType()));
    }
  }

  if (TD && AI.getAllocatedType()->isSized()) {
    // If the alignment is 0 (unspecified), assign it the preferred alignment.
    if (AI.getAlignment() == 0)
      AI.setAlignment(TD->getPrefTypeAlignment(AI.getAllocatedType()));

    // Move all alloca's of zero byte objects to the entry block and merge them
    // together.  Note that we only do this for alloca's, because malloc should
    // allocate and return a unique pointer, even for a zero byte allocation.
    if (TD->getTypeAllocSize(AI.getAllocatedType()) == 0) {
      // For a zero sized alloca there is no point in doing an array allocation.
      // This is helpful if the array size is a complicated expression not used
      // elsewhere.
      if (AI.isArrayAllocation()) {
        AI.setOperand(0, ConstantInt::get(AI.getArraySize()->getType(), 1));
        return &AI;
      }

      // Get the first instruction in the entry block.
      BasicBlock &EntryBlock = AI.getParent()->getParent()->getEntryBlock();
      Instruction *FirstInst = EntryBlock.getFirstNonPHIOrDbg();
      if (FirstInst != &AI) {
        // If the entry block doesn't start with a zero-size alloca then move
        // this one to the start of the entry block.  There is no problem with
        // dominance as the array size was forced to a constant earlier already.
        AllocaInst *EntryAI = dyn_cast<AllocaInst>(FirstInst);
        if (!EntryAI || !EntryAI->getAllocatedType()->isSized() ||
            TD->getTypeAllocSize(EntryAI->getAllocatedType()) != 0) {
          AI.moveBefore(FirstInst);
          return &AI;
        }

        // Replace this zero-sized alloca with the one at the start of the entry
        // block after ensuring that the address will be aligned enough for both
        // types.
        unsigned MaxAlign =
          std::max(TD->getPrefTypeAlignment(EntryAI->getAllocatedType()),
                   TD->getPrefTypeAlignment(AI.getAllocatedType()));
        EntryAI->setAlignment(MaxAlign);
        if (AI.getType() != EntryAI->getType())
          return new BitCastInst(EntryAI, AI.getType());
        return ReplaceInstUsesWith(AI, EntryAI);
      }
    }
  }

  // At last, use the generic allocation site handler to aggressively remove
  // unused allocas.
  return visitAllocSite(AI);
}
|
|
|
|
|
|
|
|
|
|
|
|
/// InstCombineLoadCast - Fold 'load (cast P)' -> cast (load P)' when possible.
/// The load operand must already be known to be a cast (checked by callers).
/// Returns the replacement instruction, or 0 if no transform applies.
static Instruction *InstCombineLoadCast(InstCombiner &IC, LoadInst &LI,
                                        const TargetData *TD) {
  User *CI = cast<User>(LI.getOperand(0));
  Value *CastOp = CI->getOperand(0);

  PointerType *DestTy = cast<PointerType>(CI->getType());
  Type *DestPTy = DestTy->getElementType();
  if (PointerType *SrcTy = dyn_cast<PointerType>(CastOp->getType())) {

    // If the address spaces don't match, don't eliminate the cast.
    if (DestTy->getAddressSpace() != SrcTy->getAddressSpace())
      return 0;

    Type *SrcPTy = SrcTy->getElementType();

    if (DestPTy->isIntegerTy() || DestPTy->isPointerTy() ||
        DestPTy->isVectorTy()) {
      // If the source is an array, the code below will not succeed.  Check to
      // see if a trivial 'gep P, 0, 0' will help matters.  Only do this for
      // constants.
      if (ArrayType *ASrcTy = dyn_cast<ArrayType>(SrcPTy))
        if (Constant *CSrc = dyn_cast<Constant>(CastOp))
          if (ASrcTy->getNumElements() != 0) {
            Value *Idxs[2];
            Idxs[0] = Constant::getNullValue(Type::getInt32Ty(LI.getContext()));
            Idxs[1] = Idxs[0];
            CastOp = ConstantExpr::getGetElementPtr(CSrc, Idxs);
            // Re-seat SrcTy/SrcPTy to reflect the stepped-through array type.
            SrcTy = cast<PointerType>(CastOp->getType());
            SrcPTy = SrcTy->getElementType();
          }

      if (IC.getTargetData() &&
          (SrcPTy->isIntegerTy() || SrcPTy->isPointerTy() ||
           SrcPTy->isVectorTy()) &&
          // Do not allow turning this into a load of an integer, which is then
          // casted to a pointer, this pessimizes pointer analysis a lot.
          (SrcPTy->isPointerTy() == LI.getType()->isPointerTy()) &&
          IC.getTargetData()->getTypeSizeInBits(SrcPTy) ==
               IC.getTargetData()->getTypeSizeInBits(DestPTy)) {

        // Okay, we are casting from one integer or pointer type to another of
        // the same size.  Instead of casting the pointer before the load, cast
        // the result of the loaded value.
        LoadInst *NewLoad =
          IC.Builder->CreateLoad(CastOp, LI.isVolatile(), CI->getName());
        // Preserve alignment and atomicity/ordering of the original load.
        NewLoad->setAlignment(LI.getAlignment());
        NewLoad->setAtomic(LI.getOrdering(), LI.getSynchScope());
        // Now cast the result of the load.
        return new BitCastInst(NewLoad, LI.getType());
      }
    }
  }
  return 0;
}
|
|
|
|
|
|
|
|
/// visitLoadInst - Simplify load instructions: improve alignment, sink casts,
/// do trivial store-to-load forwarding / load CSE, turn loads of null/undef
/// pointers into unreachable markers, and distribute loads over selects.
Instruction *InstCombiner::visitLoadInst(LoadInst &LI) {
  Value *Op = LI.getOperand(0);

  // Attempt to improve the alignment.
  if (TD) {
    unsigned KnownAlign =
      getOrEnforceKnownAlignment(Op, TD->getPrefTypeAlignment(LI.getType()),TD);
    unsigned LoadAlign = LI.getAlignment();
    // Alignment 0 means "ABI alignment of the loaded type".
    unsigned EffectiveLoadAlign = LoadAlign != 0 ? LoadAlign :
      TD->getABITypeAlignment(LI.getType());

    if (KnownAlign > EffectiveLoadAlign)
      LI.setAlignment(KnownAlign);
    else if (LoadAlign == 0)
      LI.setAlignment(EffectiveLoadAlign);
  }

  // load (cast X) --> cast (load X) iff safe.
  if (isa<CastInst>(Op))
    if (Instruction *Res = InstCombineLoadCast(*this, LI, TD))
      return Res;

  // None of the following transforms are legal for volatile/atomic loads.
  // FIXME: Some of it is okay for atomic loads; needs refactoring.
  if (!LI.isSimple()) return 0;

  // Do really simple store-to-load forwarding and load CSE, to catch cases
  // where there are several consecutive memory accesses to the same location,
  // separated by a few arithmetic operations.
  BasicBlock::iterator BBI = &LI;
  if (Value *AvailableVal = FindAvailableLoadedValue(Op, LI.getParent(), BBI,6))
    return ReplaceInstUsesWith(LI, AvailableVal);

  // load(gep null, ...) -> unreachable
  if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(Op)) {
    const Value *GEPI0 = GEPI->getOperand(0);
    // TODO: Consider a target hook for valid address spaces for this xform.
    if (isa<ConstantPointerNull>(GEPI0) && GEPI->getPointerAddressSpace() == 0){
      // Insert a new store to null instruction before the load to indicate
      // that this code is not reachable.  We do this instead of inserting
      // an unreachable instruction directly because we cannot modify the
      // CFG.
      new StoreInst(UndefValue::get(LI.getType()),
                    Constant::getNullValue(Op->getType()), &LI);
      return ReplaceInstUsesWith(LI, UndefValue::get(LI.getType()));
    }
  }

  // load null/undef -> unreachable
  // TODO: Consider a target hook for valid address spaces for this xform.
  if (isa<UndefValue>(Op) ||
      (isa<ConstantPointerNull>(Op) && LI.getPointerAddressSpace() == 0)) {
    // Insert a new store to null instruction before the load to indicate that
    // this code is not reachable.  We do this instead of inserting an
    // unreachable instruction directly because we cannot modify the CFG.
    new StoreInst(UndefValue::get(LI.getType()),
                  Constant::getNullValue(Op->getType()), &LI);
    return ReplaceInstUsesWith(LI, UndefValue::get(LI.getType()));
  }

  // Instcombine load (constantexpr_cast global) -> cast (load global)
  if (ConstantExpr *CE = dyn_cast<ConstantExpr>(Op))
    if (CE->isCast())
      if (Instruction *Res = InstCombineLoadCast(*this, LI, TD))
        return Res;

  if (Op->hasOneUse()) {
    // Change select and PHI nodes to select values instead of addresses: this
    // helps alias analysis out a lot, allows many others simplifications, and
    // exposes redundancy in the code.
    //
    // Note that we cannot do the transformation unless we know that the
    // introduced loads cannot trap!  Something like this is valid as long as
    // the condition is always false: load (select bool %C, int* null, int* %G),
    // but it would not be valid if we transformed it to load from null
    // unconditionally.
    //
    if (SelectInst *SI = dyn_cast<SelectInst>(Op)) {
      // load (select (Cond, &V1, &V2))  --> select(Cond, load &V1, load &V2).
      unsigned Align = LI.getAlignment();
      if (isSafeToLoadUnconditionally(SI->getOperand(1), SI, Align, TD) &&
          isSafeToLoadUnconditionally(SI->getOperand(2), SI, Align, TD)) {
        LoadInst *V1 = Builder->CreateLoad(SI->getOperand(1),
                                           SI->getOperand(1)->getName()+".val");
        LoadInst *V2 = Builder->CreateLoad(SI->getOperand(2),
                                           SI->getOperand(2)->getName()+".val");
        V1->setAlignment(Align);
        V2->setAlignment(Align);
        return SelectInst::Create(SI->getCondition(), V1, V2);
      }

      // load (select (cond, null, P)) -> load P
      if (Constant *C = dyn_cast<Constant>(SI->getOperand(1)))
        if (C->isNullValue()) {
          LI.setOperand(0, SI->getOperand(2));
          return &LI;
        }

      // load (select (cond, P, null)) -> load P
      if (Constant *C = dyn_cast<Constant>(SI->getOperand(2)))
        if (C->isNullValue()) {
          LI.setOperand(0, SI->getOperand(1));
          return &LI;
        }
    }
  }
  return 0;
}
|
|
|
|
|
|
|
|
/// InstCombineStoreToCast - Fold store V, (cast P) -> store (cast V), P
/// when possible.  This makes it generally easy to do alias analysis and/or
/// SROA/mem2reg of the memory object.  The store's pointer operand must
/// already be known to be a cast (checked by callers).  Returns the
/// modified store, or 0 if no transform applies.
static Instruction *InstCombineStoreToCast(InstCombiner &IC, StoreInst &SI) {
  User *CI = cast<User>(SI.getOperand(1));
  Value *CastOp = CI->getOperand(0);

  Type *DestPTy = cast<PointerType>(CI->getType())->getElementType();
  PointerType *SrcTy = dyn_cast<PointerType>(CastOp->getType());
  if (SrcTy == 0) return 0;

  Type *SrcPTy = SrcTy->getElementType();

  if (!DestPTy->isIntegerTy() && !DestPTy->isPointerTy())
    return 0;

  /// NewGEPIndices - If SrcPTy is an aggregate type, we can emit a "noop gep"
  /// to its first element.  This allows us to handle things like:
  ///   store i32 xxx, (bitcast {foo*, float}* %P to i32*)
  /// on 32-bit hosts.
  SmallVector<Value*, 4> NewGEPIndices;

  // If the source is an array, the code below will not succeed.  Check to
  // see if a trivial 'gep P, 0, 0' will help matters.  Only do this for
  // constants.
  if (SrcPTy->isArrayTy() || SrcPTy->isStructTy()) {
    // Index through pointer.
    Constant *Zero = Constant::getNullValue(Type::getInt32Ty(SI.getContext()));
    NewGEPIndices.push_back(Zero);

    // Drill down through nested structs/arrays to the innermost first field,
    // collecting a zero index for each level.
    while (1) {
      if (StructType *STy = dyn_cast<StructType>(SrcPTy)) {
        if (!STy->getNumElements()) /* Struct can be empty {} */
          break;
        NewGEPIndices.push_back(Zero);
        SrcPTy = STy->getElementType(0);
      } else if (ArrayType *ATy = dyn_cast<ArrayType>(SrcPTy)) {
        NewGEPIndices.push_back(Zero);
        SrcPTy = ATy->getElementType();
      } else {
        break;
      }
    }

    SrcTy = PointerType::get(SrcPTy, SrcTy->getAddressSpace());
  }

  if (!SrcPTy->isIntegerTy() && !SrcPTy->isPointerTy())
    return 0;

  // If the pointers point into different address spaces or if they point to
  // values with different sizes, we can't do the transformation.
  if (!IC.getTargetData() ||
      SrcTy->getAddressSpace() !=
        cast<PointerType>(CI->getType())->getAddressSpace() ||
      IC.getTargetData()->getTypeSizeInBits(SrcPTy) !=
      IC.getTargetData()->getTypeSizeInBits(DestPTy))
    return 0;

  // Okay, we are casting from one integer or pointer type to another of
  // the same size.  Instead of casting the pointer before
  // the store, cast the value to be stored.
  Value *NewCast;
  Value *SIOp0 = SI.getOperand(0);
  Instruction::CastOps opcode = Instruction::BitCast;
  Type* CastSrcTy = SIOp0->getType();
  Type* CastDstTy = SrcPTy;
  if (CastDstTy->isPointerTy()) {
    if (CastSrcTy->isIntegerTy())
      opcode = Instruction::IntToPtr;
  } else if (CastDstTy->isIntegerTy()) {
    if (SIOp0->getType()->isPointerTy())
      opcode = Instruction::PtrToInt;
  }

  // SIOp0 is a pointer to aggregate and this is a store to the first field,
  // emit a GEP to index into its first field.
  if (!NewGEPIndices.empty())
    CastOp = IC.Builder->CreateInBoundsGEP(CastOp, NewGEPIndices);

  NewCast = IC.Builder->CreateCast(opcode, SIOp0, CastDstTy,
                                   SIOp0->getName()+".c");
  SI.setOperand(0, NewCast);
  SI.setOperand(1, CastOp);
  return &SI;
}
|
|
|
|
|
|
|
|
/// equivalentAddressValues - Test if A and B will obviously have the same
|
|
|
|
/// value. This includes recognizing that %t0 and %t1 will have the same
|
|
|
|
/// value in code like this:
|
|
|
|
/// %t0 = getelementptr \@a, 0, 3
|
|
|
|
/// store i32 0, i32* %t0
|
|
|
|
/// %t1 = getelementptr \@a, 0, 3
|
|
|
|
/// %t2 = load i32* %t1
|
|
|
|
///
|
|
|
|
static bool equivalentAddressValues(Value *A, Value *B) {
|
|
|
|
// Test if the values are trivially equivalent.
|
|
|
|
if (A == B) return true;
|
|
|
|
|
|
|
|
// Test if the values come form identical arithmetic instructions.
|
|
|
|
// This uses isIdenticalToWhenDefined instead of isIdenticalTo because
|
|
|
|
// its only used to compare two uses within the same basic block, which
|
|
|
|
// means that they'll always either have the same value or one of them
|
|
|
|
// will have an undefined value.
|
|
|
|
if (isa<BinaryOperator>(A) ||
|
|
|
|
isa<CastInst>(A) ||
|
|
|
|
isa<PHINode>(A) ||
|
|
|
|
isa<GetElementPtrInst>(A))
|
|
|
|
if (Instruction *BI = dyn_cast<Instruction>(B))
|
|
|
|
if (cast<Instruction>(A)->isIdenticalToWhenDefined(BI))
|
|
|
|
return true;
|
|
|
|
|
|
|
|
// Otherwise they may not be equivalent.
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
/// visitStoreInst - Simplify store instructions: improve alignment, delete
/// stores to dead allocas, perform trivial dead-store elimination within a
/// small backwards window, turn stores to null into unreachable markers,
/// sink casts from the pointer operand, and try to merge this store into a
/// successor block.
Instruction *InstCombiner::visitStoreInst(StoreInst &SI) {
  Value *Val = SI.getOperand(0);
  Value *Ptr = SI.getOperand(1);

  // Attempt to improve the alignment.
  if (TD) {
    unsigned KnownAlign =
      getOrEnforceKnownAlignment(Ptr, TD->getPrefTypeAlignment(Val->getType()),
                                 TD);
    unsigned StoreAlign = SI.getAlignment();
    // Alignment 0 means "ABI alignment of the stored type".
    unsigned EffectiveStoreAlign = StoreAlign != 0 ? StoreAlign :
      TD->getABITypeAlignment(Val->getType());

    if (KnownAlign > EffectiveStoreAlign)
      SI.setAlignment(KnownAlign);
    else if (StoreAlign == 0)
      SI.setAlignment(EffectiveStoreAlign);
  }

  // Don't hack volatile/atomic stores.
  // FIXME: Some bits are legal for atomic stores; needs refactoring.
  if (!SI.isSimple()) return 0;

  // If the RHS is an alloca with a single use, zapify the store, making the
  // alloca dead.
  if (Ptr->hasOneUse()) {
    if (isa<AllocaInst>(Ptr))
      return EraseInstFromFunction(SI);
    if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Ptr)) {
      if (isa<AllocaInst>(GEP->getOperand(0))) {
        if (GEP->getOperand(0)->hasOneUse())
          return EraseInstFromFunction(SI);
      }
    }
  }

  // Do really simple DSE, to catch cases where there are several consecutive
  // stores to the same location, separated by a few arithmetic operations. This
  // situation often occurs with bitfield accesses.
  BasicBlock::iterator BBI = &SI;
  for (unsigned ScanInsts = 6; BBI != SI.getParent()->begin() && ScanInsts;
       --ScanInsts) {
    --BBI;
    // Don't count debug info directives, lest they affect codegen,
    // and we skip pointer-to-pointer bitcasts, which are NOPs.
    if (isa<DbgInfoIntrinsic>(BBI) ||
        (isa<BitCastInst>(BBI) && BBI->getType()->isPointerTy())) {
      ScanInsts++;
      continue;
    }

    if (StoreInst *PrevSI = dyn_cast<StoreInst>(BBI)) {
      // Prev store isn't volatile, and stores to the same location?
      if (PrevSI->isSimple() && equivalentAddressValues(PrevSI->getOperand(1),
                                                        SI.getOperand(1))) {
        ++NumDeadStore;
        ++BBI;
        EraseInstFromFunction(*PrevSI);
        continue;
      }
      break;
    }

    // If this is a load, we have to stop.  However, if the loaded value is from
    // the pointer we're loading and is producing the pointer we're storing,
    // then *this* store is dead (X = load P; store X -> P).
    if (LoadInst *LI = dyn_cast<LoadInst>(BBI)) {
      if (LI == Val && equivalentAddressValues(LI->getOperand(0), Ptr) &&
          LI->isSimple())
        return EraseInstFromFunction(SI);

      // Otherwise, this is a load from some other location.  Stores before it
      // may not be dead.
      break;
    }

    // Don't skip over loads or things that can modify memory.
    if (BBI->mayWriteToMemory() || BBI->mayReadFromMemory())
      break;
  }

  // store X, null    -> turns into 'unreachable' in SimplifyCFG
  if (isa<ConstantPointerNull>(Ptr) && SI.getPointerAddressSpace() == 0) {
    if (!isa<UndefValue>(Val)) {
      SI.setOperand(0, UndefValue::get(Val->getType()));
      if (Instruction *U = dyn_cast<Instruction>(Val))
        Worklist.Add(U);  // Dropped a use.
    }
    return 0;  // Do not modify these!
  }

  // store undef, Ptr -> noop
  if (isa<UndefValue>(Val))
    return EraseInstFromFunction(SI);

  // If the pointer destination is a cast, see if we can fold the cast into the
  // source instead.
  if (isa<CastInst>(Ptr))
    if (Instruction *Res = InstCombineStoreToCast(*this, SI))
      return Res;
  if (ConstantExpr *CE = dyn_cast<ConstantExpr>(Ptr))
    if (CE->isCast())
      if (Instruction *Res = InstCombineStoreToCast(*this, SI))
        return Res;

  // If this store is the last instruction in the basic block (possibly
  // excepting debug info instructions), and if the block ends with an
  // unconditional branch, try to move it to the successor block.
  BBI = &SI;
  do {
    ++BBI;
  } while (isa<DbgInfoIntrinsic>(BBI) ||
           (isa<BitCastInst>(BBI) && BBI->getType()->isPointerTy()));
  if (BranchInst *BI = dyn_cast<BranchInst>(BBI))
    if (BI->isUnconditional())
      if (SimplifyStoreAtEndOfBlock(SI))
        return 0;  // xform done!

  return 0;
}
|
|
|
|
|
|
|
|
/// SimplifyStoreAtEndOfBlock - Turn things like:
///   if () { *P = v1; } else { *P = v2 }
/// into a phi node with a store in the successor.
///
/// Simplify things like:
///   *P = v1; if () { *P = v2; }
/// into a phi node with a store in the successor.
///
/// Returns true if the transformation was performed (SI and the matching
/// store are erased and a merged store is inserted in the successor).
bool InstCombiner::SimplifyStoreAtEndOfBlock(StoreInst &SI) {
  BasicBlock *StoreBB = SI.getParent();

  // Check to see if the successor block has exactly two incoming edges.  If
  // so, see if the other predecessor contains a store to the same location.
  // if so, insert a PHI node (if needed) and move the stores down.
  BasicBlock *DestBB = StoreBB->getTerminator()->getSuccessor(0);

  // Determine whether Dest has exactly two predecessors and, if so, compute
  // the other predecessor.
  pred_iterator PI = pred_begin(DestBB);
  BasicBlock *P = *PI;
  BasicBlock *OtherBB = 0;

  if (P != StoreBB)
    OtherBB = P;

  if (++PI == pred_end(DestBB))
    return false;

  P = *PI;
  if (P != StoreBB) {
    // Two predecessors other than StoreBB means more than two incoming edges.
    if (OtherBB)
      return false;
    OtherBB = P;
  }
  if (++PI != pred_end(DestBB))
    return false;

  // Bail out if all the relevant blocks aren't distinct (this can happen,
  // for example, if SI is in an infinite loop)
  if (StoreBB == DestBB || OtherBB == DestBB)
    return false;

  // Verify that the other block ends in a branch and is not otherwise empty.
  BasicBlock::iterator BBI = OtherBB->getTerminator();
  BranchInst *OtherBr = dyn_cast<BranchInst>(BBI);
  if (!OtherBr || BBI == OtherBB->begin())
    return false;

  // If the other block ends in an unconditional branch, check for the 'if then
  // else' case.  there is an instruction before the branch.
  StoreInst *OtherStore = 0;
  if (OtherBr->isUnconditional()) {
    --BBI;
    // Skip over debugging info.
    while (isa<DbgInfoIntrinsic>(BBI) ||
           (isa<BitCastInst>(BBI) && BBI->getType()->isPointerTy())) {
      if (BBI==OtherBB->begin())
        return false;
      --BBI;
    }
    // If this isn't a store, isn't a store to the same location, or is not the
    // right kind of store, bail out.
    OtherStore = dyn_cast<StoreInst>(BBI);
    if (!OtherStore || OtherStore->getOperand(1) != SI.getOperand(1) ||
        !SI.isSameOperationAs(OtherStore))
      return false;
  } else {
    // Otherwise, the other block ended with a conditional branch. If one of the
    // destinations is StoreBB, then we have the if/then case.
    if (OtherBr->getSuccessor(0) != StoreBB &&
        OtherBr->getSuccessor(1) != StoreBB)
      return false;

    // Okay, we know that OtherBr now goes to Dest and StoreBB, so this is an
    // if/then triangle.  See if there is a store to the same ptr as SI that
    // lives in OtherBB.
    for (;; --BBI) {
      // Check to see if we find the matching store.
      if ((OtherStore = dyn_cast<StoreInst>(BBI))) {
        if (OtherStore->getOperand(1) != SI.getOperand(1) ||
            !SI.isSameOperationAs(OtherStore))
          return false;
        break;
      }
      // If we find something that may be using or overwriting the stored
      // value, or if we run out of instructions, we can't do the xform.
      if (BBI->mayReadFromMemory() || BBI->mayWriteToMemory() ||
          BBI == OtherBB->begin())
        return false;
    }

    // In order to eliminate the store in OtherBr, we have to
    // make sure nothing reads or overwrites the stored value in
    // StoreBB.
    for (BasicBlock::iterator I = StoreBB->begin(); &*I != &SI; ++I) {
      // FIXME: This should really be AA driven.
      if (I->mayReadFromMemory() || I->mayWriteToMemory())
        return false;
    }
  }

  // Insert a PHI node now if we need it.
  Value *MergedVal = OtherStore->getOperand(0);
  if (MergedVal != SI.getOperand(0)) {
    PHINode *PN = PHINode::Create(MergedVal->getType(), 2, "storemerge");
    PN->addIncoming(SI.getOperand(0), SI.getParent());
    PN->addIncoming(OtherStore->getOperand(0), OtherBB);
    MergedVal = InsertNewInstBefore(PN, DestBB->front());
  }

  // Advance to a place where it is safe to insert the new store and
  // insert it.
  BBI = DestBB->getFirstInsertionPt();
  StoreInst *NewSI = new StoreInst(MergedVal, SI.getOperand(1),
                                   SI.isVolatile(),
                                   SI.getAlignment(),
                                   SI.getOrdering(),
                                   SI.getSynchScope());
  InsertNewInstBefore(NewSI, *BBI);
  // Preserve a debug location for the merged store.
  NewSI->setDebugLoc(OtherStore->getDebugLoc());

  // Nuke the old stores.
  EraseInstFromFunction(SI);
  EraseInstFromFunction(*OtherStore);
  return true;
}
|