forked from OSchip/llvm-project
[GVN] non-functional code movement
Summary: Move some code around, in preparation for later fixes to the non-integral addrspace handling (D59661).

Patch by Jameson Nash <jameson@juliacomputing.com>

Reviewed by: reames, loladiro

Differential Revision: https://reviews.llvm.org/D59729

llvm-svn: 362853
This commit is contained in:
parent
ddd2c9ac86
commit
eb4a561fa3
|
@ -859,11 +859,12 @@ bool GVN::AnalyzeLoadAvailability(LoadInst *LI, MemDepResult DepInfo,
|
||||||
|
|
||||||
const DataLayout &DL = LI->getModule()->getDataLayout();
|
const DataLayout &DL = LI->getModule()->getDataLayout();
|
||||||
|
|
||||||
|
Instruction *DepInst = DepInfo.getInst();
|
||||||
if (DepInfo.isClobber()) {
|
if (DepInfo.isClobber()) {
|
||||||
// If the dependence is to a store that writes to a superset of the bits
|
// If the dependence is to a store that writes to a superset of the bits
|
||||||
// read by the load, we can extract the bits we need for the load from the
|
// read by the load, we can extract the bits we need for the load from the
|
||||||
// stored value.
|
// stored value.
|
||||||
if (StoreInst *DepSI = dyn_cast<StoreInst>(DepInfo.getInst())) {
|
if (StoreInst *DepSI = dyn_cast<StoreInst>(DepInst)) {
|
||||||
// Can't forward from non-atomic to atomic without violating memory model.
|
// Can't forward from non-atomic to atomic without violating memory model.
|
||||||
if (Address && LI->isAtomic() <= DepSI->isAtomic()) {
|
if (Address && LI->isAtomic() <= DepSI->isAtomic()) {
|
||||||
int Offset =
|
int Offset =
|
||||||
|
@ -879,7 +880,7 @@ bool GVN::AnalyzeLoadAvailability(LoadInst *LI, MemDepResult DepInfo,
|
||||||
// load i32* P
|
// load i32* P
|
||||||
// load i8* (P+1)
|
// load i8* (P+1)
|
||||||
// if we have this, replace the later with an extraction from the former.
|
// if we have this, replace the later with an extraction from the former.
|
||||||
if (LoadInst *DepLI = dyn_cast<LoadInst>(DepInfo.getInst())) {
|
if (LoadInst *DepLI = dyn_cast<LoadInst>(DepInst)) {
|
||||||
// If this is a clobber and L is the first instruction in its block, then
|
// If this is a clobber and L is the first instruction in its block, then
|
||||||
// we have the first instruction in the entry block.
|
// we have the first instruction in the entry block.
|
||||||
// Can't forward from non-atomic to atomic without violating memory model.
|
// Can't forward from non-atomic to atomic without violating memory model.
|
||||||
|
@ -896,7 +897,7 @@ bool GVN::AnalyzeLoadAvailability(LoadInst *LI, MemDepResult DepInfo,
|
||||||
|
|
||||||
// If the clobbering value is a memset/memcpy/memmove, see if we can
|
// If the clobbering value is a memset/memcpy/memmove, see if we can
|
||||||
// forward a value on from it.
|
// forward a value on from it.
|
||||||
if (MemIntrinsic *DepMI = dyn_cast<MemIntrinsic>(DepInfo.getInst())) {
|
if (MemIntrinsic *DepMI = dyn_cast<MemIntrinsic>(DepInst)) {
|
||||||
if (Address && !LI->isAtomic()) {
|
if (Address && !LI->isAtomic()) {
|
||||||
int Offset = analyzeLoadFromClobberingMemInst(LI->getType(), Address,
|
int Offset = analyzeLoadFromClobberingMemInst(LI->getType(), Address,
|
||||||
DepMI, DL);
|
DepMI, DL);
|
||||||
|
@ -910,8 +911,7 @@ bool GVN::AnalyzeLoadAvailability(LoadInst *LI, MemDepResult DepInfo,
|
||||||
LLVM_DEBUG(
|
LLVM_DEBUG(
|
||||||
// fast print dep, using operator<< on instruction is too slow.
|
// fast print dep, using operator<< on instruction is too slow.
|
||||||
dbgs() << "GVN: load "; LI->printAsOperand(dbgs());
|
dbgs() << "GVN: load "; LI->printAsOperand(dbgs());
|
||||||
Instruction *I = DepInfo.getInst();
|
dbgs() << " is clobbered by " << *DepInst << '\n';);
|
||||||
dbgs() << " is clobbered by " << *I << '\n';);
|
|
||||||
if (ORE->allowExtraAnalysis(DEBUG_TYPE))
|
if (ORE->allowExtraAnalysis(DEBUG_TYPE))
|
||||||
reportMayClobberedLoad(LI, DepInfo, DT, ORE);
|
reportMayClobberedLoad(LI, DepInfo, DT, ORE);
|
||||||
|
|
||||||
|
@ -919,8 +919,6 @@ bool GVN::AnalyzeLoadAvailability(LoadInst *LI, MemDepResult DepInfo,
|
||||||
}
|
}
|
||||||
assert(DepInfo.isDef() && "follows from above");
|
assert(DepInfo.isDef() && "follows from above");
|
||||||
|
|
||||||
Instruction *DepInst = DepInfo.getInst();
|
|
||||||
|
|
||||||
// Loading the allocation -> undef.
|
// Loading the allocation -> undef.
|
||||||
if (isa<AllocaInst>(DepInst) || isMallocLikeFn(DepInst, TLI) ||
|
if (isa<AllocaInst>(DepInst) || isMallocLikeFn(DepInst, TLI) ||
|
||||||
// Loading immediately after lifetime begin -> undef.
|
// Loading immediately after lifetime begin -> undef.
|
||||||
|
@ -939,9 +937,8 @@ bool GVN::AnalyzeLoadAvailability(LoadInst *LI, MemDepResult DepInfo,
|
||||||
// Reject loads and stores that are to the same address but are of
|
// Reject loads and stores that are to the same address but are of
|
||||||
// different types if we have to. If the stored value is larger or equal to
|
// different types if we have to. If the stored value is larger or equal to
|
||||||
// the loaded value, we can reuse it.
|
// the loaded value, we can reuse it.
|
||||||
if (S->getValueOperand()->getType() != LI->getType() &&
|
if (!canCoerceMustAliasedValueToLoad(S->getValueOperand(), LI->getType(),
|
||||||
!canCoerceMustAliasedValueToLoad(S->getValueOperand(),
|
DL))
|
||||||
LI->getType(), DL))
|
|
||||||
return false;
|
return false;
|
||||||
|
|
||||||
// Can't forward from non-atomic to atomic without violating memory model.
|
// Can't forward from non-atomic to atomic without violating memory model.
|
||||||
|
@ -956,8 +953,7 @@ bool GVN::AnalyzeLoadAvailability(LoadInst *LI, MemDepResult DepInfo,
|
||||||
// If the types mismatch and we can't handle it, reject reuse of the load.
|
// If the types mismatch and we can't handle it, reject reuse of the load.
|
||||||
// If the stored value is larger or equal to the loaded value, we can reuse
|
// If the stored value is larger or equal to the loaded value, we can reuse
|
||||||
// it.
|
// it.
|
||||||
if (LD->getType() != LI->getType() &&
|
if (!canCoerceMustAliasedValueToLoad(LD, LI->getType(), DL))
|
||||||
!canCoerceMustAliasedValueToLoad(LD, LI->getType(), DL))
|
|
||||||
return false;
|
return false;
|
||||||
|
|
||||||
// Can't forward from non-atomic to atomic without violating memory model.
|
// Can't forward from non-atomic to atomic without violating memory model.
|
||||||
|
|
|
@ -14,13 +14,17 @@ namespace VNCoercion {
|
||||||
/// Return true if coerceAvailableValueToLoadType will succeed.
|
/// Return true if coerceAvailableValueToLoadType will succeed.
|
||||||
bool canCoerceMustAliasedValueToLoad(Value *StoredVal, Type *LoadTy,
|
bool canCoerceMustAliasedValueToLoad(Value *StoredVal, Type *LoadTy,
|
||||||
const DataLayout &DL) {
|
const DataLayout &DL) {
|
||||||
|
Type *StoredTy = StoredVal->getType();
|
||||||
|
if (StoredTy == LoadTy)
|
||||||
|
return true;
|
||||||
|
|
||||||
// If the loaded or stored value is an first class array or struct, don't try
|
// If the loaded or stored value is an first class array or struct, don't try
|
||||||
// to transform them. We need to be able to bitcast to integer.
|
// to transform them. We need to be able to bitcast to integer.
|
||||||
if (LoadTy->isStructTy() || LoadTy->isArrayTy() ||
|
if (LoadTy->isStructTy() || LoadTy->isArrayTy() || StoredTy->isStructTy() ||
|
||||||
StoredVal->getType()->isStructTy() || StoredVal->getType()->isArrayTy())
|
StoredTy->isArrayTy())
|
||||||
return false;
|
return false;
|
||||||
|
|
||||||
uint64_t StoreSize = DL.getTypeSizeInBits(StoredVal->getType());
|
uint64_t StoreSize = DL.getTypeSizeInBits(StoredTy);
|
||||||
|
|
||||||
// The store size must be byte-aligned to support future type casts.
|
// The store size must be byte-aligned to support future type casts.
|
||||||
if (llvm::alignTo(StoreSize, 8) != StoreSize)
|
if (llvm::alignTo(StoreSize, 8) != StoreSize)
|
||||||
|
@ -306,7 +310,7 @@ int analyzeLoadFromClobberingMemInst(Type *LoadTy, Value *LoadPtr,
|
||||||
return -1;
|
return -1;
|
||||||
|
|
||||||
GlobalVariable *GV = dyn_cast<GlobalVariable>(GetUnderlyingObject(Src, DL));
|
GlobalVariable *GV = dyn_cast<GlobalVariable>(GetUnderlyingObject(Src, DL));
|
||||||
if (!GV || !GV->isConstant())
|
if (!GV || !GV->isConstant() || !GV->hasDefinitiveInitializer())
|
||||||
return -1;
|
return -1;
|
||||||
|
|
||||||
// See if the access is within the bounds of the transfer.
|
// See if the access is within the bounds of the transfer.
|
||||||
|
|
Loading…
Reference in New Issue