2004-10-07 12:16:33 +08:00
|
|
|
//===- GlobalOpt.cpp - Optimize Global Variables --------------------------===//
|
2005-04-22 07:48:37 +08:00
|
|
|
//
|
2004-10-07 12:16:33 +08:00
|
|
|
// The LLVM Compiler Infrastructure
|
|
|
|
//
|
2007-12-30 04:36:04 +08:00
|
|
|
// This file is distributed under the University of Illinois Open Source
|
|
|
|
// License. See LICENSE.TXT for details.
|
2005-04-22 07:48:37 +08:00
|
|
|
//
|
2004-10-07 12:16:33 +08:00
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
//
|
|
|
|
// This pass transforms simple global variables that never have their address
|
|
|
|
// taken. If obviously true, it marks read/write globals as constant, deletes
|
|
|
|
// variables only stored to, etc.
|
|
|
|
//
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
|
2016-04-26 08:28:01 +08:00
|
|
|
#include "llvm/Transforms/IPO/GlobalOpt.h"
|
2012-12-04 00:50:05 +08:00
|
|
|
#include "llvm/ADT/DenseMap.h"
|
|
|
|
#include "llvm/ADT/STLExtras.h"
|
|
|
|
#include "llvm/ADT/SmallPtrSet.h"
|
|
|
|
#include "llvm/ADT/SmallVector.h"
|
|
|
|
#include "llvm/ADT/Statistic.h"
|
2017-10-11 06:49:55 +08:00
|
|
|
#include "llvm/ADT/Twine.h"
|
|
|
|
#include "llvm/ADT/iterator_range.h"
|
2018-01-31 00:17:22 +08:00
|
|
|
#include "llvm/Analysis/BlockFrequencyInfo.h"
|
2012-12-04 00:50:05 +08:00
|
|
|
#include "llvm/Analysis/ConstantFolding.h"
|
|
|
|
#include "llvm/Analysis/MemoryBuiltins.h"
|
2015-03-24 03:32:43 +08:00
|
|
|
#include "llvm/Analysis/TargetLibraryInfo.h"
|
2018-01-31 00:17:22 +08:00
|
|
|
#include "llvm/Analysis/TargetTransformInfo.h"
|
2018-06-05 05:23:21 +08:00
|
|
|
#include "llvm/Transforms/Utils/Local.h"
|
2017-10-11 06:49:55 +08:00
|
|
|
#include "llvm/BinaryFormat/Dwarf.h"
|
|
|
|
#include "llvm/IR/Attributes.h"
|
|
|
|
#include "llvm/IR/BasicBlock.h"
|
2014-03-04 19:01:28 +08:00
|
|
|
#include "llvm/IR/CallSite.h"
|
2013-01-02 19:36:10 +08:00
|
|
|
#include "llvm/IR/CallingConv.h"
|
2017-10-11 06:49:55 +08:00
|
|
|
#include "llvm/IR/Constant.h"
|
2013-01-02 19:36:10 +08:00
|
|
|
#include "llvm/IR/Constants.h"
|
|
|
|
#include "llvm/IR/DataLayout.h"
|
2017-08-04 12:51:15 +08:00
|
|
|
#include "llvm/IR/DebugInfoMetadata.h"
|
2013-01-02 19:36:10 +08:00
|
|
|
#include "llvm/IR/DerivedTypes.h"
|
2015-11-15 22:21:37 +08:00
|
|
|
#include "llvm/IR/Dominators.h"
|
2017-10-11 06:49:55 +08:00
|
|
|
#include "llvm/IR/Function.h"
|
2014-03-04 18:40:04 +08:00
|
|
|
#include "llvm/IR/GetElementPtrTypeIterator.h"
|
2017-10-11 06:49:55 +08:00
|
|
|
#include "llvm/IR/GlobalAlias.h"
|
|
|
|
#include "llvm/IR/GlobalValue.h"
|
|
|
|
#include "llvm/IR/GlobalVariable.h"
|
|
|
|
#include "llvm/IR/InstrTypes.h"
|
|
|
|
#include "llvm/IR/Instruction.h"
|
2013-01-02 19:36:10 +08:00
|
|
|
#include "llvm/IR/Instructions.h"
|
|
|
|
#include "llvm/IR/IntrinsicInst.h"
|
|
|
|
#include "llvm/IR/Module.h"
|
|
|
|
#include "llvm/IR/Operator.h"
|
2017-10-11 06:49:55 +08:00
|
|
|
#include "llvm/IR/Type.h"
|
|
|
|
#include "llvm/IR/Use.h"
|
|
|
|
#include "llvm/IR/User.h"
|
|
|
|
#include "llvm/IR/Value.h"
|
2014-03-04 19:17:44 +08:00
|
|
|
#include "llvm/IR/ValueHandle.h"
|
2004-10-07 12:16:33 +08:00
|
|
|
#include "llvm/Pass.h"
|
2017-10-11 06:49:55 +08:00
|
|
|
#include "llvm/Support/AtomicOrdering.h"
|
|
|
|
#include "llvm/Support/Casting.h"
|
2018-01-31 00:17:22 +08:00
|
|
|
#include "llvm/Support/CommandLine.h"
|
2007-01-31 07:46:24 +08:00
|
|
|
#include "llvm/Support/Debug.h"
|
2009-07-12 04:10:48 +08:00
|
|
|
#include "llvm/Support/ErrorHandling.h"
|
2008-04-26 15:40:11 +08:00
|
|
|
#include "llvm/Support/MathExtras.h"
|
2014-01-07 19:48:04 +08:00
|
|
|
#include "llvm/Support/raw_ostream.h"
|
2016-04-26 08:28:01 +08:00
|
|
|
#include "llvm/Transforms/IPO.h"
|
2014-05-03 02:35:25 +08:00
|
|
|
#include "llvm/Transforms/Utils/CtorUtils.h"
|
2016-02-03 10:51:00 +08:00
|
|
|
#include "llvm/Transforms/Utils/Evaluator.h"
|
2013-10-22 01:14:55 +08:00
|
|
|
#include "llvm/Transforms/Utils/GlobalStatus.h"
|
2017-10-11 06:49:55 +08:00
|
|
|
#include <cassert>
|
|
|
|
#include <cstdint>
|
|
|
|
#include <utility>
|
|
|
|
#include <vector>
|
|
|
|
|
2004-10-07 12:16:33 +08:00
|
|
|
using namespace llvm;
|
|
|
|
|
2014-04-22 10:55:47 +08:00
|
|
|
#define DEBUG_TYPE "globalopt"
|
|
|
|
|
2006-12-20 06:09:18 +08:00
|
|
|
STATISTIC(NumMarked , "Number of globals marked constant");
|
2011-01-20 00:32:21 +08:00
|
|
|
STATISTIC(NumUnnamed , "Number of globals marked unnamed_addr");
|
2006-12-20 06:09:18 +08:00
|
|
|
STATISTIC(NumSRA , "Number of aggregate globals broken into scalars");
|
|
|
|
STATISTIC(NumHeapSRA , "Number of heap objects SRA'd");
|
|
|
|
STATISTIC(NumSubstitute,"Number of globals with initializers stored into them");
|
|
|
|
STATISTIC(NumDeleted , "Number of globals deleted");
|
|
|
|
STATISTIC(NumGlobUses , "Number of global uses devirtualized");
|
2013-10-08 03:03:24 +08:00
|
|
|
STATISTIC(NumLocalized , "Number of globals localized");
|
2006-12-20 06:09:18 +08:00
|
|
|
STATISTIC(NumShrunkToBool , "Number of global vars shrunk to booleans");
|
|
|
|
STATISTIC(NumFastCallFns , "Number of functions converted to fastcc");
|
|
|
|
STATISTIC(NumCtorsEvaluated, "Number of static ctors evaluated");
|
2008-02-17 04:56:04 +08:00
|
|
|
STATISTIC(NumNestRemoved , "Number of nest attributes removed");
|
2009-02-15 17:56:08 +08:00
|
|
|
STATISTIC(NumAliasesResolved, "Number of global aliases resolved");
|
|
|
|
STATISTIC(NumAliasesRemoved, "Number of global aliases eliminated");
|
2011-03-21 01:59:11 +08:00
|
|
|
STATISTIC(NumCXXDtorsRemoved, "Number of global C++ destructors removed");
|
2018-01-31 00:17:22 +08:00
|
|
|
STATISTIC(NumInternalFunc, "Number of internal functions");
|
|
|
|
STATISTIC(NumColdCC, "Number of functions marked coldcc");
|
|
|
|
|
|
|
|
static cl::opt<bool>
|
|
|
|
EnableColdCCStressTest("enable-coldcc-stress-test",
|
|
|
|
cl::desc("Enable stress test of coldcc by adding "
|
|
|
|
"calling conv to all internal functions."),
|
|
|
|
cl::init(false), cl::Hidden);
|
|
|
|
|
|
|
|
static cl::opt<int> ColdCCRelFreq(
|
|
|
|
"coldcc-rel-freq", cl::Hidden, cl::init(2), cl::ZeroOrMore,
|
|
|
|
cl::desc(
|
|
|
|
"Maximum block frequency, expressed as a percentage of caller's "
|
|
|
|
"entry frequency, for a call site to be considered cold for enabling"
|
|
|
|
"coldcc"));
|
2004-10-07 12:16:33 +08:00
|
|
|
|
2015-11-13 19:05:07 +08:00
|
|
|
/// Is this global variable possibly used by a leak checker as a root? If so,
|
|
|
|
/// we might not really want to eliminate the stores to it.
|
2012-07-24 15:21:08 +08:00
|
|
|
static bool isLeakCheckerRoot(GlobalVariable *GV) {
|
|
|
|
// A global variable is a root if it is a pointer, or could plausibly contain
|
|
|
|
// a pointer. There are two challenges; one is that we could have a struct
|
|
|
|
// the has an inner member which is a pointer. We recurse through the type to
|
|
|
|
// detect these (up to a point). The other is that we may actually be a union
|
|
|
|
// of a pointer and another type, and so our LLVM type is an integer which
|
|
|
|
// gets converted into a pointer, or our type is an [i8 x #] with a pointer
|
|
|
|
// potentially contained here.
|
|
|
|
|
|
|
|
if (GV->hasPrivateLinkage())
|
|
|
|
return false;
|
|
|
|
|
|
|
|
SmallVector<Type *, 4> Types;
|
2016-01-17 04:30:46 +08:00
|
|
|
Types.push_back(GV->getValueType());
|
2012-07-24 15:21:08 +08:00
|
|
|
|
|
|
|
unsigned Limit = 20;
|
|
|
|
do {
|
|
|
|
Type *Ty = Types.pop_back_val();
|
|
|
|
switch (Ty->getTypeID()) {
|
|
|
|
default: break;
|
|
|
|
case Type::PointerTyID: return true;
|
|
|
|
case Type::ArrayTyID:
|
|
|
|
case Type::VectorTyID: {
|
|
|
|
SequentialType *STy = cast<SequentialType>(Ty);
|
|
|
|
Types.push_back(STy->getElementType());
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
case Type::StructTyID: {
|
|
|
|
StructType *STy = cast<StructType>(Ty);
|
|
|
|
if (STy->isOpaque()) return true;
|
|
|
|
for (StructType::element_iterator I = STy->element_begin(),
|
|
|
|
E = STy->element_end(); I != E; ++I) {
|
|
|
|
Type *InnerTy = *I;
|
|
|
|
if (isa<PointerType>(InnerTy)) return true;
|
|
|
|
if (isa<CompositeType>(InnerTy))
|
|
|
|
Types.push_back(InnerTy);
|
|
|
|
}
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
if (--Limit == 0) return true;
|
|
|
|
} while (!Types.empty());
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
/// Given a value that is stored to a global but never read, determine whether
|
|
|
|
/// it's safe to remove the store and the chain of computation that feeds the
|
|
|
|
/// store.
|
2012-08-29 23:32:21 +08:00
|
|
|
static bool IsSafeComputationToRemove(Value *V, const TargetLibraryInfo *TLI) {
|
2012-07-24 15:21:08 +08:00
|
|
|
do {
|
|
|
|
if (isa<Constant>(V))
|
|
|
|
return true;
|
|
|
|
if (!V->hasOneUse())
|
|
|
|
return false;
|
2012-07-26 05:19:40 +08:00
|
|
|
if (isa<LoadInst>(V) || isa<InvokeInst>(V) || isa<Argument>(V) ||
|
|
|
|
isa<GlobalValue>(V))
|
2012-07-24 15:21:08 +08:00
|
|
|
return false;
|
2012-08-29 23:32:21 +08:00
|
|
|
if (isAllocationFn(V, TLI))
|
2012-07-24 15:21:08 +08:00
|
|
|
return true;
|
|
|
|
|
|
|
|
Instruction *I = cast<Instruction>(V);
|
|
|
|
if (I->mayHaveSideEffects())
|
|
|
|
return false;
|
|
|
|
if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(I)) {
|
|
|
|
if (!GEP->hasAllConstantIndices())
|
|
|
|
return false;
|
|
|
|
} else if (I->getNumOperands() != 1) {
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
V = I->getOperand(0);
|
2017-10-11 06:49:55 +08:00
|
|
|
} while (true);
|
2012-07-24 15:21:08 +08:00
|
|
|
}
|
|
|
|
|
2015-11-13 19:05:07 +08:00
|
|
|
/// This GV is a pointer root. Loop over all users of the global and clean up
|
|
|
|
/// any that obviously don't assign the global a value that isn't dynamically
|
|
|
|
/// allocated.
|
2012-08-29 23:32:21 +08:00
|
|
|
static bool CleanupPointerRootUsers(GlobalVariable *GV,
|
|
|
|
const TargetLibraryInfo *TLI) {
|
2012-07-24 15:21:08 +08:00
|
|
|
// A brief explanation of leak checkers. The goal is to find bugs where
|
|
|
|
// pointers are forgotten, causing an accumulating growth in memory
|
|
|
|
// usage over time. The common strategy for leak checkers is to whitelist the
|
|
|
|
// memory pointed to by globals at exit. This is popular because it also
|
|
|
|
// solves another problem where the main thread of a C++ program may shut down
|
|
|
|
// before other threads that are still expecting to use those globals. To
|
|
|
|
// handle that case, we expect the program may create a singleton and never
|
|
|
|
// destroy it.
|
|
|
|
|
|
|
|
bool Changed = false;
|
|
|
|
|
|
|
|
// If Dead[n].first is the only use of a malloc result, we can delete its
|
|
|
|
// chain of computation and the store to the global in Dead[n].second.
|
|
|
|
SmallVector<std::pair<Instruction *, Instruction *>, 32> Dead;
|
|
|
|
|
|
|
|
// Constants can't be pointers to dynamically allocated memory.
|
2014-03-09 11:16:01 +08:00
|
|
|
for (Value::user_iterator UI = GV->user_begin(), E = GV->user_end();
|
2012-07-24 15:21:08 +08:00
|
|
|
UI != E;) {
|
|
|
|
User *U = *UI++;
|
|
|
|
if (StoreInst *SI = dyn_cast<StoreInst>(U)) {
|
|
|
|
Value *V = SI->getValueOperand();
|
|
|
|
if (isa<Constant>(V)) {
|
|
|
|
Changed = true;
|
|
|
|
SI->eraseFromParent();
|
|
|
|
} else if (Instruction *I = dyn_cast<Instruction>(V)) {
|
|
|
|
if (I->hasOneUse())
|
|
|
|
Dead.push_back(std::make_pair(I, SI));
|
|
|
|
}
|
|
|
|
} else if (MemSetInst *MSI = dyn_cast<MemSetInst>(U)) {
|
|
|
|
if (isa<Constant>(MSI->getValue())) {
|
|
|
|
Changed = true;
|
|
|
|
MSI->eraseFromParent();
|
|
|
|
} else if (Instruction *I = dyn_cast<Instruction>(MSI->getValue())) {
|
|
|
|
if (I->hasOneUse())
|
|
|
|
Dead.push_back(std::make_pair(I, MSI));
|
|
|
|
}
|
|
|
|
} else if (MemTransferInst *MTI = dyn_cast<MemTransferInst>(U)) {
|
|
|
|
GlobalVariable *MemSrc = dyn_cast<GlobalVariable>(MTI->getSource());
|
|
|
|
if (MemSrc && MemSrc->isConstant()) {
|
|
|
|
Changed = true;
|
|
|
|
MTI->eraseFromParent();
|
|
|
|
} else if (Instruction *I = dyn_cast<Instruction>(MemSrc)) {
|
|
|
|
if (I->hasOneUse())
|
|
|
|
Dead.push_back(std::make_pair(I, MTI));
|
|
|
|
}
|
|
|
|
} else if (ConstantExpr *CE = dyn_cast<ConstantExpr>(U)) {
|
|
|
|
if (CE->use_empty()) {
|
|
|
|
CE->destroyConstant();
|
|
|
|
Changed = true;
|
|
|
|
}
|
|
|
|
} else if (Constant *C = dyn_cast<Constant>(U)) {
|
2013-10-18 02:06:32 +08:00
|
|
|
if (isSafeToDestroyConstant(C)) {
|
2012-07-24 15:21:08 +08:00
|
|
|
C->destroyConstant();
|
|
|
|
// This could have invalidated UI, start over from scratch.
|
|
|
|
Dead.clear();
|
2012-08-29 23:32:21 +08:00
|
|
|
CleanupPointerRootUsers(GV, TLI);
|
2012-07-24 15:21:08 +08:00
|
|
|
return true;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
for (int i = 0, e = Dead.size(); i != e; ++i) {
|
2012-08-29 23:32:21 +08:00
|
|
|
if (IsSafeComputationToRemove(Dead[i].first, TLI)) {
|
2012-07-24 15:21:08 +08:00
|
|
|
Dead[i].second->eraseFromParent();
|
|
|
|
Instruction *I = Dead[i].first;
|
|
|
|
do {
|
2013-01-12 07:08:52 +08:00
|
|
|
if (isAllocationFn(I, TLI))
|
|
|
|
break;
|
2012-07-24 15:21:08 +08:00
|
|
|
Instruction *J = dyn_cast<Instruction>(I->getOperand(0));
|
|
|
|
if (!J)
|
|
|
|
break;
|
|
|
|
I->eraseFromParent();
|
|
|
|
I = J;
|
2017-10-11 06:49:55 +08:00
|
|
|
} while (true);
|
2012-07-24 15:21:08 +08:00
|
|
|
I->eraseFromParent();
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return Changed;
|
|
|
|
}
|
|
|
|
|
2015-11-13 19:05:07 +08:00
|
|
|
/// We just marked GV constant.  Loop over all users of the global, cleaning up
/// the obvious ones.  This is largely just a quick scan over the use list to
/// clean up the easy and obvious cruft.  This returns true if it made a change.
///
/// \p V is the global (or a constant expression derived from it); \p Init is
/// the constant known to be loaded through \p V, or null when unknown.
static bool CleanupConstantGlobalUsers(Value *V, Constant *Init,
                                       const DataLayout &DL,
                                       TargetLibraryInfo *TLI) {
  bool Changed = false;
  // Note that we need to use a weak value handle for the worklist items. When
  // we delete a constant array, we may also be holding pointer to one of its
  // elements (or an element of one of its elements if we're dealing with an
  // array of arrays) in the worklist.
  SmallVector<WeakTrackingVH, 8> WorkList(V->user_begin(), V->user_end());
  while (!WorkList.empty()) {
    Value *UV = WorkList.pop_back_val();
    // A null handle means the user was deleted out from under us; skip it.
    if (!UV)
      continue;

    User *U = cast<User>(UV);

    if (LoadInst *LI = dyn_cast<LoadInst>(U)) {
      if (Init) {
        // Replace the load with the initializer.
        LI->replaceAllUsesWith(Init);
        LI->eraseFromParent();
        Changed = true;
      }
    } else if (StoreInst *SI = dyn_cast<StoreInst>(U)) {
      // Store must be unreachable or storing Init into the global.
      SI->eraseFromParent();
      Changed = true;
    } else if (ConstantExpr *CE = dyn_cast<ConstantExpr>(U)) {
      if (CE->getOpcode() == Instruction::GetElementPtr) {
        // Recurse with the initializer narrowed through the GEP offsets.
        Constant *SubInit = nullptr;
        if (Init)
          SubInit = ConstantFoldLoadThroughGEPConstantExpr(Init, CE);
        Changed |= CleanupConstantGlobalUsers(CE, SubInit, DL, TLI);
      } else if ((CE->getOpcode() == Instruction::BitCast &&
                  CE->getType()->isPointerTy()) ||
                 CE->getOpcode() == Instruction::AddrSpaceCast) {
        // Pointer cast, delete any stores and memsets to the global.
        // Init is dropped because the cast changes the pointee type.
        Changed |= CleanupConstantGlobalUsers(CE, nullptr, DL, TLI);
      }

      if (CE->use_empty()) {
        CE->destroyConstant();
        Changed = true;
      }
    } else if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(U)) {
      // Do not transform "gepinst (gep constexpr (GV))" here, because forming
      // "gepconstexpr (gep constexpr (GV))" will cause the two gep's to fold
      // and will invalidate our notion of what Init is.
      Constant *SubInit = nullptr;
      if (!isa<ConstantExpr>(GEP->getOperand(0))) {
        // Try to fold the GEP instruction to a constant expression so the
        // initializer can be propagated through it.
        ConstantExpr *CE = dyn_cast_or_null<ConstantExpr>(
            ConstantFoldInstruction(GEP, DL, TLI));
        if (Init && CE && CE->getOpcode() == Instruction::GetElementPtr)
          SubInit = ConstantFoldLoadThroughGEPConstantExpr(Init, CE);

        // If the initializer is an all-null value and we have an inbounds GEP,
        // we already know what the result of any load from that GEP is.
        // TODO: Handle splats.
        if (Init && isa<ConstantAggregateZero>(Init) && GEP->isInBounds())
          SubInit = Constant::getNullValue(GEP->getResultElementType());
      }
      Changed |= CleanupConstantGlobalUsers(GEP, SubInit, DL, TLI);

      if (GEP->use_empty()) {
        GEP->eraseFromParent();
        Changed = true;
      }
    } else if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(U)) { // memset/cpy/mv
      // A memory intrinsic writing into the now-constant global is dead.
      if (MI->getRawDest() == V) {
        MI->eraseFromParent();
        Changed = true;
      }

    } else if (Constant *C = dyn_cast<Constant>(U)) {
      // If we have a chain of dead constantexprs or other things dangling from
      // us, and if they are all dead, nuke them without remorse.
      if (isSafeToDestroyConstant(C)) {
        C->destroyConstant();
        // Destroying the constant invalidates the worklist; restart the scan
        // recursively and report that something changed.
        CleanupConstantGlobalUsers(V, Init, DL, TLI);
        return true;
      }
    }
  }
  return Changed;
}
|
|
|
|
|
2015-11-13 19:05:07 +08:00
|
|
|
/// Return true if the specified instruction is a safe user of a derived
|
|
|
|
/// expression from a global that we want to SROA.
|
2008-01-14 10:09:12 +08:00
|
|
|
static bool isSafeSROAElementUse(Value *V) {
|
|
|
|
// We might have a dead and dangling constant hanging off of here.
|
|
|
|
if (Constant *C = dyn_cast<Constant>(V))
|
2013-10-18 02:06:32 +08:00
|
|
|
return isSafeToDestroyConstant(C);
|
2010-10-19 05:16:00 +08:00
|
|
|
|
2008-01-14 10:09:12 +08:00
|
|
|
Instruction *I = dyn_cast<Instruction>(V);
|
|
|
|
if (!I) return false;
|
|
|
|
|
|
|
|
// Loads are ok.
|
|
|
|
if (isa<LoadInst>(I)) return true;
|
|
|
|
|
|
|
|
// Stores *to* the pointer are ok.
|
|
|
|
if (StoreInst *SI = dyn_cast<StoreInst>(I))
|
|
|
|
return SI->getOperand(0) != V;
|
2010-10-19 05:16:00 +08:00
|
|
|
|
2008-01-14 10:09:12 +08:00
|
|
|
// Otherwise, it must be a GEP.
|
|
|
|
GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(I);
|
2014-04-25 13:29:35 +08:00
|
|
|
if (!GEPI) return false;
|
2010-10-19 05:16:00 +08:00
|
|
|
|
2008-01-14 10:09:12 +08:00
|
|
|
if (GEPI->getNumOperands() < 3 || !isa<Constant>(GEPI->getOperand(1)) ||
|
|
|
|
!cast<Constant>(GEPI->getOperand(1))->isNullValue())
|
|
|
|
return false;
|
2010-10-19 05:16:00 +08:00
|
|
|
|
2014-03-09 11:16:01 +08:00
|
|
|
for (User *U : GEPI->users())
|
|
|
|
if (!isSafeSROAElementUse(U))
|
2008-01-14 10:09:12 +08:00
|
|
|
return false;
|
|
|
|
return true;
|
|
|
|
}
|
2008-01-14 09:31:05 +08:00
|
|
|
|
2015-11-13 19:05:07 +08:00
|
|
|
/// U is a direct user of the specified global value.  Look at it and its uses
/// and decide whether it is safe to SROA this global.
///
/// Safe users are GEPs (instruction or constant expression) of the form
/// 'gep GV, 0, C, ...' with in-range constant indices, all of whose own users
/// are in turn safe element uses.
static bool IsUserOfGlobalSafeForSRA(User *U, GlobalValue *GV) {
  // The user of the global must be a GEP Inst or a ConstantExpr GEP.
  if (!isa<GetElementPtrInst>(U) &&
      (!isa<ConstantExpr>(U) ||
       cast<ConstantExpr>(U)->getOpcode() != Instruction::GetElementPtr))
    return false;

  // Check to see if this ConstantExpr GEP is SRA'able.  In particular, we
  // don't like < 3 operand CE's, and we don't like non-constant integer
  // indices.  This enforces that all uses are 'gep GV, 0, C, ...' for some
  // value of C.
  if (U->getNumOperands() < 3 || !isa<Constant>(U->getOperand(1)) ||
      !cast<Constant>(U->getOperand(1))->isNullValue() ||
      !isa<ConstantInt>(U->getOperand(2)))
    return false;

  gep_type_iterator GEPI = gep_type_begin(U), E = gep_type_end(U);
  ++GEPI;  // Skip over the pointer index.

  // If this is a use of an array allocation, do a bit more checking for sanity.
  if (GEPI.isSequential()) {
    // Operand 2 is the first real index (the element selector).
    ConstantInt *Idx = cast<ConstantInt>(U->getOperand(2));

    // Check to make sure that index falls within the array.  If not,
    // something funny is going on, so we won't do the optimization.
    //
    if (GEPI.isBoundedSequential() &&
        Idx->getZExtValue() >= GEPI.getSequentialNumElements())
      return false;

    // We cannot scalar repl this level of the array unless any array
    // sub-indices are in-range constants.  In particular, consider:
    // A[0][i].  We cannot know that the user isn't doing invalid things like
    // allowing i to index an out-of-range subscript that accesses A[1].
    //
    // Scalar replacing *just* the outer index of the array is probably not
    // going to be a win anyway, so just give up.
    for (++GEPI;  // Skip array index.
         GEPI != E;
         ++GEPI) {
      // Struct indices are constants by construction; only sequential
      // (array/vector) indices need the in-range constant check.
      if (GEPI.isStruct())
        continue;

      ConstantInt *IdxVal = dyn_cast<ConstantInt>(GEPI.getOperand());
      if (!IdxVal ||
          (GEPI.isBoundedSequential() &&
           IdxVal->getZExtValue() >= GEPI.getSequentialNumElements()))
        return false;
    }
  }

  // Finally, every transitive use of the GEP itself must be safe.
  return llvm::all_of(U->users(),
                      [](User *UU) { return isSafeSROAElementUse(UU); });
}
|
|
|
|
|
2015-11-13 19:05:07 +08:00
|
|
|
/// Look at all uses of the global and decide whether it is safe for us to
|
|
|
|
/// perform this transformation.
|
2008-01-14 10:09:12 +08:00
|
|
|
static bool GlobalUsersSafeToSRA(GlobalValue *GV) {
|
2014-03-09 11:16:01 +08:00
|
|
|
for (User *U : GV->users())
|
|
|
|
if (!IsUserOfGlobalSafeForSRA(U, GV))
|
2008-01-14 10:09:12 +08:00
|
|
|
return false;
|
2014-03-09 11:16:01 +08:00
|
|
|
|
2008-01-14 09:31:05 +08:00
|
|
|
return true;
|
|
|
|
}
|
2010-10-19 05:16:00 +08:00
|
|
|
|
2017-08-04 12:51:15 +08:00
|
|
|
/// Copy over the debug info for a variable to its SRA replacements.
|
|
|
|
static void transferSRADebugInfo(GlobalVariable *GV, GlobalVariable *NGV,
|
|
|
|
uint64_t FragmentOffsetInBits,
|
2017-08-31 08:06:18 +08:00
|
|
|
uint64_t FragmentSizeInBits,
|
|
|
|
unsigned NumElements) {
|
2017-08-04 12:51:15 +08:00
|
|
|
SmallVector<DIGlobalVariableExpression *, 1> GVs;
|
|
|
|
GV->getDebugInfo(GVs);
|
|
|
|
for (auto *GVE : GVs) {
|
|
|
|
DIVariable *Var = GVE->getVariable();
|
|
|
|
DIExpression *Expr = GVE->getExpression();
|
2017-11-07 08:45:34 +08:00
|
|
|
if (NumElements > 1) {
|
|
|
|
if (auto E = DIExpression::createFragmentExpression(
|
|
|
|
Expr, FragmentOffsetInBits, FragmentSizeInBits))
|
|
|
|
Expr = *E;
|
|
|
|
else
|
|
|
|
return;
|
|
|
|
}
|
2017-08-31 08:06:18 +08:00
|
|
|
auto *NGVE = DIGlobalVariableExpression::get(GVE->getContext(), Var, Expr);
|
2017-08-04 12:51:15 +08:00
|
|
|
NGV->addDebugInfo(NGVE);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2015-11-13 19:05:07 +08:00
|
|
|
/// Perform scalar replacement of aggregates on the specified global variable.
|
|
|
|
/// This opens the door for other optimizations by exposing the behavior of the
|
|
|
|
/// program in a more fine-grained way. We have determined that this
|
|
|
|
/// transformation is safe already. We return the first global variable we
|
2004-10-09 01:32:09 +08:00
|
|
|
/// insert so that the caller can reprocess it.
|
2014-02-21 08:06:31 +08:00
|
|
|
static GlobalVariable *SRAGlobal(GlobalVariable *GV, const DataLayout &DL) {
|
2008-01-14 09:31:05 +08:00
|
|
|
// Make sure this global only has simple uses that we can SRA.
|
2008-01-14 10:09:12 +08:00
|
|
|
if (!GlobalUsersSafeToSRA(GV))
|
2014-04-25 13:29:35 +08:00
|
|
|
return nullptr;
|
2010-10-19 05:16:00 +08:00
|
|
|
|
2016-04-25 18:48:29 +08:00
|
|
|
assert(GV->hasLocalLinkage());
|
2004-10-09 01:32:09 +08:00
|
|
|
Constant *Init = GV->getInitializer();
|
2011-07-18 12:54:35 +08:00
|
|
|
Type *Ty = Init->getType();
|
2005-04-22 07:48:37 +08:00
|
|
|
|
2017-10-11 06:49:55 +08:00
|
|
|
std::vector<GlobalVariable *> NewGlobals;
|
2004-10-09 01:32:09 +08:00
|
|
|
Module::GlobalListType &Globals = GV->getParent()->getGlobalList();
|
|
|
|
|
2008-04-26 15:40:11 +08:00
|
|
|
// Get the alignment of the global, either explicit or target-specific.
|
|
|
|
unsigned StartAlignment = GV->getAlignment();
|
|
|
|
if (StartAlignment == 0)
|
2014-02-21 08:06:31 +08:00
|
|
|
StartAlignment = DL.getABITypeAlignment(GV->getType());
|
2010-10-19 05:16:00 +08:00
|
|
|
|
2011-07-18 12:54:35 +08:00
|
|
|
if (StructType *STy = dyn_cast<StructType>(Ty)) {
|
2017-08-31 08:06:18 +08:00
|
|
|
unsigned NumElements = STy->getNumElements();
|
|
|
|
NewGlobals.reserve(NumElements);
|
2014-02-21 08:06:31 +08:00
|
|
|
const StructLayout &Layout = *DL.getStructLayout(STy);
|
2017-08-31 08:06:18 +08:00
|
|
|
for (unsigned i = 0, e = NumElements; i != e; ++i) {
|
2012-01-25 14:48:06 +08:00
|
|
|
Constant *In = Init->getAggregateElement(i);
|
2004-10-09 01:32:09 +08:00
|
|
|
assert(In && "Couldn't get element of initializer?");
|
2009-11-06 12:27:31 +08:00
|
|
|
GlobalVariable *NGV = new GlobalVariable(STy->getElementType(i), false,
|
2004-10-09 01:32:09 +08:00
|
|
|
GlobalVariable::InternalLinkage,
|
2009-07-31 01:37:43 +08:00
|
|
|
In, GV->getName()+"."+Twine(i),
|
2012-06-23 19:37:03 +08:00
|
|
|
GV->getThreadLocalMode(),
|
2009-07-08 09:26:06 +08:00
|
|
|
GV->getType()->getAddressSpace());
|
2015-11-10 00:47:16 +08:00
|
|
|
NGV->setExternallyInitialized(GV->isExternallyInitialized());
|
Make sure that any new and optimized objects created during GlobalOPT copy all the attributes from the base object.
Summary:
Make sure that any new and optimized objects created during GlobalOPT copy all the attributes from the base object.
A good example of improper behavior in the current implementation is section information associated with the GlobalObject. If a section was set for it, and GlobalOpt is creating/modifying a new object based on this one (often copying the original name), without this change new object will be placed in a default section, resulting in inappropriate properties of the new variable.
The argument here is that if customer specified a section for a variable, any changes to it that compiler does should not cause it to change that section allocation.
Moreover, any other properties worth representation in copyAttributesFrom() should also be propagated.
Reviewers: jmolloy, joker-eph, joker.eph
Subscribers: slarin, joker.eph, rafael, tobiasvk, llvm-commits
Differential Revision: http://reviews.llvm.org/D16074
llvm-svn: 258556
2016-01-23 05:18:20 +08:00
|
|
|
NGV->copyAttributesFrom(GV);
|
2015-12-23 03:16:50 +08:00
|
|
|
Globals.push_back(NGV);
|
2004-10-09 01:32:09 +08:00
|
|
|
NewGlobals.push_back(NGV);
|
2010-10-19 05:16:00 +08:00
|
|
|
|
2008-04-26 15:40:11 +08:00
|
|
|
// Calculate the known alignment of the field. If the original aggregate
|
|
|
|
// had 256 byte alignment for example, something might depend on that:
|
|
|
|
// propagate info to each field.
|
|
|
|
uint64_t FieldOffset = Layout.getElementOffset(i);
|
|
|
|
unsigned NewAlign = (unsigned)MinAlign(StartAlignment, FieldOffset);
|
2014-02-21 08:06:31 +08:00
|
|
|
if (NewAlign > DL.getABITypeAlignment(STy->getElementType(i)))
|
2008-04-26 15:40:11 +08:00
|
|
|
NGV->setAlignment(NewAlign);
|
2017-08-04 12:51:15 +08:00
|
|
|
|
|
|
|
// Copy over the debug info for the variable.
|
[GlobalOpt] Include padding in debug fragments
Summary:
When creating the debug fragments for a SRA'd variable, use the types'
allocation sizes. This fixes issues where the pass would emit too small
fragments, placed at the wrong offset, for padded types.
An example of this is long double on x86. The type is represented using
x86_fp80, which is 10 bytes, but the value is aligned to 12/16 bytes.
The padding is included in the type's DW_AT_byte_size attribute;
therefore, the fragments should also include that. Newer GCC releases
(I tested 7.2.0) emit 12/16-byte pieces for long double. Earlier
releases, e.g. GCC 5.5.0, behaved as LLVM did, i.e. by emitting a
10-byte piece, followed by an empty 2/6-byte piece for the padding.
Failing to cover all `DW_AT_byte_size' bytes of a value with non-empty
pieces results in the value being printed as <optimized out> by GDB.
Patch by: David Stenberg
Reviewers: aprantl, JDevlieghere
Reviewed By: aprantl, JDevlieghere
Subscribers: llvm-commits
Tags: #debug-info
Differential Revision: https://reviews.llvm.org/D42807
llvm-svn: 324066
2018-02-02 18:34:13 +08:00
|
|
|
uint64_t Size = DL.getTypeAllocSizeInBits(NGV->getValueType());
|
2018-01-25 18:09:26 +08:00
|
|
|
uint64_t FragmentOffsetInBits = Layout.getElementOffsetInBits(i);
|
|
|
|
transferSRADebugInfo(GV, NGV, FragmentOffsetInBits, Size, NumElements);
|
2004-10-09 01:32:09 +08:00
|
|
|
}
|
2011-07-18 12:54:35 +08:00
|
|
|
} else if (SequentialType *STy = dyn_cast<SequentialType>(Ty)) {
|
2016-12-02 11:20:58 +08:00
|
|
|
unsigned NumElements = STy->getNumElements();
|
2005-02-24 00:53:04 +08:00
|
|
|
if (NumElements > 16 && GV->hasNUsesOrMore(16))
|
2014-04-25 13:29:35 +08:00
|
|
|
return nullptr; // It's not worth it.
|
2004-10-09 01:32:09 +08:00
|
|
|
NewGlobals.reserve(NumElements);
|
2017-08-04 12:51:15 +08:00
|
|
|
auto ElTy = STy->getElementType();
|
|
|
|
uint64_t EltSize = DL.getTypeAllocSize(ElTy);
|
|
|
|
unsigned EltAlign = DL.getABITypeAlignment(ElTy);
|
[GlobalOpt] Include padding in debug fragments
Summary:
When creating the debug fragments for a SRA'd variable, use the types'
allocation sizes. This fixes issues where the pass would emit too small
fragments, placed at the wrong offset, for padded types.
An example of this is long double on x86. The type is represented using
x86_fp80, which is 10 bytes, but the value is aligned to 12/16 bytes.
The padding is included in the type's DW_AT_byte_size attribute;
therefore, the fragments should also include that. Newer GCC releases
(I tested 7.2.0) emit 12/16-byte pieces for long double. Earlier
releases, e.g. GCC 5.5.0, behaved as LLVM did, i.e. by emitting a
10-byte piece, followed by an empty 2/6-byte piece for the padding.
Failing to cover all `DW_AT_byte_size' bytes of a value with non-empty
pieces results in the value being printed as <optimized out> by GDB.
Patch by: David Stenberg
Reviewers: aprantl, JDevlieghere
Reviewed By: aprantl, JDevlieghere
Subscribers: llvm-commits
Tags: #debug-info
Differential Revision: https://reviews.llvm.org/D42807
llvm-svn: 324066
2018-02-02 18:34:13 +08:00
|
|
|
uint64_t FragmentSizeInBits = DL.getTypeAllocSizeInBits(ElTy);
|
2004-10-09 01:32:09 +08:00
|
|
|
for (unsigned i = 0, e = NumElements; i != e; ++i) {
|
2012-01-25 14:48:06 +08:00
|
|
|
Constant *In = Init->getAggregateElement(i);
|
2004-10-09 01:32:09 +08:00
|
|
|
assert(In && "Couldn't get element of initializer?");
|
|
|
|
|
2009-11-06 12:27:31 +08:00
|
|
|
GlobalVariable *NGV = new GlobalVariable(STy->getElementType(), false,
|
2004-10-09 01:32:09 +08:00
|
|
|
GlobalVariable::InternalLinkage,
|
2009-07-31 01:37:43 +08:00
|
|
|
In, GV->getName()+"."+Twine(i),
|
2012-06-23 19:37:03 +08:00
|
|
|
GV->getThreadLocalMode(),
|
2009-07-09 03:03:57 +08:00
|
|
|
GV->getType()->getAddressSpace());
|
2015-11-10 00:47:16 +08:00
|
|
|
NGV->setExternallyInitialized(GV->isExternallyInitialized());
|
Make sure that any new and optimized objects created during GlobalOPT copy all the attributes from the base object.
Summary:
Make sure that any new and optimized objects created during GlobalOPT copy all the attributes from the base object.
A good example of improper behavior in the current implementation is section information associated with the GlobalObject. If a section was set for it, and GlobalOpt is creating/modifying a new object based on this one (often copying the original name), without this change new object will be placed in a default section, resulting in inappropriate properties of the new variable.
The argument here is that if customer specified a section for a variable, any changes to it that compiler does should not cause it to change that section allocation.
Moreover, any other properties worth representation in copyAttributesFrom() should also be propagated.
Reviewers: jmolloy, joker-eph, joker.eph
Subscribers: slarin, joker.eph, rafael, tobiasvk, llvm-commits
Differential Revision: http://reviews.llvm.org/D16074
llvm-svn: 258556
2016-01-23 05:18:20 +08:00
|
|
|
NGV->copyAttributesFrom(GV);
|
2015-12-23 03:16:50 +08:00
|
|
|
Globals.push_back(NGV);
|
2004-10-09 01:32:09 +08:00
|
|
|
NewGlobals.push_back(NGV);
|
2010-10-19 05:16:00 +08:00
|
|
|
|
2008-04-26 15:40:11 +08:00
|
|
|
// Calculate the known alignment of the field. If the original aggregate
|
|
|
|
// had 256 byte alignment for example, something might depend on that:
|
|
|
|
// propagate info to each field.
|
|
|
|
unsigned NewAlign = (unsigned)MinAlign(StartAlignment, EltSize*i);
|
|
|
|
if (NewAlign > EltAlign)
|
|
|
|
NGV->setAlignment(NewAlign);
|
2017-08-31 08:06:18 +08:00
|
|
|
transferSRADebugInfo(GV, NGV, FragmentSizeInBits * i, FragmentSizeInBits,
|
|
|
|
NumElements);
|
2004-10-09 01:32:09 +08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
if (NewGlobals.empty())
|
2014-04-25 13:29:35 +08:00
|
|
|
return nullptr;
|
2010-10-19 05:16:00 +08:00
|
|
|
|
2018-05-14 20:53:11 +08:00
|
|
|
LLVM_DEBUG(dbgs() << "PERFORMING GLOBAL SRA ON: " << *GV << "\n");
|
This patch implements two things (sorry).
First, it allows SRA of globals that have embedded arrays, implementing
GlobalOpt/globalsra-partial.llx. This comes up infrequently, but does allow,
for example, deleting several stores to dead parts of globals in dhrystone.
Second, this implements GlobalOpt/malloc-promote-*.llx, which is the
following nifty transformation:
Basically if a global pointer is initialized with malloc, and we can tell
that the program won't notice, we transform this:
struct foo *FooPtr;
...
FooPtr = malloc(sizeof(struct foo));
...
FooPtr->A FooPtr->B
Into:
struct foo FooPtrBody;
...
FooPtrBody.A FooPtrBody.B
This comes up occasionally, for example, the 'disp' global in 183.equake (where
the xform speeds the CBE version of the program up from 56.16s to 52.40s (7%)
on apoc), and the 'desired_accept', 'fixLRBT', 'macroArray', & 'key_queue'
globals in 300.twolf (speeding it up from 22.29s to 21.55s (3.4%)).
The nice thing about this xform is that it exposes the resulting global to
global variable optimization and makes alias analysis easier in addition to
eliminating a few loads.
llvm-svn: 16916
2004-10-11 13:54:41 +08:00
|
|
|
|
2009-11-06 12:27:31 +08:00
|
|
|
Constant *NullInt =Constant::getNullValue(Type::getInt32Ty(GV->getContext()));
|
2004-10-09 01:32:09 +08:00
|
|
|
|
|
|
|
// Loop over all of the uses of the global, replacing the constantexpr geps,
|
|
|
|
// with smaller constantexpr geps or direct references.
|
|
|
|
while (!GV->use_empty()) {
|
2014-03-09 11:16:01 +08:00
|
|
|
User *GEP = GV->user_back();
|
This patch implements two things (sorry).
First, it allows SRA of globals that have embedded arrays, implementing
GlobalOpt/globalsra-partial.llx. This comes up infrequently, but does allow,
for example, deleting several stores to dead parts of globals in dhrystone.
Second, this implements GlobalOpt/malloc-promote-*.llx, which is the
following nifty transformation:
Basically if a global pointer is initialized with malloc, and we can tell
that the program won't notice, we transform this:
struct foo *FooPtr;
...
FooPtr = malloc(sizeof(struct foo));
...
FooPtr->A FooPtr->B
Into:
struct foo FooPtrBody;
...
FooPtrBody.A FooPtrBody.B
This comes up occasionally, for example, the 'disp' global in 183.equake (where
the xform speeds the CBE version of the program up from 56.16s to 52.40s (7%)
on apoc), and the 'desired_accept', 'fixLRBT', 'macroArray', & 'key_queue'
globals in 300.twolf (speeding it up from 22.29s to 21.55s (3.4%)).
The nice thing about this xform is that it exposes the resulting global to
global variable optimization and makes alias analysis easier in addition to
eliminating a few loads.
llvm-svn: 16916
2004-10-11 13:54:41 +08:00
|
|
|
assert(((isa<ConstantExpr>(GEP) &&
|
|
|
|
cast<ConstantExpr>(GEP)->getOpcode()==Instruction::GetElementPtr)||
|
|
|
|
isa<GetElementPtrInst>(GEP)) && "NonGEP CE's are not SRAable!");
|
2005-04-22 07:48:37 +08:00
|
|
|
|
2004-10-09 01:32:09 +08:00
|
|
|
// Ignore the 1th operand, which has to be zero or else the program is quite
|
|
|
|
// broken (undefined). Get the 2nd operand, which is the structure or array
|
|
|
|
// index.
|
2006-10-20 15:07:24 +08:00
|
|
|
unsigned Val = cast<ConstantInt>(GEP->getOperand(2))->getZExtValue();
|
2004-10-09 01:32:09 +08:00
|
|
|
if (Val >= NewGlobals.size()) Val = 0; // Out of bound array access.
|
|
|
|
|
This patch implements two things (sorry).
First, it allows SRA of globals that have embedded arrays, implementing
GlobalOpt/globalsra-partial.llx. This comes up infrequently, but does allow,
for example, deleting several stores to dead parts of globals in dhrystone.
Second, this implements GlobalOpt/malloc-promote-*.llx, which is the
following nifty transformation:
Basically if a global pointer is initialized with malloc, and we can tell
that the program won't notice, we transform this:
struct foo *FooPtr;
...
FooPtr = malloc(sizeof(struct foo));
...
FooPtr->A FooPtr->B
Into:
struct foo FooPtrBody;
...
FooPtrBody.A FooPtrBody.B
This comes up occasionally, for example, the 'disp' global in 183.equake (where
the xform speeds the CBE version of the program up from 56.16s to 52.40s (7%)
on apoc), and the 'desired_accept', 'fixLRBT', 'macroArray', & 'key_queue'
globals in 300.twolf (speeding it up from 22.29s to 21.55s (3.4%)).
The nice thing about this xform is that it exposes the resulting global to
global variable optimization and makes alias analysis easier in addition to
eliminating a few loads.
llvm-svn: 16916
2004-10-11 13:54:41 +08:00
|
|
|
Value *NewPtr = NewGlobals[Val];
|
2015-05-08 01:28:58 +08:00
|
|
|
Type *NewTy = NewGlobals[Val]->getValueType();
|
2004-10-09 01:32:09 +08:00
|
|
|
|
|
|
|
// Form a shorter GEP if needed.
|
2008-02-20 19:26:25 +08:00
|
|
|
if (GEP->getNumOperands() > 3) {
|
This patch implements two things (sorry).
First, it allows SRA of globals that have embedded arrays, implementing
GlobalOpt/globalsra-partial.llx. This comes up infrequently, but does allow,
for example, deleting several stores to dead parts of globals in dhrystone.
Second, this implements GlobalOpt/malloc-promote-*.llx, which is the
following nifty transformation:
Basically if a global pointer is initialized with malloc, and we can tell
that the program won't notice, we transform this:
struct foo *FooPtr;
...
FooPtr = malloc(sizeof(struct foo));
...
FooPtr->A FooPtr->B
Into:
struct foo FooPtrBody;
...
FooPtrBody.A FooPtrBody.B
This comes up occasionally, for example, the 'disp' global in 183.equake (where
the xform speeds the CBE version of the program up from 56.16s to 52.40s (7%)
on apoc), and the 'desired_accept', 'fixLRBT', 'macroArray', & 'key_queue'
globals in 300.twolf (speeding it up from 22.29s to 21.55s (3.4%)).
The nice thing about this xform is that it exposes the resulting global to
global variable optimization and makes alias analysis easier in addition to
eliminating a few loads.
llvm-svn: 16916
2004-10-11 13:54:41 +08:00
|
|
|
if (ConstantExpr *CE = dyn_cast<ConstantExpr>(GEP)) {
|
2007-01-31 12:40:53 +08:00
|
|
|
SmallVector<Constant*, 8> Idxs;
|
This patch implements two things (sorry).
First, it allows SRA of globals that have embedded arrays, implementing
GlobalOpt/globalsra-partial.llx. This comes up infrequently, but does allow,
for example, deleting several stores to dead parts of globals in dhrystone.
Second, this implements GlobalOpt/malloc-promote-*.llx, which is the
following nifty transformation:
Basically if a global pointer is initialized with malloc, and we can tell
that the program won't notice, we transform this:
struct foo *FooPtr;
...
FooPtr = malloc(sizeof(struct foo));
...
FooPtr->A FooPtr->B
Into:
struct foo FooPtrBody;
...
FooPtrBody.A FooPtrBody.B
This comes up occasionally, for example, the 'disp' global in 183.equake (where
the xform speeds the CBE version of the program up from 56.16s to 52.40s (7%)
on apoc), and the 'desired_accept', 'fixLRBT', 'macroArray', & 'key_queue'
globals in 300.twolf (speeding it up from 22.29s to 21.55s (3.4%)).
The nice thing about this xform is that it exposes the resulting global to
global variable optimization and makes alias analysis easier in addition to
eliminating a few loads.
llvm-svn: 16916
2004-10-11 13:54:41 +08:00
|
|
|
Idxs.push_back(NullInt);
|
|
|
|
for (unsigned i = 3, e = CE->getNumOperands(); i != e; ++i)
|
|
|
|
Idxs.push_back(CE->getOperand(i));
|
2015-04-03 02:55:32 +08:00
|
|
|
NewPtr =
|
|
|
|
ConstantExpr::getGetElementPtr(NewTy, cast<Constant>(NewPtr), Idxs);
|
This patch implements two things (sorry).
First, it allows SRA of globals that have embedded arrays, implementing
GlobalOpt/globalsra-partial.llx. This comes up infrequently, but does allow,
for example, deleting several stores to dead parts of globals in dhrystone.
Second, this implements GlobalOpt/malloc-promote-*.llx, which is the
following nifty transformation:
Basically if a global pointer is initialized with malloc, and we can tell
that the program won't notice, we transform this:
struct foo *FooPtr;
...
FooPtr = malloc(sizeof(struct foo));
...
FooPtr->A FooPtr->B
Into:
struct foo FooPtrBody;
...
FooPtrBody.A FooPtrBody.B
This comes up occasionally, for example, the 'disp' global in 183.equake (where
the xform speeds the CBE version of the program up from 56.16s to 52.40s (7%)
on apoc), and the 'desired_accept', 'fixLRBT', 'macroArray', & 'key_queue'
globals in 300.twolf (speeding it up from 22.29s to 21.55s (3.4%)).
The nice thing about this xform is that it exposes the resulting global to
global variable optimization and makes alias analysis easier in addition to
eliminating a few loads.
llvm-svn: 16916
2004-10-11 13:54:41 +08:00
|
|
|
} else {
|
|
|
|
GetElementPtrInst *GEPI = cast<GetElementPtrInst>(GEP);
|
2007-02-01 03:59:55 +08:00
|
|
|
SmallVector<Value*, 8> Idxs;
|
This patch implements two things (sorry).
First, it allows SRA of globals that have embedded arrays, implementing
GlobalOpt/globalsra-partial.llx. This comes up infrequently, but does allow,
for example, deleting several stores to dead parts of globals in dhrystone.
Second, this implements GlobalOpt/malloc-promote-*.llx, which is the
following nifty transformation:
Basically if a global pointer is initialized with malloc, and we can tell
that the program won't notice, we transform this:
struct foo *FooPtr;
...
FooPtr = malloc(sizeof(struct foo));
...
FooPtr->A FooPtr->B
Into:
struct foo FooPtrBody;
...
FooPtrBody.A FooPtrBody.B
This comes up occasionally, for example, the 'disp' global in 183.equake (where
the xform speeds the CBE version of the program up from 56.16s to 52.40s (7%)
on apoc), and the 'desired_accept', 'fixLRBT', 'macroArray', & 'key_queue'
globals in 300.twolf (speeding it up from 22.29s to 21.55s (3.4%)).
The nice thing about this xform is that it exposes the resulting global to
global variable optimization and makes alias analysis easier in addition to
eliminating a few loads.
llvm-svn: 16916
2004-10-11 13:54:41 +08:00
|
|
|
Idxs.push_back(NullInt);
|
|
|
|
for (unsigned i = 3, e = GEPI->getNumOperands(); i != e; ++i)
|
|
|
|
Idxs.push_back(GEPI->getOperand(i));
|
2015-03-14 09:53:18 +08:00
|
|
|
NewPtr = GetElementPtrInst::Create(
|
2015-05-08 01:28:58 +08:00
|
|
|
NewTy, NewPtr, Idxs, GEPI->getName() + "." + Twine(Val), GEPI);
|
This patch implements two things (sorry).
First, it allows SRA of globals that have embedded arrays, implementing
GlobalOpt/globalsra-partial.llx. This comes up infrequently, but does allow,
for example, deleting several stores to dead parts of globals in dhrystone.
Second, this implements GlobalOpt/malloc-promote-*.llx, which is the
following nifty transformation:
Basically if a global pointer is initialized with malloc, and we can tell
that the program won't notice, we transform this:
struct foo *FooPtr;
...
FooPtr = malloc(sizeof(struct foo));
...
FooPtr->A FooPtr->B
Into:
struct foo FooPtrBody;
...
FooPtrBody.A FooPtrBody.B
This comes up occasionally, for example, the 'disp' global in 183.equake (where
the xform speeds the CBE version of the program up from 56.16s to 52.40s (7%)
on apoc), and the 'desired_accept', 'fixLRBT', 'macroArray', & 'key_queue'
globals in 300.twolf (speeding it up from 22.29s to 21.55s (3.4%)).
The nice thing about this xform is that it exposes the resulting global to
global variable optimization and makes alias analysis easier in addition to
eliminating a few loads.
llvm-svn: 16916
2004-10-11 13:54:41 +08:00
|
|
|
}
|
2008-02-20 19:26:25 +08:00
|
|
|
}
|
This patch implements two things (sorry).
First, it allows SRA of globals that have embedded arrays, implementing
GlobalOpt/globalsra-partial.llx. This comes up infrequently, but does allow,
for example, deleting several stores to dead parts of globals in dhrystone.
Second, this implements GlobalOpt/malloc-promote-*.llx, which is the
following nifty transformation:
Basically if a global pointer is initialized with malloc, and we can tell
that the program won't notice, we transform this:
struct foo *FooPtr;
...
FooPtr = malloc(sizeof(struct foo));
...
FooPtr->A FooPtr->B
Into:
struct foo FooPtrBody;
...
FooPtrBody.A FooPtrBody.B
This comes up occasionally, for example, the 'disp' global in 183.equake (where
the xform speeds the CBE version of the program up from 56.16s to 52.40s (7%)
on apoc), and the 'desired_accept', 'fixLRBT', 'macroArray', & 'key_queue'
globals in 300.twolf (speeding it up from 22.29s to 21.55s (3.4%)).
The nice thing about this xform is that it exposes the resulting global to
global variable optimization and makes alias analysis easier in addition to
eliminating a few loads.
llvm-svn: 16916
2004-10-11 13:54:41 +08:00
|
|
|
GEP->replaceAllUsesWith(NewPtr);
|
|
|
|
|
|
|
|
if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(GEP))
|
2004-10-17 02:09:00 +08:00
|
|
|
GEPI->eraseFromParent();
|
This patch implements two things (sorry).
First, it allows SRA of globals that have embedded arrays, implementing
GlobalOpt/globalsra-partial.llx. This comes up infrequently, but does allow,
for example, deleting several stores to dead parts of globals in dhrystone.
Second, this implements GlobalOpt/malloc-promote-*.llx, which is the
following nifty transformation:
Basically if a global pointer is initialized with malloc, and we can tell
that the program won't notice, we transform this:
struct foo *FooPtr;
...
FooPtr = malloc(sizeof(struct foo));
...
FooPtr->A FooPtr->B
Into:
struct foo FooPtrBody;
...
FooPtrBody.A FooPtrBody.B
This comes up occasionally, for example, the 'disp' global in 183.equake (where
the xform speeds the CBE version of the program up from 56.16s to 52.40s (7%)
on apoc), and the 'desired_accept', 'fixLRBT', 'macroArray', & 'key_queue'
globals in 300.twolf (speeding it up from 22.29s to 21.55s (3.4%)).
The nice thing about this xform is that it exposes the resulting global to
global variable optimization and makes alias analysis easier in addition to
eliminating a few loads.
llvm-svn: 16916
2004-10-11 13:54:41 +08:00
|
|
|
else
|
|
|
|
cast<ConstantExpr>(GEP)->destroyConstant();
|
2004-10-09 01:32:09 +08:00
|
|
|
}
|
|
|
|
|
2004-10-09 04:25:55 +08:00
|
|
|
// Delete the old global, now that it is dead.
|
|
|
|
Globals.erase(GV);
|
2004-10-09 01:32:09 +08:00
|
|
|
++NumSRA;
|
This patch implements two things (sorry).
First, it allows SRA of globals that have embedded arrays, implementing
GlobalOpt/globalsra-partial.llx. This comes up infrequently, but does allow,
for example, deleting several stores to dead parts of globals in dhrystone.
Second, this implements GlobalOpt/malloc-promote-*.llx, which is the
following nifty transformation:
Basically if a global pointer is initialized with malloc, and we can tell
that the program won't notice, we transform this:
struct foo *FooPtr;
...
FooPtr = malloc(sizeof(struct foo));
...
FooPtr->A FooPtr->B
Into:
struct foo FooPtrBody;
...
FooPtrBody.A FooPtrBody.B
This comes up occasionally, for example, the 'disp' global in 183.equake (where
the xform speeds the CBE version of the program up from 56.16s to 52.40s (7%)
on apoc), and the 'desired_accept', 'fixLRBT', 'macroArray', & 'key_queue'
globals in 300.twolf (speeding it up from 22.29s to 21.55s (3.4%)).
The nice thing about this xform is that it exposes the resulting global to
global variable optimization and makes alias analysis easier in addition to
eliminating a few loads.
llvm-svn: 16916
2004-10-11 13:54:41 +08:00
|
|
|
|
|
|
|
// Loop over the new globals array deleting any globals that are obviously
|
|
|
|
// dead. This can arise due to scalarization of a structure or an array that
|
|
|
|
// has elements that are dead.
|
|
|
|
unsigned FirstGlobal = 0;
|
|
|
|
for (unsigned i = 0, e = NewGlobals.size(); i != e; ++i)
|
|
|
|
if (NewGlobals[i]->use_empty()) {
|
|
|
|
Globals.erase(NewGlobals[i]);
|
|
|
|
if (FirstGlobal == i) ++FirstGlobal;
|
|
|
|
}
|
|
|
|
|
2014-04-25 13:29:35 +08:00
|
|
|
return FirstGlobal != NewGlobals.size() ? NewGlobals[FirstGlobal] : nullptr;
|
2004-10-09 01:32:09 +08:00
|
|
|
}
|
|
|
|
|
2015-11-13 19:05:07 +08:00
|
|
|
/// Return true if all users of the specified value will trap if the value is
|
|
|
|
/// dynamically null. PHIs keeps track of any phi nodes we've seen to avoid
|
|
|
|
/// reprocessing them.
|
2010-04-07 03:24:18 +08:00
|
|
|
static bool AllUsesOfValueWillTrapIfNull(const Value *V,
|
2014-08-21 13:55:13 +08:00
|
|
|
SmallPtrSetImpl<const PHINode*> &PHIs) {
|
llvm: Add support for "-fno-delete-null-pointer-checks"
Summary:
Support for this option is needed for building Linux kernel.
This is a very frequently requested feature by kernel developers.
More details : https://lkml.org/lkml/2018/4/4/601
GCC option description for -fdelete-null-pointer-checks:
This Assume that programs cannot safely dereference null pointers,
and that no code or data element resides at address zero.
-fno-delete-null-pointer-checks is the inverse of this implying that
null pointer dereferencing is not undefined.
This feature is implemented in LLVM IR in this CL as the function attribute
"null-pointer-is-valid"="true" in IR (Under review at D47894).
The CL updates several passes that assumed null pointer dereferencing is
undefined to not optimize when the "null-pointer-is-valid"="true"
attribute is present.
Reviewers: t.p.northover, efriedma, jyknight, chandlerc, rnk, srhines, void, george.burgess.iv
Reviewed By: efriedma, george.burgess.iv
Subscribers: eraman, haicheng, george.burgess.iv, drinkcat, theraven, reames, sanjoy, xbolva00, llvm-commits
Differential Revision: https://reviews.llvm.org/D47895
llvm-svn: 336613
2018-07-10 06:27:23 +08:00
|
|
|
for (const User *U : V->users()) {
|
|
|
|
if (const Instruction *I = dyn_cast<Instruction>(U)) {
|
|
|
|
// If null pointer is considered valid, then all uses are non-trapping.
|
|
|
|
// Non address-space 0 globals have already been pruned by the caller.
|
|
|
|
if (NullPointerIsDefined(I->getFunction()))
|
|
|
|
return false;
|
|
|
|
}
|
2010-04-07 03:14:05 +08:00
|
|
|
if (isa<LoadInst>(U)) {
|
2004-10-10 05:48:45 +08:00
|
|
|
// Will trap.
|
2010-04-07 03:24:18 +08:00
|
|
|
} else if (const StoreInst *SI = dyn_cast<StoreInst>(U)) {
|
2004-10-10 05:48:45 +08:00
|
|
|
if (SI->getOperand(0) == V) {
|
2010-04-07 03:14:05 +08:00
|
|
|
//cerr << "NONTRAPPING USE: " << *U;
|
2004-10-10 05:48:45 +08:00
|
|
|
return false; // Storing the value.
|
|
|
|
}
|
2010-04-07 03:24:18 +08:00
|
|
|
} else if (const CallInst *CI = dyn_cast<CallInst>(U)) {
|
2010-03-21 05:00:25 +08:00
|
|
|
if (CI->getCalledValue() != V) {
|
2010-04-07 03:14:05 +08:00
|
|
|
//cerr << "NONTRAPPING USE: " << *U;
|
2004-10-10 05:48:45 +08:00
|
|
|
return false; // Not calling the ptr
|
|
|
|
}
|
2010-04-07 03:24:18 +08:00
|
|
|
} else if (const InvokeInst *II = dyn_cast<InvokeInst>(U)) {
|
2010-03-21 05:00:25 +08:00
|
|
|
if (II->getCalledValue() != V) {
|
2010-04-07 03:14:05 +08:00
|
|
|
//cerr << "NONTRAPPING USE: " << *U;
|
2004-10-10 05:48:45 +08:00
|
|
|
return false; // Not calling the ptr
|
|
|
|
}
|
2010-04-07 03:24:18 +08:00
|
|
|
} else if (const BitCastInst *CI = dyn_cast<BitCastInst>(U)) {
|
2007-09-14 00:30:19 +08:00
|
|
|
if (!AllUsesOfValueWillTrapIfNull(CI, PHIs)) return false;
|
2010-04-07 03:24:18 +08:00
|
|
|
} else if (const GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(U)) {
|
2007-09-14 00:30:19 +08:00
|
|
|
if (!AllUsesOfValueWillTrapIfNull(GEPI, PHIs)) return false;
|
2010-04-07 03:24:18 +08:00
|
|
|
} else if (const PHINode *PN = dyn_cast<PHINode>(U)) {
|
2007-09-14 00:30:19 +08:00
|
|
|
// If we've already seen this phi node, ignore it, it has already been
|
|
|
|
// checked.
|
2014-11-19 15:49:26 +08:00
|
|
|
if (PHIs.insert(PN).second && !AllUsesOfValueWillTrapIfNull(PN, PHIs))
|
2010-01-30 07:54:14 +08:00
|
|
|
return false;
|
2010-04-07 03:14:05 +08:00
|
|
|
} else if (isa<ICmpInst>(U) &&
|
2014-03-09 11:16:01 +08:00
|
|
|
isa<ConstantPointerNull>(U->getOperand(1))) {
|
2010-02-25 14:39:10 +08:00
|
|
|
// Ignore icmp X, null
|
2004-10-10 05:48:45 +08:00
|
|
|
} else {
|
2010-04-07 03:14:05 +08:00
|
|
|
//cerr << "NONTRAPPING USE: " << *U;
|
2004-10-10 05:48:45 +08:00
|
|
|
return false;
|
|
|
|
}
|
llvm: Add support for "-fno-delete-null-pointer-checks"
Summary:
Support for this option is needed for building Linux kernel.
This is a very frequently requested feature by kernel developers.
More details : https://lkml.org/lkml/2018/4/4/601
GCC option description for -fdelete-null-pointer-checks:
This Assume that programs cannot safely dereference null pointers,
and that no code or data element resides at address zero.
-fno-delete-null-pointer-checks is the inverse of this implying that
null pointer dereferencing is not undefined.
This feature is implemented in LLVM IR in this CL as the function attribute
"null-pointer-is-valid"="true" in IR (Under review at D47894).
The CL updates several passes that assumed null pointer dereferencing is
undefined to not optimize when the "null-pointer-is-valid"="true"
attribute is present.
Reviewers: t.p.northover, efriedma, jyknight, chandlerc, rnk, srhines, void, george.burgess.iv
Reviewed By: efriedma, george.burgess.iv
Subscribers: eraman, haicheng, george.burgess.iv, drinkcat, theraven, reames, sanjoy, xbolva00, llvm-commits
Differential Revision: https://reviews.llvm.org/D47895
llvm-svn: 336613
2018-07-10 06:27:23 +08:00
|
|
|
}
|
2004-10-10 05:48:45 +08:00
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
2015-11-13 19:05:07 +08:00
|
|
|
/// Return true if all uses of any loads from GV will trap if the loaded value
|
|
|
|
/// is null. Note that this also permits comparisons of the loaded value
|
|
|
|
/// against null, as a special case.
|
2010-04-07 03:24:18 +08:00
|
|
|
static bool AllUsesOfLoadedValueWillTrapIfNull(const GlobalVariable *GV) {
|
2014-03-09 11:16:01 +08:00
|
|
|
for (const User *U : GV->users())
|
2010-04-07 03:24:18 +08:00
|
|
|
if (const LoadInst *LI = dyn_cast<LoadInst>(U)) {
|
|
|
|
SmallPtrSet<const PHINode*, 8> PHIs;
|
2007-09-14 00:30:19 +08:00
|
|
|
if (!AllUsesOfValueWillTrapIfNull(LI, PHIs))
|
2004-10-10 05:48:45 +08:00
|
|
|
return false;
|
2010-04-07 03:14:05 +08:00
|
|
|
} else if (isa<StoreInst>(U)) {
|
2004-10-10 05:48:45 +08:00
|
|
|
// Ignore stores to the global.
|
|
|
|
} else {
|
|
|
|
// We don't know or understand this user, bail out.
|
2010-04-07 03:14:05 +08:00
|
|
|
//cerr << "UNKNOWN USER OF GLOBAL!: " << *U;
|
2004-10-10 05:48:45 +08:00
|
|
|
return false;
|
|
|
|
}
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
2009-11-06 12:27:31 +08:00
|
|
|
/// Rewrite uses of V (a loaded-from-global pointer that would trap if null)
/// to use the known non-null constant NewV instead, deleting users that
/// become dead along the way. Returns true if anything was changed. Note:
/// returns false immediately (with no changes reported for this call) in
/// functions where null is a defined address.
static bool OptimizeAwayTrappingUsesOfValue(Value *V, Constant *NewV) {
  bool Changed = false;
  // Manually advance UI before using *UI: handlers below may erase the
  // current user, which would otherwise invalidate the iterator.
  for (auto UI = V->user_begin(), E = V->user_end(); UI != E; ) {
    Instruction *I = cast<Instruction>(*UI++);
    // Uses are non-trapping if null pointer is considered valid.
    // Non address-space 0 globals are already pruned by the caller.
    if (NullPointerIsDefined(I->getFunction()))
      return false;
    if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
      // Load the constant pointer instead of the trapping one.
      LI->setOperand(0, NewV);
      Changed = true;
    } else if (StoreInst *SI = dyn_cast<StoreInst>(I)) {
      // Only rewrite the *address* operand; a store of V as data is not a
      // trapping use through V.
      if (SI->getOperand(1) == V) {
        SI->setOperand(1, NewV);
        Changed = true;
      }
    } else if (isa<CallInst>(I) || isa<InvokeInst>(I)) {
      CallSite CS(I);
      if (CS.getCalledValue() == V) {
        // Calling through the pointer!  Turn into a direct call, but be careful
        // that the pointer is not also being passed as an argument.
        CS.setCalledFunction(NewV);
        Changed = true;
        bool PassedAsArg = false;
        for (unsigned i = 0, e = CS.arg_size(); i != e; ++i)
          if (CS.getArgument(i) == V) {
            PassedAsArg = true;
            CS.setArgument(i, NewV);
          }

        if (PassedAsArg) {
          // Being passed as an argument also.  Be careful to not invalidate UI!
          UI = V->user_begin();
        }
      }
    } else if (CastInst *CI = dyn_cast<CastInst>(I)) {
      // Fold the cast of NewV and push the replacement through the cast's
      // users recursively; drop the cast if that made it dead.
      Changed |= OptimizeAwayTrappingUsesOfValue(CI,
                                ConstantExpr::getCast(CI->getOpcode(),
                                                      NewV, CI->getType()));
      if (CI->use_empty()) {
        Changed = true;
        CI->eraseFromParent();
      }
    } else if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(I)) {
      // Should handle GEP here.
      SmallVector<Constant*, 8> Idxs;
      Idxs.reserve(GEPI->getNumOperands()-1);
      // Collect indices; stop at the first non-constant one, in which case
      // the GEP cannot be folded to a constant expression below.
      for (User::op_iterator i = GEPI->op_begin() + 1, e = GEPI->op_end();
           i != e; ++i)
        if (Constant *C = dyn_cast<Constant>(*i))
          Idxs.push_back(C);
        else
          break;
      if (Idxs.size() == GEPI->getNumOperands()-1)
        Changed |= OptimizeAwayTrappingUsesOfValue(
            GEPI, ConstantExpr::getGetElementPtr(nullptr, NewV, Idxs));
      if (GEPI->use_empty()) {
        Changed = true;
        GEPI->eraseFromParent();
      }
    }
  }

  return Changed;
}
|
|
|
|
|
2015-11-13 19:05:07 +08:00
|
|
|
/// The specified global has only one non-null value stored into it.  If there
/// are uses of the loaded value that would trap if the loaded value is
/// dynamically null, then we know that they cannot be reachable with a null
/// pointer value, so we can replace the loaded value and optimize away the
/// load.
static bool OptimizeAwayTrappingUsesOfLoads(GlobalVariable *GV, Constant *LV,
                                            const DataLayout &DL,
                                            TargetLibraryInfo *TLI) {
  bool Changed = false;

  // Keep track of whether we are able to remove all the uses of the global
  // other than the store that defines it.
  bool AllNonStoreUsesGone = true;

  // Replace all uses of loads with uses of uses of the stored value.
  // NOTE: the iterator is advanced before the user is inspected because the
  // user may be erased from GV's use list below.
  for (Value::user_iterator GUI = GV->user_begin(), E = GV->user_end(); GUI != E;){
    User *GlobalUser = *GUI++;
    if (LoadInst *LI = dyn_cast<LoadInst>(GlobalUser)) {
      Changed |= OptimizeAwayTrappingUsesOfValue(LI, LV);
      // If we were able to delete all uses of the loads
      if (LI->use_empty()) {
        LI->eraseFromParent();
        Changed = true;
      } else {
        AllNonStoreUsesGone = false;
      }
    } else if (isa<StoreInst>(GlobalUser)) {
      // Ignore the store that stores "LV" to the global.
      assert(GlobalUser->getOperand(1) == GV &&
             "Must be storing *to* the global");
    } else {
      AllNonStoreUsesGone = false;

      // If we get here we could have other crazy uses that are transitively
      // loaded.
      assert((isa<PHINode>(GlobalUser) || isa<SelectInst>(GlobalUser) ||
              isa<ConstantExpr>(GlobalUser) || isa<CmpInst>(GlobalUser) ||
              isa<BitCastInst>(GlobalUser) ||
              isa<GetElementPtrInst>(GlobalUser)) &&
             "Only expect load and stores!");
    }
  }

  if (Changed) {
    LLVM_DEBUG(dbgs() << "OPTIMIZED LOADS FROM STORED ONCE POINTER: " << *GV
                      << "\n");
    ++NumGlobUses;
  }

  // If we nuked all of the loads, then none of the stores are needed either,
  // nor is the global.
  if (AllNonStoreUsesGone) {
    if (isLeakCheckerRoot(GV)) {
      // Leak-checker roots require more conservative cleanup so that leak
      // detection tools still see the pointers the global kept alive.
      Changed |= CleanupPointerRootUsers(GV, TLI);
    } else {
      Changed = true;
      CleanupConstantGlobalUsers(GV, nullptr, DL, TLI);
    }
    if (GV->use_empty()) {
      LLVM_DEBUG(dbgs() << " *** GLOBAL NOW DEAD!\n");
      Changed = true;
      GV->eraseFromParent();
      ++NumDeleted;
    }
  }
  return Changed;
}
|
|
|
|
|
2015-11-13 19:05:07 +08:00
|
|
|
/// Walk the use list of V, constant folding all of the instructions that are
/// foldable.
static void ConstantPropUsersOf(Value *V, const DataLayout &DL,
                                TargetLibraryInfo *TLI) {
  // Manual iteration (rather than range-for): folding may erase the current
  // instruction, which would invalidate an iterator still pointing at it.
  for (Value::user_iterator UI = V->user_begin(), E = V->user_end(); UI != E; )
    if (Instruction *I = dyn_cast<Instruction>(*UI++))
      if (Constant *NewC = ConstantFoldInstruction(I, DL, TLI)) {
        I->replaceAllUsesWith(NewC);

        // Advance UI to the next non-I use to avoid invalidating it!
        // Instructions could multiply use V.
        while (UI != E && *UI == I)
          ++UI;
        // Only erase I when it is actually dead; a foldable instruction is
        // not necessarily side-effect free.
        if (isInstructionTriviallyDead(I, TLI))
          I->eraseFromParent();
      }
}
|
|
|
|
|
2015-11-13 19:05:07 +08:00
|
|
|
/// This function takes the specified global variable, and transforms the
/// program as if it always contained the result of the specified malloc.
/// Because it is always the result of the specified malloc, there is no reason
/// to actually DO the malloc.  Instead, turn the malloc into a global, and any
/// loads of GV as uses of the new global.
static GlobalVariable *
OptimizeGlobalAddressOfMalloc(GlobalVariable *GV, CallInst *CI, Type *AllocTy,
                              ConstantInt *NElements, const DataLayout &DL,
                              TargetLibraryInfo *TLI) {
  LLVM_DEBUG(errs() << "PROMOTING GLOBAL: " << *GV << " CALL = " << *CI
                    << '\n');

  Type *GlobalType;
  if (NElements->getZExtValue() == 1)
    GlobalType = AllocTy;
  else
    // If we have an array allocation, the global variable is of an array.
    GlobalType = ArrayType::get(AllocTy, NElements->getZExtValue());

  // Create the new global variable.  The contents of the malloc'd memory is
  // undefined, so initialize with an undef value.
  GlobalVariable *NewGV = new GlobalVariable(
      *GV->getParent(), GlobalType, false, GlobalValue::InternalLinkage,
      UndefValue::get(GlobalType), GV->getName() + ".body", nullptr,
      GV->getThreadLocalMode());

  // If there are bitcast users of the malloc (which is typical, usually we have
  // a malloc + bitcast) then replace them with uses of the new global.  Update
  // other users to use the global as well.
  BitCastInst *TheBC = nullptr;
  while (!CI->use_empty()) {
    Instruction *User = cast<Instruction>(CI->user_back());
    if (BitCastInst *BCI = dyn_cast<BitCastInst>(User)) {
      if (BCI->getType() == NewGV->getType()) {
        // Cast result has exactly the new global's type: forward directly.
        BCI->replaceAllUsesWith(NewGV);
        BCI->eraseFromParent();
      } else {
        // Re-point the cast at the new global instead of the malloc.
        BCI->setOperand(0, NewGV);
      }
    } else {
      // Lazily create a single bitcast of the global back to the malloc's
      // type and route all non-bitcast users through it.
      if (!TheBC)
        TheBC = new BitCastInst(NewGV, CI->getType(), "newgv", CI);
      User->replaceUsesOfWith(CI, TheBC);
    }
  }

  Constant *RepValue = NewGV;
  if (NewGV->getType() != GV->getValueType())
    RepValue = ConstantExpr::getBitCast(RepValue, GV->getValueType());

  // If there is a comparison against null, we will insert a global bool to
  // keep track of whether the global was initialized yet or not.
  GlobalVariable *InitBool =
    new GlobalVariable(Type::getInt1Ty(GV->getContext()), false,
                       GlobalValue::InternalLinkage,
                       ConstantInt::getFalse(GV->getContext()),
                       GV->getName()+".init", GV->getThreadLocalMode());
  bool InitBoolUsed = false;

  // Loop over all uses of GV, processing them in turn.
  while (!GV->use_empty()) {
    if (StoreInst *SI = dyn_cast<StoreInst>(GV->user_back())) {
      // The global is initialized when the store to it occurs.  Preserve the
      // original store's ordering/scope on the flag store.
      new StoreInst(ConstantInt::getTrue(GV->getContext()), InitBool, false, 0,
                    SI->getOrdering(), SI->getSyncScopeID(), SI);
      SI->eraseFromParent();
      continue;
    }

    // Only loads and stores of GV remain at this point.
    LoadInst *LI = cast<LoadInst>(GV->user_back());
    while (!LI->use_empty()) {
      Use &LoadUse = *LI->use_begin();
      ICmpInst *ICI = dyn_cast<ICmpInst>(LoadUse.getUser());
      if (!ICI) {
        // Ordinary use of the loaded pointer: it is now the global's address.
        LoadUse = RepValue;
        continue;
      }

      // Replace the cmp X, 0 with a use of the bool value.
      // Sink the load to where the compare was, if atomic rules allow us to.
      Value *LV = new LoadInst(InitBool, InitBool->getName()+".val", false, 0,
                               LI->getOrdering(), LI->getSyncScopeID(),
                               LI->isUnordered() ? (Instruction*)ICI : LI);
      InitBoolUsed = true;
      switch (ICI->getPredicate()) {
      default: llvm_unreachable("Unknown ICmp Predicate!");
      case ICmpInst::ICMP_ULT:
      case ICmpInst::ICMP_SLT:   // X < null -> always false
        LV = ConstantInt::getFalse(GV->getContext());
        break;
      case ICmpInst::ICMP_ULE:
      case ICmpInst::ICMP_SLE:
      case ICmpInst::ICMP_EQ:
        // X <= null / X == null  ->  "not initialized".
        LV = BinaryOperator::CreateNot(LV, "notinit", ICI);
        break;
      case ICmpInst::ICMP_NE:
      case ICmpInst::ICMP_UGE:
      case ICmpInst::ICMP_SGE:
      case ICmpInst::ICMP_UGT:
      case ICmpInst::ICMP_SGT:
        break;  // no change.
      }
      ICI->replaceAllUsesWith(LV);
      ICI->eraseFromParent();
    }
    LI->eraseFromParent();
  }

  // If the initialization boolean was used, insert it, otherwise delete it.
  if (!InitBoolUsed) {
    while (!InitBool->use_empty())  // Delete initializations
      cast<StoreInst>(InitBool->user_back())->eraseFromParent();
    delete InitBool;
  } else
    GV->getParent()->getGlobalList().insert(GV->getIterator(), InitBool);

  // Now the GV is dead, nuke it and the malloc..
  GV->eraseFromParent();
  CI->eraseFromParent();

  // To further other optimizations, loop over all users of NewGV and try to
  // constant prop them.  This will promote GEP instructions with constant
  // indices into GEP constant-exprs, which will allow global-opt to hack on it.
  ConstantPropUsersOf(NewGV, DL, TLI);
  if (RepValue != NewGV)
    ConstantPropUsersOf(RepValue, DL, TLI);

  return NewGV;
}
|
|
|
|
|
2015-11-13 19:05:07 +08:00
|
|
|
/// Scan the use-list of V checking to make sure that there are no complex uses
|
|
|
|
/// of V. We permit simple things like dereferencing the pointer, but not
|
|
|
|
/// storing through the address, unless it is to the specified global.
|
2010-04-07 02:58:22 +08:00
|
|
|
static bool ValueIsOnlyUsedLocallyOrStoredToOneGlobal(const Instruction *V,
|
|
|
|
const GlobalVariable *GV,
|
2014-08-21 13:55:13 +08:00
|
|
|
SmallPtrSetImpl<const PHINode*> &PHIs) {
|
2014-03-09 11:16:01 +08:00
|
|
|
for (const User *U : V->users()) {
|
|
|
|
const Instruction *Inst = cast<Instruction>(U);
|
2010-04-07 03:14:05 +08:00
|
|
|
|
2008-12-16 05:08:54 +08:00
|
|
|
if (isa<LoadInst>(Inst) || isa<CmpInst>(Inst)) {
|
|
|
|
continue; // Fine, ignore.
|
|
|
|
}
|
2010-10-19 05:16:00 +08:00
|
|
|
|
2010-04-07 02:58:22 +08:00
|
|
|
if (const StoreInst *SI = dyn_cast<StoreInst>(Inst)) {
|
2004-12-02 15:11:07 +08:00
|
|
|
if (SI->getOperand(0) == V && SI->getOperand(1) != GV)
|
|
|
|
return false; // Storing the pointer itself... bad.
|
2008-12-16 05:08:54 +08:00
|
|
|
continue; // Otherwise, storing through it, or storing into GV... fine.
|
|
|
|
}
|
2010-10-19 05:16:00 +08:00
|
|
|
|
2010-04-11 02:19:22 +08:00
|
|
|
// Must index into the array and into the struct.
|
|
|
|
if (isa<GetElementPtrInst>(Inst) && Inst->getNumOperands() >= 3) {
|
2008-12-16 05:08:54 +08:00
|
|
|
if (!ValueIsOnlyUsedLocallyOrStoredToOneGlobal(Inst, GV, PHIs))
|
2004-12-02 15:11:07 +08:00
|
|
|
return false;
|
2008-12-16 05:08:54 +08:00
|
|
|
continue;
|
|
|
|
}
|
2010-10-19 05:16:00 +08:00
|
|
|
|
2010-04-07 02:58:22 +08:00
|
|
|
if (const PHINode *PN = dyn_cast<PHINode>(Inst)) {
|
2007-09-14 00:37:20 +08:00
|
|
|
// PHIs are ok if all uses are ok. Don't infinitely recurse through PHI
|
|
|
|
// cycles.
|
2014-11-19 15:49:26 +08:00
|
|
|
if (PHIs.insert(PN).second)
|
2007-09-14 11:41:21 +08:00
|
|
|
if (!ValueIsOnlyUsedLocallyOrStoredToOneGlobal(PN, GV, PHIs))
|
|
|
|
return false;
|
2008-12-16 05:08:54 +08:00
|
|
|
continue;
|
2004-12-02 15:11:07 +08:00
|
|
|
}
|
2010-10-19 05:16:00 +08:00
|
|
|
|
2010-04-07 02:58:22 +08:00
|
|
|
if (const BitCastInst *BCI = dyn_cast<BitCastInst>(Inst)) {
|
2008-12-16 05:08:54 +08:00
|
|
|
if (!ValueIsOnlyUsedLocallyOrStoredToOneGlobal(BCI, GV, PHIs))
|
|
|
|
return false;
|
|
|
|
continue;
|
|
|
|
}
|
2010-10-19 05:16:00 +08:00
|
|
|
|
2008-12-16 05:08:54 +08:00
|
|
|
return false;
|
|
|
|
}
|
2004-12-02 15:11:07 +08:00
|
|
|
return true;
|
2006-10-01 07:32:09 +08:00
|
|
|
}
|
|
|
|
|
2015-11-13 19:05:07 +08:00
|
|
|
/// The Alloc pointer is stored into GV somewhere.  Transform all uses of the
/// allocation into loads from the global and uses of the resultant pointer.
/// Further, delete the store into GV.  This assumes that these values pass the
/// 'ValueIsOnlyUsedLocallyOrStoredToOneGlobal' predicate.
static void ReplaceUsesOfMallocWithGlobal(Instruction *Alloc,
                                          GlobalVariable *GV) {
  // Each iteration removes (or rewrites) at least one use of Alloc, so the
  // loop terminates when the allocation has been fully replaced.
  while (!Alloc->use_empty()) {
    Instruction *U = cast<Instruction>(*Alloc->user_begin());
    Instruction *InsertPt = U;
    if (StoreInst *SI = dyn_cast<StoreInst>(U)) {
      // If this is the store of the allocation into the global, remove it.
      if (SI->getOperand(1) == GV) {
        SI->eraseFromParent();
        continue;
      }
    } else if (PHINode *PN = dyn_cast<PHINode>(U)) {
      // Insert the load in the corresponding predecessor, not right before the
      // PHI.
      InsertPt = PN->getIncomingBlock(*Alloc->use_begin())->getTerminator();
    } else if (isa<BitCastInst>(U)) {
      // Must be bitcast between the malloc and store to initialize the global.
      ReplaceUsesOfMallocWithGlobal(U, GV);
      U->eraseFromParent();
      continue;
    } else if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(U)) {
      // If this is a "GEP bitcast" and the user is a store to the global, then
      // just process it as a bitcast.
      if (GEPI->hasAllZeroIndices() && GEPI->hasOneUse())
        if (StoreInst *SI = dyn_cast<StoreInst>(GEPI->user_back()))
          if (SI->getOperand(1) == GV) {
            // Must be bitcast GEP between the malloc and store to initialize
            // the global.
            ReplaceUsesOfMallocWithGlobal(GEPI, GV);
            GEPI->eraseFromParent();
            continue;
          }
    }

    // Insert a load from the global, and use it instead of the malloc.
    Value *NL = new LoadInst(GV, GV->getName()+".val", InsertPt);
    U->replaceUsesOfWith(Alloc, NL);
  }
}
|
|
|
|
|
2015-11-13 19:05:07 +08:00
|
|
|
/// Verify that all uses of V (a load, or a phi of a load) are simple enough to
|
|
|
|
/// perform heap SRA on. This permits GEP's that index through the array and
|
|
|
|
/// struct field, icmps of null, and PHIs.
|
2010-04-01 16:21:08 +08:00
|
|
|
static bool LoadUsesSimpleEnoughForHeapSRA(const Value *V,
|
2014-08-21 13:55:13 +08:00
|
|
|
SmallPtrSetImpl<const PHINode*> &LoadUsingPHIs,
|
|
|
|
SmallPtrSetImpl<const PHINode*> &LoadUsingPHIsPerLoad) {
|
2008-12-17 05:24:51 +08:00
|
|
|
// We permit two users of the load: setcc comparing against the null
|
|
|
|
// pointer, and a getelementptr of a specific form.
|
2014-03-09 11:16:01 +08:00
|
|
|
for (const User *U : V->users()) {
|
|
|
|
const Instruction *UI = cast<Instruction>(U);
|
2010-10-19 05:16:00 +08:00
|
|
|
|
2008-12-17 05:24:51 +08:00
|
|
|
// Comparison against null is ok.
|
2014-03-09 11:16:01 +08:00
|
|
|
if (const ICmpInst *ICI = dyn_cast<ICmpInst>(UI)) {
|
2008-12-17 05:24:51 +08:00
|
|
|
if (!isa<ConstantPointerNull>(ICI->getOperand(1)))
|
|
|
|
return false;
|
|
|
|
continue;
|
|
|
|
}
|
2010-10-19 05:16:00 +08:00
|
|
|
|
2008-12-17 05:24:51 +08:00
|
|
|
// getelementptr is also ok, but only a simple form.
|
2014-03-09 11:16:01 +08:00
|
|
|
if (const GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(UI)) {
|
2008-12-17 05:24:51 +08:00
|
|
|
// Must index into the array and into the struct.
|
|
|
|
if (GEPI->getNumOperands() < 3)
|
|
|
|
return false;
|
2010-10-19 05:16:00 +08:00
|
|
|
|
2008-12-17 05:24:51 +08:00
|
|
|
// Otherwise the GEP is ok.
|
|
|
|
continue;
|
|
|
|
}
|
2010-10-19 05:16:00 +08:00
|
|
|
|
2014-03-09 11:16:01 +08:00
|
|
|
if (const PHINode *PN = dyn_cast<PHINode>(UI)) {
|
2014-11-19 15:49:26 +08:00
|
|
|
if (!LoadUsingPHIsPerLoad.insert(PN).second)
|
2009-06-02 08:56:07 +08:00
|
|
|
// This means some phi nodes are dependent on each other.
|
|
|
|
// Avoid infinite looping!
|
|
|
|
return false;
|
2014-11-19 15:49:26 +08:00
|
|
|
if (!LoadUsingPHIs.insert(PN).second)
|
2009-06-02 08:56:07 +08:00
|
|
|
// If we have already analyzed this PHI, then it is safe.
|
2008-12-17 05:24:51 +08:00
|
|
|
continue;
|
2010-10-19 05:16:00 +08:00
|
|
|
|
2008-12-17 13:28:49 +08:00
|
|
|
// Make sure all uses of the PHI are simple enough to transform.
|
2009-06-02 08:56:07 +08:00
|
|
|
if (!LoadUsesSimpleEnoughForHeapSRA(PN,
|
|
|
|
LoadUsingPHIs, LoadUsingPHIsPerLoad))
|
2008-12-17 05:24:51 +08:00
|
|
|
return false;
|
2010-10-19 05:16:00 +08:00
|
|
|
|
2008-12-17 05:24:51 +08:00
|
|
|
continue;
|
2006-10-01 07:32:09 +08:00
|
|
|
}
|
2010-10-19 05:16:00 +08:00
|
|
|
|
2008-12-17 05:24:51 +08:00
|
|
|
// Otherwise we don't know what this is, not ok.
|
|
|
|
return false;
|
|
|
|
}
|
2010-10-19 05:16:00 +08:00
|
|
|
|
2008-12-17 05:24:51 +08:00
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
2015-11-13 19:05:07 +08:00
|
|
|
/// If all users of values loaded from GV are simple enough to perform HeapSRA,
|
|
|
|
/// return true.
|
2010-04-01 16:21:08 +08:00
|
|
|
static bool AllGlobalLoadUsesSimpleEnoughForHeapSRA(const GlobalVariable *GV,
|
2009-09-19 06:35:49 +08:00
|
|
|
Instruction *StoredVal) {
|
2010-04-01 16:21:08 +08:00
|
|
|
SmallPtrSet<const PHINode*, 32> LoadUsingPHIs;
|
|
|
|
SmallPtrSet<const PHINode*, 32> LoadUsingPHIsPerLoad;
|
2014-03-09 11:16:01 +08:00
|
|
|
for (const User *U : GV->users())
|
|
|
|
if (const LoadInst *LI = dyn_cast<LoadInst>(U)) {
|
2009-06-02 08:56:07 +08:00
|
|
|
if (!LoadUsesSimpleEnoughForHeapSRA(LI, LoadUsingPHIs,
|
|
|
|
LoadUsingPHIsPerLoad))
|
2008-12-17 13:28:49 +08:00
|
|
|
return false;
|
2009-06-02 08:56:07 +08:00
|
|
|
LoadUsingPHIsPerLoad.clear();
|
|
|
|
}
|
2010-10-19 05:16:00 +08:00
|
|
|
|
2008-12-17 13:28:49 +08:00
|
|
|
// If we reach here, we know that all uses of the loads and transitive uses
|
|
|
|
// (through PHI nodes) are simple enough to transform. However, we don't know
|
2010-10-19 05:16:00 +08:00
|
|
|
// that all inputs the to the PHI nodes are in the same equivalence sets.
|
2008-12-17 13:28:49 +08:00
|
|
|
// Check to verify that all operands of the PHIs are either PHIS that can be
|
|
|
|
// transformed, loads from GV, or MI itself.
|
2014-08-25 07:23:06 +08:00
|
|
|
for (const PHINode *PN : LoadUsingPHIs) {
|
2008-12-17 13:28:49 +08:00
|
|
|
for (unsigned op = 0, e = PN->getNumIncomingValues(); op != e; ++op) {
|
|
|
|
Value *InVal = PN->getIncomingValue(op);
|
2010-10-19 05:16:00 +08:00
|
|
|
|
2008-12-17 13:28:49 +08:00
|
|
|
// PHI of the stored value itself is ok.
|
2009-09-19 06:35:49 +08:00
|
|
|
if (InVal == StoredVal) continue;
|
2010-10-19 05:16:00 +08:00
|
|
|
|
2010-04-01 16:21:08 +08:00
|
|
|
if (const PHINode *InPN = dyn_cast<PHINode>(InVal)) {
|
2008-12-17 13:28:49 +08:00
|
|
|
// One of the PHIs in our set is (optimistically) ok.
|
|
|
|
if (LoadUsingPHIs.count(InPN))
|
|
|
|
continue;
|
2008-12-17 05:24:51 +08:00
|
|
|
return false;
|
2008-12-17 13:28:49 +08:00
|
|
|
}
|
2010-10-19 05:16:00 +08:00
|
|
|
|
2008-12-17 13:28:49 +08:00
|
|
|
// Load from GV is ok.
|
2010-04-01 16:21:08 +08:00
|
|
|
if (const LoadInst *LI = dyn_cast<LoadInst>(InVal))
|
2008-12-17 13:28:49 +08:00
|
|
|
if (LI->getOperand(0) == GV)
|
|
|
|
continue;
|
2010-10-19 05:16:00 +08:00
|
|
|
|
2008-12-17 13:28:49 +08:00
|
|
|
// UNDEF? NULL?
|
2010-10-19 05:16:00 +08:00
|
|
|
|
2008-12-17 13:28:49 +08:00
|
|
|
// Anything else is rejected.
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
}
|
2010-10-19 05:16:00 +08:00
|
|
|
|
2006-10-01 07:32:09 +08:00
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
2008-12-17 13:28:49 +08:00
|
|
|
/// Return the scalarized value corresponding to V for field FieldNo, creating
/// it on demand.  V must be a load from the global being SRoA'd, or a PHI of
/// such loads.  Results are memoized in InsertedScalarizedValues; PHIs created
/// here are queued in PHIsToRewrite so their incoming values can be filled in
/// once all field values exist.
static Value *GetHeapSROAValue(Value *V, unsigned FieldNo,
              DenseMap<Value *, std::vector<Value *>> &InsertedScalarizedValues,
              std::vector<std::pair<PHINode *, unsigned>> &PHIsToRewrite) {
  std::vector<Value *> &FieldVals = InsertedScalarizedValues[V];

  if (FieldNo >= FieldVals.size())
    FieldVals.resize(FieldNo+1);

  // If we already have this value, just reuse the previously scalarized
  // version.
  if (Value *FieldVal = FieldVals[FieldNo])
    return FieldVal;

  // Depending on what instruction this is, we have several cases.
  Value *Result;
  if (LoadInst *LI = dyn_cast<LoadInst>(V)) {
    // This is a scalarized version of the load from the global.  Just create
    // a new Load of the scalarized global.
    Result = new LoadInst(GetHeapSROAValue(LI->getOperand(0), FieldNo,
                                           InsertedScalarizedValues,
                                           PHIsToRewrite),
                          LI->getName()+".f"+Twine(FieldNo), LI);
  } else {
    PHINode *PN = cast<PHINode>(V);
    // PN's type is pointer to struct.  Make a new PHI of pointer to struct
    // field.

    PointerType *PTy = cast<PointerType>(PN->getType());
    StructType *ST = cast<StructType>(PTy->getElementType());

    // Preserve the address space of the original pointer on the field PHI.
    unsigned AS = PTy->getAddressSpace();
    PHINode *NewPN =
      PHINode::Create(PointerType::get(ST->getElementType(FieldNo), AS),
                      PN->getNumIncomingValues(),
                      PN->getName()+".f"+Twine(FieldNo), PN);
    Result = NewPN;
    // Incoming values are filled in later, once all field values exist.
    PHIsToRewrite.push_back(std::make_pair(PN, FieldNo));
  }

  return FieldVals[FieldNo] = Result;
}
|
|
|
|
|
2015-11-13 19:05:07 +08:00
|
|
|
/// Given a load instruction and a value derived from the load, rewrite the
/// derived value to use the HeapSRoA'd load.
static void RewriteHeapSROALoadUser(Instruction *LoadUser,
              DenseMap<Value *, std::vector<Value *>> &InsertedScalarizedValues,
              std::vector<std::pair<PHINode *, unsigned>> &PHIsToRewrite) {
  // If this is a comparison against null, handle it.
  if (ICmpInst *SCI = dyn_cast<ICmpInst>(LoadUser)) {
    assert(isa<ConstantPointerNull>(SCI->getOperand(1)));
    // If we have a setcc of the loaded pointer, we can use a setcc of any
    // field.
    Value *NPtr = GetHeapSROAValue(SCI->getOperand(0), 0,
                                   InsertedScalarizedValues, PHIsToRewrite);

    Value *New = new ICmpInst(SCI, SCI->getPredicate(), NPtr,
                              Constant::getNullValue(NPtr->getType()),
                              SCI->getName());
    SCI->replaceAllUsesWith(New);
    SCI->eraseFromParent();
    return;
  }

  // Handle 'getelementptr Ptr, Idx, i32 FieldNo ...'
  if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(LoadUser)) {
    assert(GEPI->getNumOperands() >= 3 && isa<ConstantInt>(GEPI->getOperand(2))
           && "Unexpected GEPI!");

    // Load the pointer for this field.
    unsigned FieldNo = cast<ConstantInt>(GEPI->getOperand(2))->getZExtValue();
    Value *NewPtr = GetHeapSROAValue(GEPI->getOperand(0), FieldNo,
                                     InsertedScalarizedValues, PHIsToRewrite);

    // Create the new GEP idx vector: keep the array index, drop the struct
    // field index (now folded into the field global), keep the rest.
    SmallVector<Value*, 8> GEPIdx;
    GEPIdx.push_back(GEPI->getOperand(1));
    GEPIdx.append(GEPI->op_begin()+3, GEPI->op_end());

    Value *NGEPI = GetElementPtrInst::Create(GEPI->getResultElementType(), NewPtr, GEPIdx,
                                             GEPI->getName(), GEPI);
    GEPI->replaceAllUsesWith(NGEPI);
    GEPI->eraseFromParent();
    return;
  }

  // Recursively transform the users of PHI nodes.  This will lazily create the
  // PHIs that are needed for individual elements.  Keep track of what PHIs we
  // see in InsertedScalarizedValues so that we don't get infinite loops (very
  // antisocial).  If the PHI is already in InsertedScalarizedValues, it has
  // already been seen first by another load, so its uses have already been
  // processed.
  PHINode *PN = cast<PHINode>(LoadUser);
  if (!InsertedScalarizedValues.insert(std::make_pair(PN,
                                              std::vector<Value *>())).second)
    return;

  // If this is the first time we've seen this PHI, recursively process all
  // users.  Advance the iterator before rewriting, since the user may be
  // erased during the recursive call.
  for (auto UI = PN->user_begin(), E = PN->user_end(); UI != E;) {
    Instruction *User = cast<Instruction>(*UI++);
    RewriteHeapSROALoadUser(User, InsertedScalarizedValues, PHIsToRewrite);
  }
}
|
|
|
|
|
2015-11-13 19:05:07 +08:00
|
|
|
/// We are performing Heap SRoA on a global. Ptr is a value loaded from the
|
|
|
|
/// global. Eliminate all uses of Ptr, making them use FieldGlobals instead.
|
|
|
|
/// All uses of loaded values satisfy AllGlobalLoadUsesSimpleEnoughForHeapSRA.
|
2010-10-19 05:16:00 +08:00
|
|
|
static void RewriteUsesOfLoadForHeapSRoA(LoadInst *Load,
|
2017-10-11 06:49:55 +08:00
|
|
|
DenseMap<Value *, std::vector<Value *>> &InsertedScalarizedValues,
|
|
|
|
std::vector<std::pair<PHINode *, unsigned> > &PHIsToRewrite) {
|
2014-03-09 11:16:01 +08:00
|
|
|
for (auto UI = Load->user_begin(), E = Load->user_end(); UI != E;) {
|
2008-12-17 13:42:08 +08:00
|
|
|
Instruction *User = cast<Instruction>(*UI++);
|
2009-11-06 12:27:31 +08:00
|
|
|
RewriteHeapSROALoadUser(User, InsertedScalarizedValues, PHIsToRewrite);
|
2008-12-17 13:42:08 +08:00
|
|
|
}
|
2010-10-19 05:16:00 +08:00
|
|
|
|
2008-12-17 13:28:49 +08:00
|
|
|
if (Load->use_empty()) {
|
|
|
|
Load->eraseFromParent();
|
|
|
|
InsertedScalarizedValues.erase(Load);
|
|
|
|
}
|
2006-10-01 07:32:09 +08:00
|
|
|
}
|
|
|
|
|
2015-11-13 19:05:07 +08:00
|
|
|
/// CI is an allocation of an array of structures.  Break it up into multiple
/// allocations of arrays of the fields.
///
/// Creates one internal global and one malloc per struct field, rewires all
/// uses of the original global/malloc to the per-field versions, emits the
/// malloc-failure cleanup path, and finally deletes the original global.
/// Returns the global created for field 0.
static GlobalVariable *PerformHeapAllocSRoA(GlobalVariable *GV, CallInst *CI,
                                            Value *NElems, const DataLayout &DL,
                                            const TargetLibraryInfo *TLI) {
  LLVM_DEBUG(dbgs() << "SROA HEAP ALLOC: " << *GV << " MALLOC = " << *CI
                    << '\n');
  Type *MAT = getMallocAllocatedType(CI, TLI);
  // The caller guarantees the allocation is of struct type.
  StructType *STy = cast<StructType>(MAT);

  // There is guaranteed to be at least one use of the malloc (storing
  // it into GV).  If there are other uses, change them to be uses of
  // the global to simplify later code.  This also deletes the store
  // into GV.
  ReplaceUsesOfMallocWithGlobal(CI, GV);

  // Okay, at this point, there are no users of the malloc.  Insert N
  // new mallocs at the same place as CI, and N globals.
  std::vector<Value *> FieldGlobals;
  std::vector<Value *> FieldMallocs;

  // Propagate any operand bundles from the original call to the new
  // malloc/free calls we create below.
  SmallVector<OperandBundleDef, 1> OpBundles;
  CI->getOperandBundlesAsDefs(OpBundles);

  unsigned AS = GV->getType()->getPointerAddressSpace();
  for (unsigned FieldNo = 0, e = STy->getNumElements(); FieldNo != e;++FieldNo){
    Type *FieldTy = STy->getElementType(FieldNo);
    PointerType *PFieldTy = PointerType::get(FieldTy, AS);

    // One new global per field, null-initialized, named <GV>.f<N>.
    GlobalVariable *NGV = new GlobalVariable(
        *GV->getParent(), PFieldTy, false, GlobalValue::InternalLinkage,
        Constant::getNullValue(PFieldTy), GV->getName() + ".f" + Twine(FieldNo),
        nullptr, GV->getThreadLocalMode());
    // Preserve section/alignment/etc. from the original global.
    NGV->copyAttributesFrom(GV);
    FieldGlobals.push_back(NGV);

    unsigned TypeSize = DL.getTypeAllocSize(FieldTy);
    if (StructType *ST = dyn_cast<StructType>(FieldTy))
      TypeSize = DL.getStructLayout(ST)->getSizeInBytes();
    Type *IntPtrTy = DL.getIntPtrType(CI->getType());
    // Allocate NElems elements of this field's type, right before CI.
    Value *NMI = CallInst::CreateMalloc(CI, IntPtrTy, FieldTy,
                                        ConstantInt::get(IntPtrTy, TypeSize),
                                        NElems, OpBundles, nullptr,
                                        CI->getName() + ".f" + Twine(FieldNo));
    FieldMallocs.push_back(NMI);
    new StoreInst(NMI, NGV, CI);
  }

  // The tricky aspect of this transformation is handling the case when malloc
  // fails.  In the original code, malloc failing would set the result pointer
  // of malloc to null.  In this case, some mallocs could succeed and others
  // could fail.  As such, we emit code that looks like this:
  //    F0 = malloc(field0)
  //    F1 = malloc(field1)
  //    F2 = malloc(field2)
  //    if (F0 == 0 || F1 == 0 || F2 == 0) {
  //      if (F0) { free(F0); F0 = 0; }
  //      if (F1) { free(F1); F1 = 0; }
  //      if (F2) { free(F2); F2 = 0; }
  //    }
  // The malloc can also fail if its argument is too large.
  Constant *ConstantZero = ConstantInt::get(CI->getArgOperand(0)->getType(), 0);
  Value *RunningOr = new ICmpInst(CI, ICmpInst::ICMP_SLT, CI->getArgOperand(0),
                                  ConstantZero, "isneg");
  for (unsigned i = 0, e = FieldMallocs.size(); i != e; ++i) {
    Value *Cond = new ICmpInst(CI, ICmpInst::ICMP_EQ, FieldMallocs[i],
                               Constant::getNullValue(FieldMallocs[i]->getType()),
                               "isnull");
    RunningOr = BinaryOperator::CreateOr(RunningOr, Cond, "tmp", CI);
  }

  // Split the basic block at the old malloc.
  BasicBlock *OrigBB = CI->getParent();
  BasicBlock *ContBB =
      OrigBB->splitBasicBlock(CI->getIterator(), "malloc_cont");

  // Create the block to check the first condition.  Put all these blocks at the
  // end of the function as they are unlikely to be executed.
  BasicBlock *NullPtrBlock = BasicBlock::Create(OrigBB->getContext(),
                                                "malloc_ret_null",
                                                OrigBB->getParent());

  // Remove the uncond branch from OrigBB to ContBB, turning it into a cond
  // branch on RunningOr.
  OrigBB->getTerminator()->eraseFromParent();
  BranchInst::Create(NullPtrBlock, ContBB, RunningOr, OrigBB);

  // Within the NullPtrBlock, we need to emit a comparison and branch for each
  // pointer, because some may be null while others are not.
  for (unsigned i = 0, e = FieldGlobals.size(); i != e; ++i) {
    Value *GVVal = new LoadInst(FieldGlobals[i], "tmp", NullPtrBlock);
    Value *Cmp = new ICmpInst(*NullPtrBlock, ICmpInst::ICMP_NE, GVVal,
                              Constant::getNullValue(GVVal->getType()));
    BasicBlock *FreeBlock = BasicBlock::Create(Cmp->getContext(), "free_it",
                                               OrigBB->getParent());
    BasicBlock *NextBlock = BasicBlock::Create(Cmp->getContext(), "next",
                                               OrigBB->getParent());
    Instruction *BI = BranchInst::Create(FreeBlock, NextBlock,
                                         Cmp, NullPtrBlock);

    // Fill in FreeBlock.
    CallInst::CreateFree(GVVal, OpBundles, BI);
    new StoreInst(Constant::getNullValue(GVVal->getType()), FieldGlobals[i],
                  FreeBlock);
    BranchInst::Create(NextBlock, FreeBlock);

    // Chain the per-field checks: the next field's check lives in NextBlock.
    NullPtrBlock = NextBlock;
  }

  BranchInst::Create(ContBB, NullPtrBlock);

  // CI is no longer needed, remove it.
  CI->eraseFromParent();

  /// As we process loads, if we can't immediately update all uses of the load,
  /// keep track of what scalarized loads are inserted for a given load.
  DenseMap<Value *, std::vector<Value *>> InsertedScalarizedValues;
  InsertedScalarizedValues[GV] = FieldGlobals;

  std::vector<std::pair<PHINode *, unsigned>> PHIsToRewrite;

  // Okay, the malloc site is completely handled.  All of the uses of GV are now
  // loads, and all uses of those loads are simple.  Rewrite them to use loads
  // of the per-field globals instead.
  for (auto UI = GV->user_begin(), E = GV->user_end(); UI != E;) {
    // Advance before rewriting: rewriting may erase the current user.
    Instruction *User = cast<Instruction>(*UI++);

    if (LoadInst *LI = dyn_cast<LoadInst>(User)) {
      RewriteUsesOfLoadForHeapSRoA(LI, InsertedScalarizedValues, PHIsToRewrite);
      continue;
    }

    // Must be a store of null.
    StoreInst *SI = cast<StoreInst>(User);
    assert(isa<ConstantPointerNull>(SI->getOperand(0)) &&
           "Unexpected heap-sra user!");

    // Insert a store of null into each global.
    for (unsigned i = 0, e = FieldGlobals.size(); i != e; ++i) {
      Type *ValTy = cast<GlobalValue>(FieldGlobals[i])->getValueType();
      Constant *Null = Constant::getNullValue(ValTy);
      new StoreInst(Null, FieldGlobals[i], SI);
    }
    // Erase the original store.
    SI->eraseFromParent();
  }

  // While we have PHIs that are interesting to rewrite, do it.
  while (!PHIsToRewrite.empty()) {
    PHINode *PN = PHIsToRewrite.back().first;
    unsigned FieldNo = PHIsToRewrite.back().second;
    PHIsToRewrite.pop_back();
    PHINode *FieldPN = cast<PHINode>(InsertedScalarizedValues[PN][FieldNo]);
    assert(FieldPN->getNumIncomingValues() == 0 &&"Already processed this phi");

    // Add all the incoming values.  This can materialize more phis.
    for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
      Value *InVal = PN->getIncomingValue(i);
      InVal = GetHeapSROAValue(InVal, FieldNo, InsertedScalarizedValues,
                               PHIsToRewrite);
      FieldPN->addIncoming(InVal, PN->getIncomingBlock(i));
    }
  }

  // Drop all inter-phi links and any loads that made it this far.
  for (DenseMap<Value *, std::vector<Value *>>::iterator
       I = InsertedScalarizedValues.begin(), E = InsertedScalarizedValues.end();
       I != E; ++I) {
    if (PHINode *PN = dyn_cast<PHINode>(I->first))
      PN->dropAllReferences();
    else if (LoadInst *LI = dyn_cast<LoadInst>(I->first))
      LI->dropAllReferences();
  }

  // Delete all the phis and loads now that inter-references are dead.
  for (DenseMap<Value *, std::vector<Value *>>::iterator
       I = InsertedScalarizedValues.begin(), E = InsertedScalarizedValues.end();
       I != E; ++I) {
    if (PHINode *PN = dyn_cast<PHINode>(I->first))
      PN->eraseFromParent();
    else if (LoadInst *LI = dyn_cast<LoadInst>(I->first))
      LI->eraseFromParent();
  }

  // The old global is now dead, remove it.
  GV->eraseFromParent();

  ++NumHeapSRA;
  return cast<GlobalVariable>(FieldGlobals[0]);
}
|
|
|
|
|
2015-11-13 19:05:07 +08:00
|
|
|
/// This function is called when we see a pointer global variable with a single
/// value stored it that is a malloc or cast of malloc.
///
/// Two transformations are attempted, in order:
///   1. Replace a small, fixed-size malloc with a plain global
///      (OptimizeGlobalAddressOfMalloc).
///   2. SRoA a malloc'd array-of-structs into per-field mallocs
///      (PerformHeapAllocSRoA).
/// Returns true if either transformation fired.
static bool tryToOptimizeStoreOfMallocToGlobal(GlobalVariable *GV, CallInst *CI,
                                               Type *AllocTy,
                                               AtomicOrdering Ordering,
                                               const DataLayout &DL,
                                               TargetLibraryInfo *TLI) {
  // If this is a malloc of an abstract type, don't touch it.
  if (!AllocTy->isSized())
    return false;

  // We can't optimize this global unless all uses of it are *known* to be
  // of the malloc value, not of the null initializer value (consider a use
  // that compares the global's value against zero to see if the malloc has
  // been reached).  To do this, we check to see if all uses of the global
  // would trap if the global were null: this proves that they must all
  // happen after the malloc.
  if (!AllUsesOfLoadedValueWillTrapIfNull(GV))
    return false;

  // We can't optimize this if the malloc itself is used in a complex way,
  // for example, being stored into multiple globals.  This allows the
  // malloc to be stored into the specified global, loaded icmp'd, and
  // GEP'd.  These are all things we could transform to using the global
  // for.
  SmallPtrSet<const PHINode*, 8> PHIs;
  if (!ValueIsOnlyUsedLocallyOrStoredToOneGlobal(CI, GV, PHIs))
    return false;

  // If we have a global that is only initialized with a fixed size malloc,
  // transform the program to use global memory instead of malloc'd memory.
  // This eliminates dynamic allocation, avoids an indirection accessing the
  // data, and exposes the resultant global to further GlobalOpt.
  // We cannot optimize the malloc if we cannot determine malloc array size.
  Value *NElems = getMallocArraySize(CI, DL, TLI, true);
  if (!NElems)
    return false;

  if (ConstantInt *NElements = dyn_cast<ConstantInt>(NElems))
    // Restrict this transformation to only working on small allocations
    // (2048 bytes currently), as we don't want to introduce a 16M global or
    // something.
    if (NElements->getZExtValue() * DL.getTypeAllocSize(AllocTy) < 2048) {
      OptimizeGlobalAddressOfMalloc(GV, CI, AllocTy, NElements, DL, TLI);
      return true;
    }

  // If the allocation is an array of structures, consider transforming this
  // into multiple malloc'd arrays, one for each field.  This is basically
  // SRoA for malloc'd memory.

  // Heap SRoA rewrites loads/stores of the global; don't attempt it for
  // atomic accesses.
  if (Ordering != AtomicOrdering::NotAtomic)
    return false;

  // If this is an allocation of a fixed size array of structs, analyze as a
  // variable size array.  malloc [100 x struct],1 -> malloc struct, 100
  if (NElems == ConstantInt::get(CI->getArgOperand(0)->getType(), 1))
    if (ArrayType *AT = dyn_cast<ArrayType>(AllocTy))
      AllocTy = AT->getElementType();

  StructType *AllocSTy = dyn_cast<StructType>(AllocTy);
  if (!AllocSTy)
    return false;

  // If the structure has an unreasonable number of fields, leave it
  // alone.
  if (AllocSTy->getNumElements() <= 16 && AllocSTy->getNumElements() != 0 &&
      AllGlobalLoadUsesSimpleEnoughForHeapSRA(GV, CI)) {

    // If this is a fixed size array, transform the Malloc to be an alloc of
    // structs.  malloc [100 x struct],1 -> malloc struct, 100
    if (ArrayType *AT = dyn_cast<ArrayType>(getMallocAllocatedType(CI, TLI))) {
      Type *IntPtrTy = DL.getIntPtrType(CI->getType());
      unsigned TypeSize = DL.getStructLayout(AllocSTy)->getSizeInBytes();
      Value *AllocSize = ConstantInt::get(IntPtrTy, TypeSize);
      Value *NumElements = ConstantInt::get(IntPtrTy, AT->getNumElements());
      SmallVector<OperandBundleDef, 1> OpBundles;
      CI->getOperandBundlesAsDefs(OpBundles);
      Instruction *Malloc =
          CallInst::CreateMalloc(CI, IntPtrTy, AllocSTy, AllocSize, NumElements,
                                 OpBundles, nullptr, CI->getName());
      Instruction *Cast = new BitCastInst(Malloc, CI->getType(), "tmp", CI);
      CI->replaceAllUsesWith(Cast);
      CI->eraseFromParent();
      // CreateMalloc may return either the raw malloc call or a bitcast of
      // it; recover the underlying CallInst either way.
      if (BitCastInst *BCI = dyn_cast<BitCastInst>(Malloc))
        CI = cast<CallInst>(BCI->getOperand(0));
      else
        CI = cast<CallInst>(Malloc);
    }

    PerformHeapAllocSRoA(GV, CI, getMallocArraySize(CI, DL, TLI, true), DL,
                         TLI);
    return true;
  }

  return false;
}
|
2009-09-19 06:35:49 +08:00
|
|
|
|
2015-12-23 03:16:50 +08:00
|
|
|
// Try to optimize globals based on the knowledge that only one value (besides
|
|
|
|
// its initializer) is ever stored to the global.
|
|
|
|
static bool optimizeOnceStoredGlobal(GlobalVariable *GV, Value *StoredOnceVal,
|
2012-02-06 03:56:38 +08:00
|
|
|
AtomicOrdering Ordering,
|
2015-03-05 02:43:29 +08:00
|
|
|
const DataLayout &DL,
|
2014-02-25 07:12:18 +08:00
|
|
|
TargetLibraryInfo *TLI) {
|
2008-12-16 05:20:32 +08:00
|
|
|
// Ignore no-op GEPs and bitcasts.
|
|
|
|
StoredOnceVal = StoredOnceVal->stripPointerCasts();
|
2004-10-10 05:48:45 +08:00
|
|
|
|
2004-10-11 07:14:11 +08:00
|
|
|
// If we are dealing with a pointer global that is initialized to null and
|
|
|
|
// only has one (non-null) value stored into it, then we can optimize any
|
|
|
|
// users of the loaded value (often calls and loads) that would trap if the
|
|
|
|
// value was null.
|
2010-02-16 19:11:14 +08:00
|
|
|
if (GV->getInitializer()->getType()->isPointerTy() &&
|
llvm: Add support for "-fno-delete-null-pointer-checks"
Summary:
Support for this option is needed for building Linux kernel.
This is a very frequently requested feature by kernel developers.
More details : https://lkml.org/lkml/2018/4/4/601
GCC option description for -fdelete-null-pointer-checks:
This Assume that programs cannot safely dereference null pointers,
and that no code or data element resides at address zero.
-fno-delete-null-pointer-checks is the inverse of this implying that
null pointer dereferencing is not undefined.
This feature is implemented in LLVM IR in this CL as the function attribute
"null-pointer-is-valid"="true" in IR (Under review at D47894).
The CL updates several passes that assumed null pointer dereferencing is
undefined to not optimize when the "null-pointer-is-valid"="true"
attribute is present.
Reviewers: t.p.northover, efriedma, jyknight, chandlerc, rnk, srhines, void, george.burgess.iv
Reviewed By: efriedma, george.burgess.iv
Subscribers: eraman, haicheng, george.burgess.iv, drinkcat, theraven, reames, sanjoy, xbolva00, llvm-commits
Differential Revision: https://reviews.llvm.org/D47895
llvm-svn: 336613
2018-07-10 06:27:23 +08:00
|
|
|
GV->getInitializer()->isNullValue() &&
|
|
|
|
!NullPointerIsDefined(
|
|
|
|
nullptr /* F */,
|
|
|
|
GV->getInitializer()->getType()->getPointerAddressSpace())) {
|
2004-10-11 07:14:11 +08:00
|
|
|
if (Constant *SOVC = dyn_cast<Constant>(StoredOnceVal)) {
|
|
|
|
if (GV->getInitializer()->getType() != SOVC->getType())
|
2011-05-22 15:15:13 +08:00
|
|
|
SOVC = ConstantExpr::getBitCast(SOVC, GV->getInitializer()->getType());
|
2005-04-22 07:48:37 +08:00
|
|
|
|
2004-10-11 07:14:11 +08:00
|
|
|
// Optimize away any trapping uses of the loaded value.
|
2014-02-21 08:06:31 +08:00
|
|
|
if (OptimizeAwayTrappingUsesOfLoads(GV, SOVC, DL, TLI))
|
2004-10-11 01:07:12 +08:00
|
|
|
return true;
|
2012-08-29 23:32:21 +08:00
|
|
|
} else if (CallInst *CI = extractMallocCall(StoredOnceVal, TLI)) {
|
|
|
|
Type *MallocType = getMallocAllocatedType(CI, TLI);
|
2015-12-23 03:16:50 +08:00
|
|
|
if (MallocType && tryToOptimizeStoreOfMallocToGlobal(GV, CI, MallocType,
|
|
|
|
Ordering, DL, TLI))
|
2009-11-07 08:16:28 +08:00
|
|
|
return true;
|
2004-10-11 07:14:11 +08:00
|
|
|
}
|
2004-10-10 05:48:45 +08:00
|
|
|
}
|
This patch implements two things (sorry).
First, it allows SRA of globals that have embedded arrays, implementing
GlobalOpt/globalsra-partial.llx. This comes up infrequently, but does allow,
for example, deleting several stores to dead parts of globals in dhrystone.
Second, this implements GlobalOpt/malloc-promote-*.llx, which is the
following nifty transformation:
Basically if a global pointer is initialized with malloc, and we can tell
that the program won't notice, we transform this:
struct foo *FooPtr;
...
FooPtr = malloc(sizeof(struct foo));
...
FooPtr->A FooPtr->B
Into:
struct foo FooPtrBody;
...
FooPtrBody.A FooPtrBody.B
This comes up occasionally, for example, the 'disp' global in 183.equake (where
the xform speeds the CBE version of the program up from 56.16s to 52.40s (7%)
on apoc), and the 'desired_accept', 'fixLRBT', 'macroArray', & 'key_queue'
globals in 300.twolf (speeding it up from 22.29s to 21.55s (3.4%)).
The nice thing about this xform is that it exposes the resulting global to
global variable optimization and makes alias analysis easier in addition to
eliminating a few loads.
llvm-svn: 16916
2004-10-11 13:54:41 +08:00
|
|
|
|
2004-10-10 05:48:45 +08:00
|
|
|
return false;
|
|
|
|
}
|
2004-10-09 04:59:28 +08:00
|
|
|
|
2015-11-13 19:05:07 +08:00
|
|
|
/// At this point, we have learned that the only two values ever stored into GV
/// are its initializer and OtherVal.  See if we can shrink the global into a
/// boolean and select between the two values whenever it is used.  This exposes
/// the values to other scalar optimizations.
static bool TryToShrinkGlobalToBoolean(GlobalVariable *GV, Constant *OtherVal) {
  Type *GVElType = GV->getValueType();

  // If GVElType is already i1, it is already shrunk.  If the type of the GV is
  // an FP value, pointer or vector, don't do this optimization because a select
  // between them is very expensive and unlikely to lead to later
  // simplification.  In these cases, we typically end up with "cond ? v1 : v2"
  // where v1 and v2 both require constant pool loads, a big loss.
  if (GVElType == Type::getInt1Ty(GV->getContext()) ||
      GVElType->isFloatingPointTy() ||
      GVElType->isPointerTy() || GVElType->isVectorTy())
    return false;

  // Walk the use list of the global seeing if all the uses are load or store.
  // If there is anything else, bail out.
  for (User *U : GV->users())
    if (!isa<LoadInst>(U) && !isa<StoreInst>(U))
      return false;

  LLVM_DEBUG(dbgs() << " *** SHRINKING TO BOOL: " << *GV << "\n");

  // Create the new global, initializing it to false.
  GlobalVariable *NewGV = new GlobalVariable(Type::getInt1Ty(GV->getContext()),
                                             false,
                                             GlobalValue::InternalLinkage,
                                             ConstantInt::getFalse(GV->getContext()),
                                             GV->getName()+".b",
                                             GV->getThreadLocalMode(),
                                             GV->getType()->getAddressSpace());
  // Preserve section/alignment/etc. from the original global.
  NewGV->copyAttributesFrom(GV);
  GV->getParent()->getGlobalList().insert(GV->getIterator(), NewGV);

  Constant *InitVal = GV->getInitializer();
  assert(InitVal->getType() != Type::getInt1Ty(GV->getContext()) &&
         "No reason to shrink to bool!");

  // Collect any debug-info attachments so they can be migrated to NewGV.
  SmallVector<DIGlobalVariableExpression *, 1> GVs;
  GV->getDebugInfo(GVs);

  // If initialized to zero and storing one into the global, we can use a cast
  // instead of a select to synthesize the desired value.
  bool IsOneZero = false;
  bool EmitOneOrZero = true;
  if (ConstantInt *CI = dyn_cast<ConstantInt>(OtherVal)){
    IsOneZero = InitVal->isNullValue() && CI->isOne();

    if (ConstantInt *CIInit = dyn_cast<ConstantInt>(GV->getInitializer())){
      uint64_t ValInit = CIInit->getZExtValue();
      uint64_t ValOther = CI->getZExtValue();
      uint64_t ValMinus = ValOther - ValInit;

      for(auto *GVe : GVs){
        DIGlobalVariable *DGV = GVe->getVariable();
        DIExpression *E = GVe->getExpression();

        // It is expected that the address of global optimized variable is on
        // top of the stack. After optimization, value of that variable will
        // be ether 0 for initial value or 1 for other value. The following
        // expression should return constant integer value depending on the
        // value at global object address:
        // val * (ValOther - ValInit) + ValInit:
        // DW_OP_deref DW_OP_constu <ValMinus>
        // DW_OP_mul DW_OP_constu <ValInit> DW_OP_plus DW_OP_stack_value
        SmallVector<uint64_t, 12> Ops = {
            dwarf::DW_OP_deref, dwarf::DW_OP_constu, ValMinus,
            dwarf::DW_OP_mul, dwarf::DW_OP_constu, ValInit,
            dwarf::DW_OP_plus};
        E = DIExpression::prependOpcodes(E, Ops, DIExpression::WithStackValue);
        DIGlobalVariableExpression *DGVE =
            DIGlobalVariableExpression::get(NewGV->getContext(), DGV, E);
        NewGV->addDebugInfo(DGVE);
      }
      // The rewritten expressions above already recover the original values;
      // no need to fall back to attaching the raw debug info below.
      EmitOneOrZero = false;
    }
  }

  if (EmitOneOrZero) {
    // FIXME: This will only emit address for debugger on which will
    // be written only 0 or 1.
    for(auto *GV : GVs)
      NewGV->addDebugInfo(GV);
  }

  // Rewrite each use of GV.  user_back() is safe here because every
  // iteration erases the instruction it handles.
  while (!GV->use_empty()) {
    Instruction *UI = cast<Instruction>(GV->user_back());
    if (StoreInst *SI = dyn_cast<StoreInst>(UI)) {
      // Change the store into a boolean store.
      bool StoringOther = SI->getOperand(0) == OtherVal;
      // Only do this if we weren't storing a loaded value.
      Value *StoreVal;
      if (StoringOther || SI->getOperand(0) == InitVal) {
        StoreVal = ConstantInt::get(Type::getInt1Ty(GV->getContext()),
                                    StoringOther);
      } else {
        // Otherwise, we are storing a previously loaded copy.  To do this,
        // change the copy from copying the original value to just copying the
        // bool.
        Instruction *StoredVal = cast<Instruction>(SI->getOperand(0));

        // If we've already replaced the input, StoredVal will be a cast or
        // select instruction.  If not, it will be a load of the original
        // global.
        if (LoadInst *LI = dyn_cast<LoadInst>(StoredVal)) {
          assert(LI->getOperand(0) == GV && "Not a copy!");
          // Insert a new load, to preserve the saved value.
          StoreVal = new LoadInst(NewGV, LI->getName()+".b", false, 0,
                                  LI->getOrdering(), LI->getSyncScopeID(), LI);
        } else {
          assert((isa<CastInst>(StoredVal) || isa<SelectInst>(StoredVal)) &&
                 "This is not a form that we understand!");
          StoreVal = StoredVal->getOperand(0);
          assert(isa<LoadInst>(StoreVal) && "Not a load of NewGV!");
        }
      }
      // Mirror the original store's ordering/sync-scope onto the bool store.
      new StoreInst(StoreVal, NewGV, false, 0,
                    SI->getOrdering(), SI->getSyncScopeID(), SI);
    } else {
      // Change the load into a load of bool then a select.
      LoadInst *LI = cast<LoadInst>(UI);
      LoadInst *NLI = new LoadInst(NewGV, LI->getName()+".b", false, 0,
                                   LI->getOrdering(), LI->getSyncScopeID(), LI);
      Value *NSI;
      if (IsOneZero)
        NSI = new ZExtInst(NLI, LI->getType(), "", LI);
      else
        NSI = SelectInst::Create(NLI, OtherVal, InitVal, "", LI);
      NSI->takeName(LI);
      LI->replaceAllUsesWith(NSI);
    }
    UI->eraseFromParent();
  }

  // Retain the name of the old global variable. People who are debugging their
  // programs may expect these variables to be named the same.
  NewGV->takeName(GV);
  GV->eraseFromParent();
  return true;
}
|
|
|
|
|
2018-06-12 19:16:56 +08:00
|
|
|
/// Erase \p GV from the module if it is provably dead (discardable and
/// without real uses). Returns true if the global was deleted.
static bool deleteIfDead(
    GlobalValue &GV, SmallPtrSetImpl<const Comdat *> &NotDiscardableComdats) {
  // Drop constant expressions that only exist to reference GV; they do not
  // count as real uses and would otherwise keep GV alive.
  GV.removeDeadConstantUsers();

  // A definition that is not discardable (e.g. externally visible) must stay.
  if (!GV.isDiscardableIfUnused() && !GV.isDeclaration())
    return false;

  // A non-local member of a comdat group that is still needed keeps the whole
  // group alive, so we may not delete it.
  if (const Comdat *C = GV.getComdat())
    if (!GV.hasLocalLinkage() && NotDiscardableComdats.count(C))
      return false;

  bool Dead;
  if (auto *F = dyn_cast<Function>(&GV))
    // Function definitions may be dead even with uses; defer the exact
    // criteria to Function::isDefTriviallyDead().
    Dead = (F->isDeclaration() && F->use_empty()) || F->isDefTriviallyDead();
  else
    Dead = GV.use_empty();
  if (!Dead)
    return false;

  LLVM_DEBUG(dbgs() << "GLOBAL DEAD: " << GV << "\n");
  GV.eraseFromParent();
  ++NumDeleted;
  return true;
}
|
2004-12-12 13:53:50 +08:00
|
|
|
|
2016-04-26 08:27:56 +08:00
|
|
|
/// Return true if the memory pointed to by \p GV is provably not live on entry
/// to its single accessing function \p F, i.e. every load of GV inside F is
/// dominated by a store that fully covers it. Used to decide whether GV can be
/// demoted to a function-local alloca.
static bool isPointerValueDeadOnEntryToFunction(
    const Function *F, GlobalValue *GV,
    function_ref<DominatorTree &(Function &)> LookupDomTree) {
  // Find all uses of GV. We expect them all to be in F, and if we can't
  // identify any of the uses we bail out.
  //
  // On each of these uses, identify if the memory that GV points to is
  // used/required/live at the start of the function. If it is not, for example
  // if the first thing the function does is store to the GV, the GV can
  // possibly be demoted.
  //
  // We don't do an exhaustive search for memory operations - simply look
  // through bitcasts as they're quite common and benign.
  const DataLayout &DL = GV->getParent()->getDataLayout();
  SmallVector<LoadInst *, 4> Loads;
  SmallVector<StoreInst *, 4> Stores;
  for (auto *U : GV->users()) {
    if (Operator::getOpcode(U) == Instruction::BitCast) {
      // One level of bitcast is allowed; its users must all be loads/stores.
      for (auto *UU : U->users()) {
        if (auto *LI = dyn_cast<LoadInst>(UU))
          Loads.push_back(LI);
        else if (auto *SI = dyn_cast<StoreInst>(UU))
          Stores.push_back(SI);
        else
          return false;
      }
      continue;
    }

    Instruction *I = dyn_cast<Instruction>(U);
    if (!I)
      return false;
    assert(I->getParent()->getParent() == F);

    if (auto *LI = dyn_cast<LoadInst>(I))
      Loads.push_back(LI);
    else if (auto *SI = dyn_cast<StoreInst>(I))
      Stores.push_back(SI);
    else
      return false;
  }

  // We have identified all uses of GV into loads and stores. Now check if all
  // of them are known not to depend on the value of the global at the function
  // entry point. We do this by ensuring that every load is dominated by at
  // least one store.
  auto &DT = LookupDomTree(*const_cast<Function *>(F));

  // The below check is quadratic. Check we're not going to do too many tests.
  // FIXME: Even though this will always have worst-case quadratic time, we
  // could put effort into minimizing the average time by putting stores that
  // have been shown to dominate at least one load at the beginning of the
  // Stores array, making subsequent dominance checks more likely to succeed
  // early.
  //
  // The threshold here is fairly large because global->local demotion is a
  // very powerful optimization should it fire.
  const unsigned Threshold = 100;
  if (Loads.size() * Stores.size() > Threshold)
    return false;

  for (auto *L : Loads) {
    auto *LTy = L->getType();
    if (none_of(Stores, [&](const StoreInst *S) {
          auto *STy = S->getValueOperand()->getType();
          // The load is only dominated by the store if DomTree says so
          // and the number of bits loaded in L is less than or equal to
          // the number of bits stored in S.
          return DT.dominates(S, L) &&
                 DL.getTypeStoreSize(LTy) <= DL.getTypeStoreSize(STy);
        }))
      return false;
  }
  // All loads have known dependences inside F, so the global can be localized.
  return true;
}
|
|
|
|
|
2015-11-20 02:04:33 +08:00
|
|
|
/// C may have non-instruction users. Can all of those users be turned into
|
|
|
|
/// instructions?
|
|
|
|
static bool allNonInstructionUsersCanBeMadeInstructions(Constant *C) {
|
|
|
|
// We don't do this exhaustively. The most common pattern that we really need
|
|
|
|
// to care about is a constant GEP or constant bitcast - so just looking
|
|
|
|
// through one single ConstantExpr.
|
|
|
|
//
|
|
|
|
// The set of constants that this function returns true for must be able to be
|
|
|
|
// handled by makeAllConstantUsesInstructions.
|
|
|
|
for (auto *U : C->users()) {
|
|
|
|
if (isa<Instruction>(U))
|
|
|
|
continue;
|
|
|
|
if (!isa<ConstantExpr>(U))
|
|
|
|
// Non instruction, non-constantexpr user; cannot convert this.
|
|
|
|
return false;
|
|
|
|
for (auto *UU : U->users())
|
|
|
|
if (!isa<Instruction>(UU))
|
|
|
|
// A constantexpr used by another constant. We don't try and recurse any
|
|
|
|
// further but just bail out at this point.
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
|
|
|
/// C may have non-instruction users, and
/// allNonInstructionUsersCanBeMadeInstructions has returned true. Convert the
/// non-instruction users to instructions.
static void makeAllConstantUsesInstructions(Constant *C) {
  // Collect the ConstantExpr users up front; the transformation below mutates
  // C's use list.
  SmallVector<ConstantExpr*,4> Users;
  for (auto *U : C->users()) {
    if (isa<ConstantExpr>(U))
      Users.push_back(cast<ConstantExpr>(U));
    else
      // We should never get here; allNonInstructionUsersCanBeMadeInstructions
      // should not have returned true for C.
      assert(
          isa<Instruction>(U) &&
          "Can't transform non-constantexpr non-instruction to instruction!");
  }

  SmallVector<Value*,4> UUsers;
  for (auto *U : Users) {
    UUsers.clear();
    // Snapshot U's users first: replaceUsesOfWith below modifies the use
    // list we would otherwise be iterating.
    for (auto *UU : U->users())
      UUsers.push_back(UU);
    for (auto *UU : UUsers) {
      Instruction *UI = cast<Instruction>(UU);
      // Materialize the constant expression as an equivalent instruction
      // directly before each user, then point the user at it.
      Instruction *NewU = U->getAsInstruction();
      NewU->insertBefore(UI);
      UI->replaceUsesOfWith(U, NewU);
    }
    // We've replaced all the uses, so destroy the constant. (destroyConstant
    // will update value handles and metadata.)
    U->destroyConstant();
  }
}
|
|
|
|
|
2015-11-13 19:05:07 +08:00
|
|
|
/// Analyze the specified global variable and optimize
|
2011-01-20 00:32:21 +08:00
|
|
|
/// it if possible. If we make a change, return true.
|
2016-04-26 08:27:56 +08:00
|
|
|
static bool processInternalGlobal(
|
|
|
|
GlobalVariable *GV, const GlobalStatus &GS, TargetLibraryInfo *TLI,
|
|
|
|
function_ref<DominatorTree &(Function &)> LookupDomTree) {
|
2015-03-05 02:43:29 +08:00
|
|
|
auto &DL = GV->getParent()->getDataLayout();
|
2015-11-15 22:21:37 +08:00
|
|
|
// If this is a first class global and has only one accessing function and
|
|
|
|
// this function is non-recursive, we replace the global with a local alloca
|
|
|
|
// in this function.
|
2013-10-08 03:03:24 +08:00
|
|
|
//
|
2013-12-05 13:44:44 +08:00
|
|
|
// NOTE: It doesn't make sense to promote non-single-value types since we
|
2013-10-08 03:03:24 +08:00
|
|
|
// are just replacing static memory to stack memory.
|
|
|
|
//
|
|
|
|
// If the global is in different address space, don't bring it to stack.
|
|
|
|
if (!GS.HasMultipleAccessingFunctions &&
|
2015-11-20 02:04:33 +08:00
|
|
|
GS.AccessingFunction &&
|
2016-01-17 04:30:46 +08:00
|
|
|
GV->getValueType()->isSingleValueType() &&
|
2015-11-15 22:21:37 +08:00
|
|
|
GV->getType()->getAddressSpace() == 0 &&
|
|
|
|
!GV->isExternallyInitialized() &&
|
2015-11-20 02:04:33 +08:00
|
|
|
allNonInstructionUsersCanBeMadeInstructions(GV) &&
|
2015-11-15 22:21:37 +08:00
|
|
|
GS.AccessingFunction->doesNotRecurse() &&
|
2016-04-26 08:27:56 +08:00
|
|
|
isPointerValueDeadOnEntryToFunction(GS.AccessingFunction, GV,
|
|
|
|
LookupDomTree)) {
|
2017-04-11 06:27:50 +08:00
|
|
|
const DataLayout &DL = GV->getParent()->getDataLayout();
|
|
|
|
|
2018-05-14 20:53:11 +08:00
|
|
|
LLVM_DEBUG(dbgs() << "LOCALIZING GLOBAL: " << *GV << "\n");
|
2013-10-08 03:03:24 +08:00
|
|
|
Instruction &FirstI = const_cast<Instruction&>(*GS.AccessingFunction
|
|
|
|
->getEntryBlock().begin());
|
2016-01-17 04:30:46 +08:00
|
|
|
Type *ElemTy = GV->getValueType();
|
2013-10-08 03:03:24 +08:00
|
|
|
// FIXME: Pass Global's alignment when globals have alignment
|
2017-04-11 06:27:50 +08:00
|
|
|
AllocaInst *Alloca = new AllocaInst(ElemTy, DL.getAllocaAddrSpace(), nullptr,
|
2014-04-25 13:29:35 +08:00
|
|
|
GV->getName(), &FirstI);
|
2013-10-08 03:03:24 +08:00
|
|
|
if (!isa<UndefValue>(GV->getInitializer()))
|
|
|
|
new StoreInst(GV->getInitializer(), Alloca, &FirstI);
|
|
|
|
|
2015-11-20 02:04:33 +08:00
|
|
|
makeAllConstantUsesInstructions(GV);
|
2016-04-26 08:27:56 +08:00
|
|
|
|
2013-10-08 03:03:24 +08:00
|
|
|
GV->replaceAllUsesWith(Alloca);
|
|
|
|
GV->eraseFromParent();
|
|
|
|
++NumLocalized;
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
2011-01-18 12:36:06 +08:00
|
|
|
// If the global is never loaded (but may be stored to), it is dead.
|
|
|
|
// Delete it now.
|
2013-10-18 02:18:52 +08:00
|
|
|
if (!GS.IsLoaded) {
|
2018-05-14 20:53:11 +08:00
|
|
|
LLVM_DEBUG(dbgs() << "GLOBAL NEVER LOADED: " << *GV << "\n");
|
2004-10-09 11:32:52 +08:00
|
|
|
|
2012-07-24 15:21:08 +08:00
|
|
|
bool Changed;
|
|
|
|
if (isLeakCheckerRoot(GV)) {
|
|
|
|
// Delete any constant stores to the global.
|
2012-08-29 23:32:21 +08:00
|
|
|
Changed = CleanupPointerRootUsers(GV, TLI);
|
2012-07-24 15:21:08 +08:00
|
|
|
} else {
|
|
|
|
// Delete any stores we can find to the global. We may not be able to
|
|
|
|
// make it completely dead though.
|
2014-02-21 08:06:31 +08:00
|
|
|
Changed = CleanupConstantGlobalUsers(GV, GV->getInitializer(), DL, TLI);
|
2012-07-24 15:21:08 +08:00
|
|
|
}
|
2011-01-18 12:36:06 +08:00
|
|
|
|
|
|
|
// If the global is dead now, delete it.
|
|
|
|
if (GV->use_empty()) {
|
|
|
|
GV->eraseFromParent();
|
|
|
|
++NumDeleted;
|
|
|
|
Changed = true;
|
|
|
|
}
|
|
|
|
return Changed;
|
|
|
|
|
2016-04-25 18:48:29 +08:00
|
|
|
}
|
|
|
|
if (GS.StoredType <= GlobalStatus::InitializerStored) {
|
2018-05-14 20:53:11 +08:00
|
|
|
LLVM_DEBUG(dbgs() << "MARKING CONSTANT: " << *GV << "\n");
|
2011-01-18 12:36:06 +08:00
|
|
|
GV->setConstant(true);
|
2005-04-22 07:48:37 +08:00
|
|
|
|
2011-01-18 12:36:06 +08:00
|
|
|
// Clean up any obviously simplifiable users now.
|
2014-02-21 08:06:31 +08:00
|
|
|
CleanupConstantGlobalUsers(GV, GV->getInitializer(), DL, TLI);
|
2005-04-22 07:48:37 +08:00
|
|
|
|
2011-01-18 12:36:06 +08:00
|
|
|
// If the global is dead now, just nuke it.
|
|
|
|
if (GV->use_empty()) {
|
2018-05-14 20:53:11 +08:00
|
|
|
LLVM_DEBUG(dbgs() << " *** Marking constant allowed us to simplify "
|
|
|
|
<< "all users and delete global!\n");
|
2011-01-18 12:36:06 +08:00
|
|
|
GV->eraseFromParent();
|
|
|
|
++NumDeleted;
|
2016-04-25 18:48:29 +08:00
|
|
|
return true;
|
2011-01-18 12:36:06 +08:00
|
|
|
}
|
2005-04-22 07:48:37 +08:00
|
|
|
|
2016-04-25 18:48:29 +08:00
|
|
|
// Fall through to the next check; see if we can optimize further.
|
2011-01-18 12:36:06 +08:00
|
|
|
++NumMarked;
|
2016-04-25 18:48:29 +08:00
|
|
|
}
|
|
|
|
if (!GV->getInitializer()->getType()->isSingleValueType()) {
|
2015-03-05 02:43:29 +08:00
|
|
|
const DataLayout &DL = GV->getParent()->getDataLayout();
|
2015-12-23 03:16:50 +08:00
|
|
|
if (SRAGlobal(GV, DL))
|
2015-03-05 02:43:29 +08:00
|
|
|
return true;
|
2016-04-25 18:48:29 +08:00
|
|
|
}
|
|
|
|
if (GS.StoredType == GlobalStatus::StoredOnce && GS.StoredOnceValue) {
|
2011-01-18 12:36:06 +08:00
|
|
|
// If the initial value for the global was an undef value, and if only
|
|
|
|
// one other value was stored into it, we can just change the
|
|
|
|
// initializer to be the stored value, then delete all stores to the
|
|
|
|
// global. This allows us to mark it constant.
|
|
|
|
if (Constant *SOVConstant = dyn_cast<Constant>(GS.StoredOnceValue))
|
|
|
|
if (isa<UndefValue>(GV->getInitializer())) {
|
|
|
|
// Change the initial value here.
|
|
|
|
GV->setInitializer(SOVConstant);
|
|
|
|
|
|
|
|
// Clean up any obviously simplifiable users now.
|
2014-02-21 08:06:31 +08:00
|
|
|
CleanupConstantGlobalUsers(GV, GV->getInitializer(), DL, TLI);
|
2011-01-18 12:36:06 +08:00
|
|
|
|
|
|
|
if (GV->use_empty()) {
|
2018-05-14 20:53:11 +08:00
|
|
|
LLVM_DEBUG(dbgs() << " *** Substituting initializer allowed us to "
|
|
|
|
<< "simplify all users and delete global!\n");
|
2011-01-18 12:36:06 +08:00
|
|
|
GV->eraseFromParent();
|
|
|
|
++NumDeleted;
|
|
|
|
}
|
|
|
|
++NumSubstitute;
|
|
|
|
return true;
|
2004-10-09 04:59:28 +08:00
|
|
|
}
|
2005-04-22 07:48:37 +08:00
|
|
|
|
2011-01-18 12:36:06 +08:00
|
|
|
// Try to optimize globals based on the knowledge that only one value
|
|
|
|
// (besides its initializer) is ever stored to the global.
|
2015-12-23 03:16:50 +08:00
|
|
|
if (optimizeOnceStoredGlobal(GV, GS.StoredOnceValue, GS.Ordering, DL, TLI))
|
2004-10-09 04:59:28 +08:00
|
|
|
return true;
|
2004-10-17 02:09:00 +08:00
|
|
|
|
2014-03-23 12:22:31 +08:00
|
|
|
// Otherwise, if the global was not a boolean, we can shrink it to be a
|
|
|
|
// boolean.
|
2013-09-10 06:00:13 +08:00
|
|
|
if (Constant *SOVConstant = dyn_cast<Constant>(GS.StoredOnceValue)) {
|
2016-04-07 05:19:33 +08:00
|
|
|
if (GS.Ordering == AtomicOrdering::NotAtomic) {
|
2014-03-23 12:22:31 +08:00
|
|
|
if (TryToShrinkGlobalToBoolean(GV, SOVConstant)) {
|
2013-09-10 06:00:13 +08:00
|
|
|
++NumShrunkToBool;
|
|
|
|
return true;
|
|
|
|
}
|
2011-01-18 12:36:06 +08:00
|
|
|
}
|
2013-09-10 06:00:13 +08:00
|
|
|
}
|
2004-10-09 04:59:28 +08:00
|
|
|
}
|
2011-01-18 12:36:06 +08:00
|
|
|
|
2004-10-09 04:59:28 +08:00
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
2016-04-26 08:27:56 +08:00
|
|
|
/// Analyze the specified global variable and optimize it if possible. If we
|
|
|
|
/// make a change, return true.
|
|
|
|
static bool
|
|
|
|
processGlobal(GlobalValue &GV, TargetLibraryInfo *TLI,
|
|
|
|
function_ref<DominatorTree &(Function &)> LookupDomTree) {
|
2016-06-15 05:01:22 +08:00
|
|
|
if (GV.getName().startswith("llvm."))
|
2016-04-26 08:27:56 +08:00
|
|
|
return false;
|
|
|
|
|
|
|
|
GlobalStatus GS;
|
|
|
|
|
|
|
|
if (GlobalStatus::analyzeGlobal(&GV, GS))
|
|
|
|
return false;
|
|
|
|
|
|
|
|
bool Changed = false;
|
2016-06-15 05:01:22 +08:00
|
|
|
if (!GS.IsCompared && !GV.hasGlobalUnnamedAddr()) {
|
|
|
|
auto NewUnnamedAddr = GV.hasLocalLinkage() ? GlobalValue::UnnamedAddr::Global
|
|
|
|
: GlobalValue::UnnamedAddr::Local;
|
|
|
|
if (NewUnnamedAddr != GV.getUnnamedAddr()) {
|
|
|
|
GV.setUnnamedAddr(NewUnnamedAddr);
|
|
|
|
NumUnnamed++;
|
|
|
|
Changed = true;
|
|
|
|
}
|
2016-04-26 08:27:56 +08:00
|
|
|
}
|
|
|
|
|
2016-06-15 05:01:22 +08:00
|
|
|
// Do more involved optimizations if the global is internal.
|
|
|
|
if (!GV.hasLocalLinkage())
|
|
|
|
return Changed;
|
|
|
|
|
2016-04-26 08:27:56 +08:00
|
|
|
auto *GVar = dyn_cast<GlobalVariable>(&GV);
|
|
|
|
if (!GVar)
|
|
|
|
return Changed;
|
|
|
|
|
|
|
|
if (GVar->isConstant() || !GVar->hasInitializer())
|
|
|
|
return Changed;
|
|
|
|
|
|
|
|
return processInternalGlobal(GVar, GS, TLI, LookupDomTree) || Changed;
|
|
|
|
}
|
|
|
|
|
2015-11-13 19:05:07 +08:00
|
|
|
/// Walk all of the direct calls of the specified function, changing them to
|
|
|
|
/// FastCC.
|
2005-05-09 06:18:06 +08:00
|
|
|
static void ChangeCalleesToFastCall(Function *F) {
|
2014-03-09 11:16:01 +08:00
|
|
|
for (User *U : F->users()) {
|
|
|
|
if (isa<BlockAddress>(U))
|
2012-05-12 16:30:16 +08:00
|
|
|
continue;
|
2014-03-09 11:16:01 +08:00
|
|
|
CallSite CS(cast<Instruction>(U));
|
|
|
|
CS.setCallingConv(CallingConv::Fast);
|
2005-05-09 06:18:06 +08:00
|
|
|
}
|
|
|
|
}
|
2004-10-09 04:59:28 +08:00
|
|
|
|
2017-04-20 07:26:44 +08:00
|
|
|
/// Return \p Attrs with any 'nest' attribute removed.
static AttributeList StripNest(LLVMContext &C, AttributeList Attrs) {
  // There can be at most one attribute set with a nest attribute, so a
  // single removal suffices.
  unsigned NestAttrIdx;
  if (!Attrs.hasAttrSomewhere(Attribute::Nest, &NestAttrIdx))
    return Attrs;
  return Attrs.removeAttribute(C, NestAttrIdx, Attribute::Nest);
}
|
|
|
|
|
|
|
|
/// Strip the 'nest' attribute from \p F and from every direct call of it.
static void RemoveNestAttribute(Function *F) {
  F->setAttributes(StripNest(F->getContext(), F->getAttributes()));
  for (User *U : F->users()) {
    // Block addresses are not call sites; skip them.
    if (isa<BlockAddress>(U))
      continue;
    CallSite Site(cast<Instruction>(U));
    Site.setAttributes(StripNest(F->getContext(), Site.getAttributes()));
  }
}
|
|
|
|
|
2014-02-27 03:57:30 +08:00
|
|
|
/// Return true if this is a calling convention that we'd like to change.  The
/// idea here is that we don't want to mess with the convention if the user
/// explicitly requested something with performance implications like coldcc,
/// GHC, or anyregcc.
static bool hasChangeableCC(Function *F) {
  CallingConv::ID CC = F->getCallingConv();

  // FIXME: Is it worth transforming x86_stdcallcc and x86_fastcallcc?
  if (CC != CallingConv::C && CC != CallingConv::X86_ThisCall)
    return false;

  // FIXME: Change CC for the whole chain of musttail calls when possible.
  //
  // Can't change CC of the function that either has musttail calls, or is a
  // musttail callee itself
  for (User *U : F->users()) {
    if (isa<BlockAddress>(U))
      continue;
    CallInst* CI = dyn_cast<CallInst>(U);
    if (!CI)
      continue;

    // F is a musttail callee: its CC must match the caller's, so leave it.
    if (CI->isMustTailCall())
      return false;
  }

  // F itself contains a musttail call: same constraint from the other side.
  for (BasicBlock &BB : *F)
    if (BB.getTerminatingMustTailCall())
      return false;

  return true;
}
|
|
|
|
|
2018-01-31 00:17:22 +08:00
|
|
|
/// Return true if the block containing the call site has a BlockFrequency of
|
|
|
|
/// less than ColdCCRelFreq% of the entry block.
|
|
|
|
static bool isColdCallSite(CallSite CS, BlockFrequencyInfo &CallerBFI) {
|
|
|
|
const BranchProbability ColdProb(ColdCCRelFreq, 100);
|
|
|
|
auto CallSiteBB = CS.getInstruction()->getParent();
|
|
|
|
auto CallSiteFreq = CallerBFI.getBlockFreq(CallSiteBB);
|
|
|
|
auto CallerEntryFreq =
|
|
|
|
CallerBFI.getBlockFreq(&(CS.getCaller()->getEntryBlock()));
|
|
|
|
return CallSiteFreq < CallerEntryFreq * ColdProb;
|
|
|
|
}
|
|
|
|
|
|
|
|
// This function checks if the input function F is cold at all call sites. It
|
|
|
|
// also looks each call site's containing function, returning false if the
|
|
|
|
// caller function contains other non cold calls. The input vector AllCallsCold
|
|
|
|
// contains a list of functions that only have call sites in cold blocks.
|
|
|
|
static bool
|
|
|
|
isValidCandidateForColdCC(Function &F,
|
|
|
|
function_ref<BlockFrequencyInfo &(Function &)> GetBFI,
|
|
|
|
const std::vector<Function *> &AllCallsCold) {
|
|
|
|
|
|
|
|
if (F.user_empty())
|
|
|
|
return false;
|
|
|
|
|
|
|
|
for (User *U : F.users()) {
|
|
|
|
if (isa<BlockAddress>(U))
|
|
|
|
continue;
|
|
|
|
|
|
|
|
CallSite CS(cast<Instruction>(U));
|
|
|
|
Function *CallerFunc = CS.getInstruction()->getParent()->getParent();
|
|
|
|
BlockFrequencyInfo &CallerBFI = GetBFI(*CallerFunc);
|
|
|
|
if (!isColdCallSite(CS, CallerBFI))
|
|
|
|
return false;
|
|
|
|
auto It = std::find(AllCallsCold.begin(), AllCallsCold.end(), CallerFunc);
|
|
|
|
if (It == AllCallsCold.end())
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
|
|
|
/// Retarget every direct call of \p F to the cold calling convention.
static void changeCallSitesToColdCC(Function *F) {
  for (User *U : F->users()) {
    // Block addresses are not call sites.
    if (!isa<BlockAddress>(U)) {
      CallSite Site(cast<Instruction>(U));
      Site.setCallingConv(CallingConv::Cold);
    }
  }
}
|
|
|
|
|
|
|
|
// This function iterates over all the call instructions in the input Function
|
|
|
|
// and checks that all call sites are in cold blocks and are allowed to use the
|
|
|
|
// coldcc calling convention.
|
|
|
|
static bool
|
|
|
|
hasOnlyColdCalls(Function &F,
|
|
|
|
function_ref<BlockFrequencyInfo &(Function &)> GetBFI) {
|
|
|
|
for (BasicBlock &BB : F) {
|
|
|
|
for (Instruction &I : BB) {
|
|
|
|
if (CallInst *CI = dyn_cast<CallInst>(&I)) {
|
|
|
|
CallSite CS(cast<Instruction>(CI));
|
|
|
|
// Skip over isline asm instructions since they aren't function calls.
|
|
|
|
if (CI->isInlineAsm())
|
|
|
|
continue;
|
|
|
|
Function *CalledFn = CI->getCalledFunction();
|
|
|
|
if (!CalledFn)
|
|
|
|
return false;
|
|
|
|
if (!CalledFn->hasLocalLinkage())
|
|
|
|
return false;
|
|
|
|
// Skip over instrinsics since they won't remain as function calls.
|
|
|
|
if (CalledFn->getIntrinsicID() != Intrinsic::not_intrinsic)
|
|
|
|
continue;
|
|
|
|
// Check if it's valid to use coldcc calling convention.
|
|
|
|
if (!hasChangeableCC(CalledFn) || CalledFn->isVarArg() ||
|
|
|
|
CalledFn->hasAddressTaken())
|
|
|
|
return false;
|
|
|
|
BlockFrequencyInfo &CallerBFI = GetBFI(F);
|
|
|
|
if (!isColdCallSite(CS, CallerBFI))
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
2016-04-26 08:27:56 +08:00
|
|
|
/// Driver over every function in the module: internalize unnamed definitions,
/// delete dead ones, clean up unreachable blocks, and apply calling-convention
/// optimizations (coldcc, fastcc, nest removal). Returns true on any change.
static bool
OptimizeFunctions(Module &M, TargetLibraryInfo *TLI,
                  function_ref<TargetTransformInfo &(Function &)> GetTTI,
                  function_ref<BlockFrequencyInfo &(Function &)> GetBFI,
                  function_ref<DominatorTree &(Function &)> LookupDomTree,
                  SmallPtrSetImpl<const Comdat *> &NotDiscardableComdats) {

  bool Changed = false;

  // Precompute the set of functions whose every call site sits in a cold
  // block; isValidCandidateForColdCC consults this below.
  std::vector<Function *> AllCallsCold;
  for (Module::iterator FI = M.begin(), E = M.end(); FI != E;) {
    Function *F = &*FI++;
    if (hasOnlyColdCalls(*F, GetBFI))
      AllCallsCold.push_back(F);
  }

  // Optimize functions.
  for (Module::iterator FI = M.begin(), E = M.end(); FI != E; ) {
    // Advance the iterator first: F may be erased below.
    Function *F = &*FI++;

    // Don't perform global opt pass on naked functions; we don't want fast
    // calling conventions for naked functions.
    if (F->hasFnAttribute(Attribute::Naked))
      continue;

    // Functions without names cannot be referenced outside this module.
    if (!F->hasName() && !F->isDeclaration() && !F->hasLocalLinkage())
      F->setLinkage(GlobalValue::InternalLinkage);

    if (deleteIfDead(*F, NotDiscardableComdats)) {
      Changed = true;
      continue;
    }

    // LLVM's definition of dominance allows instructions that are cyclic
    // in unreachable blocks, e.g.:
    // %pat = select i1 %condition, @global, i16* %pat
    // because any instruction dominates an instruction in a block that's
    // not reachable from entry.
    // So, remove unreachable blocks from the function, because a) there's
    // no point in analyzing them and b) GlobalOpt should otherwise grow
    // some more complicated logic to break these cycles.
    // Removing unreachable blocks might invalidate the dominator so we
    // recalculate it.
    if (!F->isDeclaration()) {
      if (removeUnreachableBlocks(*F)) {
        auto &DT = LookupDomTree(*F);
        DT.recalculate(*F);
        Changed = true;
      }
    }

    Changed |= processGlobal(*F, TLI, LookupDomTree);

    if (!F->hasLocalLinkage())
      continue;

    if (hasChangeableCC(F) && !F->isVarArg() && !F->hasAddressTaken()) {
      NumInternalFunc++;
      TargetTransformInfo &TTI = GetTTI(*F);
      // Change the calling convention to coldcc if either stress testing is
      // enabled or the target would like to use coldcc on functions which are
      // cold at all call sites and the callers contain no other non coldcc
      // calls.
      if (EnableColdCCStressTest ||
          (isValidCandidateForColdCC(*F, GetBFI, AllCallsCold) &&
           TTI.useColdCCForColdCall(*F))) {
        F->setCallingConv(CallingConv::Cold);
        changeCallSitesToColdCC(F);
        Changed = true;
        NumColdCC++;
      }
    }

    // Note: if the branch above switched F to coldcc, hasChangeableCC now
    // returns false, so the fastcc promotion below is skipped for it.
    if (hasChangeableCC(F) && !F->isVarArg() &&
        !F->hasAddressTaken()) {
      // If this function has a calling convention worth changing, is not a
      // varargs function, and is only called directly, promote it to use the
      // Fast calling convention.
      F->setCallingConv(CallingConv::Fast);
      ChangeCalleesToFastCall(F);
      ++NumFastCallFns;
      Changed = true;
    }

    if (F->getAttributes().hasAttrSomewhere(Attribute::Nest) &&
        !F->hasAddressTaken()) {
      // The function is not used by a trampoline intrinsic, so it is safe
      // to remove the 'nest' attribute.
      RemoveNestAttribute(F);
      ++NumNestRemoved;
      Changed = true;
    }
  }
  return Changed;
}
|
2004-10-07 12:16:33 +08:00
|
|
|
|
2016-04-26 08:27:56 +08:00
|
|
|
/// Driver over every global variable in the module: internalize unnamed
/// definitions, constant-fold initializers, delete dead globals, and run the
/// per-global optimizations. Returns true on any change.
static bool
OptimizeGlobalVars(Module &M, TargetLibraryInfo *TLI,
                   function_ref<DominatorTree &(Function &)> LookupDomTree,
                   SmallPtrSetImpl<const Comdat *> &NotDiscardableComdats) {
  bool Changed = false;

  for (Module::global_iterator GVI = M.global_begin(), E = M.global_end();
       GVI != E; ) {
    // Advance the iterator first: GV may be erased below.
    GlobalVariable *GV = &*GVI++;
    // Global variables without names cannot be referenced outside this module.
    if (!GV->hasName() && !GV->isDeclaration() && !GV->hasLocalLinkage())
      GV->setLinkage(GlobalValue::InternalLinkage);
    // Simplify the initializer.
    if (GV->hasInitializer())
      if (auto *C = dyn_cast<Constant>(GV->getInitializer())) {
        auto &DL = M.getDataLayout();
        Constant *New = ConstantFoldConstant(C, DL, TLI);
        if (New && New != C)
          GV->setInitializer(New);
      }

    if (deleteIfDead(*GV, NotDiscardableComdats)) {
      Changed = true;
      continue;
    }

    Changed |= processGlobal(*GV, TLI, LookupDomTree);
  }
  return Changed;
}
|
|
|
|
|
2015-11-13 19:05:07 +08:00
|
|
|
/// Evaluate a piece of a constantexpr store into a global initializer.  This
/// returns 'Init' modified to reflect 'Val' stored into it.  At this point, the
/// GEP operands of Addr [0, OpNo) have been stepped into.
static Constant *EvaluateStoreInto(Constant *Init, Constant *Val,
                                   ConstantExpr *Addr, unsigned OpNo) {
  // Base case of the recursion: all GEP indices consumed, store Val here.
  if (OpNo == Addr->getNumOperands()) {
    assert(Val->getType() == Init->getType() && "Type mismatch!");
    return Val;
  }

  SmallVector<Constant*, 32> Elts;
  if (StructType *STy = dyn_cast<StructType>(Init->getType())) {
    // Break up the constant into its elements.
    for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i)
      Elts.push_back(Init->getAggregateElement(i));

    // Replace the element that we are supposed to.
    ConstantInt *CU = cast<ConstantInt>(Addr->getOperand(OpNo));
    unsigned Idx = CU->getZExtValue();
    assert(Idx < STy->getNumElements() && "Struct index out of range!");
    Elts[Idx] = EvaluateStoreInto(Elts[Idx], Val, Addr, OpNo+1);

    // Return the modified struct.
    return ConstantStruct::get(STy, Elts);
  }

  // Otherwise Init is an array or vector; recurse into the indexed element.
  ConstantInt *CI = cast<ConstantInt>(Addr->getOperand(OpNo));
  SequentialType *InitTy = cast<SequentialType>(Init->getType());
  uint64_t NumElts = InitTy->getNumElements();

  // Break up the array into elements.
  for (uint64_t i = 0, e = NumElts; i != e; ++i)
    Elts.push_back(Init->getAggregateElement(i));

  assert(CI->getZExtValue() < NumElts);
  Elts[CI->getZExtValue()] =
    EvaluateStoreInto(Elts[CI->getZExtValue()], Val, Addr, OpNo+1);

  // Rebuild the aggregate of the same kind with the updated element.
  if (Init->getType()->isArrayTy())
    return ConstantArray::get(cast<ArrayType>(InitTy), Elts);
  return ConstantVector::get(Elts);
}
|
|
|
|
|
2015-11-13 19:05:07 +08:00
|
|
|
/// We have decided that Addr (which satisfies the predicate
|
2015-07-23 06:26:54 +08:00
|
|
|
/// isSimpleEnoughPointerToCommit) should get Val as its value. Make it happen.
|
|
|
|
static void CommitValueTo(Constant *Val, Constant *Addr) {
|
|
|
|
if (GlobalVariable *GV = dyn_cast<GlobalVariable>(Addr)) {
|
|
|
|
assert(GV->hasInitializer());
|
|
|
|
GV->setInitializer(Val);
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
ConstantExpr *CE = cast<ConstantExpr>(Addr);
|
|
|
|
GlobalVariable *GV = cast<GlobalVariable>(CE->getOperand(0));
|
|
|
|
GV->setInitializer(EvaluateStoreInto(GV->getInitializer(), Val, CE, 2));
|
|
|
|
}
|
|
|
|
|
[GlobalOpt] Improve common case efficiency of static global initializer evaluation
For very, very large global initializers which can be statically evaluated, the
code would create vectors of temporary Constants, modifying them in place,
before committing the resulting Constant aggregate to the global's initializer
value. This had effectively O(n^2) complexity in the size of the global
initializer and would cause memory and non-termination issues compiling some
workloads.
This change performs the static initializer evaluation and creation in batches,
once for each global in the evaluated IR memory. The existing code is maintained
as a last resort when the initializers are more complex than simple values in a
large aggregate. This should theoretically be NFC; no test, as the example case
is massive. The existing test cases pass with this, as well as the llvm test
suite.
To give an example, consider the following C++ code adapted from the clang
regression tests:
struct S {
int n = 10;
int m = 2 * n;
S(int a) : n(a) {}
};
template<typename T>
struct U {
T *r = &q;
T q = 42;
U *p = this;
};
U<S> e;
The global static constructor for 'e' will need to initialize 'r' and 'p' of
the outer struct, while also initializing the inner 'q' structs 'n' and 'm'
members. This batch algorithm will simply use general CommitValueTo() method
to handle the complex nested S struct initialization of 'q', before
processing the outermost members in a single batch. Using CommitValueTo() to
handle member in the outer struct is inefficient when the struct/array is
very large as we end up creating and destroy constant arrays for each
initialization.
For the above case, we expect the following IR to be generated:
%struct.U = type { %struct.S*, %struct.S, %struct.U* }
%struct.S = type { i32, i32 }
@e = global %struct.U { %struct.S* gep inbounds (%struct.U, %struct.U* @e,
i64 0, i32 1),
%struct.S { i32 42, i32 84 }, %struct.U* @e }
The %struct.S { i32 42, i32 84 } inner initializer is treated as a complex
constant expression, while the other two elements of @e are "simple".
Differential Revision: https://reviews.llvm.org/D42612
llvm-svn: 323933
2018-02-01 07:56:07 +08:00
|
|
|
/// Given a map of address -> value, where addresses are expected to be some form
/// of either a global or a constant GEP, set the initializer for the address to
/// be the value. This performs mostly the same function as CommitValueTo()
/// and EvaluateStoreInto() but is optimized to be more efficient for the common
/// case where the set of addresses are GEPs sharing the same underlying global,
/// processing the GEPs in batches rather than individually.
///
/// To give an example, consider the following C++ code adapted from the clang
/// regression tests:
/// struct S {
///  int n = 10;
///  int m = 2 * n;
///  S(int a) : n(a) {}
/// };
///
/// template<typename T>
/// struct U {
///  T *r = &q;
///  T q = 42;
///  U *p = this;
/// };
///
/// U<S> e;
///
/// The global static constructor for 'e' will need to initialize 'r' and 'p' of
/// the outer struct, while also initializing the inner 'q' structs 'n' and 'm'
/// members. This batch algorithm will simply use general CommitValueTo() method
/// to handle the complex nested S struct initialization of 'q', before
/// processing the outermost members in a single batch. Using CommitValueTo() to
/// handle member in the outer struct is inefficient when the struct/array is
/// very large as we end up creating and destroy constant arrays for each
/// initialization.
/// For the above case, we expect the following IR to be generated:
///
/// %struct.U = type { %struct.S*, %struct.S, %struct.U* }
/// %struct.S = type { i32, i32 }
/// @e = global %struct.U { %struct.S* gep inbounds (%struct.U, %struct.U* @e,
///                                                  i64 0, i32 1),
///                         %struct.S { i32 42, i32 84 }, %struct.U* @e }
/// The %struct.S { i32 42, i32 84 } inner initializer is treated as a complex
/// constant expression, while the other two elements of @e are "simple".
static void BatchCommitValueTo(const DenseMap<Constant*, Constant*> &Mem) {
  // Partition the stores into three groups:
  //   GVs        - stores directly to a global (whole-initializer replacement)
  //   ComplexCEs - GEPs with more than one index level (nested aggregates)
  //   SimpleCEs  - single-level GEPs, the common case handled by the batch path
  SmallVector<std::pair<GlobalVariable*, Constant*>, 32> GVs;
  SmallVector<std::pair<ConstantExpr*, Constant*>, 32> ComplexCEs;
  SmallVector<std::pair<ConstantExpr*, Constant*>, 32> SimpleCEs;
  SimpleCEs.reserve(Mem.size());

  for (const auto &I : Mem) {
    if (auto *GV = dyn_cast<GlobalVariable>(I.first)) {
      GVs.push_back(std::make_pair(GV, I.second));
    } else {
      ConstantExpr *GEP = cast<ConstantExpr>(I.first);
      // We don't handle the deeply recursive case using the batch method.
      // A GEP with operands (ptr, 0, idx) has 3 operands; more operands mean
      // it indexes into a nested aggregate.
      if (GEP->getNumOperands() > 3)
        ComplexCEs.push_back(std::make_pair(GEP, I.second));
      else
        SimpleCEs.push_back(std::make_pair(GEP, I.second));
    }
  }

  // The algorithm below doesn't handle cases like nested structs, so use the
  // slower fully general method if we have to.
  for (auto ComplexCE : ComplexCEs)
    CommitValueTo(ComplexCE.second, ComplexCE.first);

  // Direct global stores: just swap in the new initializer.
  for (auto GVPair : GVs) {
    assert(GVPair.first->hasInitializer());
    GVPair.first->setInitializer(GVPair.second);
  }

  if (SimpleCEs.empty())
    return;

  // We cache a single global's initializer elements in the case where the
  // subsequent address/val pair uses the same one. This avoids throwing away and
  // rebuilding the constant struct/vector/array just because one element is
  // modified at a time.
  SmallVector<Constant *, 32> Elts;
  Elts.reserve(SimpleCEs.size());
  GlobalVariable *CurrentGV = nullptr;

  // Flush the cached element list into CurrentGV's initializer (when Update
  // is set) and, if GV differs from CurrentGV, repopulate the cache from GV's
  // current initializer.
  auto commitAndSetupCache = [&](GlobalVariable *GV, bool Update) {
    Constant *Init = GV->getInitializer();
    Type *Ty = Init->getType();
    if (Update) {
      if (CurrentGV) {
        assert(CurrentGV && "Expected a GV to commit to!");
        Type *CurrentInitTy = CurrentGV->getInitializer()->getType();
        // We have a valid cache that needs to be committed.
        if (StructType *STy = dyn_cast<StructType>(CurrentInitTy))
          CurrentGV->setInitializer(ConstantStruct::get(STy, Elts));
        else if (ArrayType *ArrTy = dyn_cast<ArrayType>(CurrentInitTy))
          CurrentGV->setInitializer(ConstantArray::get(ArrTy, Elts));
        else
          CurrentGV->setInitializer(ConstantVector::get(Elts));
      }
      if (CurrentGV == GV)
        return;
      // Need to clear and set up cache for new initializer.
      CurrentGV = GV;
      Elts.clear();
      unsigned NumElts;
      if (auto *STy = dyn_cast<StructType>(Ty))
        NumElts = STy->getNumElements();
      else
        NumElts = cast<SequentialType>(Ty)->getNumElements();
      // Seed the cache with the global's existing element values; the loop
      // below overwrites only the elements that were actually stored to.
      for (unsigned i = 0, e = NumElts; i != e; ++i)
        Elts.push_back(Init->getAggregateElement(i));
    }
  };

  for (auto CEPair : SimpleCEs) {
    ConstantExpr *GEP = CEPair.first;
    Constant *Val = CEPair.second;

    GlobalVariable *GV = cast<GlobalVariable>(GEP->getOperand(0));
    // Only flush/rebuild the cache when we move to a different global.
    commitAndSetupCache(GV, GV != CurrentGV);
    // Operand 2 of a simple GEP is the element index within the aggregate.
    ConstantInt *CI = cast<ConstantInt>(GEP->getOperand(2));
    Elts[CI->getZExtValue()] = Val;
  }
  // The last initializer in the list needs to be committed, others
  // will be committed on a new initializer being processed.
  commitAndSetupCache(CurrentGV, true);
}
|
|
|
|
|
2015-11-13 19:05:07 +08:00
|
|
|
/// Evaluate static constructors in the function, if we can. Return true if we
|
|
|
|
/// can, false otherwise.
|
2015-03-05 02:43:29 +08:00
|
|
|
static bool EvaluateStaticConstructor(Function *F, const DataLayout &DL,
|
2016-04-26 08:27:56 +08:00
|
|
|
TargetLibraryInfo *TLI) {
|
2005-09-27 12:27:01 +08:00
|
|
|
// Call the function.
|
2014-02-21 08:06:31 +08:00
|
|
|
Evaluator Eval(DL, TLI);
|
2005-09-27 12:45:34 +08:00
|
|
|
Constant *RetValDummy;
|
2012-02-20 07:26:27 +08:00
|
|
|
bool EvalSuccess = Eval.EvaluateFunction(F, RetValDummy,
|
|
|
|
SmallVector<Constant*, 0>());
|
2012-12-07 05:57:16 +08:00
|
|
|
|
2005-09-27 12:27:01 +08:00
|
|
|
if (EvalSuccess) {
|
2014-05-03 02:35:25 +08:00
|
|
|
++NumCtorsEvaluated;
|
|
|
|
|
2005-09-27 01:07:09 +08:00
|
|
|
// We succeeded at evaluation: commit the result.
|
2018-05-14 20:53:11 +08:00
|
|
|
LLVM_DEBUG(dbgs() << "FULLY EVALUATED GLOBAL CTOR FUNCTION '"
|
|
|
|
<< F->getName() << "' to "
|
|
|
|
<< Eval.getMutatedMemory().size() << " stores.\n");
|
[GlobalOpt] Improve common case efficiency of static global initializer evaluation
For very, very large global initializers which can be statically evaluated, the
code would create vectors of temporary Constants, modifying them in place,
before committing the resulting Constant aggregate to the global's initializer
value. This had effectively O(n^2) complexity in the size of the global
initializer and would cause memory and non-termination issues compiling some
workloads.
This change performs the static initializer evaluation and creation in batches,
once for each global in the evaluated IR memory. The existing code is maintained
as a last resort when the initializers are more complex than simple values in a
large aggregate. This should theoretically by NFC, no test as the example case
is massive. The existing test cases pass with this, as well as the llvm test
suite.
To give an example, consider the following C++ code adapted from the clang
regression tests:
struct S {
int n = 10;
int m = 2 * n;
S(int a) : n(a) {}
};
template<typename T>
struct U {
T *r = &q;
T q = 42;
U *p = this;
};
U<S> e;
The global static constructor for 'e' will need to initialize 'r' and 'p' of
the outer struct, while also initializing the inner 'q' structs 'n' and 'm'
members. This batch algorithm will simply use general CommitValueTo() method
to handle the complex nested S struct initialization of 'q', before
processing the outermost members in a single batch. Using CommitValueTo() to
handle member in the outer struct is inefficient when the struct/array is
very large as we end up creating and destroy constant arrays for each
initialization.
For the above case, we expect the following IR to be generated:
%struct.U = type { %struct.S*, %struct.S, %struct.U* }
%struct.S = type { i32, i32 }
@e = global %struct.U { %struct.S* gep inbounds (%struct.U, %struct.U* @e,
i64 0, i32 1),
%struct.S { i32 42, i32 84 }, %struct.U* @e }
The %struct.S { i32 42, i32 84 } inner initializer is treated as a complex
constant expression, while the other two elements of @e are "simple".
Differential Revision: https://reviews.llvm.org/D42612
llvm-svn: 323933
2018-02-01 07:56:07 +08:00
|
|
|
BatchCommitValueTo(Eval.getMutatedMemory());
|
2014-08-25 07:23:06 +08:00
|
|
|
for (GlobalVariable *GV : Eval.getInvariants())
|
|
|
|
GV->setConstant(true);
|
2005-09-27 01:07:09 +08:00
|
|
|
}
|
2010-10-19 05:16:00 +08:00
|
|
|
|
2005-09-27 12:27:01 +08:00
|
|
|
return EvalSuccess;
|
2005-09-26 12:44:35 +08:00
|
|
|
}
|
|
|
|
|
2014-03-08 05:52:38 +08:00
|
|
|
/// Ordering predicate for array_pod_sort: compares two constants by the name
/// of the underlying global value, looking through pointer casts (but not
/// aliases).
static int compareNames(Constant *const *A, Constant *const *B) {
  return (*A)->stripPointerCastsNoFollowAliases()->getName().compare(
      (*B)->stripPointerCastsNoFollowAliases()->getName());
}
|
|
|
|
|
2013-06-12 01:48:06 +08:00
|
|
|
static void setUsedInitializer(GlobalVariable &V,
|
2018-06-12 19:16:56 +08:00
|
|
|
const SmallPtrSetImpl<GlobalValue *> &Init) {
|
2013-07-21 07:33:15 +08:00
|
|
|
if (Init.empty()) {
|
|
|
|
V.eraseFromParent();
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
2014-01-03 03:53:49 +08:00
|
|
|
// Type of pointer to the array of pointers.
|
|
|
|
PointerType *Int8PtrTy = Type::getInt8PtrTy(V.getContext(), 0);
|
2013-05-10 01:22:59 +08:00
|
|
|
|
2017-10-11 06:49:55 +08:00
|
|
|
SmallVector<Constant *, 8> UsedArray;
|
2014-08-21 13:55:13 +08:00
|
|
|
for (GlobalValue *GV : Init) {
|
2014-01-03 03:53:49 +08:00
|
|
|
Constant *Cast
|
2014-08-21 13:55:13 +08:00
|
|
|
= ConstantExpr::getPointerBitCastOrAddrSpaceCast(GV, Int8PtrTy);
|
2013-06-12 01:48:06 +08:00
|
|
|
UsedArray.push_back(Cast);
|
|
|
|
}
|
|
|
|
// Sort to get deterministic order.
|
2014-03-08 05:52:38 +08:00
|
|
|
array_pod_sort(UsedArray.begin(), UsedArray.end(), compareNames);
|
2013-06-12 01:48:06 +08:00
|
|
|
ArrayType *ATy = ArrayType::get(Int8PtrTy, UsedArray.size());
|
|
|
|
|
|
|
|
Module *M = V.getParent();
|
|
|
|
V.removeFromParent();
|
|
|
|
GlobalVariable *NV =
|
2017-10-11 06:49:55 +08:00
|
|
|
new GlobalVariable(*M, ATy, false, GlobalValue::AppendingLinkage,
|
|
|
|
ConstantArray::get(ATy, UsedArray), "");
|
2013-06-12 01:48:06 +08:00
|
|
|
NV->takeName(&V);
|
|
|
|
NV->setSection("llvm.metadata");
|
|
|
|
delete &V;
|
|
|
|
}
|
2013-05-10 01:22:59 +08:00
|
|
|
|
2013-06-12 01:48:06 +08:00
|
|
|
namespace {

/// An easy to access representation of llvm.used and llvm.compiler.used.
class LLVMUsed {
  // Set of globals appearing in the llvm.used array.
  SmallPtrSet<GlobalValue *, 8> Used;
  // Set of globals appearing in the llvm.compiler.used array.
  SmallPtrSet<GlobalValue *, 8> CompilerUsed;
  // The @llvm.used variable itself, or null if the module has none.
  GlobalVariable *UsedV;
  // The @llvm.compiler.used variable itself, or null if the module has none.
  GlobalVariable *CompilerUsedV;

public:
  /// Collect both used-lists from the module into the sets above.
  LLVMUsed(Module &M) {
    UsedV = collectUsedGlobalVariables(M, Used, false);
    CompilerUsedV = collectUsedGlobalVariables(M, CompilerUsed, true);
  }

  using iterator = SmallPtrSet<GlobalValue *, 8>::iterator;
  using used_iterator_range = iterator_range<iterator>;

  iterator usedBegin() { return Used.begin(); }
  iterator usedEnd() { return Used.end(); }

  /// Range over the members of llvm.used.
  used_iterator_range used() {
    return used_iterator_range(usedBegin(), usedEnd());
  }

  iterator compilerUsedBegin() { return CompilerUsed.begin(); }
  iterator compilerUsedEnd() { return CompilerUsed.end(); }

  /// Range over the members of llvm.compiler.used.
  used_iterator_range compilerUsed() {
    return used_iterator_range(compilerUsedBegin(), compilerUsedEnd());
  }

  /// True if GV is in llvm.used.
  bool usedCount(GlobalValue *GV) const { return Used.count(GV); }

  /// True if GV is in llvm.compiler.used.
  bool compilerUsedCount(GlobalValue *GV) const {
    return CompilerUsed.count(GV);
  }

  // Set mutators; these change only the in-memory sets -- call
  // syncVariablesAndSets() to write the changes back to the IR.
  bool usedErase(GlobalValue *GV) { return Used.erase(GV); }
  bool compilerUsedErase(GlobalValue *GV) { return CompilerUsed.erase(GV); }
  bool usedInsert(GlobalValue *GV) { return Used.insert(GV).second; }

  bool compilerUsedInsert(GlobalValue *GV) {
    return CompilerUsed.insert(GV).second;
  }

  /// Rewrite the llvm.used / llvm.compiler.used variables to match the
  /// (possibly modified) sets.
  void syncVariablesAndSets() {
    if (UsedV)
      setUsedInitializer(*UsedV, Used);
    if (CompilerUsedV)
      setUsedInitializer(*CompilerUsedV, CompilerUsed);
  }
};

} // end anonymous namespace
|
2013-05-10 01:22:59 +08:00
|
|
|
|
2013-06-12 01:48:06 +08:00
|
|
|
/// Return true if the alias GA has at least one use that is not the
/// llvm.used / llvm.compiler.used list entry itself.
static bool hasUseOtherThanLLVMUsed(GlobalAlias &GA, const LLVMUsed &U) {
  // No use at all.
  if (GA.use_empty())
    return false;

  assert((!U.usedCount(&GA) || !U.compilerUsedCount(&GA)) &&
         "We should have removed the duplicated "
         "element from llvm.compiler.used");

  // Strictly more than one use. So at least one is not in llvm.used and
  // llvm.compiler.used.
  if (!GA.hasOneUse())
    return true;

  // Exactly one use. Check if it is in llvm.used or llvm.compiler.used.
  bool OnAUsedList = U.usedCount(&GA) || U.compilerUsedCount(&GA);
  return !OnAUsedList;
}
|
|
|
|
|
2013-06-12 01:48:06 +08:00
|
|
|
/// Return true if V has two or more uses beyond any use coming from the
/// llvm.used / llvm.compiler.used lists.
static bool hasMoreThanOneUseOtherThanLLVMUsed(GlobalValue &V,
                                               const LLVMUsed &U) {
  assert((!U.usedCount(&V) || !U.compilerUsedCount(&V)) &&
         "We should have removed the duplicated "
         "element from llvm.compiler.used");
  // One "free" use is allowed for a used-list entry, so raise the threshold
  // from 2 to 3 in that case.
  const bool OnAUsedList = U.usedCount(&V) || U.compilerUsedCount(&V);
  return V.hasNUsesOrMore(OnAUsedList ? 3 : 2);
}
|
|
|
|
|
2013-06-12 01:48:06 +08:00
|
|
|
/// Return true if something other than this module's IR could reference the
/// alias: either it is externally visible, or it is kept alive by one of the
/// used-lists.
static bool mayHaveOtherReferences(GlobalAlias &GA, const LLVMUsed &U) {
  return !GA.hasLocalLinkage() || U.usedCount(&GA) || U.compilerUsedCount(&GA);
}
|
2013-05-10 01:22:59 +08:00
|
|
|
|
2014-08-21 13:55:13 +08:00
|
|
|
/// Decide whether the alias GA has uses we can redirect to its aliasee.
/// Sets RenameTarget when the aliasee should additionally take over the
/// alias's name/linkage (i.e. the alias can be deleted outright).
static bool hasUsesToReplace(GlobalAlias &GA, const LLVMUsed &U,
                             bool &RenameTarget) {
  RenameTarget = false;
  bool HasReplaceableUses = hasUseOtherThanLLVMUsed(GA, U);

  // If the alias is externally visible, we may still be able to simplify it.
  if (!mayHaveOtherReferences(GA, U))
    return HasReplaceableUses;

  // If the aliasee has internal linkage, give it the name and linkage
  // of the alias, and delete the alias. This turns:
  //   define internal ... @f(...)
  //   @a = alias ... @f
  // into:
  //   define ... @a(...)
  Constant *Aliasee = GA.getAliasee();
  GlobalValue *Target = cast<GlobalValue>(Aliasee->stripPointerCasts());
  if (!Target->hasLocalLinkage())
    return HasReplaceableUses;

  // Do not perform the transform if multiple aliases potentially target the
  // aliasee. This check also ensures that it is safe to replace the section
  // and other attributes of the aliasee with those of the alias.
  if (hasMoreThanOneUseOtherThanLLVMUsed(*Target, U))
    return HasReplaceableUses;

  RenameTarget = true;
  return true;
}
|
|
|
|
|
2016-04-26 08:27:56 +08:00
|
|
|
/// Resolve aliases to their aliasees where possible: internalize unnamed
/// aliases, delete dead ones, redirect uses of an alias to its target, and
/// fold local-linkage alias/aliasee pairs into a single renamed definition.
/// Returns true if the module was changed.
static bool
OptimizeGlobalAliases(Module &M,
                      SmallPtrSetImpl<const Comdat *> &NotDiscardableComdats) {
  bool Changed = false;
  LLVMUsed Used(M);

  // Deduplicate: anything already on llvm.used need not also be on
  // llvm.compiler.used.
  for (GlobalValue *GV : Used.used())
    Used.compilerUsedErase(GV);

  // Iterator is advanced before the body runs because the body may erase J
  // from the alias list.
  for (Module::alias_iterator I = M.alias_begin(), E = M.alias_end();
       I != E;) {
    GlobalAlias *J = &*I++;

    // Aliases without names cannot be referenced outside this module.
    if (!J->hasName() && !J->isDeclaration() && !J->hasLocalLinkage())
      J->setLinkage(GlobalValue::InternalLinkage);

    if (deleteIfDead(*J, NotDiscardableComdats)) {
      Changed = true;
      continue;
    }

    // If the alias can change at link time, nothing can be done - bail out.
    if (J->isInterposable())
      continue;

    Constant *Aliasee = J->getAliasee();
    GlobalValue *Target = dyn_cast<GlobalValue>(Aliasee->stripPointerCasts());
    // We can't trivially replace the alias with the aliasee if the aliasee is
    // non-trivial in some way.
    // TODO: Try to handle non-zero GEPs of local aliasees.
    if (!Target)
      continue;
    Target->removeDeadConstantUsers();

    // Make all users of the alias use the aliasee instead.
    bool RenameTarget;
    if (!hasUsesToReplace(*J, Used, RenameTarget))
      continue;

    J->replaceAllUsesWith(ConstantExpr::getBitCast(Aliasee, J->getType()));
    ++NumAliasesResolved;
    Changed = true;

    if (RenameTarget) {
      // Give the aliasee the name, linkage and other attributes of the alias.
      Target->takeName(&*J);
      Target->setLinkage(J->getLinkage());
      Target->setDSOLocal(J->isDSOLocal());
      Target->setVisibility(J->getVisibility());
      Target->setDLLStorageClass(J->getDLLStorageClass());

      // Keep the used-lists consistent: the renamed target replaces the
      // alias's entry.
      if (Used.usedErase(&*J))
        Used.usedInsert(Target);

      if (Used.compilerUsedErase(&*J))
        Used.compilerUsedInsert(Target);
    } else if (mayHaveOtherReferences(*J, Used))
      continue;

    // Delete the alias.
    M.getAliasList().erase(J);
    ++NumAliasesRemoved;
    Changed = true;
  }

  // Write any used-list edits back to the IR.
  Used.syncVariablesAndSets();

  return Changed;
}
|
2005-09-26 09:43:45 +08:00
|
|
|
|
2012-02-12 10:15:20 +08:00
|
|
|
/// Look up the module's __cxa_atexit function, returning null unless it both
/// exists and matches the library's expected prototype.
static Function *FindCXAAtExit(Module &M, TargetLibraryInfo *TLI) {
  LibFunc AtExitFunc = LibFunc_cxa_atexit;
  if (!TLI->has(AtExitFunc))
    return nullptr;

  Function *Candidate = M.getFunction(TLI->getName(AtExitFunc));
  if (!Candidate)
    return nullptr;

  // Make sure that the function has the correct prototype.
  if (!TLI->getLibFunc(*Candidate, AtExitFunc) ||
      AtExitFunc != LibFunc_cxa_atexit)
    return nullptr;

  return Candidate;
}
|
|
|
|
|
2015-11-13 19:05:07 +08:00
|
|
|
/// Returns whether the given function is an empty C++ destructor and can
|
|
|
|
/// therefore be eliminated.
|
2011-03-21 01:59:11 +08:00
|
|
|
/// Note that we assume that other optimization passes have already simplified
|
|
|
|
/// the code so we only look for a function with a single basic block, where
|
2012-02-10 00:28:15 +08:00
|
|
|
/// the only allowed instructions are 'ret', 'call' to an empty C++ dtor and
|
|
|
|
/// other side-effect free instructions.
|
2011-03-21 04:16:43 +08:00
|
|
|
static bool cxxDtorIsEmpty(const Function &Fn,
|
|
|
|
SmallPtrSet<const Function *, 8> &CalledFunctions) {
|
2011-03-21 03:51:13 +08:00
|
|
|
// FIXME: We could eliminate C++ destructors if they're readonly/readnone and
|
2011-03-21 10:26:01 +08:00
|
|
|
// nounwind, but that doesn't seem worth doing.
|
2011-03-21 03:51:13 +08:00
|
|
|
if (Fn.isDeclaration())
|
|
|
|
return false;
|
2011-03-21 01:59:11 +08:00
|
|
|
|
|
|
|
if (++Fn.begin() != Fn.end())
|
|
|
|
return false;
|
|
|
|
|
|
|
|
const BasicBlock &EntryBlock = Fn.getEntryBlock();
|
|
|
|
for (BasicBlock::const_iterator I = EntryBlock.begin(), E = EntryBlock.end();
|
|
|
|
I != E; ++I) {
|
|
|
|
if (const CallInst *CI = dyn_cast<CallInst>(I)) {
|
2011-03-21 22:54:40 +08:00
|
|
|
// Ignore debug intrinsics.
|
|
|
|
if (isa<DbgInfoIntrinsic>(CI))
|
|
|
|
continue;
|
|
|
|
|
2011-03-21 01:59:11 +08:00
|
|
|
const Function *CalledFn = CI->getCalledFunction();
|
|
|
|
|
|
|
|
if (!CalledFn)
|
|
|
|
return false;
|
|
|
|
|
2011-03-22 11:21:01 +08:00
|
|
|
SmallPtrSet<const Function *, 8> NewCalledFunctions(CalledFunctions);
|
|
|
|
|
2011-03-21 03:51:13 +08:00
|
|
|
// Don't treat recursive functions as empty.
|
2014-11-19 15:49:26 +08:00
|
|
|
if (!NewCalledFunctions.insert(CalledFn).second)
|
2011-03-21 03:51:13 +08:00
|
|
|
return false;
|
|
|
|
|
2011-03-22 11:21:01 +08:00
|
|
|
if (!cxxDtorIsEmpty(*CalledFn, NewCalledFunctions))
|
2011-03-21 01:59:11 +08:00
|
|
|
return false;
|
|
|
|
} else if (isa<ReturnInst>(*I))
|
2012-02-09 22:26:06 +08:00
|
|
|
return true; // We're done.
|
|
|
|
else if (I->mayHaveSideEffects())
|
|
|
|
return false; // Destructor with side effects, bail.
|
2011-03-21 01:59:11 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
2016-04-26 08:27:56 +08:00
|
|
|
/// Remove __cxa_atexit registrations whose destructor argument is provably
/// empty. Returns true if any call was removed.
static bool OptimizeEmptyGlobalCXXDtors(Function *CXAAtExitFn) {
  /// Itanium C++ ABI p3.3.5:
  ///
  /// After constructing a global (or local static) object, that will require
  /// destruction on exit, a termination function is registered as follows:
  ///
  /// extern "C" int __cxa_atexit ( void (*f)(void *), void *p, void *d );
  ///
  /// This registration, e.g. __cxa_atexit(f,p,d), is intended to cause the
  /// call f(p) when DSO d is unloaded, before all such termination calls
  /// registered before this one. It returns zero if registration is
  /// successful, nonzero on failure.

  // This pass will look for calls to __cxa_atexit where the function is trivial
  // and remove them.
  bool Changed = false;

  // Advance the iterator before inspecting the user, since the body may erase
  // the call instruction we are looking at.
  for (auto I = CXAAtExitFn->user_begin(), E = CXAAtExitFn->user_end();
       I != E;) {
    // We're only interested in calls. Theoretically, we could handle invoke
    // instructions as well, but neither llvm-gcc nor clang generate invokes
    // to __cxa_atexit.
    CallInst *CI = dyn_cast<CallInst>(*I++);
    if (!CI)
      continue;

    // Arg 0 is the destructor function being registered.
    Function *DtorFn =
      dyn_cast<Function>(CI->getArgOperand(0)->stripPointerCasts());
    if (!DtorFn)
      continue;

    SmallPtrSet<const Function *, 8> CalledFunctions;
    if (!cxxDtorIsEmpty(*DtorFn, CalledFunctions))
      continue;

    // Just remove the call. Callers of __cxa_atexit only check the return
    // value, which we replace with null (== 0, i.e. "registration succeeded").
    CI->replaceAllUsesWith(Constant::getNullValue(CI->getType()));
    CI->eraseFromParent();

    ++NumCXXDtorsRemoved;

    Changed |= true;
  }

  return Changed;
}
|
|
|
|
|
2016-04-26 08:28:01 +08:00
|
|
|
static bool optimizeGlobalsInModule(
|
|
|
|
Module &M, const DataLayout &DL, TargetLibraryInfo *TLI,
|
2018-01-31 00:17:22 +08:00
|
|
|
function_ref<TargetTransformInfo &(Function &)> GetTTI,
|
|
|
|
function_ref<BlockFrequencyInfo &(Function &)> GetBFI,
|
2016-04-26 08:28:01 +08:00
|
|
|
function_ref<DominatorTree &(Function &)> LookupDomTree) {
|
2018-06-12 19:16:56 +08:00
|
|
|
SmallPtrSet<const Comdat *, 8> NotDiscardableComdats;
|
2016-04-26 08:28:01 +08:00
|
|
|
bool Changed = false;
|
2005-09-26 09:43:45 +08:00
|
|
|
bool LocalChange = true;
|
2004-10-09 04:59:28 +08:00
|
|
|
while (LocalChange) {
|
|
|
|
LocalChange = false;
|
2010-10-19 05:16:00 +08:00
|
|
|
|
2014-10-08 15:23:31 +08:00
|
|
|
NotDiscardableComdats.clear();
|
|
|
|
for (const GlobalVariable &GV : M.globals())
|
|
|
|
if (const Comdat *C = GV.getComdat())
|
|
|
|
if (!GV.isDiscardableIfUnused() || !GV.use_empty())
|
|
|
|
NotDiscardableComdats.insert(C);
|
|
|
|
for (Function &F : M)
|
|
|
|
if (const Comdat *C = F.getComdat())
|
|
|
|
if (!F.isDefTriviallyDead())
|
|
|
|
NotDiscardableComdats.insert(C);
|
|
|
|
for (GlobalAlias &GA : M.aliases())
|
|
|
|
if (const Comdat *C = GA.getComdat())
|
|
|
|
if (!GA.isDiscardableIfUnused() || !GA.use_empty())
|
|
|
|
NotDiscardableComdats.insert(C);
|
|
|
|
|
2005-09-26 09:43:45 +08:00
|
|
|
// Delete functions that are trivially dead, ccc -> fastcc
|
2018-01-31 00:17:22 +08:00
|
|
|
LocalChange |= OptimizeFunctions(M, TLI, GetTTI, GetBFI, LookupDomTree,
|
|
|
|
NotDiscardableComdats);
|
2010-10-19 05:16:00 +08:00
|
|
|
|
2005-09-26 09:43:45 +08:00
|
|
|
// Optimize global_ctors list.
|
2014-05-06 09:44:26 +08:00
|
|
|
LocalChange |= optimizeGlobalCtorsList(M, [&](Function *F) {
|
|
|
|
return EvaluateStaticConstructor(F, DL, TLI);
|
|
|
|
});
|
2010-10-19 05:16:00 +08:00
|
|
|
|
2005-09-26 09:43:45 +08:00
|
|
|
// Optimize non-address-taken globals.
|
2016-04-26 08:27:56 +08:00
|
|
|
LocalChange |= OptimizeGlobalVars(M, TLI, LookupDomTree,
|
|
|
|
NotDiscardableComdats);
|
2008-09-10 03:04:59 +08:00
|
|
|
|
|
|
|
// Resolve aliases, when possible.
|
2016-04-26 08:27:56 +08:00
|
|
|
LocalChange |= OptimizeGlobalAliases(M, NotDiscardableComdats);
|
2011-03-21 01:59:11 +08:00
|
|
|
|
2013-05-15 05:52:44 +08:00
|
|
|
// Try to remove trivial global destructors if they are not removed
|
|
|
|
// already.
|
|
|
|
Function *CXAAtExitFn = FindCXAAtExit(M, TLI);
|
2011-03-21 01:59:11 +08:00
|
|
|
if (CXAAtExitFn)
|
|
|
|
LocalChange |= OptimizeEmptyGlobalCXXDtors(CXAAtExitFn);
|
|
|
|
|
2008-09-10 03:04:59 +08:00
|
|
|
Changed |= LocalChange;
|
2004-10-07 12:16:33 +08:00
|
|
|
}
|
2010-10-19 05:16:00 +08:00
|
|
|
|
2005-09-26 09:43:45 +08:00
|
|
|
// TODO: Move all global ctors functions to the end of the module for code
|
|
|
|
// layout.
|
2010-10-19 05:16:00 +08:00
|
|
|
|
2004-10-07 12:16:33 +08:00
|
|
|
return Changed;
|
|
|
|
}
|
2016-04-26 08:28:01 +08:00
|
|
|
|
2016-08-09 08:28:38 +08:00
|
|
|
PreservedAnalyses GlobalOptPass::run(Module &M, ModuleAnalysisManager &AM) {
|
2016-04-26 08:28:01 +08:00
|
|
|
auto &DL = M.getDataLayout();
|
|
|
|
auto &TLI = AM.getResult<TargetLibraryAnalysis>(M);
|
|
|
|
auto &FAM =
|
|
|
|
AM.getResult<FunctionAnalysisManagerModuleProxy>(M).getManager();
|
|
|
|
auto LookupDomTree = [&FAM](Function &F) -> DominatorTree &{
|
|
|
|
return FAM.getResult<DominatorTreeAnalysis>(F);
|
|
|
|
};
|
2018-01-31 00:17:22 +08:00
|
|
|
auto GetTTI = [&FAM](Function &F) -> TargetTransformInfo & {
|
|
|
|
return FAM.getResult<TargetIRAnalysis>(F);
|
|
|
|
};
|
|
|
|
|
|
|
|
auto GetBFI = [&FAM](Function &F) -> BlockFrequencyInfo & {
|
|
|
|
return FAM.getResult<BlockFrequencyAnalysis>(F);
|
|
|
|
};
|
|
|
|
|
|
|
|
if (!optimizeGlobalsInModule(M, DL, &TLI, GetTTI, GetBFI, LookupDomTree))
|
2016-04-26 08:28:01 +08:00
|
|
|
return PreservedAnalyses::all();
|
|
|
|
return PreservedAnalyses::none();
|
|
|
|
}
|
|
|
|
|
|
|
|
namespace {
|
2017-10-11 06:49:55 +08:00
|
|
|
|
2016-04-26 08:28:01 +08:00
|
|
|
struct GlobalOptLegacyPass : public ModulePass {
|
|
|
|
static char ID; // Pass identification, replacement for typeid
|
2017-10-11 06:49:55 +08:00
|
|
|
|
2016-04-26 08:28:01 +08:00
|
|
|
GlobalOptLegacyPass() : ModulePass(ID) {
|
|
|
|
initializeGlobalOptLegacyPassPass(*PassRegistry::getPassRegistry());
|
|
|
|
}
|
|
|
|
|
|
|
|
bool runOnModule(Module &M) override {
|
|
|
|
if (skipModule(M))
|
|
|
|
return false;
|
|
|
|
|
|
|
|
auto &DL = M.getDataLayout();
|
|
|
|
auto *TLI = &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI();
|
|
|
|
auto LookupDomTree = [this](Function &F) -> DominatorTree & {
|
|
|
|
return this->getAnalysis<DominatorTreeWrapperPass>(F).getDomTree();
|
|
|
|
};
|
2018-01-31 00:17:22 +08:00
|
|
|
auto GetTTI = [this](Function &F) -> TargetTransformInfo & {
|
|
|
|
return this->getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F);
|
|
|
|
};
|
|
|
|
|
|
|
|
auto GetBFI = [this](Function &F) -> BlockFrequencyInfo & {
|
|
|
|
return this->getAnalysis<BlockFrequencyInfoWrapperPass>(F).getBFI();
|
|
|
|
};
|
|
|
|
|
|
|
|
return optimizeGlobalsInModule(M, DL, TLI, GetTTI, GetBFI, LookupDomTree);
|
2016-04-26 08:28:01 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
void getAnalysisUsage(AnalysisUsage &AU) const override {
|
|
|
|
AU.addRequired<TargetLibraryInfoWrapperPass>();
|
2018-01-31 00:17:22 +08:00
|
|
|
AU.addRequired<TargetTransformInfoWrapperPass>();
|
2016-04-26 08:28:01 +08:00
|
|
|
AU.addRequired<DominatorTreeWrapperPass>();
|
2018-01-31 00:17:22 +08:00
|
|
|
AU.addRequired<BlockFrequencyInfoWrapperPass>();
|
2016-04-26 08:28:01 +08:00
|
|
|
}
|
|
|
|
};
|
2017-10-11 06:49:55 +08:00
|
|
|
|
|
|
|
} // end anonymous namespace
|
2016-04-26 08:28:01 +08:00
|
|
|
|
|
|
|
char GlobalOptLegacyPass::ID = 0;
|
2017-10-11 06:49:55 +08:00
|
|
|
|
2016-04-26 08:28:01 +08:00
|
|
|
INITIALIZE_PASS_BEGIN(GlobalOptLegacyPass, "globalopt",
|
|
|
|
"Global Variable Optimizer", false, false)
|
|
|
|
INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
|
2018-01-31 00:17:22 +08:00
|
|
|
INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass)
|
|
|
|
INITIALIZE_PASS_DEPENDENCY(BlockFrequencyInfoWrapperPass)
|
2016-04-26 08:28:01 +08:00
|
|
|
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
|
|
|
|
INITIALIZE_PASS_END(GlobalOptLegacyPass, "globalopt",
|
|
|
|
"Global Variable Optimizer", false, false)
|
|
|
|
|
|
|
|
ModulePass *llvm::createGlobalOptimizerPass() {
|
|
|
|
return new GlobalOptLegacyPass();
|
|
|
|
}
|