Thread Safety Analysis: major update to thread safety TIL.

Numerous changes, including:
  * Changed the way variables and instructions are handled in basic blocks to
    be more efficient (see the sketch below).
  * Eliminated SExprRef.
  * Simplified futures.
  * Fixed documentation.
  * Computed dominator and post-dominator trees.

llvm-svn: 217556
DeLesley Hutchins, 2014-09-10 22:12:52 +00:00
commit 4e38f100b5 (parent a00a6526dc)
8 changed files with 914 additions and 765 deletions
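A rough sketch of the representational change behind the first bullet, using hypothetical stand-in types rather than the real TIL classes: block arguments and instructions are no longer wrapped in til::Variable; Phi nodes serve directly as arguments, instructions are stored as plain til::SExpr*, and every instruction records its enclosing block and a numeric id assigned when the CFG is renumbered.

#include <vector>

// Hypothetical stand-ins for the real TIL types, for illustration only.
struct BasicBlock;

struct SExpr {
  BasicBlock *Block = nullptr; // set once the instruction lives in a block
  int Id = -1;                 // assigned by the CFG renumbering pass
};

struct Phi : SExpr {
  std::vector<SExpr*> Values;  // one incoming value per predecessor
};

struct BasicBlock {
  // Old layout (before this commit): std::vector<Variable*> Args, Instrs;
  std::vector<Phi*>   Args;    // new: Phi nodes are the block arguments
  std::vector<SExpr*> Instrs;  // new: instructions are plain SExprs
  SExpr              *TermInstr = nullptr;
};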


@ -477,9 +477,9 @@ private:
// Indexed by clang BlockID.
LVarDefinitionMap CurrentLVarMap;
std::vector<til::Variable*> CurrentArguments;
std::vector<til::Variable*> CurrentInstructions;
std::vector<til::Variable*> IncompleteArgs;
std::vector<til::Phi*> CurrentArguments;
std::vector<til::SExpr*> CurrentInstructions;
std::vector<til::Phi*> IncompleteArgs;
til::BasicBlock *CurrentBB;
BlockInfo *CurrentBlockInfo;
};


@ -41,13 +41,13 @@ private:
};
class Terminal : public LExpr {
til::SExprRef Expr;
til::SExpr *Expr;
public:
Terminal(til::SExpr *Expr) : LExpr(LExpr::Terminal), Expr(Expr) {}
const til::SExpr *expr() const { return Expr.get(); }
til::SExpr *expr() { return Expr.get(); }
const til::SExpr *expr() const { return Expr; }
til::SExpr *expr() { return Expr; }
static bool classof(const LExpr *E) { return E->kind() == LExpr::Terminal; }
};


@ -44,8 +44,11 @@ TIL_OPCODE_DEF(Cast)
TIL_OPCODE_DEF(SCFG)
TIL_OPCODE_DEF(BasicBlock)
TIL_OPCODE_DEF(Phi)
// Terminator instructions
TIL_OPCODE_DEF(Goto)
TIL_OPCODE_DEF(Branch)
TIL_OPCODE_DEF(Return)
// pseudo-terms
TIL_OPCODE_DEF(Identifier)
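These opcodes are consumed as an X-macro: a header defines TIL_OPCODE_DEF(X) to whatever it needs per opcode, includes this .def file, and undefines the macro again (the traversal switch further down does exactly this). A standalone sketch of the pattern, with a hypothetical local opcode list standing in for the .def file:

#include <iostream>

// Hypothetical opcode list standing in for ThreadSafetyOps.def.
#define TIL_OPCODES(X) X(Goto) X(Branch) X(Return) X(Phi)

enum TIL_Opcode {
#define TIL_OPCODE_DEF(X) COP_##X,
  TIL_OPCODES(TIL_OPCODE_DEF)
#undef TIL_OPCODE_DEF
  COP_MAX
};

// Re-defining TIL_OPCODE_DEF for each use keeps enum values, switch cases,
// printable names, etc. in sync with a single opcode list.
const char *opcodeName(TIL_Opcode Op) {
  switch (Op) {
#define TIL_OPCODE_DEF(X) case COP_##X: return #X;
  TIL_OPCODES(TIL_OPCODE_DEF)
#undef TIL_OPCODE_DEF
  default: return "unknown";
  }
}

int main() { std::cout << opcodeName(COP_Return) << "\n"; } // prints "Return"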

(File diff suppressed because it is too large.)


@ -58,11 +58,16 @@ public:
// Traverse an expression -- returning a result of type R_SExpr.
// Override this method to do something for every expression, regardless
// of which kind it is.
typename R::R_SExpr traverse(SExprRef &E, typename R::R_Ctx Ctx) {
return traverse(E.get(), Ctx);
// E is a reference, so this can be used for in-place updates.
// The type T must be a subclass of SExpr.
template <class T>
typename R::R_SExpr traverse(T* &E, typename R::R_Ctx Ctx) {
return traverseSExpr(E, Ctx);
}
typename R::R_SExpr traverse(SExpr *E, typename R::R_Ctx Ctx) {
// Override this method to do something for every expression.
// Does not allow in-place updates.
typename R::R_SExpr traverseSExpr(SExpr *E, typename R::R_Ctx Ctx) {
return traverseByCase(E, Ctx);
}
@ -75,6 +80,7 @@ public:
#include "ThreadSafetyOps.def"
#undef TIL_OPCODE_DEF
}
return self()->reduceNull();
}
// Traverse e, by static dispatch on the type "X" of e.
@ -92,10 +98,10 @@ public:
class SimpleReducerBase {
public:
enum TraversalKind {
TRV_Normal,
TRV_Decl,
TRV_Lazy,
TRV_Type
TRV_Normal, // ordinary subexpressions
TRV_Decl, // declarations (e.g. function bodies)
TRV_Lazy, // expressions that require lazy evaluation
TRV_Type // type expressions
};
// R_Ctx defines a "context" for the traversal, which encodes information
@ -147,153 +153,6 @@ protected:
};
// Implements a traversal that makes a deep copy of an SExpr.
// The default behavior of reduce##X(...) is to create a copy of the original.
// Subclasses can override reduce##X to implement non-destructive rewriting
// passes.
template<class Self>
class CopyReducer : public Traversal<Self, CopyReducerBase>,
public CopyReducerBase {
public:
CopyReducer(MemRegionRef A) : CopyReducerBase(A) {}
public:
R_SExpr reduceNull() {
return nullptr;
}
// R_SExpr reduceFuture(...) is never used.
R_SExpr reduceUndefined(Undefined &Orig) {
return new (Arena) Undefined(Orig);
}
R_SExpr reduceWildcard(Wildcard &Orig) {
return new (Arena) Wildcard(Orig);
}
R_SExpr reduceLiteral(Literal &Orig) {
return new (Arena) Literal(Orig);
}
template<class T>
R_SExpr reduceLiteralT(LiteralT<T> &Orig) {
return new (Arena) LiteralT<T>(Orig);
}
R_SExpr reduceLiteralPtr(LiteralPtr &Orig) {
return new (Arena) LiteralPtr(Orig);
}
R_SExpr reduceFunction(Function &Orig, Variable *Nvd, R_SExpr E0) {
return new (Arena) Function(Orig, Nvd, E0);
}
R_SExpr reduceSFunction(SFunction &Orig, Variable *Nvd, R_SExpr E0) {
return new (Arena) SFunction(Orig, Nvd, E0);
}
R_SExpr reduceCode(Code &Orig, R_SExpr E0, R_SExpr E1) {
return new (Arena) Code(Orig, E0, E1);
}
R_SExpr reduceField(Field &Orig, R_SExpr E0, R_SExpr E1) {
return new (Arena) Field(Orig, E0, E1);
}
R_SExpr reduceApply(Apply &Orig, R_SExpr E0, R_SExpr E1) {
return new (Arena) Apply(Orig, E0, E1);
}
R_SExpr reduceSApply(SApply &Orig, R_SExpr E0, R_SExpr E1) {
return new (Arena) SApply(Orig, E0, E1);
}
R_SExpr reduceProject(Project &Orig, R_SExpr E0) {
return new (Arena) Project(Orig, E0);
}
R_SExpr reduceCall(Call &Orig, R_SExpr E0) {
return new (Arena) Call(Orig, E0);
}
R_SExpr reduceAlloc(Alloc &Orig, R_SExpr E0) {
return new (Arena) Alloc(Orig, E0);
}
R_SExpr reduceLoad(Load &Orig, R_SExpr E0) {
return new (Arena) Load(Orig, E0);
}
R_SExpr reduceStore(Store &Orig, R_SExpr E0, R_SExpr E1) {
return new (Arena) Store(Orig, E0, E1);
}
R_SExpr reduceArrayIndex(ArrayIndex &Orig, R_SExpr E0, R_SExpr E1) {
return new (Arena) ArrayIndex(Orig, E0, E1);
}
R_SExpr reduceArrayAdd(ArrayAdd &Orig, R_SExpr E0, R_SExpr E1) {
return new (Arena) ArrayAdd(Orig, E0, E1);
}
R_SExpr reduceUnaryOp(UnaryOp &Orig, R_SExpr E0) {
return new (Arena) UnaryOp(Orig, E0);
}
R_SExpr reduceBinaryOp(BinaryOp &Orig, R_SExpr E0, R_SExpr E1) {
return new (Arena) BinaryOp(Orig, E0, E1);
}
R_SExpr reduceCast(Cast &Orig, R_SExpr E0) {
return new (Arena) Cast(Orig, E0);
}
R_SExpr reduceSCFG(SCFG &Orig, Container<BasicBlock *> &Bbs) {
return nullptr; // FIXME: implement CFG rewriting
}
R_BasicBlock reduceBasicBlock(BasicBlock &Orig, Container<Variable *> &As,
Container<Variable *> &Is, R_SExpr T) {
return nullptr; // FIXME: implement CFG rewriting
}
R_SExpr reducePhi(Phi &Orig, Container<R_SExpr> &As) {
return new (Arena) Phi(Orig, std::move(As.Elems));
}
R_SExpr reduceGoto(Goto &Orig, BasicBlock *B) {
return new (Arena) Goto(Orig, B, 0); // FIXME: set index
}
R_SExpr reduceBranch(Branch &O, R_SExpr C, BasicBlock *B0, BasicBlock *B1) {
return new (Arena) Branch(O, C, B0, B1, 0, 0); // FIXME: set indices
}
R_SExpr reduceIdentifier(Identifier &Orig) {
return new (Arena) Identifier(Orig);
}
R_SExpr reduceIfThenElse(IfThenElse &Orig, R_SExpr C, R_SExpr T, R_SExpr E) {
return new (Arena) IfThenElse(Orig, C, T, E);
}
R_SExpr reduceLet(Let &Orig, Variable *Nvd, R_SExpr B) {
return new (Arena) Let(Orig, Nvd, B);
}
// Create a new variable from orig, and push it onto the lexical scope.
Variable *enterScope(Variable &Orig, R_SExpr E0) {
return new (Arena) Variable(Orig, E0);
}
// Exit the lexical scope of orig.
void exitScope(const Variable &Orig) {}
void enterCFG(SCFG &Cfg) {}
void exitCFG(SCFG &Cfg) {}
void enterBasicBlock(BasicBlock &BB) {}
void exitBasicBlock(BasicBlock &BB) {}
// Map Variable references to their rewritten definitions.
Variable *reduceVariableRef(Variable *Ovd) { return Ovd; }
// Map BasicBlock references to their rewritten definitions.
BasicBlock *reduceBasicBlockRef(BasicBlock *Obb) { return Obb; }
};
class SExprCopier : public CopyReducer<SExprCopier> {
public:
typedef SExpr *R_SExpr;
SExprCopier(MemRegionRef A) : CopyReducer(A) { }
// Create a copy of e in region a.
static SExpr *copy(SExpr *E, MemRegionRef A) {
SExprCopier Copier(A);
return Copier.traverse(E, TRV_Normal);
}
};
// Base class for visit traversals.
class VisitReducerBase : public SimpleReducerBase {
public:
@ -368,8 +227,8 @@ public:
R_SExpr reduceSCFG(SCFG &Orig, Container<BasicBlock *> Bbs) {
return Bbs.Success;
}
R_BasicBlock reduceBasicBlock(BasicBlock &Orig, Container<Variable *> &As,
Container<Variable *> &Is, R_SExpr T) {
R_BasicBlock reduceBasicBlock(BasicBlock &Orig, Container<R_SExpr> &As,
Container<R_SExpr> &Is, R_SExpr T) {
return (As.Success && Is.Success && T);
}
R_SExpr reducePhi(Phi &Orig, Container<R_SExpr> &As) {
@ -381,6 +240,9 @@ public:
R_SExpr reduceBranch(Branch &O, R_SExpr C, BasicBlock *B0, BasicBlock *B1) {
return C;
}
R_SExpr reduceReturn(Return &O, R_SExpr E) {
return E;
}
R_SExpr reduceIdentifier(Identifier &Orig) {
return true;
@ -433,7 +295,7 @@ public:
#include "ThreadSafetyOps.def"
#undef TIL_OPCODE_DEF
}
llvm_unreachable("invalid enum");
return false;
}
};
@ -514,9 +376,9 @@ public:
inline std::ostream& operator<<(std::ostream& SS, llvm::StringRef R) {
return SS.write(R.data(), R.size());
}
// inline std::ostream& operator<<(std::ostream& SS, StringRef R) {
// return SS.write(R.data(), R.size());
// }
// Pretty printer for TIL expressions
template <typename Self, typename StreamType>
@ -587,6 +449,7 @@ protected:
case COP_Phi: return Prec_Atom;
case COP_Goto: return Prec_Atom;
case COP_Branch: return Prec_Atom;
case COP_Return: return Prec_Other;
case COP_Identifier: return Prec_Atom;
case COP_IfThenElse: return Prec_Other;
@ -595,22 +458,29 @@ protected:
return Prec_MAX;
}
void printBlockLabel(StreamType & SS, const BasicBlock *BB, unsigned index) {
void printBlockLabel(StreamType & SS, const BasicBlock *BB, int index) {
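// A negative index means the caller does not track which Phi argument this
// edge corresponds to (printBranch below passes -1), so only the block label
// is printed.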
if (!BB) {
SS << "BB_null";
return;
}
SS << "BB_";
SS << BB->blockID();
SS << ":";
SS << index;
if (index >= 0) {
SS << ":";
SS << index;
}
}
void printSExpr(const SExpr *E, StreamType &SS, unsigned P) {
void printSExpr(const SExpr *E, StreamType &SS, unsigned P, bool Sub=true) {
if (!E) {
self()->printNull(SS);
return;
}
if (Sub && E->block() && E->opcode() != COP_Variable) {
SS << "_x" << E->id();
return;
}
if (self()->precedence(E) > P) {
// Wrap expr in () if necessary.
SS << "(";
@ -740,20 +610,11 @@ protected:
SS << E->clangDecl()->getNameAsString();
}
void printVariable(const Variable *V, StreamType &SS, bool IsVarDecl = false) {
if (!IsVarDecl && Cleanup) {
const SExpr* E = getCanonicalVal(V);
if (E != V) {
printSExpr(E, SS, Prec_Atom);
return;
}
}
if (V->kind() == Variable::VK_LetBB)
SS << V->name() << V->getBlockID() << "_" << V->getID();
else if (CStyle && V->kind() == Variable::VK_SFun)
void printVariable(const Variable *V, StreamType &SS, bool IsVarDecl=false) {
if (CStyle && V->kind() == Variable::VK_SFun)
SS << "this";
else
SS << V->name() << V->getID();
SS << V->name() << V->id();
}
void printFunction(const Function *E, StreamType &SS, unsigned sugared = 0) {
@ -927,32 +788,38 @@ protected:
newline(SS);
}
void printBBInstr(const SExpr *E, StreamType &SS) {
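// Named variables print as "let <name><id> = ...", other non-store
// instructions as "let _x<id> = ...", and stores are printed bare; the
// "_x<id>" names match the references that printSExpr emits above for
// instructions used from other expressions.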
bool Sub = false;
if (E->opcode() == COP_Variable) {
auto *V = cast<Variable>(E);
SS << "let " << V->name() << V->id() << " = ";
E = V->definition();
Sub = true;
}
else if (E->opcode() != COP_Store) {
SS << "let _x" << E->id() << " = ";
}
self()->printSExpr(E, SS, Prec_MAX, Sub);
SS << ";";
newline(SS);
}
void printBasicBlock(const BasicBlock *E, StreamType &SS) {
SS << "BB_" << E->blockID() << ":";
if (E->parent())
SS << " BB_" << E->parent()->blockID();
newline(SS);
for (auto *A : E->arguments()) {
SS << "let ";
self()->printVariable(A, SS, true);
SS << " = ";
self()->printSExpr(A->definition(), SS, Prec_MAX);
SS << ";";
newline(SS);
}
for (auto *I : E->instructions()) {
if (I->definition()->opcode() != COP_Store) {
SS << "let ";
self()->printVariable(I, SS, true);
SS << " = ";
}
self()->printSExpr(I->definition(), SS, Prec_MAX);
SS << ";";
newline(SS);
}
for (auto *A : E->arguments())
printBBInstr(A, SS);
for (auto *I : E->instructions())
printBBInstr(I, SS);
const SExpr *T = E->terminator();
if (T) {
self()->printSExpr(T, SS, Prec_MAX);
self()->printSExpr(T, SS, Prec_MAX, false);
SS << ";";
newline(SS);
}
@ -983,9 +850,14 @@ protected:
SS << "branch (";
self()->printSExpr(E->condition(), SS, Prec_MAX);
SS << ") ";
printBlockLabel(SS, E->thenBlock(), E->thenIndex());
printBlockLabel(SS, E->thenBlock(), -1);
SS << " ";
printBlockLabel(SS, E->elseBlock(), E->elseIndex());
printBlockLabel(SS, E->elseBlock(), -1);
}
void printReturn(const Return *E, StreamType &SS) {
SS << "return ";
self()->printSExpr(E->returnValue(), SS, Prec_Other);
}
void printIdentifier(const Identifier *E, StreamType &SS) {


@ -142,20 +142,35 @@ public:
assert(i < Size && "Array index out of bounds.");
return Data[i];
}
T &back() {
assert(Size && "No elements in the array.");
return Data[Size - 1];
}
const T &back() const {
assert(Size && "No elements in the array.");
return Data[Size - 1];
}
iterator begin() { return Data; }
iterator end() { return Data + Size; }
const_iterator begin() const { return Data; }
const_iterator end() const { return Data + Size; }
const_iterator cbegin() const { return Data; }
const_iterator cend() const { return Data + Size; }
void push_back(const T &Elem) {
assert(Size < Capacity);
Data[Size++] = Elem;
}
// drop last n elements from array
void drop(unsigned n = 0) {
assert(Size > n);
Size -= n;
}
void setValues(unsigned Sz, const T& C) {
assert(Sz <= Capacity);
Size = Sz;
@ -173,6 +188,37 @@ public:
return J - Osz;
}
// An adaptor to reverse a simple array
class ReverseAdaptor {
public:
ReverseAdaptor(SimpleArray &Array) : Array(Array) {}
// A reverse iterator used by the reverse adaptor
class Iterator {
public:
Iterator(T *Data) : Data(Data) {}
T &operator*() { return *Data; }
const T &operator*() const { return *Data; }
Iterator &operator++() {
--Data;
return *this;
}
bool operator!=(Iterator Other) { return Data != Other.Data; }
private:
T *Data;
};
Iterator begin() { return Array.end() - 1; }
Iterator end() { return Array.begin() - 1; }
const Iterator begin() const { return Array.end() - 1; }
const Iterator end() const { return Array.begin() - 1; }
private:
SimpleArray &Array;
};
const ReverseAdaptor reverse() const { return ReverseAdaptor(*this); }
ReverseAdaptor reverse() { return ReverseAdaptor(*this); }
private:
// std::max is annoying here, because it requires a reference,
// thus forcing InitialCapacity to be initialized outside the .h file.
@ -187,6 +233,7 @@ private:
size_t Capacity;
};
} // end namespace til
@ -312,6 +359,12 @@ private:
};
inline std::ostream& operator<<(std::ostream& ss, const StringRef str) {
ss << str.data();
return ss;
}
} // end namespace threadSafety
} // end namespace clang
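The ReverseAdaptor added above is what lets later code walk a SimpleArray backwards with a range-for (e.g. Blocks.reverse() in SCFG::computeNormalForm). A minimal standalone sketch of the same idea over a plain array, with hypothetical names; like the original, it uses a one-before-begin sentinel for end():

#include <cstdio>

// Hypothetical simplified reverse view (the real adaptor wraps
// til::SimpleArray and its arena-allocated storage).
template <class T> class ReverseView {
  T *First, *Last; // the forward range [First, Last)
public:
  ReverseView(T *Begin, T *End) : First(Begin), Last(End) {}

  class Iterator {
    T *P;
  public:
    explicit Iterator(T *P) : P(P) {}
    T &operator*() const { return *P; }
    Iterator &operator++() { --P; return *this; } // ++ moves backwards
    bool operator!=(Iterator Other) const { return P != Other.P; }
  };

  Iterator begin() const { return Iterator(Last - 1); }  // last element
  Iterator end() const { return Iterator(First - 1); }   // one before first
};

int main() {
  int Data[] = {1, 2, 3, 4};
  for (int V : ReverseView<int>(Data, Data + 4))
    std::printf("%d ", V); // prints: 4 3 2 1
  std::printf("\n");
}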


@ -63,11 +63,9 @@ std::string getSourceLiteralString(const clang::Expr *CE) {
namespace til {
// Return true if E is a variable that points to an incomplete Phi node.
static bool isIncompleteVar(const SExpr *E) {
if (const auto *V = dyn_cast<Variable>(E)) {
if (const auto *Ph = dyn_cast<Phi>(V->definition()))
return Ph->status() == Phi::PH_Incomplete;
}
static bool isIncompletePhi(const SExpr *E) {
if (const auto *Ph = dyn_cast<Phi>(E))
return Ph->status() == Phi::PH_Incomplete;
return false;
}
@ -320,6 +318,8 @@ til::SExpr *SExprBuilder::translateCXXThisExpr(const CXXThisExpr *TE,
const ValueDecl *getValueDeclFromSExpr(const til::SExpr *E) {
if (auto *V = dyn_cast<til::Variable>(E))
return V->clangDecl();
if (auto *Ph = dyn_cast<til::Phi>(E))
return Ph->clangDecl();
if (auto *P = dyn_cast<til::Project>(E))
return P->clangDecl();
if (auto *L = dyn_cast<til::LiteralPtr>(E))
@ -641,14 +641,14 @@ SExprBuilder::translateDeclStmt(const DeclStmt *S, CallingContext *Ctx) {
// If E is trivial, or has already been added to a block, returns E unchanged.
til::SExpr *SExprBuilder::addStatement(til::SExpr* E, const Stmt *S,
const ValueDecl *VD) {
if (!E || !CurrentBB || til::ThreadSafetyTIL::isTrivial(E))
if (!E || !CurrentBB || E->block() || til::ThreadSafetyTIL::isTrivial(E))
return E;
til::Variable *V = new (Arena) til::Variable(E, VD);
CurrentInstructions.push_back(V);
if (VD)
E = new (Arena) til::Variable(E, VD);
CurrentInstructions.push_back(E);
if (S)
insertStmt(S, V);
return V;
insertStmt(S, E);
return E;
}
@ -705,11 +705,11 @@ void SExprBuilder::makePhiNodeVar(unsigned i, unsigned NPreds, til::SExpr *E) {
unsigned ArgIndex = CurrentBlockInfo->ProcessedPredecessors;
assert(ArgIndex > 0 && ArgIndex < NPreds);
til::Variable *V = dyn_cast<til::Variable>(CurrentLVarMap[i].second);
if (V && V->getBlockID() == CurrentBB->blockID()) {
til::SExpr *CurrE = CurrentLVarMap[i].second;
if (CurrE->block() == CurrentBB) {
// We already have a Phi node in the current block,
// so just add the new variable to the Phi node.
til::Phi *Ph = dyn_cast<til::Phi>(V->definition());
til::Phi *Ph = dyn_cast<til::Phi>(CurrE);
assert(Ph && "Expecting Phi node.");
if (E)
Ph->values()[ArgIndex] = E;
@ -718,27 +718,26 @@ void SExprBuilder::makePhiNodeVar(unsigned i, unsigned NPreds, til::SExpr *E) {
// Make a new phi node: phi(..., E)
// All phi args up to the current index are set to the current value.
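// (For example, if the variable held CurrE along the first ArgIndex
// predecessors and E along this edge, the result is phi(CurrE, ..., CurrE, E).)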
til::SExpr *CurrE = CurrentLVarMap[i].second;
til::Phi *Ph = new (Arena) til::Phi(Arena, NPreds);
Ph->values().setValues(NPreds, nullptr);
for (unsigned PIdx = 0; PIdx < ArgIndex; ++PIdx)
Ph->values()[PIdx] = CurrE;
if (E)
Ph->values()[ArgIndex] = E;
Ph->setClangDecl(CurrentLVarMap[i].first);
// If E is from a back-edge, or either E or CurrE are incomplete, then
// mark this node as incomplete; we may need to remove it later.
if (!E || isIncompleteVar(E) || isIncompleteVar(CurrE)) {
if (!E || isIncompletePhi(E) || isIncompletePhi(CurrE)) {
Ph->setStatus(til::Phi::PH_Incomplete);
}
// Add Phi node to current block, and update CurrentLVarMap[i]
auto *Var = new (Arena) til::Variable(Ph, CurrentLVarMap[i].first);
CurrentArguments.push_back(Var);
CurrentArguments.push_back(Ph);
if (Ph->status() == til::Phi::PH_Incomplete)
IncompleteArgs.push_back(Var);
IncompleteArgs.push_back(Ph);
CurrentLVarMap.makeWritable();
CurrentLVarMap.elem(i).second = Var;
CurrentLVarMap.elem(i).second = Ph;
}
@ -812,15 +811,13 @@ void SExprBuilder::mergePhiNodesBackEdge(const CFGBlock *Blk) {
unsigned ArgIndex = BBInfo[Blk->getBlockID()].ProcessedPredecessors;
assert(ArgIndex > 0 && ArgIndex < BB->numPredecessors());
for (til::Variable *V : BB->arguments()) {
til::Phi *Ph = dyn_cast_or_null<til::Phi>(V->definition());
for (til::SExpr *PE : BB->arguments()) {
til::Phi *Ph = dyn_cast_or_null<til::Phi>(PE);
assert(Ph && "Expecting Phi Node.");
assert(Ph->values()[ArgIndex] == nullptr && "Wrong index for back edge.");
assert(V->clangDecl() && "No local variable for Phi node.");
til::SExpr *E = lookupVarDecl(V->clangDecl());
til::SExpr *E = lookupVarDecl(Ph->clangDecl());
assert(E && "Couldn't find local variable for Phi node.");
Ph->values()[ArgIndex] = E;
}
}
@ -899,8 +896,8 @@ void SExprBuilder::enterCFGBlockBody(const CFGBlock *B) {
// Push those arguments onto the basic block.
CurrentBB->arguments().reserve(
static_cast<unsigned>(CurrentArguments.size()), Arena);
for (auto *V : CurrentArguments)
CurrentBB->addArgument(V);
for (auto *A : CurrentArguments)
CurrentBB->addArgument(A);
}
@ -934,7 +931,7 @@ void SExprBuilder::exitCFGBlockBody(const CFGBlock *B) {
til::BasicBlock *BB = *It ? lookupBlock(*It) : nullptr;
// TODO: set index
unsigned Idx = BB ? BB->findPredecessorIndex(CurrentBB) : 0;
til::SExpr *Tm = new (Arena) til::Goto(BB, Idx);
auto *Tm = new (Arena) til::Goto(BB, Idx);
CurrentBB->setTerminator(Tm);
}
else if (N == 2) {
@ -942,9 +939,8 @@ void SExprBuilder::exitCFGBlockBody(const CFGBlock *B) {
til::BasicBlock *BB1 = *It ? lookupBlock(*It) : nullptr;
++It;
til::BasicBlock *BB2 = *It ? lookupBlock(*It) : nullptr;
unsigned Idx1 = BB1 ? BB1->findPredecessorIndex(CurrentBB) : 0;
unsigned Idx2 = BB2 ? BB2->findPredecessorIndex(CurrentBB) : 0;
til::SExpr *Tm = new (Arena) til::Branch(C, BB1, BB2, Idx1, Idx2);
// FIXME: make sure these aren't critical edges.
auto *Tm = new (Arena) til::Branch(C, BB1, BB2);
CurrentBB->setTerminator(Tm);
}
}
@ -971,10 +967,9 @@ void SExprBuilder::exitCFGBlock(const CFGBlock *B) {
void SExprBuilder::exitCFG(const CFGBlock *Last) {
for (auto *V : IncompleteArgs) {
til::Phi *Ph = dyn_cast<til::Phi>(V->definition());
if (Ph && Ph->status() == til::Phi::PH_Incomplete)
simplifyIncompleteArg(V, Ph);
for (auto *Ph : IncompleteArgs) {
if (Ph->status() == til::Phi::PH_Incomplete)
simplifyIncompleteArg(Ph);
}
CurrentArguments.clear();


@ -48,12 +48,20 @@ StringRef getBinaryOpcodeString(TIL_BinaryOpcode Op) {
}
SExpr* Future::force() {
Status = FS_evaluating;
Result = compute();
Status = FS_done;
return Result;
}
unsigned BasicBlock::addPredecessor(BasicBlock *Pred) {
unsigned Idx = Predecessors.size();
Predecessors.reserveCheck(1, Arena);
Predecessors.push_back(Pred);
for (Variable *V : Args) {
if (Phi* Ph = dyn_cast<Phi>(V->definition())) {
for (SExpr *E : Args) {
if (Phi* Ph = dyn_cast<Phi>(E)) {
Ph->values().reserveCheck(1, Arena);
Ph->values().push_back(nullptr);
}
@ -61,105 +69,73 @@ unsigned BasicBlock::addPredecessor(BasicBlock *Pred) {
return Idx;
}
void BasicBlock::reservePredecessors(unsigned NumPreds) {
Predecessors.reserve(NumPreds, Arena);
for (Variable *V : Args) {
if (Phi* Ph = dyn_cast<Phi>(V->definition())) {
for (SExpr *E : Args) {
if (Phi* Ph = dyn_cast<Phi>(E)) {
Ph->values().reserve(NumPreds, Arena);
}
}
}
void BasicBlock::renumberVars() {
unsigned VID = 0;
for (Variable *V : Args) {
V->setID(BlockID, VID++);
}
for (Variable *V : Instrs) {
V->setID(BlockID, VID++);
}
}
void SCFG::renumberVars() {
for (BasicBlock *B : Blocks) {
B->renumberVars();
}
}
// If E is a variable, then trace back through any aliases or redundant
// Phi nodes to find the canonical definition.
const SExpr *getCanonicalVal(const SExpr *E) {
while (auto *V = dyn_cast<Variable>(E)) {
const SExpr *D;
do {
if (V->kind() != Variable::VK_Let)
return V;
D = V->definition();
auto *V2 = dyn_cast<Variable>(D);
if (V2)
V = V2;
else
break;
} while (true);
if (ThreadSafetyTIL::isTrivial(D))
return D;
if (const Phi *Ph = dyn_cast<Phi>(D)) {
while (true) {
if (auto *V = dyn_cast<Variable>(E)) {
if (V->kind() == Variable::VK_Let) {
E = V->definition();
continue;
}
}
if (const Phi *Ph = dyn_cast<Phi>(E)) {
if (Ph->status() == Phi::PH_SingleVal) {
E = Ph->values()[0];
continue;
}
}
return V;
break;
}
return E;
}
// If E is a variable, then trace back through any aliases or redundant
// Phi nodes to find the canonical definition.
// The non-const version will simplify incomplete Phi nodes.
SExpr *simplifyToCanonicalVal(SExpr *E) {
while (auto *V = dyn_cast<Variable>(E)) {
SExpr *D;
do {
while (true) {
if (auto *V = dyn_cast<Variable>(E)) {
if (V->kind() != Variable::VK_Let)
return V;
D = V->definition();
auto *V2 = dyn_cast<Variable>(D);
if (V2)
V = V2;
else
break;
} while (true);
if (ThreadSafetyTIL::isTrivial(D))
return D;
if (Phi *Ph = dyn_cast<Phi>(D)) {
// Eliminate redundant variables, e.g. x = y, or x = 5,
// but keep anything more complicated.
if (til::ThreadSafetyTIL::isTrivial(V->definition())) {
E = V->definition();
continue;
}
return V;
}
if (auto *Ph = dyn_cast<Phi>(E)) {
if (Ph->status() == Phi::PH_Incomplete)
simplifyIncompleteArg(V, Ph);
simplifyIncompleteArg(Ph);
// Eliminate redundant Phi nodes.
if (Ph->status() == Phi::PH_SingleVal) {
E = Ph->values()[0];
continue;
}
}
return V;
return E;
}
return E;
}
// Trace the arguments of an incomplete Phi node to see if they have the same
// canonical definition. If so, mark the Phi node as redundant.
// simplifyToCanonicalVal() will recursively call simplifyIncompleteArg().
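// For example, an incomplete phi(x, x, <this phi>) whose non-self arguments
// all canonicalize to the same value x is marked PH_SingleVal, and later
// passes then treat the whole node as an alias for x.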
void simplifyIncompleteArg(Variable *V, til::Phi *Ph) {
void simplifyIncompleteArg(til::Phi *Ph) {
assert(Ph && Ph->status() == Phi::PH_Incomplete);
// eliminate infinite recursion -- assume that this node is not redundant.
@ -168,18 +144,200 @@ void simplifyIncompleteArg(Variable *V, til::Phi *Ph) {
SExpr *E0 = simplifyToCanonicalVal(Ph->values()[0]);
for (unsigned i=1, n=Ph->values().size(); i<n; ++i) {
SExpr *Ei = simplifyToCanonicalVal(Ph->values()[i]);
if (Ei == V)
if (Ei == Ph)
continue; // Recursive reference to itself. Don't count.
if (Ei != E0) {
return; // Status is already set to MultiVal.
}
}
Ph->setStatus(Phi::PH_SingleVal);
// Eliminate Redundant Phi node.
V->setDefinition(Ph->values()[0]);
}
// Renumbers the arguments and instructions to have unique, sequential IDs.
int BasicBlock::renumberInstrs(int ID) {
for (auto *Arg : Args)
Arg->setID(this, ID++);
for (auto *Instr : Instrs)
Instr->setID(this, ID++);
TermInstr->setID(this, ID++);
return ID;
}
// Sorts the CFG's blocks using a reverse post-order depth-first traversal.
// Each block will be written into the Blocks array in order, and its BlockID
// will be set to the index in the array. Sorting should start from the entry
// block, and ID should be the total number of blocks.
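// Because the ID counts down from the total as each reachable block is
// visited, the value returned equals the number of blocks that were never
// reached; computeNormalForm() below uses it as the unreachable-block count.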
int BasicBlock::topologicalSort(SimpleArray<BasicBlock*>& Blocks, int ID) {
if (Visited) return ID;
Visited = 1;
for (auto *Block : successors())
ID = Block->topologicalSort(Blocks, ID);
// set ID and update block array in place.
// We may lose pointers to unreachable blocks.
assert(ID > 0);
BlockID = --ID;
Blocks[BlockID] = this;
return ID;
}
// Performs a reverse topological traversal, starting from the exit block and
// following edges backwards to predecessors. The dominator is serialized before any predecessors,
// which guarantees that all blocks are serialized after their dominator and
// before their post-dominator (because it's a reverse topological traversal).
// ID should be initially set to 0.
//
// This sort assumes that (1) dominators have been computed, (2) there are no
// critical edges, and (3) the entry block is reachable from the exit block
// and no blocks are accessible via traversal of back-edges from the exit that
// weren't accessible via forward edges from the entry.
int BasicBlock::topologicalFinalSort(SimpleArray<BasicBlock*>& Blocks, int ID) {
// Visited is assumed to have been set by topologicalSort(). In this pass,
// !Visited means the node has already been processed here (or was unreachable).
if (!Visited) return ID;
Visited = 0;
if (DominatorNode.Parent)
ID = DominatorNode.Parent->topologicalFinalSort(Blocks, ID);
for (auto *Pred : Predecessors)
ID = Pred->topologicalFinalSort(Blocks, ID);
assert(ID < Blocks.size());
BlockID = ID++;
Blocks[BlockID] = this;
return ID;
}
// Computes the immediate dominator of the current block. Assumes that all of
// its predecessors have already computed their dominators. This is achieved
// by visiting the nodes in topological order.
void BasicBlock::computeDominator() {
BasicBlock *Candidate = nullptr;
// Walk backwards from each predecessor to find the common dominator node.
for (auto *Pred : Predecessors) {
// Skip back-edges
if (Pred->BlockID >= BlockID) continue;
// If we don't have a candidate for the dominator yet, take this one.
if (Candidate == nullptr) {
Candidate = Pred;
continue;
}
// Walk the alternate and current candidate back to find a common ancestor.
auto *Alternate = Pred;
while (Alternate != Candidate) {
if (Candidate->BlockID > Alternate->BlockID)
Candidate = Candidate->DominatorNode.Parent;
else
Alternate = Alternate->DominatorNode.Parent;
}
}
DominatorNode.Parent = Candidate;
DominatorNode.SizeOfSubTree = 1;
}
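The loop above is essentially the classic two-candidate intersection used by simple RPO-based dominator algorithms (walk both candidates up the dominator tree, always advancing the one with the larger block ID, until they meet); computePostDominator below is its mirror image over successors. A small self-contained sketch with a hypothetical Block type, run on a diamond CFG:

#include <cassert>
#include <cstdio>
#include <vector>

// Hypothetical Block type (not the TIL BasicBlock). Id is a reverse
// post-order index, so a predecessor with a larger Id is a back-edge.
struct Block {
  int Id;
  std::vector<Block*> Preds;
  Block *IDom;
  explicit Block(int Id) : Id(Id), IDom(nullptr) {}
};

// Same shape as computeDominator(): intersect all forward predecessors by
// repeatedly walking the higher-numbered candidate up its dominator chain.
static void computeIDom(Block *B) {
  Block *Candidate = nullptr;
  for (Block *Pred : B->Preds) {
    if (Pred->Id >= B->Id)
      continue; // skip back-edges
    if (!Candidate) {
      Candidate = Pred;
      continue;
    }
    Block *Alternate = Pred;
    while (Alternate != Candidate) {
      if (Candidate->Id > Alternate->Id)
        Candidate = Candidate->IDom;
      else
        Alternate = Alternate->IDom;
    }
  }
  B->IDom = Candidate;
}

int main() {
  // Diamond CFG: entry -> {left, right} -> join.
  Block Entry(0), Left(1), Right(2), Join(3);
  Left.Preds  = {&Entry};
  Right.Preds = {&Entry};
  Join.Preds  = {&Left, &Right};

  Block *Topo[] = {&Entry, &Left, &Right, &Join}; // topological order
  for (Block *B : Topo)
    computeIDom(B);

  assert(Join.IDom == &Entry); // neither arm dominates the join; the entry does
  std::printf("idom(join) = block %d\n", Join.IDom->Id);
}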
// Computes the immediate post-dominator of the current block. Assumes that all
// of its successors have already computed their post-dominators. This is
// achieved by visiting the nodes in reverse topological order.
void BasicBlock::computePostDominator() {
BasicBlock *Candidate = nullptr;
// Walk forwards from each successor to find the common post-dominator node.
for (auto *Succ : successors()) {
// Skip back-edges
if (Succ->BlockID <= BlockID) continue;
// If we don't have a candidate for the post-dominator yet, take this one.
if (Candidate == nullptr) {
Candidate = Succ;
continue;
}
// Walk the alternate and current candidate back to find a common ancestor.
auto *Alternate = Succ;
while (Alternate != Candidate) {
if (Candidate->BlockID < Alternate->BlockID)
Candidate = Candidate->PostDominatorNode.Parent;
else
Alternate = Alternate->PostDominatorNode.Parent;
}
}
PostDominatorNode.Parent = Candidate;
PostDominatorNode.SizeOfSubTree = 1;
}
// Renumber instructions in all blocks
void SCFG::renumberInstrs() {
int InstrID = 0;
for (auto *Block : Blocks)
InstrID = Block->renumberInstrs(InstrID);
}
static inline void computeNodeSize(BasicBlock *B,
BasicBlock::TopologyNode BasicBlock::*TN) {
BasicBlock::TopologyNode *N = &(B->*TN);
if (N->Parent) {
BasicBlock::TopologyNode *P = &(N->Parent->*TN);
// Initially set ID relative to the (as yet uncomputed) parent ID
N->NodeID = P->SizeOfSubTree;
P->SizeOfSubTree += N->SizeOfSubTree;
}
}
static inline void computeNodeID(BasicBlock *B,
BasicBlock::TopologyNode BasicBlock::*TN) {
BasicBlock::TopologyNode *N = &(B->*TN);
if (N->Parent) {
BasicBlock::TopologyNode *P = &(N->Parent->*TN);
N->NodeID += P->NodeID; // Fix NodeIDs relative to starting node.
}
}
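// With NodeID and SizeOfSubTree finalized, each node's dominator subtree
// occupies the contiguous ID range [NodeID, NodeID + SizeOfSubTree), which
// presumably is what allows "A dominates B" queries to be answered with two
// integer comparisons (and likewise for the post-dominator tree).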
// Normalizes a CFG. Normalization has a few major components:
// 1) Removing unreachable blocks.
// 2) Computing dominators and post-dominators.
// 3) Topologically sorting the blocks into the "Blocks" array.
void SCFG::computeNormalForm() {
// Topologically sort the blocks starting from the entry block.
int NumUnreachableBlocks = Entry->topologicalSort(Blocks, Blocks.size());
if (NumUnreachableBlocks > 0) {
// If there were unreachable blocks, shift everything down and delete them.
for (size_t I = NumUnreachableBlocks, E = Blocks.size(); I < E; ++I) {
size_t NI = I - NumUnreachableBlocks;
Blocks[NI] = Blocks[I];
Blocks[NI]->BlockID = NI;
// FIXME: clean up predecessor pointers to unreachable blocks?
}
Blocks.drop(NumUnreachableBlocks);
}
// Compute dominators.
for (auto *Block : Blocks)
Block->computeDominator();
// Once dominators have been computed, the final sort may be performed.
int NumBlocks = Exit->topologicalFinalSort(Blocks, 0);
assert(NumBlocks == Blocks.size());
(void) NumBlocks;
// Renumber the instructions now that we have a final sort.
renumberInstrs();
// Compute post-dominators and compute the sizes of each node in the
// dominator tree.
for (auto *Block : Blocks.reverse()) {
Block->computePostDominator();
computeNodeSize(Block, &BasicBlock::DominatorNode);
}
// Compute the sizes of each node in the post-dominator tree and assign IDs in
// the dominator tree.
for (auto *Block : Blocks) {
computeNodeID(Block, &BasicBlock::DominatorNode);
computeNodeSize(Block, &BasicBlock::PostDominatorNode);
}
// Assign IDs in the post-dominator tree.
for (auto *Block : Blocks.reverse()) {
computeNodeID(Block, &BasicBlock::PostDominatorNode);
}
}
} // end namespace til
} // end namespace threadSafety
} // end namespace clang