//===- LazyCallGraphTest.cpp - Unit tests for the lazy CG analysis --------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/LazyCallGraph.h"
#include "llvm/AsmParser/Parser.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/SourceMgr.h"
#include "gtest/gtest.h"
#include <memory>

using namespace llvm;

namespace {

std::unique_ptr<Module> parseAssembly(const char *Assembly) {
  SMDiagnostic Error;
  std::unique_ptr<Module> M =
      parseAssemblyString(Assembly, Error, getGlobalContext());

  std::string ErrMsg;
  raw_string_ostream OS(ErrMsg);
  Error.print("", OS);

  // A failure here means that the test itself is buggy.
  if (!M)
    report_fatal_error(OS.str().c_str());

  return M;
}

/*
   IR forming a call graph with a diamond of triangle-shaped SCCs:

           d1
          /  \
         d3--d2
        /      \
       b1      c1
      /  \    /  \
     b3--b2  c3--c2
          \  /
           a1
          /  \
         a3--a2

   All call edges go up between SCCs, and clockwise around the SCC.
 */
static const char DiamondOfTriangles[] =
    "define void @a1() {\n"
    "entry:\n"
    "  call void @a2()\n"
    "  call void @b2()\n"
    "  call void @c3()\n"
    "  ret void\n"
    "}\n"
    "define void @a2() {\n"
    "entry:\n"
    "  call void @a3()\n"
    "  ret void\n"
    "}\n"
    "define void @a3() {\n"
    "entry:\n"
    "  call void @a1()\n"
    "  ret void\n"
    "}\n"
    "define void @b1() {\n"
    "entry:\n"
    "  call void @b2()\n"
    "  call void @d3()\n"
    "  ret void\n"
    "}\n"
    "define void @b2() {\n"
    "entry:\n"
    "  call void @b3()\n"
    "  ret void\n"
    "}\n"
    "define void @b3() {\n"
    "entry:\n"
    "  call void @b1()\n"
    "  ret void\n"
    "}\n"
    "define void @c1() {\n"
    "entry:\n"
    "  call void @c2()\n"
    "  call void @d2()\n"
    "  ret void\n"
    "}\n"
    "define void @c2() {\n"
    "entry:\n"
    "  call void @c3()\n"
    "  ret void\n"
    "}\n"
    "define void @c3() {\n"
    "entry:\n"
    "  call void @c1()\n"
    "  ret void\n"
    "}\n"
    "define void @d1() {\n"
    "entry:\n"
    "  call void @d2()\n"
    "  ret void\n"
    "}\n"
    "define void @d2() {\n"
    "entry:\n"
    "  call void @d3()\n"
    "  ret void\n"
    "}\n"
    "define void @d3() {\n"
    "entry:\n"
    "  call void @d1()\n"
    "  ret void\n"
    "}\n";

TEST(LazyCallGraphTest, BasicGraphFormation) {
  std::unique_ptr<Module> M = parseAssembly(DiamondOfTriangles);
  LazyCallGraph CG(*M);

  // The order of the entry nodes should be stable w.r.t. the source order of
  // the IR, and everything in our module is an entry node, so just directly
  // build variables for each node.
  auto I = CG.begin();
  LazyCallGraph::Node &A1 = (I++)->getNode(CG);
  EXPECT_EQ("a1", A1.getFunction().getName());
  LazyCallGraph::Node &A2 = (I++)->getNode(CG);
  EXPECT_EQ("a2", A2.getFunction().getName());
  LazyCallGraph::Node &A3 = (I++)->getNode(CG);
  EXPECT_EQ("a3", A3.getFunction().getName());
  LazyCallGraph::Node &B1 = (I++)->getNode(CG);
  EXPECT_EQ("b1", B1.getFunction().getName());
  LazyCallGraph::Node &B2 = (I++)->getNode(CG);
  EXPECT_EQ("b2", B2.getFunction().getName());
  LazyCallGraph::Node &B3 = (I++)->getNode(CG);
  EXPECT_EQ("b3", B3.getFunction().getName());
  LazyCallGraph::Node &C1 = (I++)->getNode(CG);
  EXPECT_EQ("c1", C1.getFunction().getName());
  LazyCallGraph::Node &C2 = (I++)->getNode(CG);
  EXPECT_EQ("c2", C2.getFunction().getName());
  LazyCallGraph::Node &C3 = (I++)->getNode(CG);
  EXPECT_EQ("c3", C3.getFunction().getName());
  LazyCallGraph::Node &D1 = (I++)->getNode(CG);
  EXPECT_EQ("d1", D1.getFunction().getName());
  LazyCallGraph::Node &D2 = (I++)->getNode(CG);
  EXPECT_EQ("d2", D2.getFunction().getName());
  LazyCallGraph::Node &D3 = (I++)->getNode(CG);
  EXPECT_EQ("d3", D3.getFunction().getName());
  EXPECT_EQ(CG.end(), I);

  // Build vectors and sort them for the rest of the assertions to make them
  // independent of order.
  std::vector<std::string> Nodes;

  for (LazyCallGraph::Edge &E : A1)
    Nodes.push_back(E.getFunction().getName());
  std::sort(Nodes.begin(), Nodes.end());
  EXPECT_EQ("a2", Nodes[0]);
  EXPECT_EQ("b2", Nodes[1]);
  EXPECT_EQ("c3", Nodes[2]);
  Nodes.clear();

  EXPECT_EQ(A2.end(), std::next(A2.begin()));
  EXPECT_EQ("a3", A2.begin()->getFunction().getName());
  EXPECT_EQ(A3.end(), std::next(A3.begin()));
  EXPECT_EQ("a1", A3.begin()->getFunction().getName());

  for (LazyCallGraph::Edge &E : B1)
    Nodes.push_back(E.getFunction().getName());
  std::sort(Nodes.begin(), Nodes.end());
  EXPECT_EQ("b2", Nodes[0]);
  EXPECT_EQ("d3", Nodes[1]);
  Nodes.clear();

  EXPECT_EQ(B2.end(), std::next(B2.begin()));
  EXPECT_EQ("b3", B2.begin()->getFunction().getName());
  EXPECT_EQ(B3.end(), std::next(B3.begin()));
  EXPECT_EQ("b1", B3.begin()->getFunction().getName());

  for (LazyCallGraph::Edge &E : C1)
    Nodes.push_back(E.getFunction().getName());
  std::sort(Nodes.begin(), Nodes.end());
  EXPECT_EQ("c2", Nodes[0]);
  EXPECT_EQ("d2", Nodes[1]);
  Nodes.clear();

  EXPECT_EQ(C2.end(), std::next(C2.begin()));
  EXPECT_EQ("c3", C2.begin()->getFunction().getName());
  EXPECT_EQ(C3.end(), std::next(C3.begin()));
  EXPECT_EQ("c1", C3.begin()->getFunction().getName());

  EXPECT_EQ(D1.end(), std::next(D1.begin()));
  EXPECT_EQ("d2", D1.begin()->getFunction().getName());
  EXPECT_EQ(D2.end(), std::next(D2.begin()));
  EXPECT_EQ("d3", D2.begin()->getFunction().getName());
  EXPECT_EQ(D3.end(), std::next(D3.begin()));
  EXPECT_EQ("d1", D3.begin()->getFunction().getName());

  // Now let's look at the RefSCCs and SCCs.
  auto J = CG.postorder_ref_scc_begin();
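  // With all call edges pointing "up" the diamond, the post-order should
  // surface the leaf-most 'd' triangle first and the entry 'a' triangle last.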
  LazyCallGraph::RefSCC &D = *J++;
  ASSERT_EQ(1, D.size());
  for (LazyCallGraph::Node &N : *D.begin())
    Nodes.push_back(N.getFunction().getName());
  std::sort(Nodes.begin(), Nodes.end());
  EXPECT_EQ(3u, Nodes.size());
  EXPECT_EQ("d1", Nodes[0]);
  EXPECT_EQ("d2", Nodes[1]);
  EXPECT_EQ("d3", Nodes[2]);
  Nodes.clear();
  EXPECT_FALSE(D.isParentOf(D));
  EXPECT_FALSE(D.isChildOf(D));
  EXPECT_FALSE(D.isAncestorOf(D));
  EXPECT_FALSE(D.isDescendantOf(D));

  LazyCallGraph::RefSCC &C = *J++;
  ASSERT_EQ(1, C.size());
  for (LazyCallGraph::Node &N : *C.begin())
    Nodes.push_back(N.getFunction().getName());
  std::sort(Nodes.begin(), Nodes.end());
  EXPECT_EQ(3u, Nodes.size());
  EXPECT_EQ("c1", Nodes[0]);
  EXPECT_EQ("c2", Nodes[1]);
  EXPECT_EQ("c3", Nodes[2]);
  Nodes.clear();
  EXPECT_TRUE(C.isParentOf(D));
  EXPECT_FALSE(C.isChildOf(D));
  EXPECT_TRUE(C.isAncestorOf(D));
  EXPECT_FALSE(C.isDescendantOf(D));

  LazyCallGraph::RefSCC &B = *J++;
  ASSERT_EQ(1, B.size());
  for (LazyCallGraph::Node &N : *B.begin())
    Nodes.push_back(N.getFunction().getName());
  std::sort(Nodes.begin(), Nodes.end());
  EXPECT_EQ(3u, Nodes.size());
  EXPECT_EQ("b1", Nodes[0]);
  EXPECT_EQ("b2", Nodes[1]);
  EXPECT_EQ("b3", Nodes[2]);
  Nodes.clear();
  EXPECT_TRUE(B.isParentOf(D));
  EXPECT_FALSE(B.isChildOf(D));
  EXPECT_TRUE(B.isAncestorOf(D));
  EXPECT_FALSE(B.isDescendantOf(D));
  EXPECT_FALSE(B.isAncestorOf(C));
  EXPECT_FALSE(C.isAncestorOf(B));

  LazyCallGraph::RefSCC &A = *J++;
  ASSERT_EQ(1, A.size());
  for (LazyCallGraph::Node &N : *A.begin())
    Nodes.push_back(N.getFunction().getName());
  std::sort(Nodes.begin(), Nodes.end());
  EXPECT_EQ(3u, Nodes.size());
  EXPECT_EQ("a1", Nodes[0]);
  EXPECT_EQ("a2", Nodes[1]);
  EXPECT_EQ("a3", Nodes[2]);
  Nodes.clear();
  EXPECT_TRUE(A.isParentOf(B));
  EXPECT_TRUE(A.isParentOf(C));
  EXPECT_FALSE(A.isParentOf(D));
  EXPECT_TRUE(A.isAncestorOf(B));
  EXPECT_TRUE(A.isAncestorOf(C));
  EXPECT_TRUE(A.isAncestorOf(D));

  EXPECT_EQ(CG.postorder_ref_scc_end(), J);
}

static Function &lookupFunction(Module &M, StringRef Name) {
  for (Function &F : M)
    if (F.getName() == Name)
      return F;
  report_fatal_error("Couldn't find function!");
}

TEST(LazyCallGraphTest, BasicGraphMutation) {
  std::unique_ptr<Module> M = parseAssembly(
      "define void @a() {\n"
      "entry:\n"
      "  call void @b()\n"
      "  call void @c()\n"
      "  ret void\n"
      "}\n"
      "define void @b() {\n"
      "entry:\n"
      "  ret void\n"
      "}\n"
      "define void @c() {\n"
      "entry:\n"
      "  ret void\n"
      "}\n");
  LazyCallGraph CG(*M);

  LazyCallGraph::Node &A = CG.get(lookupFunction(*M, "a"));
  LazyCallGraph::Node &B = CG.get(lookupFunction(*M, "b"));
  EXPECT_EQ(2, std::distance(A.begin(), A.end()));
  EXPECT_EQ(0, std::distance(B.begin(), B.end()));
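
  // Grow the graph: add a call edge from b to c and check that it becomes
  // visible when walking b's edge list.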
  CG.insertEdge(B, lookupFunction(*M, "c"), LazyCallGraph::Edge::Call);
  EXPECT_EQ(1, std::distance(B.begin(), B.end()));
  LazyCallGraph::Node &C = B.begin()->getNode(CG);
  EXPECT_EQ(0, std::distance(C.begin(), C.end()));

  CG.insertEdge(C, B.getFunction(), LazyCallGraph::Edge::Call);
  EXPECT_EQ(1, std::distance(C.begin(), C.end()));
  EXPECT_EQ(&B, C.begin()->getNode());

  CG.insertEdge(C, C.getFunction(), LazyCallGraph::Edge::Call);
  EXPECT_EQ(2, std::distance(C.begin(), C.end()));
  EXPECT_EQ(&B, C.begin()->getNode());
  EXPECT_EQ(&C, std::next(C.begin())->getNode());
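
  // Now remove the edges again, one at a time, and verify that the remaining
  // edge lists shrink accordingly.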
  CG.removeEdge(C, B.getFunction());
  EXPECT_EQ(1, std::distance(C.begin(), C.end()));
  EXPECT_EQ(&C, C.begin()->getNode());

  CG.removeEdge(C, C.getFunction());
  EXPECT_EQ(0, std::distance(C.begin(), C.end()));

  CG.removeEdge(B, C.getFunction());
  EXPECT_EQ(0, std::distance(B.begin(), B.end()));
}

TEST(LazyCallGraphTest, InnerSCCFormation) {
  std::unique_ptr<Module> M = parseAssembly(DiamondOfTriangles);
  LazyCallGraph CG(*M);

  // Now mutate the graph to connect every node into a single RefSCC to ensure
  // that our inner SCC formation handles the rest.
  CG.insertEdge(lookupFunction(*M, "d1"), lookupFunction(*M, "a1"),
                LazyCallGraph::Edge::Ref);
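  // The new d1 -> a1 reference closes a cycle through all four triangles, so
  // the reference graph collapses into one RefSCC while the call-edge SCCs
  // remain the four separate triangles.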

  // Build vectors and sort them for the rest of the assertions to make them
  // independent of order.
  std::vector<std::string> Nodes;

  // We should build a single RefSCC for the entire graph.
  auto I = CG.postorder_ref_scc_begin();
  LazyCallGraph::RefSCC &RC = *I++;
  EXPECT_EQ(CG.postorder_ref_scc_end(), I);

  // Now walk the four SCCs which should be in post-order.
  auto J = RC.begin();
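  // The SCC post-order should place callees before callers, so the 'd'
  // triangle comes first and the 'a' triangle last; the relative order of the
  // sibling 'b' and 'c' triangles is just whatever order the DFS visited them.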
  LazyCallGraph::SCC &D = *J++;
  for (LazyCallGraph::Node &N : D)
    Nodes.push_back(N.getFunction().getName());
  std::sort(Nodes.begin(), Nodes.end());
  EXPECT_EQ(3u, Nodes.size());
  EXPECT_EQ("d1", Nodes[0]);
  EXPECT_EQ("d2", Nodes[1]);
  EXPECT_EQ("d3", Nodes[2]);
  Nodes.clear();

  LazyCallGraph::SCC &B = *J++;
  for (LazyCallGraph::Node &N : B)
    Nodes.push_back(N.getFunction().getName());
  std::sort(Nodes.begin(), Nodes.end());
  EXPECT_EQ(3u, Nodes.size());
  EXPECT_EQ("b1", Nodes[0]);
  EXPECT_EQ("b2", Nodes[1]);
  EXPECT_EQ("b3", Nodes[2]);
  Nodes.clear();

  LazyCallGraph::SCC &C = *J++;
  for (LazyCallGraph::Node &N : C)
    Nodes.push_back(N.getFunction().getName());
  std::sort(Nodes.begin(), Nodes.end());
  EXPECT_EQ(3u, Nodes.size());
  EXPECT_EQ("c1", Nodes[0]);
  EXPECT_EQ("c2", Nodes[1]);
  EXPECT_EQ("c3", Nodes[2]);
  Nodes.clear();

  LazyCallGraph::SCC &A = *J++;
  for (LazyCallGraph::Node &N : A)
    Nodes.push_back(N.getFunction().getName());
  std::sort(Nodes.begin(), Nodes.end());
  EXPECT_EQ(3u, Nodes.size());
  EXPECT_EQ("a1", Nodes[0]);
  EXPECT_EQ("a2", Nodes[1]);
  EXPECT_EQ("a3", Nodes[2]);
  Nodes.clear();

  EXPECT_EQ(RC.end(), J);
}

TEST(LazyCallGraphTest, MultiArmSCC) {
  // Two interlocking cycles. The really useful thing about this SCC is that it
  // will require Tarjan's DFS to backtrack and finish processing all of the
  // children of each node in the SCC. Since this involves call edges, both
  // Tarjan implementations will have to successfully navigate the structure.
  std::unique_ptr<Module> M = parseAssembly(
      "define void @f1() {\n"
      "entry:\n"
      "  call void @f2()\n"
      "  call void @f4()\n"
      "  ret void\n"
      "}\n"
      "define void @f2() {\n"
      "entry:\n"
      "  call void @f3()\n"
      "  ret void\n"
      "}\n"
|
[LCG] Construct an actual call graph with call-edge SCCs nested inside
reference-edge SCCs.
This essentially builds a more normal call graph as a subgraph of the
"reference graph" that was the old model. This allows both to exist and
the different use cases to use the aspect which addresses their needs.
Specifically, the pass manager and other *ordering* constrained logic
can use the reference graph to achieve conservative order of visit,
while analyses reasoning about attributes and other properties derived
from reachability can reason about the direct call graph.
Note that this isn't necessarily complete: it doesn't model edges to
declarations or indirect calls. Those can be found by scanning the
instructions of the function if desirable, and in fact every user
currently does this in order to handle things like calls to instrinsics.
If useful, we could consider caching this information in the call graph
to save the instruction scans, but currently that doesn't seem to be
important.
An important realization for why the representation chosen here works is
that the call graph is a formal subset of the reference graph and thus
both can live within the same data structure. All SCCs of the call graph
are necessarily contained within an SCC of the reference graph, etc.
The design is to build 'RefSCC's to model SCCs of the reference graph,
and then within them more literal SCCs for the call graph.
The formation of actual call edge SCCs is not done lazily, unlike
reference edge 'RefSCC's. Instead, once a reference SCC is formed, it
directly builds the call SCCs within it and stores them in a post-order
sequence. This is used to provide a consistent platform for mutation and
update of the graph. The post-order also allows for very efficient
updates in common cases by bounding the number of nodes (and thus edges)
considered.
There is considerable common code that I'm still looking for the best
way to factor out between the various DFS implementations here. So far,
my attempts have made the code harder to read and understand despite
reducing the duplication, which seems a poor tradeoff. I've not given up
on figuring out the right way to do this, but I wanted to wait until
I at least had the system working and tested to continue attempting to
factor it differently.
This also requires introducing several new algorithms in order to handle
all of the incremental update scenarios for the more complex structure
involving two edge colorings. I've tried to comment the algorithms
sufficiently to make it clear how this is expected to work, but they may
still need more extensive documentation.
I know that there are some changes which are not strictly necessarily
coupled here. The process of developing this started out with a very
focused set of changes for the new structure of the graph and
algorithms, but subsequent changes to bring the APIs and code into
consistent and understandable patterns also ended up touching on other
aspects. There was no good way to separate these out without causing
*massive* merge conflicts. Ultimately, to a large degree this is
a rewrite of most of the core algorithms in the LCG class and so I don't
think it really matters much.
Many thanks to the careful review by Sanjoy Das!
Differential Revision: http://reviews.llvm.org/D16802
llvm-svn: 261040
2016-02-17 08:18:16 +08:00
|
|
|
"define void @f3() {\n"
|
2014-04-23 18:31:17 +08:00
|
|
|
"entry:\n"
|
[LCG] Construct an actual call graph with call-edge SCCs nested inside
reference-edge SCCs.
This essentially builds a more normal call graph as a subgraph of the
"reference graph" that was the old model. This allows both to exist and
the different use cases to use the aspect which addresses their needs.
Specifically, the pass manager and other *ordering* constrained logic
can use the reference graph to achieve conservative order of visit,
while analyses reasoning about attributes and other properties derived
from reachability can reason about the direct call graph.
Note that this isn't necessarily complete: it doesn't model edges to
declarations or indirect calls. Those can be found by scanning the
instructions of the function if desirable, and in fact every user
currently does this in order to handle things like calls to instrinsics.
If useful, we could consider caching this information in the call graph
to save the instruction scans, but currently that doesn't seem to be
important.
An important realization for why the representation chosen here works is
that the call graph is a formal subset of the reference graph and thus
both can live within the same data structure. All SCCs of the call graph
are necessarily contained within an SCC of the reference graph, etc.
The design is to build 'RefSCC's to model SCCs of the reference graph,
and then within them more literal SCCs for the call graph.
The formation of actual call edge SCCs is not done lazily, unlike
reference edge 'RefSCC's. Instead, once a reference SCC is formed, it
directly builds the call SCCs within it and stores them in a post-order
sequence. This is used to provide a consistent platform for mutation and
update of the graph. The post-order also allows for very efficient
updates in common cases by bounding the number of nodes (and thus edges)
considered.
There is considerable common code that I'm still looking for the best
way to factor out between the various DFS implementations here. So far,
my attempts have made the code harder to read and understand despite
reducing the duplication, which seems a poor tradeoff. I've not given up
on figuring out the right way to do this, but I wanted to wait until
I at least had the system working and tested to continue attempting to
factor it differently.
This also requires introducing several new algorithms in order to handle
all of the incremental update scenarios for the more complex structure
involving two edge colorings. I've tried to comment the algorithms
sufficiently to make it clear how this is expected to work, but they may
still need more extensive documentation.
I know that there are some changes which are not strictly necessarily
coupled here. The process of developing this started out with a very
focused set of changes for the new structure of the graph and
algorithms, but subsequent changes to bring the APIs and code into
consistent and understandable patterns also ended up touching on other
aspects. There was no good way to separate these out without causing
*massive* merge conflicts. Ultimately, to a large degree this is
a rewrite of most of the core algorithms in the LCG class and so I don't
think it really matters much.
Many thanks to the careful review by Sanjoy Das!
Differential Revision: http://reviews.llvm.org/D16802
llvm-svn: 261040
2016-02-17 08:18:16 +08:00
|
|
|
" call void @f1()\n"
|
2014-04-23 18:31:17 +08:00
|
|
|
" ret void\n"
|
|
|
|
"}\n"
|
[LCG] Construct an actual call graph with call-edge SCCs nested inside
reference-edge SCCs.
This essentially builds a more normal call graph as a subgraph of the
"reference graph" that was the old model. This allows both to exist and
the different use cases to use the aspect which addresses their needs.
Specifically, the pass manager and other *ordering* constrained logic
can use the reference graph to achieve conservative order of visit,
while analyses reasoning about attributes and other properties derived
from reachability can reason about the direct call graph.
Note that this isn't necessarily complete: it doesn't model edges to
declarations or indirect calls. Those can be found by scanning the
instructions of the function if desirable, and in fact every user
currently does this in order to handle things like calls to instrinsics.
If useful, we could consider caching this information in the call graph
to save the instruction scans, but currently that doesn't seem to be
important.
An important realization for why the representation chosen here works is
that the call graph is a formal subset of the reference graph and thus
both can live within the same data structure. All SCCs of the call graph
are necessarily contained within an SCC of the reference graph, etc.
The design is to build 'RefSCC's to model SCCs of the reference graph,
and then within them more literal SCCs for the call graph.
The formation of actual call edge SCCs is not done lazily, unlike
reference edge 'RefSCC's. Instead, once a reference SCC is formed, it
directly builds the call SCCs within it and stores them in a post-order
sequence. This is used to provide a consistent platform for mutation and
update of the graph. The post-order also allows for very efficient
updates in common cases by bounding the number of nodes (and thus edges)
considered.
There is considerable common code that I'm still looking for the best
way to factor out between the various DFS implementations here. So far,
my attempts have made the code harder to read and understand despite
reducing the duplication, which seems a poor tradeoff. I've not given up
on figuring out the right way to do this, but I wanted to wait until
I at least had the system working and tested to continue attempting to
factor it differently.
This also requires introducing several new algorithms in order to handle
all of the incremental update scenarios for the more complex structure
involving two edge colorings. I've tried to comment the algorithms
sufficiently to make it clear how this is expected to work, but they may
still need more extensive documentation.
I know that there are some changes which are not strictly necessarily
coupled here. The process of developing this started out with a very
focused set of changes for the new structure of the graph and
algorithms, but subsequent changes to bring the APIs and code into
consistent and understandable patterns also ended up touching on other
aspects. There was no good way to separate these out without causing
*massive* merge conflicts. Ultimately, to a large degree this is
a rewrite of most of the core algorithms in the LCG class and so I don't
think it really matters much.
Many thanks to the careful review by Sanjoy Das!
Differential Revision: http://reviews.llvm.org/D16802
llvm-svn: 261040
2016-02-17 08:18:16 +08:00
|
|
|
"define void @f4() {\n"
|
2014-04-23 18:31:17 +08:00
|
|
|
"entry:\n"
|
[LCG] Construct an actual call graph with call-edge SCCs nested inside
reference-edge SCCs.
This essentially builds a more normal call graph as a subgraph of the
"reference graph" that was the old model. This allows both to exist and
the different use cases to use the aspect which addresses their needs.
Specifically, the pass manager and other *ordering* constrained logic
can use the reference graph to achieve conservative order of visit,
while analyses reasoning about attributes and other properties derived
from reachability can reason about the direct call graph.
Note that this isn't necessarily complete: it doesn't model edges to
declarations or indirect calls. Those can be found by scanning the
instructions of the function if desirable, and in fact every user
currently does this in order to handle things like calls to instrinsics.
If useful, we could consider caching this information in the call graph
to save the instruction scans, but currently that doesn't seem to be
important.
An important realization for why the representation chosen here works is
that the call graph is a formal subset of the reference graph and thus
both can live within the same data structure. All SCCs of the call graph
are necessarily contained within an SCC of the reference graph, etc.
The design is to build 'RefSCC's to model SCCs of the reference graph,
and then within them more literal SCCs for the call graph.
The formation of actual call edge SCCs is not done lazily, unlike
reference edge 'RefSCC's. Instead, once a reference SCC is formed, it
directly builds the call SCCs within it and stores them in a post-order
sequence. This is used to provide a consistent platform for mutation and
update of the graph. The post-order also allows for very efficient
updates in common cases by bounding the number of nodes (and thus edges)
considered.
There is considerable common code that I'm still looking for the best
way to factor out between the various DFS implementations here. So far,
my attempts have made the code harder to read and understand despite
reducing the duplication, which seems a poor tradeoff. I've not given up
on figuring out the right way to do this, but I wanted to wait until
I at least had the system working and tested to continue attempting to
factor it differently.
This also requires introducing several new algorithms in order to handle
all of the incremental update scenarios for the more complex structure
involving two edge colorings. I've tried to comment the algorithms
sufficiently to make it clear how this is expected to work, but they may
still need more extensive documentation.
I know that there are some changes which are not strictly necessarily
coupled here. The process of developing this started out with a very
focused set of changes for the new structure of the graph and
algorithms, but subsequent changes to bring the APIs and code into
consistent and understandable patterns also ended up touching on other
aspects. There was no good way to separate these out without causing
*massive* merge conflicts. Ultimately, to a large degree this is
a rewrite of most of the core algorithms in the LCG class and so I don't
think it really matters much.
Many thanks to the careful review by Sanjoy Das!
Differential Revision: http://reviews.llvm.org/D16802
llvm-svn: 261040
2016-02-17 08:18:16 +08:00
|
|
|
" call void @f5()\n"
|
2014-04-23 18:31:17 +08:00
|
|
|
" ret void\n"
|
|
|
|
"}\n"
|
[LCG] Construct an actual call graph with call-edge SCCs nested inside
reference-edge SCCs.
This essentially builds a more normal call graph as a subgraph of the
"reference graph" that was the old model. This allows both to exist and
the different use cases to use the aspect which addresses their needs.
Specifically, the pass manager and other *ordering* constrained logic
can use the reference graph to achieve conservative order of visit,
while analyses reasoning about attributes and other properties derived
from reachability can reason about the direct call graph.
Note that this isn't necessarily complete: it doesn't model edges to
declarations or indirect calls. Those can be found by scanning the
instructions of the function if desirable, and in fact every user
currently does this in order to handle things like calls to instrinsics.
If useful, we could consider caching this information in the call graph
to save the instruction scans, but currently that doesn't seem to be
important.
An important realization for why the representation chosen here works is
that the call graph is a formal subset of the reference graph and thus
both can live within the same data structure. All SCCs of the call graph
are necessarily contained within an SCC of the reference graph, etc.
The design is to build 'RefSCC's to model SCCs of the reference graph,
and then within them more literal SCCs for the call graph.
The formation of actual call edge SCCs is not done lazily, unlike
reference edge 'RefSCC's. Instead, once a reference SCC is formed, it
directly builds the call SCCs within it and stores them in a post-order
sequence. This is used to provide a consistent platform for mutation and
update of the graph. The post-order also allows for very efficient
updates in common cases by bounding the number of nodes (and thus edges)
considered.
There is considerable common code that I'm still looking for the best
way to factor out between the various DFS implementations here. So far,
my attempts have made the code harder to read and understand despite
reducing the duplication, which seems a poor tradeoff. I've not given up
on figuring out the right way to do this, but I wanted to wait until
I at least had the system working and tested to continue attempting to
factor it differently.
This also requires introducing several new algorithms in order to handle
all of the incremental update scenarios for the more complex structure
involving two edge colorings. I've tried to comment the algorithms
sufficiently to make it clear how this is expected to work, but they may
still need more extensive documentation.
I know that there are some changes which are not strictly necessarily
coupled here. The process of developing this started out with a very
focused set of changes for the new structure of the graph and
algorithms, but subsequent changes to bring the APIs and code into
consistent and understandable patterns also ended up touching on other
aspects. There was no good way to separate these out without causing
*massive* merge conflicts. Ultimately, to a large degree this is
a rewrite of most of the core algorithms in the LCG class and so I don't
think it really matters much.
Many thanks to the careful review by Sanjoy Das!
Differential Revision: http://reviews.llvm.org/D16802
llvm-svn: 261040
2016-02-17 08:18:16 +08:00
|
|
|
"define void @f5() {\n"
|
2014-04-23 18:31:17 +08:00
|
|
|
"entry:\n"
|
[LCG] Construct an actual call graph with call-edge SCCs nested inside
reference-edge SCCs.
This essentially builds a more normal call graph as a subgraph of the
"reference graph" that was the old model. This allows both to exist and
the different use cases to use the aspect which addresses their needs.
Specifically, the pass manager and other *ordering* constrained logic
can use the reference graph to achieve conservative order of visit,
while analyses reasoning about attributes and other properties derived
from reachability can reason about the direct call graph.
Note that this isn't necessarily complete: it doesn't model edges to
declarations or indirect calls. Those can be found by scanning the
instructions of the function if desirable, and in fact every user
currently does this in order to handle things like calls to instrinsics.
If useful, we could consider caching this information in the call graph
to save the instruction scans, but currently that doesn't seem to be
important.
An important realization for why the representation chosen here works is
that the call graph is a formal subset of the reference graph and thus
both can live within the same data structure. All SCCs of the call graph
are necessarily contained within an SCC of the reference graph, etc.
The design is to build 'RefSCC's to model SCCs of the reference graph,
and then within them more literal SCCs for the call graph.
The formation of actual call edge SCCs is not done lazily, unlike
reference edge 'RefSCC's. Instead, once a reference SCC is formed, it
directly builds the call SCCs within it and stores them in a post-order
sequence. This is used to provide a consistent platform for mutation and
update of the graph. The post-order also allows for very efficient
updates in common cases by bounding the number of nodes (and thus edges)
considered.
There is considerable common code that I'm still looking for the best
way to factor out between the various DFS implementations here. So far,
my attempts have made the code harder to read and understand despite
reducing the duplication, which seems a poor tradeoff. I've not given up
on figuring out the right way to do this, but I wanted to wait until
I at least had the system working and tested to continue attempting to
factor it differently.
This also requires introducing several new algorithms in order to handle
all of the incremental update scenarios for the more complex structure
involving two edge colorings. I've tried to comment the algorithms
sufficiently to make it clear how this is expected to work, but they may
still need more extensive documentation.
I know that there are some changes which are not strictly necessarily
coupled here. The process of developing this started out with a very
focused set of changes for the new structure of the graph and
algorithms, but subsequent changes to bring the APIs and code into
consistent and understandable patterns also ended up touching on other
aspects. There was no good way to separate these out without causing
*massive* merge conflicts. Ultimately, to a large degree this is
a rewrite of most of the core algorithms in the LCG class and so I don't
think it really matters much.
Many thanks to the careful review by Sanjoy Das!
Differential Revision: http://reviews.llvm.org/D16802
llvm-svn: 261040
2016-02-17 08:18:16 +08:00
|
|
|
" call void @f1()\n"
|
2014-04-23 18:31:17 +08:00
|
|
|
" ret void\n"
|
|
|
|
"}\n");
LazyCallGraph CG(*M);

// Force the graph to be fully expanded.
auto I = CG.postorder_ref_scc_begin();
LazyCallGraph::RefSCC &RC = *I++;
EXPECT_EQ(CG.postorder_ref_scc_end(), I);

LazyCallGraph::Node &N1 = *CG.lookup(lookupFunction(*M, "f1"));
LazyCallGraph::Node &N2 = *CG.lookup(lookupFunction(*M, "f2"));
LazyCallGraph::Node &N3 = *CG.lookup(lookupFunction(*M, "f3"));
LazyCallGraph::Node &N4 = *CG.lookup(lookupFunction(*M, "f4"));
LazyCallGraph::Node &N5 = *CG.lookup(lookupFunction(*M, "f5"));
EXPECT_EQ(&RC, CG.lookupRefSCC(N1));
EXPECT_EQ(&RC, CG.lookupRefSCC(N2));
EXPECT_EQ(&RC, CG.lookupRefSCC(N3));
EXPECT_EQ(&RC, CG.lookupRefSCC(N4));
EXPECT_EQ(&RC, CG.lookupRefSCC(N5));
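
// All five functions should end up in a single call SCC inside this lone
// RefSCC.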
ASSERT_EQ(1, RC.size());

LazyCallGraph::SCC &C = *RC.begin();
EXPECT_EQ(&C, CG.lookupSCC(N1));
EXPECT_EQ(&C, CG.lookupSCC(N2));
EXPECT_EQ(&C, CG.lookupSCC(N3));
EXPECT_EQ(&C, CG.lookupSCC(N4));
EXPECT_EQ(&C, CG.lookupSCC(N5));
}
TEST(LazyCallGraphTest, OutgoingEdgeMutation) {
std::unique_ptr<Module> M = parseAssembly(
"define void @a() {\n"
"entry:\n"
" call void @b()\n"
" call void @c()\n"
" ret void\n"
"}\n"
"define void @b() {\n"
"entry:\n"
" call void @d()\n"
" ret void\n"
"}\n"
"define void @c() {\n"
"entry:\n"
" call void @d()\n"
" ret void\n"
"}\n"
"define void @d() {\n"
"entry:\n"
" ret void\n"
"}\n");
LazyCallGraph CG(*M);

// Force the graph to be fully expanded.
for (LazyCallGraph::RefSCC &RC : CG.postorder_ref_sccs())
(void)RC;

LazyCallGraph::Node &A = *CG.lookup(lookupFunction(*M, "a"));
LazyCallGraph::Node &B = *CG.lookup(lookupFunction(*M, "b"));
LazyCallGraph::Node &C = *CG.lookup(lookupFunction(*M, "c"));
LazyCallGraph::Node &D = *CG.lookup(lookupFunction(*M, "d"));
LazyCallGraph::SCC &AC = *CG.lookupSCC(A);
LazyCallGraph::SCC &BC = *CG.lookupSCC(B);
LazyCallGraph::SCC &CC = *CG.lookupSCC(C);
LazyCallGraph::SCC &DC = *CG.lookupSCC(D);
LazyCallGraph::RefSCC &ARC = *CG.lookupRefSCC(A);
LazyCallGraph::RefSCC &BRC = *CG.lookupRefSCC(B);
LazyCallGraph::RefSCC &CRC = *CG.lookupRefSCC(C);
LazyCallGraph::RefSCC &DRC = *CG.lookupRefSCC(D);
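
// 'a' references 'b' and 'c' directly, so ARC is a parent of BRC and CRC, but
// it only reaches 'd' transitively, so ARC is an ancestor (not a parent) of
// DRC.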
EXPECT_TRUE(ARC.isParentOf(BRC));
EXPECT_TRUE(ARC.isParentOf(CRC));
EXPECT_FALSE(ARC.isParentOf(DRC));
EXPECT_TRUE(ARC.isAncestorOf(DRC));
EXPECT_FALSE(DRC.isChildOf(ARC));
EXPECT_TRUE(DRC.isDescendantOf(ARC));
EXPECT_TRUE(DRC.isChildOf(BRC));
EXPECT_TRUE(DRC.isChildOf(CRC));

EXPECT_EQ(2, std::distance(A.begin(), A.end()));
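
// Insert a direct call edge from 'a' to 'd'. 'd' is already transitively
// reachable from 'a', so this only adds a parent/child relationship between
// ARC and DRC; no SCCs or RefSCCs should merge or split.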
ARC.insertOutgoingEdge(A, D, LazyCallGraph::Edge::Call);
EXPECT_EQ(3, std::distance(A.begin(), A.end()));
const LazyCallGraph::Edge &NewE = A[D];
EXPECT_TRUE(NewE);
EXPECT_TRUE(NewE.isCall());
EXPECT_EQ(&D, NewE.getNode());

// Only the parent and child tests should have changed. The rest of the graph
// remains the same.
EXPECT_TRUE(ARC.isParentOf(DRC));
EXPECT_TRUE(ARC.isAncestorOf(DRC));
EXPECT_TRUE(DRC.isChildOf(ARC));
EXPECT_TRUE(DRC.isDescendantOf(ARC));
EXPECT_EQ(&AC, CG.lookupSCC(A));
EXPECT_EQ(&BC, CG.lookupSCC(B));
EXPECT_EQ(&CC, CG.lookupSCC(C));
EXPECT_EQ(&DC, CG.lookupSCC(D));
EXPECT_EQ(&ARC, CG.lookupRefSCC(A));
EXPECT_EQ(&BRC, CG.lookupRefSCC(B));
EXPECT_EQ(&CRC, CG.lookupRefSCC(C));
EXPECT_EQ(&DRC, CG.lookupRefSCC(D));
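
// Demote the new a -> d edge to a plain reference edge and then promote it
// back to a call edge; neither switch should change the SCC or RefSCC
// structure.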
ARC.switchOutgoingEdgeToRef(A, D);
EXPECT_FALSE(NewE.isCall());

// Verify the graph remains the same.
EXPECT_TRUE(ARC.isParentOf(DRC));
EXPECT_TRUE(ARC.isAncestorOf(DRC));
EXPECT_TRUE(DRC.isChildOf(ARC));
EXPECT_TRUE(DRC.isDescendantOf(ARC));
EXPECT_EQ(&AC, CG.lookupSCC(A));
EXPECT_EQ(&BC, CG.lookupSCC(B));
EXPECT_EQ(&CC, CG.lookupSCC(C));
EXPECT_EQ(&DC, CG.lookupSCC(D));
EXPECT_EQ(&ARC, CG.lookupRefSCC(A));
EXPECT_EQ(&BRC, CG.lookupRefSCC(B));
EXPECT_EQ(&CRC, CG.lookupRefSCC(C));
EXPECT_EQ(&DRC, CG.lookupRefSCC(D));
ARC.switchOutgoingEdgeToCall(A, D);
EXPECT_TRUE(NewE.isCall());

// Verify the graph remains the same.
EXPECT_TRUE(ARC.isParentOf(DRC));
EXPECT_TRUE(ARC.isAncestorOf(DRC));
EXPECT_TRUE(DRC.isChildOf(ARC));
EXPECT_TRUE(DRC.isDescendantOf(ARC));
EXPECT_EQ(&AC, CG.lookupSCC(A));
EXPECT_EQ(&BC, CG.lookupSCC(B));
EXPECT_EQ(&CC, CG.lookupSCC(C));
EXPECT_EQ(&DC, CG.lookupSCC(D));
EXPECT_EQ(&ARC, CG.lookupRefSCC(A));
EXPECT_EQ(&BRC, CG.lookupRefSCC(B));
EXPECT_EQ(&CRC, CG.lookupRefSCC(C));
EXPECT_EQ(&DRC, CG.lookupRefSCC(D));
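
// Finally remove the a -> d edge again. 'd' remains reachable from 'a'
// through 'b' and 'c', so ARC stays an ancestor of DRC but is no longer its
// direct parent.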
ARC.removeOutgoingEdge(A, D);
EXPECT_EQ(2, std::distance(A.begin(), A.end()));

// Now the parent and child tests fail again but the rest remains the same.
EXPECT_FALSE(ARC.isParentOf(DRC));
EXPECT_TRUE(ARC.isAncestorOf(DRC));
EXPECT_FALSE(DRC.isChildOf(ARC));
EXPECT_TRUE(DRC.isDescendantOf(ARC));
EXPECT_EQ(&AC, CG.lookupSCC(A));
EXPECT_EQ(&BC, CG.lookupSCC(B));
EXPECT_EQ(&CC, CG.lookupSCC(C));
EXPECT_EQ(&DC, CG.lookupSCC(D));
EXPECT_EQ(&ARC, CG.lookupRefSCC(A));
EXPECT_EQ(&BRC, CG.lookupRefSCC(B));
EXPECT_EQ(&CRC, CG.lookupRefSCC(C));
EXPECT_EQ(&DRC, CG.lookupRefSCC(D));
}
TEST(LazyCallGraphTest, IncomingEdgeInsertion) {
// We want to ensure we can add edges even across complex diamond graphs, so
// we use the diamond of triangles graph defined above. The ascii diagram is
// repeated here for easy reference.
//
//         d1        |
//        /  \       |
//       d3--d2      |
//      /     \      |
//     b1      c1    |
//    /  \    /  \   |
//   b3--b2  c3--c2  |
//       \    /      |
//        a1         |
//       /  \        |
//      a3--a2       |
//
std::unique_ptr<Module> M = parseAssembly(DiamondOfTriangles);
LazyCallGraph CG(*M);

// Force the graph to be fully expanded.
for (LazyCallGraph::RefSCC &RC : CG.postorder_ref_sccs())
|
|
|
|
(void)RC;
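  // Iterating postorder_ref_sccs() to exhaustion builds every RefSCC, and the
  // call-edge SCCs nested inside each one, before we start mutating edges, so
  // the lookups below are guaranteed to succeed.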

  LazyCallGraph::Node &A1 = *CG.lookup(lookupFunction(*M, "a1"));
  LazyCallGraph::Node &A2 = *CG.lookup(lookupFunction(*M, "a2"));
  LazyCallGraph::Node &A3 = *CG.lookup(lookupFunction(*M, "a3"));
  LazyCallGraph::Node &B1 = *CG.lookup(lookupFunction(*M, "b1"));
  LazyCallGraph::Node &B2 = *CG.lookup(lookupFunction(*M, "b2"));
  LazyCallGraph::Node &B3 = *CG.lookup(lookupFunction(*M, "b3"));
  LazyCallGraph::Node &C1 = *CG.lookup(lookupFunction(*M, "c1"));
  LazyCallGraph::Node &C2 = *CG.lookup(lookupFunction(*M, "c2"));
  LazyCallGraph::Node &C3 = *CG.lookup(lookupFunction(*M, "c3"));
  LazyCallGraph::Node &D1 = *CG.lookup(lookupFunction(*M, "d1"));
  LazyCallGraph::Node &D2 = *CG.lookup(lookupFunction(*M, "d2"));
  LazyCallGraph::Node &D3 = *CG.lookup(lookupFunction(*M, "d3"));
  LazyCallGraph::RefSCC &ARC = *CG.lookupRefSCC(A1);
  LazyCallGraph::RefSCC &BRC = *CG.lookupRefSCC(B1);
  LazyCallGraph::RefSCC &CRC = *CG.lookupRefSCC(C1);
  LazyCallGraph::RefSCC &DRC = *CG.lookupRefSCC(D1);
  ASSERT_EQ(&ARC, CG.lookupRefSCC(A2));
  ASSERT_EQ(&ARC, CG.lookupRefSCC(A3));
  ASSERT_EQ(&BRC, CG.lookupRefSCC(B2));
  ASSERT_EQ(&BRC, CG.lookupRefSCC(B3));
  ASSERT_EQ(&CRC, CG.lookupRefSCC(C2));
  ASSERT_EQ(&CRC, CG.lookupRefSCC(C3));
  ASSERT_EQ(&DRC, CG.lookupRefSCC(D2));
  ASSERT_EQ(&DRC, CG.lookupRefSCC(D3));

  ASSERT_EQ(1, std::distance(D2.begin(), D2.end()));
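  // Each triangle of mutually calling functions forms its own RefSCC, and
  // before the mutation d2's only outgoing edge is its call to d3.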

  // Add an edge to make the graph:
  //
  //         d1       |
  //        /  \      |
  //       d3--d2---. |
  //      /     \   | |
  //     b1     c1  | |
  //    /  \   /  \ / |
  //   b3--b2 c3--c2  |
  //       \   /      |
  //        a1        |
  //       /  \       |
  //      a3--a2      |
  auto MergedRCs = CRC.insertIncomingRefEdge(D2, C2);
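  // The returned vector holds the RefSCCs that were merged into CRC as a
  // result of the new d2 -> c2 reference edge; below we expect it to contain
  // exactly the old D ref-SCC.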

  // Make sure we connected the nodes.
  for (LazyCallGraph::Edge E : D2) {
    if (E.getNode() == &D3)
      continue;
    EXPECT_EQ(&C2, E.getNode());
  }

  // And marked the D ref-SCC as no longer valid.
  EXPECT_EQ(1u, MergedRCs.size());
  EXPECT_EQ(&DRC, MergedRCs[0]);

  // Make sure we have the correct nodes in the SCC sets.
  EXPECT_EQ(&ARC, CG.lookupRefSCC(A1));
  EXPECT_EQ(&ARC, CG.lookupRefSCC(A2));
  EXPECT_EQ(&ARC, CG.lookupRefSCC(A3));
  EXPECT_EQ(&BRC, CG.lookupRefSCC(B1));
  EXPECT_EQ(&BRC, CG.lookupRefSCC(B2));
  EXPECT_EQ(&BRC, CG.lookupRefSCC(B3));
  EXPECT_EQ(&CRC, CG.lookupRefSCC(C1));
  EXPECT_EQ(&CRC, CG.lookupRefSCC(C2));
  EXPECT_EQ(&CRC, CG.lookupRefSCC(C3));
  EXPECT_EQ(&CRC, CG.lookupRefSCC(D1));
  EXPECT_EQ(&CRC, CG.lookupRefSCC(D2));
  EXPECT_EQ(&CRC, CG.lookupRefSCC(D3));
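  // The d-triangle now lives in CRC: the new d2 -> c2 edge together with the
  // pre-existing c1 -> d2 call closes a reference cycle, folding the old
  // D ref-SCC into the C ref-SCC.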

  // And that ancestry tests have been updated.
  EXPECT_TRUE(ARC.isParentOf(CRC));
  EXPECT_TRUE(BRC.isParentOf(CRC));
}
TEST(LazyCallGraphTest, IncomingEdgeInsertionMidTraversal) {
  // This is the same fundamental test as the previous, but we perform it
  // having only partially walked the RefSCCs of the graph.
  std::unique_ptr<Module> M = parseAssembly(DiamondOfTriangles);
  LazyCallGraph CG(*M);

  // Walk the RefSCCs until we find the one containing 'c1'.
  auto I = CG.postorder_ref_scc_begin(), E = CG.postorder_ref_scc_end();
  ASSERT_NE(I, E);
  LazyCallGraph::RefSCC &DRC = *I;
  ASSERT_NE(&DRC, nullptr);
  ++I;
  ASSERT_NE(I, E);
  LazyCallGraph::RefSCC &CRC = *I;
  ASSERT_NE(&CRC, nullptr);
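  // The post-order walk yields callee RefSCCs before their callers, so the
  // 'd' triangle comes out first and the 'c' triangle second; the 'a' and 'b'
  // functions have not been visited yet, as the lookups below confirm.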

  ASSERT_EQ(nullptr, CG.lookup(lookupFunction(*M, "a1")));
  ASSERT_EQ(nullptr, CG.lookup(lookupFunction(*M, "a2")));
  ASSERT_EQ(nullptr, CG.lookup(lookupFunction(*M, "a3")));
  ASSERT_EQ(nullptr, CG.lookup(lookupFunction(*M, "b1")));
  ASSERT_EQ(nullptr, CG.lookup(lookupFunction(*M, "b2")));
  ASSERT_EQ(nullptr, CG.lookup(lookupFunction(*M, "b3")));
  LazyCallGraph::Node &C1 = *CG.lookup(lookupFunction(*M, "c1"));
  LazyCallGraph::Node &C2 = *CG.lookup(lookupFunction(*M, "c2"));
  LazyCallGraph::Node &C3 = *CG.lookup(lookupFunction(*M, "c3"));
  LazyCallGraph::Node &D1 = *CG.lookup(lookupFunction(*M, "d1"));
  LazyCallGraph::Node &D2 = *CG.lookup(lookupFunction(*M, "d2"));
  LazyCallGraph::Node &D3 = *CG.lookup(lookupFunction(*M, "d3"));
  ASSERT_EQ(&CRC, CG.lookupRefSCC(C1));
  ASSERT_EQ(&CRC, CG.lookupRefSCC(C2));
  ASSERT_EQ(&CRC, CG.lookupRefSCC(C3));
  ASSERT_EQ(&DRC, CG.lookupRefSCC(D1));
  ASSERT_EQ(&DRC, CG.lookupRefSCC(D2));
  ASSERT_EQ(&DRC, CG.lookupRefSCC(D3));

  ASSERT_EQ(1, std::distance(D2.begin(), D2.end()));
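
  // Now perform the same edge insertion as in the previous test, but with the
  // post-order walk suspended after forming only the D and C ref-SCCs.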
  auto MergedRCs = CRC.insertIncomingRefEdge(D2, C2);

  // Make sure we connected the nodes.
  for (LazyCallGraph::Edge E : D2) {
    if (E.getNode() == &D3)
      continue;
    EXPECT_EQ(&C2, E.getNode());
  }

  // And marked the D ref-SCC as no longer valid.
  EXPECT_EQ(1u, MergedRCs.size());
  EXPECT_EQ(&DRC, MergedRCs[0]);

  // Make sure we have the correct nodes in the RefSCCs.
  EXPECT_EQ(&CRC, CG.lookupRefSCC(C1));
  EXPECT_EQ(&CRC, CG.lookupRefSCC(C2));
  EXPECT_EQ(&CRC, CG.lookupRefSCC(C3));
  EXPECT_EQ(&CRC, CG.lookupRefSCC(D1));
  EXPECT_EQ(&CRC, CG.lookupRefSCC(D2));
  EXPECT_EQ(&CRC, CG.lookupRefSCC(D3));

  // Check that we can form the last two RefSCCs now in a coherent way.
  ++I;
  EXPECT_NE(I, E);
  LazyCallGraph::RefSCC &BRC = *I;
  EXPECT_NE(&BRC, nullptr);
  EXPECT_EQ(&BRC, CG.lookupRefSCC(*CG.lookup(lookupFunction(*M, "b1"))));
  EXPECT_EQ(&BRC, CG.lookupRefSCC(*CG.lookup(lookupFunction(*M, "b2"))));
  EXPECT_EQ(&BRC, CG.lookupRefSCC(*CG.lookup(lookupFunction(*M, "b3"))));
  EXPECT_TRUE(BRC.isParentOf(CRC));
  ++I;
  EXPECT_NE(I, E);
  LazyCallGraph::RefSCC &ARC = *I;
  EXPECT_NE(&ARC, nullptr);
  EXPECT_EQ(&ARC, CG.lookupRefSCC(*CG.lookup(lookupFunction(*M, "a1"))));
  EXPECT_EQ(&ARC, CG.lookupRefSCC(*CG.lookup(lookupFunction(*M, "a2"))));
  EXPECT_EQ(&ARC, CG.lookupRefSCC(*CG.lookup(lookupFunction(*M, "a3"))));
  EXPECT_TRUE(ARC.isParentOf(CRC));
  ++I;
  EXPECT_EQ(E, I);
}
TEST(LazyCallGraphTest, InternalEdgeMutation) {
  std::unique_ptr<Module> M = parseAssembly(
      "define void @a() {\n"
      "entry:\n"
      "  call void @b()\n"
      "  ret void\n"
      "}\n"
      "define void @b() {\n"
      "entry:\n"
      "  call void @c()\n"
      "  ret void\n"
      "}\n"
      "define void @c() {\n"
      "entry:\n"
      "  call void @a()\n"
      "  ret void\n"
      "}\n");
  LazyCallGraph CG(*M);
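
  // The module is a single three-function call cycle: @a calls @b, @b calls
  // @c, and @c calls @a.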

  // Force the graph to be fully expanded.
  auto I = CG.postorder_ref_scc_begin();
  LazyCallGraph::RefSCC &RC = *I++;
  EXPECT_EQ(CG.postorder_ref_scc_end(), I);
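  // Because the three functions are mutually recursive, the post-order
  // traversal contains exactly one RefSCC.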
|
2014-04-23 19:03:03 +08:00
|
|
|
|
2014-04-24 07:12:06 +08:00
|
|
|
LazyCallGraph::Node &A = *CG.lookup(lookupFunction(*M, "a"));
|
|
|
|
LazyCallGraph::Node &B = *CG.lookup(lookupFunction(*M, "b"));
|
  LazyCallGraph::Node &C = *CG.lookup(lookupFunction(*M, "c"));
  EXPECT_EQ(&RC, CG.lookupRefSCC(A));
  EXPECT_EQ(&RC, CG.lookupRefSCC(B));
  EXPECT_EQ(&RC, CG.lookupRefSCC(C));
  EXPECT_EQ(1, RC.size());
  EXPECT_EQ(&*RC.begin(), CG.lookupSCC(A));
  EXPECT_EQ(&*RC.begin(), CG.lookupSCC(B));
  EXPECT_EQ(&*RC.begin(), CG.lookupSCC(C));

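  // At this point the checks above have established a single RefSCC containing
  // one call SCC that holds all of 'a', 'b', and 'c'.
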
  // Insert a ref edge from 'a' to 'c'. Nothing changes about the SCC or RefSCC
  // structure.
  RC.insertInternalRefEdge(A, C);
  EXPECT_EQ(2, std::distance(A.begin(), A.end()));
  EXPECT_EQ(&RC, CG.lookupRefSCC(A));
  EXPECT_EQ(&RC, CG.lookupRefSCC(B));
  EXPECT_EQ(&RC, CG.lookupRefSCC(C));
  EXPECT_EQ(1, RC.size());
  EXPECT_EQ(&*RC.begin(), CG.lookupSCC(A));
  EXPECT_EQ(&*RC.begin(), CG.lookupSCC(B));
  EXPECT_EQ(&*RC.begin(), CG.lookupSCC(C));

  // Switch the call edge from 'b' to 'c' to a ref edge. This will break the
  // call cycle and cause us to form more SCCs. The RefSCC will remain the same
  // though.
  RC.switchInternalEdgeToRef(B, C);
  EXPECT_EQ(&RC, CG.lookupRefSCC(A));
  EXPECT_EQ(&RC, CG.lookupRefSCC(B));
  EXPECT_EQ(&RC, CG.lookupRefSCC(C));
  auto J = RC.begin();
  // The SCCs must be in *post-order* which means successors before
  // predecessors. At this point we have call edges from C to A and from A to
  // B. The only valid postorder is B, A, C.
  EXPECT_EQ(&*J++, CG.lookupSCC(B));
  EXPECT_EQ(&*J++, CG.lookupSCC(A));
  EXPECT_EQ(&*J++, CG.lookupSCC(C));
  EXPECT_EQ(RC.end(), J);

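  // For reference, the call edges left inside this RefSCC after the switch are:
  //   a --> b   (so B's SCC must precede A's)
  //   c --> a   (so A's SCC must precede C's)
  // which is why the post-order checked above is exactly B, A, C.
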
  // Test turning the ref edge from A to C into a call edge. This will form an
  // SCC out of A and C. Since we previously had a call edge from C to A, the
  // C SCC should be preserved and have A merged into it while the A SCC should
  // be invalidated.
  LazyCallGraph::SCC &AC = *CG.lookupSCC(A);
  LazyCallGraph::SCC &CC = *CG.lookupSCC(C);
  auto InvalidatedSCCs = RC.switchInternalEdgeToCall(A, C);
  ASSERT_EQ(1u, InvalidatedSCCs.size());
  EXPECT_EQ(&AC, InvalidatedSCCs[0]);
  EXPECT_EQ(2, CC.size());
  EXPECT_EQ(&CC, CG.lookupSCC(A));
  EXPECT_EQ(&CC, CG.lookupSCC(C));
  J = RC.begin();
  EXPECT_EQ(&*J++, CG.lookupSCC(B));
  EXPECT_EQ(&*J++, CG.lookupSCC(C));
  EXPECT_EQ(RC.end(), J);
}

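// A minimal sketch, not used by any of the tests here, of how a client might
// walk the structure these tests exercise: the graph hands out RefSCCs in
// post-order, and each RefSCC holds its call-edge SCCs in post-order as well.
// It relies only on API already used above (postorder_ref_scc_begin/end,
// RefSCC::begin/end); the helper name countCallSCCs is purely illustrative.
int countCallSCCs(LazyCallGraph &CG) {
  int NumSCCs = 0;
  for (auto I = CG.postorder_ref_scc_begin(), E = CG.postorder_ref_scc_end();
       I != E; ++I) {
    LazyCallGraph::RefSCC &RC = *I;
    // Count the call SCCs nested inside this reference SCC.
    NumSCCs += std::distance(RC.begin(), RC.end());
  }
  return NumSCCs;
}
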
TEST(LazyCallGraphTest, InternalEdgeRemoval) {
  // A nice fully connected (including self-edges) RefSCC.
  std::unique_ptr<Module> M = parseAssembly(
      "define void @a(i8** %ptr) {\n"
      "entry:\n"
      " store i8* bitcast (void(i8**)* @a to i8*), i8** %ptr\n"
      " store i8* bitcast (void(i8**)* @b to i8*), i8** %ptr\n"
      " store i8* bitcast (void(i8**)* @c to i8*), i8** %ptr\n"
      " ret void\n"
      "}\n"
      "define void @b(i8** %ptr) {\n"
      "entry:\n"
      " store i8* bitcast (void(i8**)* @a to i8*), i8** %ptr\n"
      " store i8* bitcast (void(i8**)* @b to i8*), i8** %ptr\n"
      " store i8* bitcast (void(i8**)* @c to i8*), i8** %ptr\n"
      " ret void\n"
      "}\n"
      "define void @c(i8** %ptr) {\n"
      "entry:\n"
      " store i8* bitcast (void(i8**)* @a to i8*), i8** %ptr\n"
      " store i8* bitcast (void(i8**)* @b to i8*), i8** %ptr\n"
      " store i8* bitcast (void(i8**)* @c to i8*), i8** %ptr\n"
      " ret void\n"
      "}\n");
  LazyCallGraph CG(*M);

  // Force the graph to be fully expanded.
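  // (Expansion happens as a side effect of walking the post-order RefSCC
  // iterators below: RefSCCs are formed lazily during that walk, and each one
  // eagerly builds the call SCCs nested inside it.)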
  auto I = CG.postorder_ref_scc_begin();
  LazyCallGraph::RefSCC &RC = *I++;
  EXPECT_EQ(CG.postorder_ref_scc_end(), I);

  LazyCallGraph::Node &A = *CG.lookup(lookupFunction(*M, "a"));
  LazyCallGraph::Node &B = *CG.lookup(lookupFunction(*M, "b"));
  LazyCallGraph::Node &C = *CG.lookup(lookupFunction(*M, "c"));
  EXPECT_EQ(&RC, CG.lookupRefSCC(A));
  EXPECT_EQ(&RC, CG.lookupRefSCC(B));
  EXPECT_EQ(&RC, CG.lookupRefSCC(C));

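  // The b -> a edge removed below is the reference edge created by the store
  // of @a's address inside @b's body.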
  // Remove the edge from b -> a, which should leave the 3 functions still in
  // a single connected component because of a -> b -> c -> a.
  SmallVector<LazyCallGraph::RefSCC *, 1> NewRCs =
      RC.removeInternalRefEdge(B, A);
  EXPECT_EQ(0u, NewRCs.size());
  EXPECT_EQ(&RC, CG.lookupRefSCC(A));
  EXPECT_EQ(&RC, CG.lookupRefSCC(B));
  EXPECT_EQ(&RC, CG.lookupRefSCC(C));

  // Remove the edge from c -> a, which should leave 'a' in the original RefSCC
  // and form a new RefSCC for 'b' and 'c'.
  NewRCs = RC.removeInternalRefEdge(C, A);
  EXPECT_EQ(1u, NewRCs.size());
  EXPECT_EQ(&RC, CG.lookupRefSCC(A));
  EXPECT_EQ(1, std::distance(RC.begin(), RC.end()));
  LazyCallGraph::RefSCC *RC2 = CG.lookupRefSCC(B);
  EXPECT_EQ(RC2, CG.lookupRefSCC(C));
  EXPECT_EQ(RC2, NewRCs[0]);
}

TEST(LazyCallGraphTest, InternalCallEdgeToRef) {
  // A nice fully connected (including self-edges) SCC (and RefSCC).
  std::unique_ptr<Module> M = parseAssembly(
      "define void @a() {\n"
      "entry:\n"
      " call void @a()\n"
      " call void @b()\n"
      " call void @c()\n"
      " ret void\n"
      "}\n"
      "define void @b() {\n"
      "entry:\n"
      " call void @a()\n"
      " call void @b()\n"
      " call void @c()\n"
      " ret void\n"
      "}\n"
      "define void @c() {\n"
      "entry:\n"
      " call void @a()\n"
      " call void @b()\n"
      " call void @c()\n"
      " ret void\n"
      "}\n");
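  // Here, unlike the previous test, every edge is a direct call, so the single
  // RefSCC should contain exactly one call SCC holding all three functions;
  // the assertions below check exactly that.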
  LazyCallGraph CG(*M);

  // Force the graph to be fully expanded.
  auto I = CG.postorder_ref_scc_begin();
  LazyCallGraph::RefSCC &RC = *I++;
  EXPECT_EQ(CG.postorder_ref_scc_end(), I);

  EXPECT_EQ(1, RC.size());
  LazyCallGraph::SCC &CallC = *RC.begin();

LazyCallGraph::Node &A = *CG.lookup(lookupFunction(*M, "a"));
LazyCallGraph::Node &B = *CG.lookup(lookupFunction(*M, "b"));
LazyCallGraph::Node &C = *CG.lookup(lookupFunction(*M, "c"));
EXPECT_EQ(&CallC, CG.lookupSCC(A));
EXPECT_EQ(&CallC, CG.lookupSCC(B));
EXPECT_EQ(&CallC, CG.lookupSCC(C));

// Switch the call edge from b -> a to a ref edge, which should leave the
// 3 functions still in a single connected component because of a -> b ->
// c -> a.
RC.switchInternalEdgeToRef(B, A);
EXPECT_EQ(1, RC.size());
EXPECT_EQ(&CallC, CG.lookupSCC(A));
EXPECT_EQ(&CallC, CG.lookupSCC(B));
EXPECT_EQ(&CallC, CG.lookupSCC(C));

// Switch the call edge from c -> a to a ref edge, which should leave 'a' in
// the original SCC and form a new SCC for 'b' and 'c'.
RC.switchInternalEdgeToRef(C, A);
EXPECT_EQ(2, RC.size());
EXPECT_EQ(&CallC, CG.lookupSCC(A));
LazyCallGraph::SCC &BCallC = *CG.lookupSCC(B);
EXPECT_NE(&BCallC, &CallC);
EXPECT_EQ(&BCallC, CG.lookupSCC(C));
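
// The SCCs inside a RefSCC are kept in a post-order sequence (callees before
// callers), so the new SCC formed for 'b' and 'c' should be inserted just
// before the original SCC that still holds 'a'. The iterator checks below
// walk that sequence to verify the placement.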
auto J = RC.find(CallC);
EXPECT_EQ(&CallC, &*J);
--J;
EXPECT_EQ(&BCallC, &*J);
EXPECT_EQ(RC.begin(), J);

// Switch the call edge from c -> b to a ref edge, which should leave 'b' in
// the original SCC and form a new SCC for 'c'. It shouldn't change 'a's SCC.
RC.switchInternalEdgeToRef(C, B);
EXPECT_EQ(3, RC.size());
EXPECT_EQ(&CallC, CG.lookupSCC(A));
EXPECT_EQ(&BCallC, CG.lookupSCC(B));
LazyCallGraph::SCC &CCallC = *CG.lookupSCC(C);
EXPECT_NE(&CCallC, &CallC);
EXPECT_NE(&CCallC, &BCallC);
J = RC.find(CallC);
EXPECT_EQ(&CallC, &*J);
--J;
EXPECT_EQ(&BCallC, &*J);
--J;
EXPECT_EQ(&CCallC, &*J);
EXPECT_EQ(RC.begin(), J);
}

TEST(LazyCallGraphTest, InternalRefEdgeToCall) {
// Basic test for making a ref edge into a call edge. This exercises only the
// basics of the process.
std::unique_ptr<Module> M = parseAssembly(
"define void @a() {\n"
"entry:\n"
" call void @b()\n"
" call void @c()\n"
" store void()* @d, void()** undef\n"
" ret void\n"
"}\n"
"define void @b() {\n"
"entry:\n"
" store void()* @c, void()** undef\n"
" call void @d()\n"
" ret void\n"
"}\n"
"define void @c() {\n"
"entry:\n"
" store void()* @b, void()** undef\n"
" call void @d()\n"
" ret void\n"
"}\n"
"define void @d() {\n"
"entry:\n"
" store void()* @a, void()** undef\n"
" ret void\n"
"}\n");
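// Note the shape of the IR above: the direct calls produce call edges, while
// taking a function's address via a 'store' of the function pointer only
// produces a reference edge. Those reference edges are what tie all four
// functions into a single RefSCC below even though each function starts out
// in its own call SCC.
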
LazyCallGraph CG(*M);

// Force the graph to be fully expanded.
auto I = CG.postorder_ref_scc_begin();
LazyCallGraph::RefSCC &RC = *I++;
EXPECT_EQ(CG.postorder_ref_scc_end(), I);

LazyCallGraph::Node &A = *CG.lookup(lookupFunction(*M, "a"));
LazyCallGraph::Node &B = *CG.lookup(lookupFunction(*M, "b"));
LazyCallGraph::Node &C = *CG.lookup(lookupFunction(*M, "c"));
LazyCallGraph::Node &D = *CG.lookup(lookupFunction(*M, "d"));
LazyCallGraph::SCC &AC = *CG.lookupSCC(A);
LazyCallGraph::SCC &BC = *CG.lookupSCC(B);
LazyCallGraph::SCC &CC = *CG.lookupSCC(C);
LazyCallGraph::SCC &DC = *CG.lookupSCC(D);

// Check the initial post-order. Note that B and C could be flipped here (and
// in our mutation) without changing the nature of this test.
ASSERT_EQ(4, RC.size());
EXPECT_EQ(&DC, &RC[0]);
EXPECT_EQ(&BC, &RC[1]);
EXPECT_EQ(&CC, &RC[2]);
EXPECT_EQ(&AC, &RC[3]);

// Switch the ref edge from A -> D to a call edge. This should have no effect
// as the edge already agrees with the post-order and no new cycles are
// formed.
auto MergedCs = RC.switchInternalEdgeToCall(A, D);
EXPECT_EQ(0u, MergedCs.size());
ASSERT_EQ(4, RC.size());
EXPECT_EQ(&DC, &RC[0]);
EXPECT_EQ(&BC, &RC[1]);
EXPECT_EQ(&CC, &RC[2]);
EXPECT_EQ(&AC, &RC[3]);

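// Why reordering is needed below: in the current post-order C's SCC sits
// after B's, but a call edge has to point from a later SCC to an earlier one
// (callees precede callers), so C's SCC must slide in front of B's. No cycle
// forms because C does not call back into B yet.
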
// Switch B -> C to a call edge. This doesn't form any new cycles but does
// require reordering the SCCs.
MergedCs = RC.switchInternalEdgeToCall(B, C);
EXPECT_EQ(0u, MergedCs.size());
ASSERT_EQ(4, RC.size());
EXPECT_EQ(&DC, &RC[0]);
EXPECT_EQ(&CC, &RC[1]);
EXPECT_EQ(&BC, &RC[2]);
EXPECT_EQ(&AC, &RC[3]);

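// The range returned by switchInternalEdgeToCall lists the SCCs that get
// merged away when the new call edge closes a cycle. In the C -> B switch
// below, C's SCC is folded into B's, so the returned range should contain
// exactly CC and B's SCC should grow to two nodes.
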
// Switch C -> B to a call edge. This forms a cycle and forces merging SCCs.
MergedCs = RC.switchInternalEdgeToCall(C, B);
ASSERT_EQ(1u, MergedCs.size());
EXPECT_EQ(&CC, MergedCs[0]);
ASSERT_EQ(3, RC.size());
EXPECT_EQ(&DC, &RC[0]);
EXPECT_EQ(&BC, &RC[1]);
EXPECT_EQ(&AC, &RC[2]);
EXPECT_EQ(2, BC.size());
EXPECT_EQ(&BC, CG.lookupSCC(B));
EXPECT_EQ(&BC, CG.lookupSCC(C));
}

TEST(LazyCallGraphTest, InternalRefEdgeToCallNoCycleInterleaved) {
// Test the case where, prior to switching a ref edge to a call edge, the
// post-order between the source and the target contains SCCs that connect to
// the source and SCCs that connect to the target, but none that connect to
// both, interleaved with one another. This ensures we correctly partition
// the range rather than simply moving one side or the other.
std::unique_ptr<Module> M = parseAssembly(
"define void @a() {\n"
|
|
|
|
"entry:\n"
|
|
|
|
" call void @b1()\n"
|
|
|
|
" call void @c1()\n"
|
|
|
|
" ret void\n"
|
|
|
|
"}\n"
|
|
|
|
"define void @b1() {\n"
|
|
|
|
"entry:\n"
|
|
|
|
" call void @c1()\n"
|
|
|
|
" call void @b2()\n"
|
|
|
|
" ret void\n"
|
|
|
|
"}\n"
|
|
|
|
"define void @c1() {\n"
|
|
|
|
"entry:\n"
|
|
|
|
" call void @b2()\n"
|
|
|
|
" call void @c2()\n"
|
|
|
|
" ret void\n"
|
|
|
|
"}\n"
|
|
|
|
"define void @b2() {\n"
|
|
|
|
"entry:\n"
|
|
|
|
" call void @c2()\n"
|
|
|
|
" call void @b3()\n"
|
|
|
|
" ret void\n"
|
|
|
|
"}\n"
|
|
|
|
"define void @c2() {\n"
|
|
|
|
"entry:\n"
|
|
|
|
" call void @b3()\n"
|
|
|
|
" call void @c3()\n"
|
|
|
|
" ret void\n"
|
|
|
|
"}\n"
|
|
|
|
"define void @b3() {\n"
|
|
|
|
"entry:\n"
|
|
|
|
" call void @c3()\n"
|
|
|
|
" call void @d()\n"
|
|
|
|
" ret void\n"
|
|
|
|
"}\n"
|
|
|
|
"define void @c3() {\n"
|
|
|
|
"entry:\n"
|
|
|
|
" store void()* @b1, void()** undef\n"
|
|
|
|
" call void @d()\n"
|
|
|
|
" ret void\n"
|
|
|
|
"}\n"
|
|
|
|
"define void @d() {\n"
|
|
|
|
"entry:\n"
|
|
|
|
" store void()* @a, void()** undef\n"
|
|
|
|
" ret void\n"
|
|
|
|
"}\n");
|
|
|
|
LazyCallGraph CG(*M);

// Force the graph to be fully expanded.
auto I = CG.postorder_ref_scc_begin();
LazyCallGraph::RefSCC &RC = *I++;
EXPECT_EQ(CG.postorder_ref_scc_end(), I);

LazyCallGraph::Node &A = *CG.lookup(lookupFunction(*M, "a"));
LazyCallGraph::Node &B1 = *CG.lookup(lookupFunction(*M, "b1"));
LazyCallGraph::Node &B2 = *CG.lookup(lookupFunction(*M, "b2"));
LazyCallGraph::Node &B3 = *CG.lookup(lookupFunction(*M, "b3"));
LazyCallGraph::Node &C1 = *CG.lookup(lookupFunction(*M, "c1"));
LazyCallGraph::Node &C2 = *CG.lookup(lookupFunction(*M, "c2"));
LazyCallGraph::Node &C3 = *CG.lookup(lookupFunction(*M, "c3"));
LazyCallGraph::Node &D = *CG.lookup(lookupFunction(*M, "d"));
LazyCallGraph::SCC &AC = *CG.lookupSCC(A);
LazyCallGraph::SCC &B1C = *CG.lookupSCC(B1);
LazyCallGraph::SCC &B2C = *CG.lookupSCC(B2);
LazyCallGraph::SCC &B3C = *CG.lookupSCC(B3);
LazyCallGraph::SCC &C1C = *CG.lookupSCC(C1);
LazyCallGraph::SCC &C2C = *CG.lookupSCC(C2);
LazyCallGraph::SCC &C3C = *CG.lookupSCC(C3);
LazyCallGraph::SCC &DC = *CG.lookupSCC(D);

// Several call edges are initially present to force a particular post-order.
// Remove them now, leaving an interleaved post-order pattern.
RC.switchInternalEdgeToRef(B3, C3);
RC.switchInternalEdgeToRef(C2, B3);
RC.switchInternalEdgeToRef(B2, C2);
RC.switchInternalEdgeToRef(C1, B2);
RC.switchInternalEdgeToRef(B1, C1);

// Check the initial post-order. We ensure this order with the extra edges
// that are nuked above.
ASSERT_EQ(8, RC.size());
EXPECT_EQ(&DC, &RC[0]);
EXPECT_EQ(&C3C, &RC[1]);
EXPECT_EQ(&B3C, &RC[2]);
EXPECT_EQ(&C2C, &RC[3]);
EXPECT_EQ(&B2C, &RC[4]);
EXPECT_EQ(&C1C, &RC[5]);
EXPECT_EQ(&B1C, &RC[6]);
EXPECT_EQ(&AC, &RC[7]);

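// For the C3 -> B1 switch below: every SCC reachable from the target B1 via
// call edges (the b* chain) has to end up before the source C3 in the
// post-order, while the c* SCCs that reach C3 stay after it. Because the b*
// and c* SCCs are interleaved above, the update has to genuinely partition
// the range rather than move one contiguous block.
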
// Switch C3 -> B1 to a call edge. This doesn't form any new cycles but does
// require reordering the SCCs in the face of tricky internal node
// structures.
auto MergedCs = RC.switchInternalEdgeToCall(C3, B1);
EXPECT_EQ(0u, MergedCs.size());
ASSERT_EQ(8, RC.size());
EXPECT_EQ(&DC, &RC[0]);
EXPECT_EQ(&B3C, &RC[1]);
EXPECT_EQ(&B2C, &RC[2]);
EXPECT_EQ(&B1C, &RC[3]);
EXPECT_EQ(&C3C, &RC[4]);
EXPECT_EQ(&C2C, &RC[5]);
EXPECT_EQ(&C1C, &RC[6]);
EXPECT_EQ(&AC, &RC[7]);
}

TEST(LazyCallGraphTest, InternalRefEdgeToCallBothPartitionAndMerge) {
// Test for having a post-order where, between the source and the target, all
// three kinds of other SCCs occur:
// 1) One connected only to the target, which has to be shifted below the
//    source.
// 2) One connected only to the source, which has to be shifted below the
//    target.
// 3) One connected to both the source and the target, which has to stay in
//    place and get merged away.
//
// To achieve this we construct a heavily connected graph to force
// a particular post-order. Then we remove the forcing edges and connect
// a cycle.
//
// Diagram for the graph we want on the left and the graph we use to force
// the ordering on the right. Edges point down or right.
//
//    A    |    A    |
//   / \   |   / \   |
//  B   E  |  B   \  |
//  |\  |  |  |\  |  |
//  | D |  |  C-D-E  |
//  |  \|  |  |  \|  |
//  C   F  |  \   F  |
//   \ /   |   \ /   |
//    G    |    G    |
//
// And we form a cycle by connecting F to B.
std::unique_ptr<Module> M = parseAssembly(
"define void @a() {\n"
|
|
|
|
"entry:\n"
|
|
|
|
" call void @b()\n"
|
|
|
|
" call void @e()\n"
|
|
|
|
" ret void\n"
|
|
|
|
"}\n"
|
|
|
|
"define void @b() {\n"
|
|
|
|
"entry:\n"
|
|
|
|
" call void @c()\n"
|
|
|
|
" call void @d()\n"
|
|
|
|
" ret void\n"
|
|
|
|
"}\n"
|
|
|
|
"define void @c() {\n"
|
|
|
|
"entry:\n"
|
|
|
|
" call void @d()\n"
|
|
|
|
" call void @g()\n"
|
|
|
|
" ret void\n"
|
|
|
|
"}\n"
|
|
|
|
"define void @d() {\n"
|
|
|
|
"entry:\n"
|
|
|
|
" call void @e()\n"
|
|
|
|
" call void @f()\n"
|
|
|
|
" ret void\n"
|
|
|
|
"}\n"
|
|
|
|
"define void @e() {\n"
|
|
|
|
"entry:\n"
|
|
|
|
" call void @f()\n"
|
|
|
|
" ret void\n"
|
|
|
|
"}\n"
|
|
|
|
"define void @f() {\n"
|
|
|
|
"entry:\n"
|
|
|
|
" store void()* @b, void()** undef\n"
|
|
|
|
" call void @g()\n"
|
|
|
|
" ret void\n"
|
|
|
|
"}\n"
|
|
|
|
"define void @g() {\n"
|
|
|
|
"entry:\n"
|
|
|
|
" store void()* @a, void()** undef\n"
|
|
|
|
" ret void\n"
|
|
|
|
"}\n");
|
|
|
|
LazyCallGraph CG(*M);

// Force the graph to be fully expanded.
auto I = CG.postorder_ref_scc_begin();
LazyCallGraph::RefSCC &RC = *I++;
EXPECT_EQ(CG.postorder_ref_scc_end(), I);

LazyCallGraph::Node &A = *CG.lookup(lookupFunction(*M, "a"));
LazyCallGraph::Node &B = *CG.lookup(lookupFunction(*M, "b"));
LazyCallGraph::Node &C = *CG.lookup(lookupFunction(*M, "c"));
LazyCallGraph::Node &D = *CG.lookup(lookupFunction(*M, "d"));
LazyCallGraph::Node &E = *CG.lookup(lookupFunction(*M, "e"));
LazyCallGraph::Node &F = *CG.lookup(lookupFunction(*M, "f"));
LazyCallGraph::Node &G = *CG.lookup(lookupFunction(*M, "g"));
LazyCallGraph::SCC &AC = *CG.lookupSCC(A);
LazyCallGraph::SCC &BC = *CG.lookupSCC(B);
LazyCallGraph::SCC &CC = *CG.lookupSCC(C);
LazyCallGraph::SCC &DC = *CG.lookupSCC(D);
LazyCallGraph::SCC &EC = *CG.lookupSCC(E);
LazyCallGraph::SCC &FC = *CG.lookupSCC(F);
LazyCallGraph::SCC &GC = *CG.lookupSCC(G);

// Remove the extra edges that were used to force a particular post-order.
RC.switchInternalEdgeToRef(C, D);
RC.switchInternalEdgeToRef(D, E);

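// Demoting c -> d and d -> e only changes the call graph; the reference
// graph is untouched, so the single RefSCC still holds all seven single-node
// call SCCs, now arranged as in the left-hand diagram above.
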
// Check the initial post-order. We ensure this order with the extra edges
// that are nuked above.
ASSERT_EQ(7, RC.size());
EXPECT_EQ(&GC, &RC[0]);
EXPECT_EQ(&FC, &RC[1]);
EXPECT_EQ(&EC, &RC[2]);
EXPECT_EQ(&DC, &RC[3]);
EXPECT_EQ(&CC, &RC[4]);
EXPECT_EQ(&BC, &RC[5]);
EXPECT_EQ(&AC, &RC[6]);

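// In the F -> B switch below, F is the source and B the target. C is only
// reached from the target and ends up ahead of the merged SCC, E only
// reaches the source and ends up after it, and D, which connects to both,
// is merged away along with F, covering the three cases this test is about.
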
// Switch F -> B to a call edge. This merges B, D, and F into a single SCC,
// and has to place the C and E SCCs on either side of it:
//    A          A    |
//   / \        / \   |
//  B   E      |   E  |
//  |\  |       \ /   |
//  | D |  ->    B    |
//  |  \|       / \   |
//  C   F      C   |  |
//   \ /        \ /   |
//    G          G    |
auto MergedCs = RC.switchInternalEdgeToCall(F, B);
ASSERT_EQ(2u, MergedCs.size());
EXPECT_EQ(&FC, MergedCs[0]);
EXPECT_EQ(&DC, MergedCs[1]);
EXPECT_EQ(3, BC.size());

// And make sure the postorder was updated.
ASSERT_EQ(5, RC.size());
EXPECT_EQ(&GC, &RC[0]);
EXPECT_EQ(&CC, &RC[1]);
EXPECT_EQ(&BC, &RC[2]);
EXPECT_EQ(&EC, &RC[3]);
EXPECT_EQ(&AC, &RC[4]);
}

}