//===- LazyCallGraphTest.cpp - Unit tests for the lazy CG analysis --------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/LazyCallGraph.h"
#include "llvm/ADT/Triple.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/AsmParser/Parser.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/SourceMgr.h"
#include "gtest/gtest.h"
#include <memory>

using namespace llvm;

namespace {

std::unique_ptr<Module> parseAssembly(LLVMContext &Context,
                                      const char *Assembly) {
  SMDiagnostic Error;
  std::unique_ptr<Module> M = parseAssemblyString(Assembly, Error, Context);

  std::string ErrMsg;
  raw_string_ostream OS(ErrMsg);
  Error.print("", OS);

  // A failure here means that the test itself is buggy.
  if (!M)
    report_fatal_error(OS.str().c_str());

  return M;
}
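
// For reference, the tests below drive this helper as, e.g.,
//   std::unique_ptr<Module> M = parseAssembly(Context, DiamondOfTriangles);
// so a malformed fixture string aborts the run immediately instead of
// surfacing later as confusing assertion failures.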

/*
   IR forming a call graph with a diamond of triangle-shaped SCCs:

           d1
          /  \
         d3--d2
        /     \
       b1     c1
      /  \   /  \
     b3--b2 c3--c2
         \   /
          a1
         /  \
        a3--a2

   All call edges go up between SCCs, and clockwise around the SCC.
 */
static const char DiamondOfTriangles[] =
    "define void @a1() {\n"
    "entry:\n"
    "  call void @a2()\n"
    "  call void @b2()\n"
    "  call void @c3()\n"
    "  ret void\n"
    "}\n"
    "define void @a2() {\n"
    "entry:\n"
    "  call void @a3()\n"
    "  ret void\n"
    "}\n"
    "define void @a3() {\n"
    "entry:\n"
    "  call void @a1()\n"
    "  ret void\n"
    "}\n"
    "define void @b1() {\n"
    "entry:\n"
    "  call void @b2()\n"
    "  call void @d3()\n"
    "  ret void\n"
    "}\n"
    "define void @b2() {\n"
    "entry:\n"
    "  call void @b3()\n"
    "  ret void\n"
    "}\n"
    "define void @b3() {\n"
    "entry:\n"
    "  call void @b1()\n"
    "  ret void\n"
    "}\n"
    "define void @c1() {\n"
    "entry:\n"
    "  call void @c2()\n"
    "  call void @d2()\n"
    "  ret void\n"
    "}\n"
    "define void @c2() {\n"
    "entry:\n"
    "  call void @c3()\n"
    "  ret void\n"
    "}\n"
    "define void @c3() {\n"
    "entry:\n"
    "  call void @c1()\n"
    "  ret void\n"
    "}\n"
    "define void @d1() {\n"
    "entry:\n"
    "  call void @d2()\n"
    "  ret void\n"
    "}\n"
    "define void @d2() {\n"
    "entry:\n"
    "  call void @d3()\n"
    "  ret void\n"
    "}\n"
    "define void @d3() {\n"
    "entry:\n"
    "  call void @d1()\n"
    "  ret void\n"
    "}\n";

/*
   IR forming a reference graph with a diamond of triangle-shaped RefSCCs:

           d1
          /  \
         d3--d2
        /     \
       b1     c1
      /  \   /  \
     b3--b2 c3--c2
         \   /
          a1
         /  \
        a3--a2

   All reference edges go up between RefSCCs, and clockwise around the RefSCC.
 */
static const char DiamondOfTrianglesRefGraph[] =
    "define void @a1() {\n"
    "entry:\n"
    "  %a = alloca void ()*\n"
    "  store void ()* @a2, void ()** %a\n"
    "  store void ()* @b2, void ()** %a\n"
    "  store void ()* @c3, void ()** %a\n"
    "  ret void\n"
    "}\n"
    "define void @a2() {\n"
    "entry:\n"
    "  %a = alloca void ()*\n"
    "  store void ()* @a3, void ()** %a\n"
    "  ret void\n"
    "}\n"
    "define void @a3() {\n"
    "entry:\n"
    "  %a = alloca void ()*\n"
    "  store void ()* @a1, void ()** %a\n"
    "  ret void\n"
    "}\n"
    "define void @b1() {\n"
    "entry:\n"
    "  %a = alloca void ()*\n"
    "  store void ()* @b2, void ()** %a\n"
    "  store void ()* @d3, void ()** %a\n"
    "  ret void\n"
    "}\n"
    "define void @b2() {\n"
    "entry:\n"
    "  %a = alloca void ()*\n"
    "  store void ()* @b3, void ()** %a\n"
    "  ret void\n"
    "}\n"
    "define void @b3() {\n"
    "entry:\n"
    "  %a = alloca void ()*\n"
    "  store void ()* @b1, void ()** %a\n"
    "  ret void\n"
    "}\n"
    "define void @c1() {\n"
    "entry:\n"
    "  %a = alloca void ()*\n"
    "  store void ()* @c2, void ()** %a\n"
    "  store void ()* @d2, void ()** %a\n"
    "  ret void\n"
    "}\n"
    "define void @c2() {\n"
    "entry:\n"
    "  %a = alloca void ()*\n"
    "  store void ()* @c3, void ()** %a\n"
    "  ret void\n"
    "}\n"
    "define void @c3() {\n"
    "entry:\n"
    "  %a = alloca void ()*\n"
    "  store void ()* @c1, void ()** %a\n"
    "  ret void\n"
    "}\n"
    "define void @d1() {\n"
    "entry:\n"
    "  %a = alloca void ()*\n"
    "  store void ()* @d2, void ()** %a\n"
    "  ret void\n"
    "}\n"
    "define void @d2() {\n"
    "entry:\n"
    "  %a = alloca void ()*\n"
    "  store void ()* @d3, void ()** %a\n"
    "  ret void\n"
    "}\n"
    "define void @d3() {\n"
    "entry:\n"
    "  %a = alloca void ()*\n"
    "  store void ()* @d1, void ()** %a\n"
    "  ret void\n"
    "}\n";

static LazyCallGraph buildCG(Module &M) {
  TargetLibraryInfoImpl TLII(Triple(M.getTargetTriple()));
  TargetLibraryInfo TLI(TLII);
  LazyCallGraph CG(M, TLI);
  return CG;
}
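
// Illustrative sketch only (not part of the original suite): a minimal
// populate-then-iterate example of the LazyCallGraph API that the tests below
// exercise in detail. It assumes only helpers and APIs already used in this
// file (parseAssembly, buildCG, Node::populate, Edge::getFunction), and that
// entry nodes are visited in the source order of the IR, as the comment in
// BasicGraphFormation states.
TEST(LazyCallGraphTest, PopulateSketch) {
  LLVMContext Context;
  std::unique_ptr<Module> M = parseAssembly(Context,
                                            "define void @f() {\n"
                                            "entry:\n"
                                            "  call void @g()\n"
                                            "  ret void\n"
                                            "}\n"
                                            "define void @g() {\n"
                                            "entry:\n"
                                            "  ret void\n"
                                            "}\n");
  LazyCallGraph CG = buildCG(*M);

  // The first entry node corresponds to @f, the first function in the IR.
  auto I = CG.begin();
  LazyCallGraph::Node &F = (I++)->getNode();
  EXPECT_EQ("f", F.getFunction().getName());

  // A node's edges are not available until it is explicitly populated.
  F.populate();
  EXPECT_EQ(F->end(), std::next(F->begin()));
  EXPECT_EQ("g", F->begin()->getFunction().getName());
}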

TEST(LazyCallGraphTest, BasicGraphFormation) {
  LLVMContext Context;
  std::unique_ptr<Module> M = parseAssembly(Context, DiamondOfTriangles);
  LazyCallGraph CG = buildCG(*M);

  // The order of the entry nodes should be stable w.r.t. the source order of
  // the IR, and everything in our module is an entry node, so just directly
  // build variables for each node.
  auto I = CG.begin();
  LazyCallGraph::Node &A1 = (I++)->getNode();
  EXPECT_EQ("a1", A1.getFunction().getName());
  LazyCallGraph::Node &A2 = (I++)->getNode();
  EXPECT_EQ("a2", A2.getFunction().getName());
  LazyCallGraph::Node &A3 = (I++)->getNode();
  EXPECT_EQ("a3", A3.getFunction().getName());
  LazyCallGraph::Node &B1 = (I++)->getNode();
  EXPECT_EQ("b1", B1.getFunction().getName());
  LazyCallGraph::Node &B2 = (I++)->getNode();
  EXPECT_EQ("b2", B2.getFunction().getName());
  LazyCallGraph::Node &B3 = (I++)->getNode();
  EXPECT_EQ("b3", B3.getFunction().getName());
  LazyCallGraph::Node &C1 = (I++)->getNode();
  EXPECT_EQ("c1", C1.getFunction().getName());
  LazyCallGraph::Node &C2 = (I++)->getNode();
  EXPECT_EQ("c2", C2.getFunction().getName());
  LazyCallGraph::Node &C3 = (I++)->getNode();
  EXPECT_EQ("c3", C3.getFunction().getName());
  LazyCallGraph::Node &D1 = (I++)->getNode();
  EXPECT_EQ("d1", D1.getFunction().getName());
  LazyCallGraph::Node &D2 = (I++)->getNode();
  EXPECT_EQ("d2", D2.getFunction().getName());
  LazyCallGraph::Node &D3 = (I++)->getNode();
  EXPECT_EQ("d3", D3.getFunction().getName());
  EXPECT_EQ(CG.end(), I);

  // Build vectors and sort them for the rest of the assertions to make them
  // independent of order.
  std::vector<std::string> Nodes;

  for (LazyCallGraph::Edge &E : A1.populate())
    Nodes.push_back(E.getFunction().getName());
  llvm::sort(Nodes);
  EXPECT_EQ("a2", Nodes[0]);
  EXPECT_EQ("b2", Nodes[1]);
  EXPECT_EQ("c3", Nodes[2]);
  Nodes.clear();

  A2.populate();
  EXPECT_EQ(A2->end(), std::next(A2->begin()));
  EXPECT_EQ("a3", A2->begin()->getFunction().getName());
  A3.populate();
  EXPECT_EQ(A3->end(), std::next(A3->begin()));
  EXPECT_EQ("a1", A3->begin()->getFunction().getName());

  for (LazyCallGraph::Edge &E : B1.populate())
    Nodes.push_back(E.getFunction().getName());
  llvm::sort(Nodes);
  EXPECT_EQ("b2", Nodes[0]);
  EXPECT_EQ("d3", Nodes[1]);
  Nodes.clear();

  B2.populate();
  EXPECT_EQ(B2->end(), std::next(B2->begin()));
  EXPECT_EQ("b3", B2->begin()->getFunction().getName());
  B3.populate();
  EXPECT_EQ(B3->end(), std::next(B3->begin()));
  EXPECT_EQ("b1", B3->begin()->getFunction().getName());
|
2014-04-23 16:08:49 +08:00
|
|
|
|
2017-02-10 07:24:13 +08:00
|
|
|
for (LazyCallGraph::Edge &E : C1.populate())
|
2016-02-02 11:57:13 +08:00
|
|
|
Nodes.push_back(E.getFunction().getName());
|
llvm::sort(C.begin(), C.end(), ...) -> llvm::sort(C, ...)
Summary: The convenience wrapper in STLExtras is available since rL342102.
Reviewers: dblaikie, javed.absar, JDevlieghere, andreadb
Subscribers: MatzeB, sanjoy, arsenm, dschuff, mehdi_amini, sdardis, nemanjai, jvesely, nhaehnle, sbc100, jgravelle-google, eraman, aheejin, kbarton, JDevlieghere, javed.absar, gbedwell, jrtc27, mgrang, atanasyan, steven_wu, george.burgess.iv, dexonsmith, kristina, jsji, llvm-commits
Differential Revision: https://reviews.llvm.org/D52573
llvm-svn: 343163
2018-09-27 10:13:45 +08:00
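For reference, a minimal sketch of what the wrapper replaces (hypothetical local vector, not part of the test):

    #include "llvm/ADT/STLExtras.h"
    std::vector<std::string> Names = {"b", "a", "c"};
    llvm::sort(Names.begin(), Names.end()); // older iterator-pair form
    llvm::sort(Names);                      // equivalent range form used below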
|
|
|
llvm::sort(Nodes);
|
2014-04-23 16:08:49 +08:00
|
|
|
EXPECT_EQ("c2", Nodes[0]);
|
|
|
|
EXPECT_EQ("d2", Nodes[1]);
|
|
|
|
Nodes.clear();
|
|
|
|
|
2017-02-10 07:24:13 +08:00
|
|
|
C2.populate();
|
|
|
|
EXPECT_EQ(C2->end(), std::next(C2->begin()));
|
|
|
|
EXPECT_EQ("c3", C2->begin()->getFunction().getName());
|
|
|
|
C3.populate();
|
|
|
|
EXPECT_EQ(C3->end(), std::next(C3->begin()));
|
|
|
|
EXPECT_EQ("c1", C3->begin()->getFunction().getName());
|
|
|
|
|
|
|
|
D1.populate();
|
|
|
|
EXPECT_EQ(D1->end(), std::next(D1->begin()));
|
|
|
|
EXPECT_EQ("d2", D1->begin()->getFunction().getName());
|
|
|
|
D2.populate();
|
|
|
|
EXPECT_EQ(D2->end(), std::next(D2->begin()));
|
|
|
|
EXPECT_EQ("d3", D2->begin()->getFunction().getName());
|
|
|
|
D3.populate();
|
|
|
|
EXPECT_EQ(D3->end(), std::next(D3->begin()));
|
|
|
|
EXPECT_EQ("d1", D3->begin()->getFunction().getName());
|
2014-04-23 16:08:49 +08:00
|
|
|
|
[LCG] Construct an actual call graph with call-edge SCCs nested inside
reference-edge SCCs.
This essentially builds a more normal call graph as a subgraph of the
"reference graph" that was the old model. This allows both to exist and
the different use cases to use the aspect which addresses their needs.
Specifically, the pass manager and other *ordering* constrained logic
can use the reference graph to achieve conservative order of visit,
while analyses reasoning about attributes and other properties derived
from reachability can reason about the direct call graph.
Note that this isn't necessarily complete: it doesn't model edges to
declarations or indirect calls. Those can be found by scanning the
instructions of the function if desirable, and in fact every user
currently does this in order to handle things like calls to intrinsics.
If useful, we could consider caching this information in the call graph
to save the instruction scans, but currently that doesn't seem to be
important.
An important realization for why the representation chosen here works is
that the call graph is a formal subset of the reference graph and thus
both can live within the same data structure. All SCCs of the call graph
are necessarily contained within an SCC of the reference graph, etc.
The design is to build 'RefSCC's to model SCCs of the reference graph,
and then within them more literal SCCs for the call graph.
The formation of actual call edge SCCs is not done lazily, unlike
reference edge 'RefSCC's. Instead, once a reference SCC is formed, it
directly builds the call SCCs within it and stores them in a post-order
sequence. This is used to provide a consistent platform for mutation and
update of the graph. The post-order also allows for very efficient
updates in common cases by bounding the number of nodes (and thus edges)
considered.
There is considerable common code that I'm still looking for the best
way to factor out between the various DFS implementations here. So far,
my attempts have made the code harder to read and understand despite
reducing the duplication, which seems a poor tradeoff. I've not given up
on figuring out the right way to do this, but I wanted to wait until
I at least had the system working and tested to continue attempting to
factor it differently.
This also requires introducing several new algorithms in order to handle
all of the incremental update scenarios for the more complex structure
involving two edge colorings. I've tried to comment the algorithms
sufficiently to make it clear how this is expected to work, but they may
still need more extensive documentation.
I know that there are some changes which are not strictly necessarily
coupled here. The process of developing this started out with a very
focused set of changes for the new structure of the graph and
algorithms, but subsequent changes to bring the APIs and code into
consistent and understandable patterns also ended up touching on other
aspects. There was no good way to separate these out without causing
*massive* merge conflicts. Ultimately, to a large degree this is
a rewrite of most of the core algorithms in the LCG class and so I don't
think it really matters much.
Many thanks to the careful review by Sanjoy Das!
Differential Revision: http://reviews.llvm.org/D16802
llvm-svn: 261040
2016-02-17 08:18:16 +08:00
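To make the nesting concrete, here is an illustrative sketch (not part of the test) of walking the structure the message describes, assuming a LazyCallGraph `CG` whose edges have already been populated:

    // Sketch: RefSCCs are visited in post-order; each RefSCC holds the
    // call-edge SCCs formed inside it, and each SCC holds its nodes.
    CG.buildRefSCCs();
    for (auto I = CG.postorder_ref_scc_begin(), E = CG.postorder_ref_scc_end();
         I != E; ++I)
      for (LazyCallGraph::SCC &InnerC : *I)
        for (LazyCallGraph::Node &N : InnerC)
          (void)N.getFunction().getName();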
|
|
|
// Now let's look at the RefSCCs and SCCs.
|
2017-02-07 03:38:06 +08:00
|
|
|
CG.buildRefSCCs();
|
2016-02-17 08:18:16 +08:00
|
|
|
auto J = CG.postorder_ref_scc_begin();
|
2014-04-23 16:08:49 +08:00
|
|
|
|
2016-02-17 08:18:16 +08:00
|
|
|
LazyCallGraph::RefSCC &D = *J++;
|
|
|
|
ASSERT_EQ(1, D.size());
|
|
|
|
for (LazyCallGraph::Node &N : *D.begin())
|
|
|
|
Nodes.push_back(N.getFunction().getName());
|
2018-09-27 10:13:45 +08:00
|
|
|
llvm::sort(Nodes);
|
2014-04-24 17:59:56 +08:00
|
|
|
EXPECT_EQ(3u, Nodes.size());
|
2014-04-23 16:08:49 +08:00
|
|
|
EXPECT_EQ("d1", Nodes[0]);
|
|
|
|
EXPECT_EQ("d2", Nodes[1]);
|
|
|
|
EXPECT_EQ("d3", Nodes[2]);
|
|
|
|
Nodes.clear();
|
2014-05-01 20:12:42 +08:00
|
|
|
EXPECT_FALSE(D.isParentOf(D));
|
|
|
|
EXPECT_FALSE(D.isChildOf(D));
|
|
|
|
EXPECT_FALSE(D.isAncestorOf(D));
|
|
|
|
EXPECT_FALSE(D.isDescendantOf(D));
|
2016-09-16 18:20:17 +08:00
|
|
|
EXPECT_EQ(&D, &*CG.postorder_ref_scc_begin());
|
2014-04-23 16:08:49 +08:00
|
|
|
|
2016-02-17 08:18:16 +08:00
|
|
|
LazyCallGraph::RefSCC &C = *J++;
|
|
|
|
ASSERT_EQ(1, C.size());
|
|
|
|
for (LazyCallGraph::Node &N : *C.begin())
|
|
|
|
Nodes.push_back(N.getFunction().getName());
|
2018-09-27 10:13:45 +08:00
|
|
|
llvm::sort(Nodes);
|
2014-04-24 17:59:56 +08:00
|
|
|
EXPECT_EQ(3u, Nodes.size());
|
2014-04-23 16:08:49 +08:00
|
|
|
EXPECT_EQ("c1", Nodes[0]);
|
|
|
|
EXPECT_EQ("c2", Nodes[1]);
|
|
|
|
EXPECT_EQ("c3", Nodes[2]);
|
|
|
|
Nodes.clear();
|
2014-05-01 20:12:42 +08:00
|
|
|
EXPECT_TRUE(C.isParentOf(D));
|
|
|
|
EXPECT_FALSE(C.isChildOf(D));
|
|
|
|
EXPECT_TRUE(C.isAncestorOf(D));
|
|
|
|
EXPECT_FALSE(C.isDescendantOf(D));
|
2016-09-16 18:20:17 +08:00
|
|
|
EXPECT_EQ(&C, &*std::next(CG.postorder_ref_scc_begin()));
|
2014-04-23 16:08:49 +08:00
|
|
|
|
2016-02-17 08:18:16 +08:00
|
|
|
LazyCallGraph::RefSCC &B = *J++;
|
|
|
|
ASSERT_EQ(1, B.size());
|
|
|
|
for (LazyCallGraph::Node &N : *B.begin())
|
|
|
|
Nodes.push_back(N.getFunction().getName());
|
2018-09-27 10:13:45 +08:00
|
|
|
llvm::sort(Nodes);
|
2014-04-24 17:59:56 +08:00
|
|
|
EXPECT_EQ(3u, Nodes.size());
|
2014-04-23 16:08:49 +08:00
|
|
|
EXPECT_EQ("b1", Nodes[0]);
|
|
|
|
EXPECT_EQ("b2", Nodes[1]);
|
|
|
|
EXPECT_EQ("b3", Nodes[2]);
|
|
|
|
Nodes.clear();
|
2014-05-01 20:12:42 +08:00
|
|
|
EXPECT_TRUE(B.isParentOf(D));
|
|
|
|
EXPECT_FALSE(B.isChildOf(D));
|
|
|
|
EXPECT_TRUE(B.isAncestorOf(D));
|
|
|
|
EXPECT_FALSE(B.isDescendantOf(D));
|
|
|
|
EXPECT_FALSE(B.isAncestorOf(C));
|
|
|
|
EXPECT_FALSE(C.isAncestorOf(B));
|
2016-09-16 18:20:17 +08:00
|
|
|
EXPECT_EQ(&B, &*std::next(CG.postorder_ref_scc_begin(), 2));
|
2014-04-23 16:08:49 +08:00
|
|
|
|
2016-02-17 08:18:16 +08:00
|
|
|
LazyCallGraph::RefSCC &A = *J++;
|
|
|
|
ASSERT_EQ(1, A.size());
|
|
|
|
for (LazyCallGraph::Node &N : *A.begin())
|
|
|
|
Nodes.push_back(N.getFunction().getName());
|
2018-09-27 10:13:45 +08:00
|
|
|
llvm::sort(Nodes);
|
2014-04-24 17:59:56 +08:00
|
|
|
EXPECT_EQ(3u, Nodes.size());
|
2014-04-23 16:08:49 +08:00
|
|
|
EXPECT_EQ("a1", Nodes[0]);
|
|
|
|
EXPECT_EQ("a2", Nodes[1]);
|
|
|
|
EXPECT_EQ("a3", Nodes[2]);
|
|
|
|
Nodes.clear();
|
2014-05-01 20:12:42 +08:00
|
|
|
EXPECT_TRUE(A.isParentOf(B));
|
|
|
|
EXPECT_TRUE(A.isParentOf(C));
|
|
|
|
EXPECT_FALSE(A.isParentOf(D));
|
|
|
|
EXPECT_TRUE(A.isAncestorOf(B));
|
|
|
|
EXPECT_TRUE(A.isAncestorOf(C));
|
|
|
|
EXPECT_TRUE(A.isAncestorOf(D));
|
2016-09-16 18:20:17 +08:00
|
|
|
EXPECT_EQ(&A, &*std::next(CG.postorder_ref_scc_begin(), 3));
|
2014-04-23 16:08:49 +08:00
|
|
|
|
2016-02-17 08:18:16 +08:00
|
|
|
EXPECT_EQ(CG.postorder_ref_scc_end(), J);
|
2016-09-16 18:20:17 +08:00
|
|
|
EXPECT_EQ(J, std::next(CG.postorder_ref_scc_begin(), 4));
|
2014-04-23 16:08:49 +08:00
|
|
|
}
|
|
|
|
|
2014-04-23 18:31:17 +08:00
|
|
|
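// Test helper: find the named function in the module, or abort if the test
// IR does not define it.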
static Function &lookupFunction(Module &M, StringRef Name) {
|
|
|
|
for (Function &F : M)
|
|
|
|
if (F.getName() == Name)
|
|
|
|
return F;
|
|
|
|
report_fatal_error("Couldn't find function!");
|
|
|
|
}
|
|
|
|
|
2014-04-28 19:10:23 +08:00
|
|
|
TEST(LazyCallGraphTest, BasicGraphMutation) {
|
2016-04-15 05:59:01 +08:00
|
|
|
LLVMContext Context;
|
|
|
|
std::unique_ptr<Module> M = parseAssembly(Context, "define void @a() {\n"
|
|
|
|
"entry:\n"
|
|
|
|
" call void @b()\n"
|
|
|
|
" call void @c()\n"
|
|
|
|
" ret void\n"
|
|
|
|
"}\n"
|
|
|
|
"define void @b() {\n"
|
|
|
|
"entry:\n"
|
|
|
|
" ret void\n"
|
|
|
|
"}\n"
|
|
|
|
"define void @c() {\n"
|
|
|
|
"entry:\n"
|
|
|
|
" ret void\n"
|
|
|
|
"}\n");
|
2017-07-15 16:08:19 +08:00
|
|
|
LazyCallGraph CG = buildCG(*M);
|
2014-04-28 19:10:23 +08:00
|
|
|
|
|
|
|
LazyCallGraph::Node &A = CG.get(lookupFunction(*M, "a"));
|
|
|
|
LazyCallGraph::Node &B = CG.get(lookupFunction(*M, "b"));
|
2017-02-10 07:24:13 +08:00
|
|
|
A.populate();
|
|
|
|
EXPECT_EQ(2, std::distance(A->begin(), A->end()));
|
|
|
|
B.populate();
|
|
|
|
EXPECT_EQ(0, std::distance(B->begin(), B->end()));
|
|
|
|
|
|
|
|
LazyCallGraph::Node &C = CG.get(lookupFunction(*M, "c"));
|
|
|
|
C.populate();
|
|
|
|
CG.insertEdge(B, C, LazyCallGraph::Edge::Call);
|
|
|
|
EXPECT_EQ(1, std::distance(B->begin(), B->end()));
|
|
|
|
EXPECT_EQ(0, std::distance(C->begin(), C->end()));
|
|
|
|
|
|
|
|
CG.insertEdge(C, B, LazyCallGraph::Edge::Call);
|
|
|
|
EXPECT_EQ(1, std::distance(C->begin(), C->end()));
|
|
|
|
EXPECT_EQ(&B, &C->begin()->getNode());
|
|
|
|
|
|
|
|
CG.insertEdge(C, C, LazyCallGraph::Edge::Call);
|
|
|
|
EXPECT_EQ(2, std::distance(C->begin(), C->end()));
|
|
|
|
EXPECT_EQ(&B, &C->begin()->getNode());
|
|
|
|
EXPECT_EQ(&C, &std::next(C->begin())->getNode());
|
|
|
|
|
|
|
|
CG.removeEdge(C, B);
|
|
|
|
EXPECT_EQ(1, std::distance(C->begin(), C->end()));
|
|
|
|
EXPECT_EQ(&C, &C->begin()->getNode());
|
|
|
|
|
|
|
|
CG.removeEdge(C, C);
|
|
|
|
EXPECT_EQ(0, std::distance(C->begin(), C->end()));
|
|
|
|
|
|
|
|
CG.removeEdge(B, C);
|
|
|
|
EXPECT_EQ(0, std::distance(B->begin(), B->end()));
|
2014-04-28 19:10:23 +08:00
|
|
|
}
|
|
|
|
|
2016-02-17 08:18:16 +08:00
|
|
|
TEST(LazyCallGraphTest, InnerSCCFormation) {
|
2016-04-15 05:59:01 +08:00
|
|
|
LLVMContext Context;
|
|
|
|
std::unique_ptr<Module> M = parseAssembly(Context, DiamondOfTriangles);
|
2017-07-15 16:08:19 +08:00
|
|
|
LazyCallGraph CG = buildCG(*M);
|
2016-02-17 08:18:16 +08:00
|
|
|
|
|
|
|
// Now mutate the graph to connect every node into a single RefSCC to ensure
|
|
|
|
// that our inner SCC formation handles the rest.
|
2017-02-10 07:24:13 +08:00
|
|
|
LazyCallGraph::Node &D1 = CG.get(lookupFunction(*M, "d1"));
|
|
|
|
LazyCallGraph::Node &A1 = CG.get(lookupFunction(*M, "a1"));
|
|
|
|
A1.populate();
|
|
|
|
D1.populate();
|
|
|
|
CG.insertEdge(D1, A1, LazyCallGraph::Edge::Ref);
|
2016-02-17 08:18:16 +08:00
|
|
|
|
|
|
|
// Build vectors and sort them for the rest of the assertions to make them
|
|
|
|
// independent of order.
|
|
|
|
std::vector<std::string> Nodes;
|
|
|
|
|
|
|
|
// We should build a single RefSCC for the entire graph.
|
2017-02-07 03:38:06 +08:00
|
|
|
CG.buildRefSCCs();
|
2016-02-17 08:18:16 +08:00
|
|
|
auto I = CG.postorder_ref_scc_begin();
|
|
|
|
LazyCallGraph::RefSCC &RC = *I++;
|
|
|
|
EXPECT_EQ(CG.postorder_ref_scc_end(), I);
|
|
|
|
|
|
|
|
// Now walk the four SCCs which should be in post-order.
|
|
|
|
auto J = RC.begin();
|
|
|
|
LazyCallGraph::SCC &D = *J++;
|
|
|
|
for (LazyCallGraph::Node &N : D)
|
|
|
|
Nodes.push_back(N.getFunction().getName());
|
2018-09-27 10:13:45 +08:00
|
|
|
llvm::sort(Nodes);
|
2016-02-17 08:18:16 +08:00
|
|
|
EXPECT_EQ(3u, Nodes.size());
|
|
|
|
EXPECT_EQ("d1", Nodes[0]);
|
|
|
|
EXPECT_EQ("d2", Nodes[1]);
|
|
|
|
EXPECT_EQ("d3", Nodes[2]);
|
|
|
|
Nodes.clear();
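
// The next SCC in post-order should be the 'b' triangle; collect and sort its
// node names so the checks below don't depend on iteration order.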
LazyCallGraph::SCC &B = *J++;
for (LazyCallGraph::Node &N : B)
  Nodes.push_back(N.getFunction().getName());
llvm::sort(Nodes);
EXPECT_EQ(3u, Nodes.size());
EXPECT_EQ("b1", Nodes[0]);
EXPECT_EQ("b2", Nodes[1]);
EXPECT_EQ("b3", Nodes[2]);
Nodes.clear();
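
// The 'c' triangle should follow the 'b' triangle in the post-order walk.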
LazyCallGraph::SCC &C = *J++;
for (LazyCallGraph::Node &N : C)
  Nodes.push_back(N.getFunction().getName());
llvm::sort(Nodes);
EXPECT_EQ(3u, Nodes.size());
EXPECT_EQ("c1", Nodes[0]);
EXPECT_EQ("c2", Nodes[1]);
EXPECT_EQ("c3", Nodes[2]);
Nodes.clear();
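
// Finally, the 'a' triangle should be the last SCC in the post-order walk.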
LazyCallGraph::SCC &A = *J++;
for (LazyCallGraph::Node &N : A)
  Nodes.push_back(N.getFunction().getName());
llvm::sort(Nodes);
EXPECT_EQ(3u, Nodes.size());
EXPECT_EQ("a1", Nodes[0]);
EXPECT_EQ("a2", Nodes[1]);
EXPECT_EQ("a3", Nodes[2]);
Nodes.clear();
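
// Having visited all four SCCs, the iterator should now be exhausted.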
EXPECT_EQ(RC.end(), J);
}

TEST(LazyCallGraphTest, MultiArmSCC) {
LLVMContext Context;
// Two interlocking cycles. The really useful thing about this SCC is that it
// will require Tarjan's DFS to backtrack and finish processing all of the
// children of each node in the SCC. Since this involves call edges, both
// Tarjan implementations will have to successfully navigate the structure.
std::unique_ptr<Module> M = parseAssembly(Context, "define void @f1() {\n"
"entry:\n"
" call void @f2()\n"
" call void @f4()\n"
" ret void\n"
"}\n"
"define void @f2() {\n"
"entry:\n"
" call void @f3()\n"
" ret void\n"
"}\n"
"define void @f3() {\n"
"entry:\n"
" call void @f1()\n"
" ret void\n"
"}\n"
"define void @f4() {\n"
"entry:\n"
" call void @f5()\n"
" ret void\n"
"}\n"
"define void @f5() {\n"
"entry:\n"
" call void @f1()\n"
" ret void\n"
"}\n");
LazyCallGraph CG = buildCG(*M);

// Force the graph to be fully expanded.
CG.buildRefSCCs();
auto I = CG.postorder_ref_scc_begin();
LazyCallGraph::RefSCC &RC = *I++;
EXPECT_EQ(CG.postorder_ref_scc_end(), I);
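
// Look up each function's node; they should all live in the single RefSCC.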
LazyCallGraph::Node &N1 = *CG.lookup(lookupFunction(*M, "f1"));
LazyCallGraph::Node &N2 = *CG.lookup(lookupFunction(*M, "f2"));
LazyCallGraph::Node &N3 = *CG.lookup(lookupFunction(*M, "f3"));
LazyCallGraph::Node &N4 = *CG.lookup(lookupFunction(*M, "f4"));
LazyCallGraph::Node &N5 = *CG.lookup(lookupFunction(*M, "f5"));
EXPECT_EQ(&RC, CG.lookupRefSCC(N1));
EXPECT_EQ(&RC, CG.lookupRefSCC(N2));
EXPECT_EQ(&RC, CG.lookupRefSCC(N3));
EXPECT_EQ(&RC, CG.lookupRefSCC(N4));
EXPECT_EQ(&RC, CG.lookupRefSCC(N5));
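
// The call edges alone also connect everything, so there is exactly one SCC
// inside the RefSCC and every node maps to it.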
ASSERT_EQ(1, RC.size());

LazyCallGraph::SCC &C = *RC.begin();
EXPECT_EQ(&C, CG.lookupSCC(N1));
EXPECT_EQ(&C, CG.lookupSCC(N2));
EXPECT_EQ(&C, CG.lookupSCC(N3));
EXPECT_EQ(&C, CG.lookupSCC(N4));
EXPECT_EQ(&C, CG.lookupSCC(N5));
}

TEST(LazyCallGraphTest, OutgoingEdgeMutation) {
LLVMContext Context;
std::unique_ptr<Module> M = parseAssembly(Context, "define void @a() {\n"
"entry:\n"
" call void @b()\n"
" call void @c()\n"
" ret void\n"
"}\n"
"define void @b() {\n"
"entry:\n"
" call void @d()\n"
" ret void\n"
"}\n"
"define void @c() {\n"
"entry:\n"
" call void @d()\n"
" ret void\n"
"}\n"
"define void @d() {\n"
"entry:\n"
" ret void\n"
"}\n");
LazyCallGraph CG = buildCG(*M);

// Force the graph to be fully expanded.
CG.buildRefSCCs();
for (LazyCallGraph::RefSCC &RC : CG.postorder_ref_sccs())
  dbgs() << "Formed RefSCC: " << RC << "\n";

LazyCallGraph::Node &A = *CG.lookup(lookupFunction(*M, "a"));
LazyCallGraph::Node &B = *CG.lookup(lookupFunction(*M, "b"));
LazyCallGraph::Node &C = *CG.lookup(lookupFunction(*M, "c"));
LazyCallGraph::Node &D = *CG.lookup(lookupFunction(*M, "d"));
LazyCallGraph::SCC &AC = *CG.lookupSCC(A);
LazyCallGraph::SCC &BC = *CG.lookupSCC(B);
LazyCallGraph::SCC &CC = *CG.lookupSCC(C);
LazyCallGraph::SCC &DC = *CG.lookupSCC(D);
LazyCallGraph::RefSCC &ARC = *CG.lookupRefSCC(A);
LazyCallGraph::RefSCC &BRC = *CG.lookupRefSCC(B);
LazyCallGraph::RefSCC &CRC = *CG.lookupRefSCC(C);
LazyCallGraph::RefSCC &DRC = *CG.lookupRefSCC(D);
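
// Check the parent/child/ancestor/descendant relationships implied by the
// a -> {b, c} -> d call edges, for both RefSCCs and SCCs.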
EXPECT_TRUE(ARC.isParentOf(BRC));
EXPECT_TRUE(AC.isParentOf(BC));
EXPECT_TRUE(ARC.isParentOf(CRC));
EXPECT_TRUE(AC.isParentOf(CC));
EXPECT_FALSE(ARC.isParentOf(DRC));
EXPECT_FALSE(AC.isParentOf(DC));
EXPECT_TRUE(ARC.isAncestorOf(DRC));
EXPECT_TRUE(AC.isAncestorOf(DC));
EXPECT_FALSE(DRC.isChildOf(ARC));
EXPECT_FALSE(DC.isChildOf(AC));
EXPECT_TRUE(DRC.isDescendantOf(ARC));
EXPECT_TRUE(DC.isDescendantOf(AC));
EXPECT_TRUE(DRC.isChildOf(BRC));
EXPECT_TRUE(DC.isChildOf(BC));
EXPECT_TRUE(DRC.isChildOf(CRC));
EXPECT_TRUE(DC.isChildOf(CC));
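// A starts with exactly two edges; the insertion below should make it three.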
EXPECT_EQ(2, std::distance(A->begin(), A->end()));
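// Insert a new outgoing call edge from A to D, crossing from ARC into DRC.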
ARC.insertOutgoingEdge(A, D, LazyCallGraph::Edge::Call);
EXPECT_EQ(3, std::distance(A->begin(), A->end()));
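// Look up the newly inserted edge and verify it is a call edge targeting D.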
const LazyCallGraph::Edge &NewE = (*A)[D];
EXPECT_TRUE(NewE);
EXPECT_TRUE(NewE.isCall());
EXPECT_EQ(&D, &NewE.getNode());
// Only the parent and child tests should have changed. The rest of the graph
// remains the same.
EXPECT_TRUE(ARC.isParentOf(DRC));
EXPECT_TRUE(AC.isParentOf(DC));
EXPECT_TRUE(ARC.isAncestorOf(DRC));
EXPECT_TRUE(AC.isAncestorOf(DC));
EXPECT_TRUE(DRC.isChildOf(ARC));
EXPECT_TRUE(DC.isChildOf(AC));
EXPECT_TRUE(DRC.isDescendantOf(ARC));
EXPECT_TRUE(DC.isDescendantOf(AC));
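// Every node should still map to the same SCC and RefSCC as before.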
EXPECT_EQ(&AC, CG.lookupSCC(A));
EXPECT_EQ(&BC, CG.lookupSCC(B));
EXPECT_EQ(&CC, CG.lookupSCC(C));
EXPECT_EQ(&DC, CG.lookupSCC(D));
EXPECT_EQ(&ARC, CG.lookupRefSCC(A));
EXPECT_EQ(&BRC, CG.lookupRefSCC(B));
EXPECT_EQ(&CRC, CG.lookupRefSCC(C));
EXPECT_EQ(&DRC, CG.lookupRefSCC(D));
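// Switch the new A -> D edge from a call edge to a reference edge.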
ARC.switchOutgoingEdgeToRef(A, D);
EXPECT_FALSE(NewE.isCall());
// Verify the reference graph remains the same but the SCC graph is updated.
EXPECT_TRUE(ARC.isParentOf(DRC));
EXPECT_FALSE(AC.isParentOf(DC));
EXPECT_TRUE(ARC.isAncestorOf(DRC));
EXPECT_TRUE(AC.isAncestorOf(DC));
EXPECT_TRUE(DRC.isChildOf(ARC));
EXPECT_FALSE(DC.isChildOf(AC));
EXPECT_TRUE(DRC.isDescendantOf(ARC));
EXPECT_TRUE(DC.isDescendantOf(AC));
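// Node-to-SCC and node-to-RefSCC mappings are still unchanged.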
EXPECT_EQ(&AC, CG.lookupSCC(A));
EXPECT_EQ(&BC, CG.lookupSCC(B));
EXPECT_EQ(&CC, CG.lookupSCC(C));
EXPECT_EQ(&DC, CG.lookupSCC(D));
EXPECT_EQ(&ARC, CG.lookupRefSCC(A));
EXPECT_EQ(&BRC, CG.lookupRefSCC(B));
EXPECT_EQ(&CRC, CG.lookupRefSCC(C));
EXPECT_EQ(&DRC, CG.lookupRefSCC(D));
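// Switch the A -> D edge back to a call edge.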
ARC.switchOutgoingEdgeToCall(A, D);
EXPECT_TRUE(NewE.isCall());
// Verify the reference graph remains the same but the SCC graph is updated.
  EXPECT_TRUE(ARC.isParentOf(DRC));
  EXPECT_TRUE(AC.isParentOf(DC));
  EXPECT_TRUE(ARC.isAncestorOf(DRC));
  EXPECT_TRUE(AC.isAncestorOf(DC));
  EXPECT_TRUE(DRC.isChildOf(ARC));
  EXPECT_TRUE(DC.isChildOf(AC));
  EXPECT_TRUE(DRC.isDescendantOf(ARC));
  EXPECT_TRUE(DC.isDescendantOf(AC));
  EXPECT_EQ(&AC, CG.lookupSCC(A));
  EXPECT_EQ(&BC, CG.lookupSCC(B));
  EXPECT_EQ(&CC, CG.lookupSCC(C));
  EXPECT_EQ(&DC, CG.lookupSCC(D));
  EXPECT_EQ(&ARC, CG.lookupRefSCC(A));
  EXPECT_EQ(&BRC, CG.lookupRefSCC(B));
  EXPECT_EQ(&CRC, CG.lookupRefSCC(C));
  EXPECT_EQ(&DRC, CG.lookupRefSCC(D));
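  // Now remove the outgoing edge from A to D entirely.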
  ARC.removeOutgoingEdge(A, D);
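  // With the edge gone, A should be left with exactly two edges.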
  EXPECT_EQ(2, std::distance(A->begin(), A->end()));
  // Now the parent and child tests fail again but the rest remains the same.
  EXPECT_FALSE(ARC.isParentOf(DRC));
  EXPECT_FALSE(AC.isParentOf(DC));
  EXPECT_TRUE(ARC.isAncestorOf(DRC));
  EXPECT_TRUE(AC.isAncestorOf(DC));
  EXPECT_FALSE(DRC.isChildOf(ARC));
  EXPECT_FALSE(DC.isChildOf(AC));
  EXPECT_TRUE(DRC.isDescendantOf(ARC));
  EXPECT_TRUE(DC.isDescendantOf(AC));
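  // The SCC and RefSCC memberships themselves are unchanged.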
  EXPECT_EQ(&AC, CG.lookupSCC(A));
  EXPECT_EQ(&BC, CG.lookupSCC(B));
  EXPECT_EQ(&CC, CG.lookupSCC(C));
  EXPECT_EQ(&DC, CG.lookupSCC(D));
  EXPECT_EQ(&ARC, CG.lookupRefSCC(A));
  EXPECT_EQ(&BRC, CG.lookupRefSCC(B));
  EXPECT_EQ(&CRC, CG.lookupRefSCC(C));
  EXPECT_EQ(&DRC, CG.lookupRefSCC(D));
}

TEST(LazyCallGraphTest, IncomingEdgeInsertion) {
  LLVMContext Context;
  // We want to ensure we can add edges even across complex diamond graphs, so
  // we use the diamond of triangles graph defined above. The ASCII diagram is
  // repeated here for easy reference.
  //
  //      d1       |
  //     /  \      |
  //    d3--d2     |
  //   /     \     |
  //  b1      c1   |
  // /  \    /  \  |
  // b3--b2  c3--c2 |
  //      \  /     |
  //       a1      |
  //      /  \     |
  //     a3--a2    |
  //
  std::unique_ptr<Module> M = parseAssembly(Context, DiamondOfTriangles);
  LazyCallGraph CG = buildCG(*M);
  // Force the graph to be fully expanded.
  CG.buildRefSCCs();
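  // Walk the RefSCCs in post-order, printing each one as it is formed.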
  for (LazyCallGraph::RefSCC &RC : CG.postorder_ref_sccs())
    dbgs() << "Formed RefSCC: " << RC << "\n";
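  // Look up the graph nodes for each function in the module.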
  LazyCallGraph::Node &A1 = *CG.lookup(lookupFunction(*M, "a1"));
  LazyCallGraph::Node &A2 = *CG.lookup(lookupFunction(*M, "a2"));
  LazyCallGraph::Node &A3 = *CG.lookup(lookupFunction(*M, "a3"));
  LazyCallGraph::Node &B1 = *CG.lookup(lookupFunction(*M, "b1"));
  LazyCallGraph::Node &B2 = *CG.lookup(lookupFunction(*M, "b2"));
  LazyCallGraph::Node &B3 = *CG.lookup(lookupFunction(*M, "b3"));
  LazyCallGraph::Node &C1 = *CG.lookup(lookupFunction(*M, "c1"));
  LazyCallGraph::Node &C2 = *CG.lookup(lookupFunction(*M, "c2"));
  LazyCallGraph::Node &C3 = *CG.lookup(lookupFunction(*M, "c3"));
  LazyCallGraph::Node &D1 = *CG.lookup(lookupFunction(*M, "d1"));
  LazyCallGraph::Node &D2 = *CG.lookup(lookupFunction(*M, "d2"));
  LazyCallGraph::Node &D3 = *CG.lookup(lookupFunction(*M, "d3"));
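  // Look up the RefSCCs; the rest of each triangle should map to the same
  // RefSCC as its first member.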
  LazyCallGraph::RefSCC &ARC = *CG.lookupRefSCC(A1);
  LazyCallGraph::RefSCC &BRC = *CG.lookupRefSCC(B1);
  LazyCallGraph::RefSCC &CRC = *CG.lookupRefSCC(C1);
  LazyCallGraph::RefSCC &DRC = *CG.lookupRefSCC(D1);
  ASSERT_EQ(&ARC, CG.lookupRefSCC(A2));
  ASSERT_EQ(&ARC, CG.lookupRefSCC(A3));
  ASSERT_EQ(&BRC, CG.lookupRefSCC(B2));
  ASSERT_EQ(&BRC, CG.lookupRefSCC(B3));
  ASSERT_EQ(&CRC, CG.lookupRefSCC(C2));
  ASSERT_EQ(&CRC, CG.lookupRefSCC(C3));
  ASSERT_EQ(&DRC, CG.lookupRefSCC(D2));
  ASSERT_EQ(&DRC, CG.lookupRefSCC(D3));
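  // Before inserting the new edge, D2 should have exactly one outgoing edge.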
  ASSERT_EQ(1, std::distance(D2->begin(), D2->end()));
  // Add an edge to make the graph:
  //
  //      d1         |
  //     /  \        |
  //    d3--d2---.   |
  //   /     \   |   |
  //  b1      c1 |   |
  // /  \    /  \ /  |
  // b3--b2  c3--c2  |
  //      \  /       |
  //       a1        |
  //      /  \       |
  //     a3--a2      |
  //
  auto MergedRCs = CRC.insertIncomingRefEdge(D2, C2);
  // Make sure we connected the nodes.
  for (LazyCallGraph::Edge E : *D2) {
|
|
|
|
if (&E.getNode() == &D3)
|
[LCG] Construct an actual call graph with call-edge SCCs nested inside
reference-edge SCCs.
This essentially builds a more normal call graph as a subgraph of the
"reference graph" that was the old model. This allows both to exist and
the different use cases to use the aspect which addresses their needs.
Specifically, the pass manager and other *ordering* constrained logic
can use the reference graph to achieve conservative order of visit,
while analyses reasoning about attributes and other properties derived
from reachability can reason about the direct call graph.
Note that this isn't necessarily complete: it doesn't model edges to
declarations or indirect calls. Those can be found by scanning the
instructions of the function if desirable, and in fact every user
currently does this in order to handle things like calls to instrinsics.
If useful, we could consider caching this information in the call graph
to save the instruction scans, but currently that doesn't seem to be
important.
An important realization for why the representation chosen here works is
that the call graph is a formal subset of the reference graph and thus
both can live within the same data structure. All SCCs of the call graph
are necessarily contained within an SCC of the reference graph, etc.
The design is to build 'RefSCC's to model SCCs of the reference graph,
and then within them more literal SCCs for the call graph.
The formation of actual call edge SCCs is not done lazily, unlike
reference edge 'RefSCC's. Instead, once a reference SCC is formed, it
directly builds the call SCCs within it and stores them in a post-order
sequence. This is used to provide a consistent platform for mutation and
update of the graph. The post-order also allows for very efficient
updates in common cases by bounding the number of nodes (and thus edges)
considered.
There is considerable common code that I'm still looking for the best
way to factor out between the various DFS implementations here. So far,
my attempts have made the code harder to read and understand despite
reducing the duplication, which seems a poor tradeoff. I've not given up
on figuring out the right way to do this, but I wanted to wait until
I at least had the system working and tested to continue attempting to
factor it differently.
This also requires introducing several new algorithms in order to handle
all of the incremental update scenarios for the more complex structure
involving two edge colorings. I've tried to comment the algorithms
sufficiently to make it clear how this is expected to work, but they may
still need more extensive documentation.
I know that there are some changes which are not strictly necessarily
coupled here. The process of developing this started out with a very
focused set of changes for the new structure of the graph and
algorithms, but subsequent changes to bring the APIs and code into
consistent and understandable patterns also ended up touching on other
aspects. There was no good way to separate these out without causing
*massive* merge conflicts. Ultimately, to a large degree this is
a rewrite of most of the core algorithms in the LCG class and so I don't
think it really matters much.
Many thanks to the careful review by Sanjoy Das!
Differential Revision: http://reviews.llvm.org/D16802
llvm-svn: 261040
2016-02-17 08:18:16 +08:00
|
|
|
continue;
|
[PM/LCG] Teach the LazyCallGraph how to replace a function without
disturbing the graph or having to update edges.
This is motivated by porting argument promotion to the new pass manager.
Because of how LLVM IR Function objects work, in order to change their
signature a new object needs to be created. This is efficient and
straight forward in the IR but previously was very hard to implement in
LCG. We could easily replace the function a node in the graph
represents. The challenging part is how to handle updating the edges in
the graph.
LCG previously used an edge to a raw function to represent a node that
had not yet been scanned for calls and references. This was the core
of its laziness. However, that model causes this kind of update to be
very hard:
1) The keys to lookup an edge need to be `Function*`s that would all
need to be updated when we update the node.
2) There will be some unknown number of edges that haven't transitioned
from `Function*` edges to `Node*` edges.
All of this complexity isn't necessary. Instead, we can always build
a node around any function, always pointing edges at it and always using
it as the key to lookup an edge. To maintain the laziness, we need to
sink the *edges* of a node into a secondary object and explicitly model
transitioning a node from empty to populated by scanning the function.
This design seems much cleaner in a number of ways, but importantly
there is now exactly *one* place where the `Function*` has to be
updated!
Some other cleanups that fall out of this include having something to
model the *entry* edges more accurately. Rather than hand rolling parts
of the node in the graph itself, we have an explicit `EdgeSequence`
object that gives us exactly the functionality needed. We also have
a consistent place to define the edge iterators and can use them for
both the entry edges and the internal edges of the graph.
The API used to model the separation between a node and its edges is
intentionally very thin as most clients are expected to deal with nodes
that have populated edges. We model this exactly as an optional does
with an additional method to populate the edges when that is
a reasonable thing for a client to do. This is based on API design
suggestions from Richard Smith and David Blaikie, credit goes to them
for helping pick how to model this without it being either too explicit
or too implicit.
The patch is somewhat noisy due to shifting around iterator types and
new syntax for walking the edges of a node, but most of the
functionality change is in the `Edge`, `EdgeSequence`, and `Node` types.
Differential Revision: https://reviews.llvm.org/D29577
llvm-svn: 294653
2017-02-10 07:24:13 +08:00
|
|
|
EXPECT_EQ(&C2, &E.getNode());
|
[LCG] Construct an actual call graph with call-edge SCCs nested inside
reference-edge SCCs.
This essentially builds a more normal call graph as a subgraph of the
"reference graph" that was the old model. This allows both to exist and
the different use cases to use the aspect which addresses their needs.
Specifically, the pass manager and other *ordering* constrained logic
can use the reference graph to achieve conservative order of visit,
while analyses reasoning about attributes and other properties derived
from reachability can reason about the direct call graph.
Note that this isn't necessarily complete: it doesn't model edges to
declarations or indirect calls. Those can be found by scanning the
instructions of the function if desirable, and in fact every user
currently does this in order to handle things like calls to instrinsics.
If useful, we could consider caching this information in the call graph
to save the instruction scans, but currently that doesn't seem to be
important.
An important realization for why the representation chosen here works is
that the call graph is a formal subset of the reference graph and thus
both can live within the same data structure. All SCCs of the call graph
are necessarily contained within an SCC of the reference graph, etc.
The design is to build 'RefSCC's to model SCCs of the reference graph,
and then within them more literal SCCs for the call graph.
The formation of actual call edge SCCs is not done lazily, unlike
reference edge 'RefSCC's. Instead, once a reference SCC is formed, it
directly builds the call SCCs within it and stores them in a post-order
sequence. This is used to provide a consistent platform for mutation and
update of the graph. The post-order also allows for very efficient
updates in common cases by bounding the number of nodes (and thus edges)
considered.
There is considerable common code that I'm still looking for the best
way to factor out between the various DFS implementations here. So far,
my attempts have made the code harder to read and understand despite
reducing the duplication, which seems a poor tradeoff. I've not given up
on figuring out the right way to do this, but I wanted to wait until
I at least had the system working and tested to continue attempting to
factor it differently.
This also requires introducing several new algorithms in order to handle
all of the incremental update scenarios for the more complex structure
involving two edge colorings. I've tried to comment the algorithms
sufficiently to make it clear how this is expected to work, but they may
still need more extensive documentation.
I know that there are some changes which are not strictly necessarily
coupled here. The process of developing this started out with a very
focused set of changes for the new structure of the graph and
algorithms, but subsequent changes to bring the APIs and code into
consistent and understandable patterns also ended up touching on other
aspects. There was no good way to separate these out without causing
*massive* merge conflicts. Ultimately, to a large degree this is
a rewrite of most of the core algorithms in the LCG class and so I don't
think it really matters much.
Many thanks to the careful review by Sanjoy Das!
Differential Revision: http://reviews.llvm.org/D16802
llvm-svn: 261040
2016-02-17 08:18:16 +08:00
|
|
|
}
|
|
|
|
// And marked the D ref-SCC as no longer valid.
|
|
|
|
EXPECT_EQ(1u, MergedRCs.size());
|
|
|
|
EXPECT_EQ(&DRC, MergedRCs[0]);
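  // (The list returned by insertIncomingRefEdge names the RefSCCs that were
  // folded into CRC; the lookups below confirm their nodes now live in CRC.)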

  // Make sure we have the correct nodes in the SCC sets.
  EXPECT_EQ(&ARC, CG.lookupRefSCC(A1));
  EXPECT_EQ(&ARC, CG.lookupRefSCC(A2));
  EXPECT_EQ(&ARC, CG.lookupRefSCC(A3));
  EXPECT_EQ(&BRC, CG.lookupRefSCC(B1));
  EXPECT_EQ(&BRC, CG.lookupRefSCC(B2));
  EXPECT_EQ(&BRC, CG.lookupRefSCC(B3));
  EXPECT_EQ(&CRC, CG.lookupRefSCC(C1));
  EXPECT_EQ(&CRC, CG.lookupRefSCC(C2));
  EXPECT_EQ(&CRC, CG.lookupRefSCC(C3));
  EXPECT_EQ(&CRC, CG.lookupRefSCC(D1));
  EXPECT_EQ(&CRC, CG.lookupRefSCC(D2));
  EXPECT_EQ(&CRC, CG.lookupRefSCC(D3));

  // And that ancestry tests have been updated.
  EXPECT_TRUE(ARC.isParentOf(CRC));
  EXPECT_TRUE(BRC.isParentOf(CRC));

  // And verify the post-order walk reflects the updated structure.
  auto I = CG.postorder_ref_scc_begin(), E = CG.postorder_ref_scc_end();
  ASSERT_NE(I, E);
  EXPECT_EQ(&CRC, &*I) << "Actual RefSCC: " << *I;
  ASSERT_NE(++I, E);
  EXPECT_EQ(&BRC, &*I) << "Actual RefSCC: " << *I;
  ASSERT_NE(++I, E);
  EXPECT_EQ(&ARC, &*I) << "Actual RefSCC: " << *I;
  EXPECT_EQ(++I, E);
}

TEST(LazyCallGraphTest, IncomingEdgeInsertionRefGraph) {
  LLVMContext Context;
  // Another variation of the above test but with all the edges switched to
  // references rather than calls.
  std::unique_ptr<Module> M =
      parseAssembly(Context, DiamondOfTrianglesRefGraph);
  LazyCallGraph CG = buildCG(*M);

  // Force the graph to be fully expanded.
  CG.buildRefSCCs();
  for (LazyCallGraph::RefSCC &RC : CG.postorder_ref_sccs())
    dbgs() << "Formed RefSCC: " << RC << "\n";

  LazyCallGraph::Node &A1 = *CG.lookup(lookupFunction(*M, "a1"));
  LazyCallGraph::Node &A2 = *CG.lookup(lookupFunction(*M, "a2"));
  LazyCallGraph::Node &A3 = *CG.lookup(lookupFunction(*M, "a3"));
  LazyCallGraph::Node &B1 = *CG.lookup(lookupFunction(*M, "b1"));
  LazyCallGraph::Node &B2 = *CG.lookup(lookupFunction(*M, "b2"));
  LazyCallGraph::Node &B3 = *CG.lookup(lookupFunction(*M, "b3"));
  LazyCallGraph::Node &C1 = *CG.lookup(lookupFunction(*M, "c1"));
  LazyCallGraph::Node &C2 = *CG.lookup(lookupFunction(*M, "c2"));
  LazyCallGraph::Node &C3 = *CG.lookup(lookupFunction(*M, "c3"));
  LazyCallGraph::Node &D1 = *CG.lookup(lookupFunction(*M, "d1"));
  LazyCallGraph::Node &D2 = *CG.lookup(lookupFunction(*M, "d2"));
  LazyCallGraph::Node &D3 = *CG.lookup(lookupFunction(*M, "d3"));
  LazyCallGraph::RefSCC &ARC = *CG.lookupRefSCC(A1);
  LazyCallGraph::RefSCC &BRC = *CG.lookupRefSCC(B1);
  LazyCallGraph::RefSCC &CRC = *CG.lookupRefSCC(C1);
  LazyCallGraph::RefSCC &DRC = *CG.lookupRefSCC(D1);
  ASSERT_EQ(&ARC, CG.lookupRefSCC(A2));
  ASSERT_EQ(&ARC, CG.lookupRefSCC(A3));
  ASSERT_EQ(&BRC, CG.lookupRefSCC(B2));
  ASSERT_EQ(&BRC, CG.lookupRefSCC(B3));
  ASSERT_EQ(&CRC, CG.lookupRefSCC(C2));
  ASSERT_EQ(&CRC, CG.lookupRefSCC(C3));
  ASSERT_EQ(&DRC, CG.lookupRefSCC(D2));
  ASSERT_EQ(&DRC, CG.lookupRefSCC(D3));
  ASSERT_EQ(1, std::distance(D2->begin(), D2->end()));
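  // (Before the update, d2's single edge is its reference to d3; the new
  // reference edge from d2 to c2 is added next.)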

  // Add an edge to make the graph:
  //
  //      d1         |
  //     /  \        |
  //    d3--d2---.   |
  //   /     \   |   |
  //  b1      c1 |   |
  //  /  \   /  \ /  |
  // b3--b2 c3--c2   |
  //      \  /       |
  //       a1        |
  //      /  \       |
  //     a3--a2      |
  auto MergedRCs = CRC.insertIncomingRefEdge(D2, C2);
  // Make sure we connected the nodes.
  for (LazyCallGraph::Edge E : *D2) {
    if (&E.getNode() == &D3)
      continue;
    EXPECT_EQ(&C2, &E.getNode());
  }
  // And marked the D ref-SCC as no longer valid.
  EXPECT_EQ(1u, MergedRCs.size());
  EXPECT_EQ(&DRC, MergedRCs[0]);

  // Make sure we have the correct nodes in the SCC sets.
  EXPECT_EQ(&ARC, CG.lookupRefSCC(A1));
  EXPECT_EQ(&ARC, CG.lookupRefSCC(A2));
  EXPECT_EQ(&ARC, CG.lookupRefSCC(A3));
  EXPECT_EQ(&BRC, CG.lookupRefSCC(B1));
  EXPECT_EQ(&BRC, CG.lookupRefSCC(B2));
  EXPECT_EQ(&BRC, CG.lookupRefSCC(B3));
  EXPECT_EQ(&CRC, CG.lookupRefSCC(C1));
  EXPECT_EQ(&CRC, CG.lookupRefSCC(C2));
  EXPECT_EQ(&CRC, CG.lookupRefSCC(C3));
  EXPECT_EQ(&CRC, CG.lookupRefSCC(D1));
  EXPECT_EQ(&CRC, CG.lookupRefSCC(D2));
  EXPECT_EQ(&CRC, CG.lookupRefSCC(D3));

  // And that ancestry tests have been updated.
  EXPECT_TRUE(ARC.isParentOf(CRC));
  EXPECT_TRUE(BRC.isParentOf(CRC));

  // And verify the post-order walk reflects the updated structure.
  auto I = CG.postorder_ref_scc_begin(), E = CG.postorder_ref_scc_end();
  ASSERT_NE(I, E);
  EXPECT_EQ(&CRC, &*I) << "Actual RefSCC: " << *I;
  ASSERT_NE(++I, E);
  EXPECT_EQ(&BRC, &*I) << "Actual RefSCC: " << *I;
  ASSERT_NE(++I, E);
  EXPECT_EQ(&ARC, &*I) << "Actual RefSCC: " << *I;
  EXPECT_EQ(++I, E);
}

TEST(LazyCallGraphTest, IncomingEdgeInsertionLargeCallCycle) {
  LLVMContext Context;
  std::unique_ptr<Module> M =
      parseAssembly(Context, "define void @a() {\n"
                             "entry:\n"
                             " call void @b()\n"
                             " ret void\n"
                             "}\n"
                             "define void @b() {\n"
                             "entry:\n"
                             " call void @c()\n"
                             " ret void\n"
                             "}\n"
                             "define void @c() {\n"
                             "entry:\n"
                             " call void @d()\n"
                             " ret void\n"
                             "}\n"
                             "define void @d() {\n"
                             "entry:\n"
                             " ret void\n"
                             "}\n");
  LazyCallGraph CG = buildCG(*M);
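  // (The module above is a simple call chain a -> b -> c -> d; the edge
  // inserted below from d back to a closes it into one large cycle.)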

  // Force the graph to be fully expanded.
  CG.buildRefSCCs();
  for (LazyCallGraph::RefSCC &RC : CG.postorder_ref_sccs())
    dbgs() << "Formed RefSCC: " << RC << "\n";

  LazyCallGraph::Node &A = *CG.lookup(lookupFunction(*M, "a"));
  LazyCallGraph::Node &B = *CG.lookup(lookupFunction(*M, "b"));
  LazyCallGraph::Node &C = *CG.lookup(lookupFunction(*M, "c"));
  LazyCallGraph::Node &D = *CG.lookup(lookupFunction(*M, "d"));
  LazyCallGraph::SCC &AC = *CG.lookupSCC(A);
  LazyCallGraph::SCC &BC = *CG.lookupSCC(B);
  LazyCallGraph::SCC &CC = *CG.lookupSCC(C);
  LazyCallGraph::SCC &DC = *CG.lookupSCC(D);
  LazyCallGraph::RefSCC &ARC = *CG.lookupRefSCC(A);
  LazyCallGraph::RefSCC &BRC = *CG.lookupRefSCC(B);
  LazyCallGraph::RefSCC &CRC = *CG.lookupRefSCC(C);
  LazyCallGraph::RefSCC &DRC = *CG.lookupRefSCC(D);

  // Connect the top to the bottom forming a large RefSCC made up mostly of
  // calls.
  auto MergedRCs = ARC.insertIncomingRefEdge(D, A);
  // Make sure we connected the nodes.
  EXPECT_NE(D->begin(), D->end());
  EXPECT_EQ(&A, &D->begin()->getNode());

  // Check that we have the dead RCs, but ignore the order.
  EXPECT_EQ(3u, MergedRCs.size());
  EXPECT_NE(find(MergedRCs, &BRC), MergedRCs.end());
  EXPECT_NE(find(MergedRCs, &CRC), MergedRCs.end());
  EXPECT_NE(find(MergedRCs, &DRC), MergedRCs.end());

  // Make sure the nodes point to the right place now.
  EXPECT_EQ(&ARC, CG.lookupRefSCC(A));
  EXPECT_EQ(&ARC, CG.lookupRefSCC(B));
  EXPECT_EQ(&ARC, CG.lookupRefSCC(C));
  EXPECT_EQ(&ARC, CG.lookupRefSCC(D));

  // Check that the SCCs are in postorder.
  EXPECT_EQ(4, ARC.size());
  EXPECT_EQ(&DC, &ARC[0]);
  EXPECT_EQ(&CC, &ARC[1]);
  EXPECT_EQ(&BC, &ARC[2]);
  EXPECT_EQ(&AC, &ARC[3]);
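  // (Bottom-up order: the callee-most SCC containing d comes first and the
  // SCC containing the original caller a comes last.)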

  // And verify the post-order walk reflects the updated structure.
  auto I = CG.postorder_ref_scc_begin(), E = CG.postorder_ref_scc_end();
  ASSERT_NE(I, E);
  EXPECT_EQ(&ARC, &*I) << "Actual RefSCC: " << *I;
  EXPECT_EQ(++I, E);
}

TEST(LazyCallGraphTest, IncomingEdgeInsertionLargeRefCycle) {
  LLVMContext Context;
  std::unique_ptr<Module> M =
      parseAssembly(Context, "define void @a() {\n"
                             "entry:\n"
                             " %p = alloca void ()*\n"
                             " store void ()* @b, void ()** %p\n"
                             " ret void\n"
                             "}\n"
                             "define void @b() {\n"
                             "entry:\n"
                             " %p = alloca void ()*\n"
                             " store void ()* @c, void ()** %p\n"
                             " ret void\n"
                             "}\n"
                             "define void @c() {\n"
                             "entry:\n"
                             " %p = alloca void ()*\n"
                             " store void ()* @d, void ()** %p\n"
                             " ret void\n"
                             "}\n"
                             "define void @d() {\n"
                             "entry:\n"
                             " ret void\n"
                             "}\n");
  LazyCallGraph CG = buildCG(*M);
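  // (Each function only takes the address of the next one and stores it, so
  // the a -> b -> c -> d chain here consists purely of reference edges, not
  // calls.)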

  // Force the graph to be fully expanded.
  CG.buildRefSCCs();
  for (LazyCallGraph::RefSCC &RC : CG.postorder_ref_sccs())
    dbgs() << "Formed RefSCC: " << RC << "\n";

  LazyCallGraph::Node &A = *CG.lookup(lookupFunction(*M, "a"));
  LazyCallGraph::Node &B = *CG.lookup(lookupFunction(*M, "b"));
  LazyCallGraph::Node &C = *CG.lookup(lookupFunction(*M, "c"));
  LazyCallGraph::Node &D = *CG.lookup(lookupFunction(*M, "d"));
  LazyCallGraph::RefSCC &ARC = *CG.lookupRefSCC(A);
  LazyCallGraph::RefSCC &BRC = *CG.lookupRefSCC(B);
  LazyCallGraph::RefSCC &CRC = *CG.lookupRefSCC(C);
  LazyCallGraph::RefSCC &DRC = *CG.lookupRefSCC(D);

  // Connect the top to the bottom forming a large RefSCC made up just of
  // references.
  auto MergedRCs = ARC.insertIncomingRefEdge(D, A);
  // Make sure we connected the nodes.
  EXPECT_NE(D->begin(), D->end());
  EXPECT_EQ(&A, &D->begin()->getNode());

  // Check that we have the dead RCs, but ignore the order.
  EXPECT_EQ(3u, MergedRCs.size());
  EXPECT_NE(find(MergedRCs, &BRC), MergedRCs.end());
  EXPECT_NE(find(MergedRCs, &CRC), MergedRCs.end());
  EXPECT_NE(find(MergedRCs, &DRC), MergedRCs.end());

  // Make sure the nodes point to the right place now.
  EXPECT_EQ(&ARC, CG.lookupRefSCC(A));
  EXPECT_EQ(&ARC, CG.lookupRefSCC(B));
  EXPECT_EQ(&ARC, CG.lookupRefSCC(C));
  EXPECT_EQ(&ARC, CG.lookupRefSCC(D));

  // And verify the post-order walk reflects the updated structure.
  auto I = CG.postorder_ref_scc_begin(), End = CG.postorder_ref_scc_end();
  ASSERT_NE(I, End);
  EXPECT_EQ(&ARC, &*I) << "Actual RefSCC: " << *I;
  EXPECT_EQ(++I, End);
}

TEST(LazyCallGraphTest, InlineAndDeleteFunction) {
  LLVMContext Context;
  // We want to ensure we can delete nodes from relatively complex graphs and
  // so use the diamond of triangles graph defined above.
  //
  // The ascii diagram is repeated here for easy reference.
  //
  //      d1         |
  //     /  \        |
  //    d3--d2       |
  //   /     \       |
  //  b1      c1     |
  //  /  \   /  \    |
  // b3--b2 c3--c2   |
  //      \  /       |
  //       a1        |
  //      /  \       |
  //     a3--a2      |
  //
  std::unique_ptr<Module> M = parseAssembly(Context, DiamondOfTriangles);
  LazyCallGraph CG = buildCG(*M);

  // Force the graph to be fully expanded.
  CG.buildRefSCCs();
  for (LazyCallGraph::RefSCC &RC : CG.postorder_ref_sccs())
    dbgs() << "Formed RefSCC: " << RC << "\n";

  LazyCallGraph::Node &A1 = *CG.lookup(lookupFunction(*M, "a1"));
  LazyCallGraph::Node &A2 = *CG.lookup(lookupFunction(*M, "a2"));
  LazyCallGraph::Node &A3 = *CG.lookup(lookupFunction(*M, "a3"));
  LazyCallGraph::Node &B1 = *CG.lookup(lookupFunction(*M, "b1"));
  LazyCallGraph::Node &B2 = *CG.lookup(lookupFunction(*M, "b2"));
  LazyCallGraph::Node &B3 = *CG.lookup(lookupFunction(*M, "b3"));
  LazyCallGraph::Node &C1 = *CG.lookup(lookupFunction(*M, "c1"));
  LazyCallGraph::Node &C2 = *CG.lookup(lookupFunction(*M, "c2"));
  LazyCallGraph::Node &C3 = *CG.lookup(lookupFunction(*M, "c3"));
  LazyCallGraph::Node &D1 = *CG.lookup(lookupFunction(*M, "d1"));
  LazyCallGraph::Node &D2 = *CG.lookup(lookupFunction(*M, "d2"));
  LazyCallGraph::Node &D3 = *CG.lookup(lookupFunction(*M, "d3"));
  LazyCallGraph::RefSCC &ARC = *CG.lookupRefSCC(A1);
  LazyCallGraph::RefSCC &BRC = *CG.lookupRefSCC(B1);
  LazyCallGraph::RefSCC &CRC = *CG.lookupRefSCC(C1);
  LazyCallGraph::RefSCC &DRC = *CG.lookupRefSCC(D1);
  ASSERT_EQ(&ARC, CG.lookupRefSCC(A2));
  ASSERT_EQ(&ARC, CG.lookupRefSCC(A3));
  ASSERT_EQ(&BRC, CG.lookupRefSCC(B2));
  ASSERT_EQ(&BRC, CG.lookupRefSCC(B3));
  ASSERT_EQ(&CRC, CG.lookupRefSCC(C2));
  ASSERT_EQ(&CRC, CG.lookupRefSCC(C3));
  ASSERT_EQ(&DRC, CG.lookupRefSCC(D2));
  ASSERT_EQ(&DRC, CG.lookupRefSCC(D3));
  ASSERT_EQ(1, std::distance(D2->begin(), D2->end()));

  // Delete d2 from the graph, as if it had been inlined.
  //
  //      d1         |
  //     /  /        |
  //    d3--.        |
  //   /     \       |
  //  b1      c1     |
  //  /  \   /  \    |
  // b3--b2 c3--c2   |
  //      \  /       |
  //       a1        |
  //      /  \       |
  //     a3--a2      |

  Function &D2F = D2.getFunction();
  CallInst *C1Call = nullptr, *D1Call = nullptr;
  for (User *U : D2F.users()) {
    CallInst *CI = dyn_cast<CallInst>(U);
    ASSERT_TRUE(CI) << "Expected a call: " << *U;
    if (CI->getParent()->getParent() == &C1.getFunction()) {
      ASSERT_EQ(nullptr, C1Call) << "Found too many C1 calls: " << *CI;
      C1Call = CI;
    } else if (CI->getParent()->getParent() == &D1.getFunction()) {
      ASSERT_EQ(nullptr, D1Call) << "Found too many D1 calls: " << *CI;
      D1Call = CI;
    } else {
      FAIL() << "Found an unexpected call instruction: " << *CI;
    }
  }
  ASSERT_NE(C1Call, nullptr);
  ASSERT_NE(D1Call, nullptr);
  ASSERT_EQ(&D2F, C1Call->getCalledFunction());
  ASSERT_EQ(&D2F, D1Call->getCalledFunction());
  C1Call->setCalledFunction(&D3.getFunction());
  D1Call->setCalledFunction(&D3.getFunction());
  ASSERT_EQ(0u, D2F.getNumUses());
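  // (Both callers have been redirected to d3, mirroring what the inliner
  // would leave behind once every call to d2 has been inlined away.)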

  // Insert new edges first.
  CRC.insertTrivialCallEdge(C1, D3);
  DRC.insertTrivialCallEdge(D1, D3);
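  // (These edges are "trivial" in the sense that they are known not to change
  // the existing structure: CRC already reaches DRC through c1's call to d2,
  // and d1 and d3 already sit in the same call SCC.)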

  // Then remove the old ones.
  LazyCallGraph::SCC &DC = *CG.lookupSCC(D2);
  auto NewCs = DRC.switchInternalEdgeToRef(D1, D2);
  EXPECT_EQ(&DC, CG.lookupSCC(D2));
  EXPECT_EQ(NewCs.end(), std::next(NewCs.begin()));
  LazyCallGraph::SCC &NewDC = *NewCs.begin();
  EXPECT_EQ(&NewDC, CG.lookupSCC(D1));
  EXPECT_EQ(&NewDC, CG.lookupSCC(D3));
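  // (Demoting d1 -> d2 from a call to a reference splits the old call SCC:
  // d2 keeps the original SCC object while d1 and d3 move into the single
  // newly created SCC returned by switchInternalEdgeToRef.)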

  auto NewRCs = DRC.removeInternalRefEdge(D1, {&D2});
  ASSERT_EQ(2u, NewRCs.size());
  LazyCallGraph::RefSCC &NewDRC = *NewRCs[0];
  EXPECT_EQ(&NewDRC, CG.lookupRefSCC(D1));
  EXPECT_EQ(&NewDRC, CG.lookupRefSCC(D3));
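  // (Removing the d1 -> d2 ref edge splits DRC into brand new RefSCC objects:
  // one holding d1 and d3 and one holding just d2; the original DRC object is
  // not reused once its structure changes.)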
  LazyCallGraph::RefSCC &D2RC = *NewRCs[1];
  EXPECT_EQ(&D2RC, CG.lookupRefSCC(D2));
  EXPECT_FALSE(NewDRC.isParentOf(D2RC));
  EXPECT_TRUE(CRC.isParentOf(D2RC));
  EXPECT_TRUE(CRC.isParentOf(NewDRC));
  EXPECT_TRUE(D2RC.isParentOf(NewDRC));
  CRC.removeOutgoingEdge(C1, D2);
  EXPECT_FALSE(CRC.isParentOf(D2RC));
  EXPECT_TRUE(CRC.isParentOf(NewDRC));
  EXPECT_TRUE(D2RC.isParentOf(NewDRC));
|
2016-10-12 15:59:56 +08:00
|
|
|
|
|
|
|
// Now that we've updated the call graph, D2 is dead, so remove it.
|
|
|
|
CG.removeDeadFunction(D2F);
|
|
|
|
|
|
|
|
// Check that the graph still looks the same.
|
|
|
|
EXPECT_EQ(&ARC, CG.lookupRefSCC(A1));
|
|
|
|
EXPECT_EQ(&ARC, CG.lookupRefSCC(A2));
|
|
|
|
EXPECT_EQ(&ARC, CG.lookupRefSCC(A3));
|
|
|
|
EXPECT_EQ(&BRC, CG.lookupRefSCC(B1));
|
|
|
|
EXPECT_EQ(&BRC, CG.lookupRefSCC(B2));
|
|
|
|
EXPECT_EQ(&BRC, CG.lookupRefSCC(B3));
|
|
|
|
EXPECT_EQ(&CRC, CG.lookupRefSCC(C1));
|
|
|
|
EXPECT_EQ(&CRC, CG.lookupRefSCC(C2));
|
|
|
|
EXPECT_EQ(&CRC, CG.lookupRefSCC(C3));
|
|
|
|
EXPECT_EQ(&NewDRC, CG.lookupRefSCC(D1));
|
|
|
|
EXPECT_EQ(&NewDRC, CG.lookupRefSCC(D3));
|
|
|
|
EXPECT_TRUE(CRC.isParentOf(NewDRC));

  // Verify the post-order walk hasn't changed.
  auto I = CG.postorder_ref_scc_begin(), E = CG.postorder_ref_scc_end();
  ASSERT_NE(I, E);
  EXPECT_EQ(&NewDRC, &*I) << "Actual RefSCC: " << *I;
  ASSERT_NE(++I, E);
  EXPECT_EQ(&CRC, &*I) << "Actual RefSCC: " << *I;
  ASSERT_NE(++I, E);
  EXPECT_EQ(&BRC, &*I) << "Actual RefSCC: " << *I;
  ASSERT_NE(++I, E);
  EXPECT_EQ(&ARC, &*I) << "Actual RefSCC: " << *I;
  EXPECT_EQ(++I, E);
}

TEST(LazyCallGraphTest, InternalEdgeMutation) {
  LLVMContext Context;
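  // Build a simple cycle of mutually calling functions: a calls b, b calls c,
  // and c calls a.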
  std::unique_ptr<Module> M = parseAssembly(Context, "define void @a() {\n"
                                                     "entry:\n"
                                                     " call void @b()\n"
                                                     " ret void\n"
                                                     "}\n"
                                                     "define void @b() {\n"
                                                     "entry:\n"
                                                     " call void @c()\n"
                                                     " ret void\n"
                                                     "}\n"
                                                     "define void @c() {\n"
                                                     "entry:\n"
                                                     " call void @a()\n"
                                                     " ret void\n"
                                                     "}\n");
  LazyCallGraph CG = buildCG(*M);

  // Force the graph to be fully expanded.
  CG.buildRefSCCs();
  auto I = CG.postorder_ref_scc_begin();
  LazyCallGraph::RefSCC &RC = *I++;
  EXPECT_EQ(CG.postorder_ref_scc_end(), I);
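
  // All three nodes land in the lone RefSCC, which holds them in a single SCC.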
  LazyCallGraph::Node &A = *CG.lookup(lookupFunction(*M, "a"));
  LazyCallGraph::Node &B = *CG.lookup(lookupFunction(*M, "b"));
  LazyCallGraph::Node &C = *CG.lookup(lookupFunction(*M, "c"));
  EXPECT_EQ(&RC, CG.lookupRefSCC(A));
  EXPECT_EQ(&RC, CG.lookupRefSCC(B));
  EXPECT_EQ(&RC, CG.lookupRefSCC(C));
  EXPECT_EQ(1, RC.size());
  EXPECT_EQ(&*RC.begin(), CG.lookupSCC(A));
  EXPECT_EQ(&*RC.begin(), CG.lookupSCC(B));
  EXPECT_EQ(&*RC.begin(), CG.lookupSCC(C));

  // Insert an edge from 'a' to 'c'. Nothing changes about the graph.
  RC.insertInternalRefEdge(A, C);
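  // 'a' now has two outgoing edges: the original call edge to 'b' and the new
  // ref edge to 'c'.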
  EXPECT_EQ(2, std::distance(A->begin(), A->end()));
  EXPECT_EQ(&RC, CG.lookupRefSCC(A));
  EXPECT_EQ(&RC, CG.lookupRefSCC(B));
  EXPECT_EQ(&RC, CG.lookupRefSCC(C));
  EXPECT_EQ(1, RC.size());
  EXPECT_EQ(&*RC.begin(), CG.lookupSCC(A));
  EXPECT_EQ(&*RC.begin(), CG.lookupSCC(B));
  EXPECT_EQ(&*RC.begin(), CG.lookupSCC(C));

  // Switch the call edge from 'b' to 'c' to a ref edge. This will break the
  // call cycle and cause us to form more SCCs. The RefSCC will remain the same
  // though.
  auto NewCs = RC.switchInternalEdgeToRef(B, C);
  EXPECT_EQ(&RC, CG.lookupRefSCC(A));
  EXPECT_EQ(&RC, CG.lookupRefSCC(B));
  EXPECT_EQ(&RC, CG.lookupRefSCC(C));
  auto J = RC.begin();
  // The SCCs must be in *post-order* which means successors before
  // predecessors. At this point we have call edges from C to A and from A to
  // B. The only valid postorder is B, A, C.
  EXPECT_EQ(&*J++, CG.lookupSCC(B));
  EXPECT_EQ(&*J++, CG.lookupSCC(A));
  EXPECT_EQ(&*J++, CG.lookupSCC(C));
  EXPECT_EQ(RC.end(), J);
  // And the returned range must be the slice of this sequence containing new
  // SCCs.
  EXPECT_EQ(RC.begin(), NewCs.begin());
  EXPECT_EQ(std::prev(RC.end()), NewCs.end());
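  // That is, the newly formed SCCs are those for 'b' and 'a'; the SCC holding
  // 'c' is not part of the returned range.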

  // Test turning the ref edge from A to C into a call edge. This will form an
  // SCC out of A and C. Since we previously had a call edge from C to A, the
  // C SCC should be preserved and have A merged into it while the A SCC should
  // be invalidated.
  LazyCallGraph::SCC &AC = *CG.lookupSCC(A);
  LazyCallGraph::SCC &CC = *CG.lookupSCC(C);
  EXPECT_TRUE(RC.switchInternalEdgeToCall(A, C, [&](ArrayRef<LazyCallGraph::SCC *> MergedCs) {
    ASSERT_EQ(1u, MergedCs.size());
    EXPECT_EQ(&AC, MergedCs[0]);
  }));
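  // 'a' has been merged into the SCC containing 'c', which now has two nodes.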
  EXPECT_EQ(2, CC.size());
  EXPECT_EQ(&CC, CG.lookupSCC(A));
  EXPECT_EQ(&CC, CG.lookupSCC(C));
  J = RC.begin();
  EXPECT_EQ(&*J++, CG.lookupSCC(B));
  EXPECT_EQ(&*J++, CG.lookupSCC(C));
  EXPECT_EQ(RC.end(), J);
}

TEST(LazyCallGraphTest, InternalEdgeRemoval) {
  LLVMContext Context;
  // A nice fully connected (including self-edges) RefSCC.
  std::unique_ptr<Module> M = parseAssembly(
      Context, "define void @a(i8** %ptr) {\n"
               "entry:\n"
               " store i8* bitcast (void(i8**)* @a to i8*), i8** %ptr\n"
               " store i8* bitcast (void(i8**)* @b to i8*), i8** %ptr\n"
               " store i8* bitcast (void(i8**)* @c to i8*), i8** %ptr\n"
               " ret void\n"
               "}\n"
               "define void @b(i8** %ptr) {\n"
               "entry:\n"
               " store i8* bitcast (void(i8**)* @a to i8*), i8** %ptr\n"
               " store i8* bitcast (void(i8**)* @b to i8*), i8** %ptr\n"
               " store i8* bitcast (void(i8**)* @c to i8*), i8** %ptr\n"
               " ret void\n"
               "}\n"
               "define void @c(i8** %ptr) {\n"
               "entry:\n"
               " store i8* bitcast (void(i8**)* @a to i8*), i8** %ptr\n"
               " store i8* bitcast (void(i8**)* @b to i8*), i8** %ptr\n"
               " store i8* bitcast (void(i8**)* @c to i8*), i8** %ptr\n"
               " ret void\n"
               "}\n");
  LazyCallGraph CG = buildCG(*M);

  // Force the graph to be fully expanded.
  CG.buildRefSCCs();
  auto I = CG.postorder_ref_scc_begin(), E = CG.postorder_ref_scc_end();
  LazyCallGraph::RefSCC &RC = *I;
  EXPECT_EQ(E, std::next(I));
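
  // With every function referencing all the others, a, b, and c collapse into
  // this single RefSCC.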
  LazyCallGraph::Node &A = *CG.lookup(lookupFunction(*M, "a"));
  LazyCallGraph::Node &B = *CG.lookup(lookupFunction(*M, "b"));
  LazyCallGraph::Node &C = *CG.lookup(lookupFunction(*M, "c"));
  EXPECT_EQ(&RC, CG.lookupRefSCC(A));
  EXPECT_EQ(&RC, CG.lookupRefSCC(B));
  EXPECT_EQ(&RC, CG.lookupRefSCC(C));

  // Remove the edge from b -> a, which should leave the 3 functions still in
  // a single connected component because of a -> b -> c -> a.
  SmallVector<LazyCallGraph::RefSCC *, 1> NewRCs =
      RC.removeInternalRefEdge(B, {&A});
  EXPECT_EQ(0u, NewRCs.size());
  EXPECT_EQ(&RC, CG.lookupRefSCC(A));
  EXPECT_EQ(&RC, CG.lookupRefSCC(B));
  EXPECT_EQ(&RC, CG.lookupRefSCC(C));
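  // The post-order sequence should still consist of just this one RefSCC.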
  auto J = CG.postorder_ref_scc_begin();
  EXPECT_EQ(I, J);
  EXPECT_EQ(&RC, &*J);
  EXPECT_EQ(E, std::next(J));

  // Increment I before we actually mutate the structure so that it remains
  // a valid iterator.
  ++I;
  // Remove the edge from c -> a, which should leave 'a' in the original RefSCC
  // and form a new RefSCC for 'b' and 'c'.
  NewRCs = RC.removeInternalRefEdge(C, {&A});
  ASSERT_EQ(2u, NewRCs.size());
  LazyCallGraph::RefSCC &BCRC = *NewRCs[0];
  LazyCallGraph::RefSCC &ARC = *NewRCs[1];
  EXPECT_EQ(&ARC, CG.lookupRefSCC(A));
  EXPECT_EQ(1, std::distance(ARC.begin(), ARC.end()));
  EXPECT_EQ(&BCRC, CG.lookupRefSCC(B));
  EXPECT_EQ(&BCRC, CG.lookupRefSCC(C));
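
  // Walk the rebuilt post-order: the new 'b'/'c' RefSCC should come first,
  // followed by the RefSCC containing only 'a'.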
  J = CG.postorder_ref_scc_begin();
  EXPECT_NE(I, J);
  EXPECT_EQ(&BCRC, &*J);
  ++J;
  EXPECT_NE(I, J);
  EXPECT_EQ(&ARC, &*J);
  ++J;
  EXPECT_EQ(I, J);
  EXPECT_EQ(E, J);
}

TEST(LazyCallGraphTest, InternalMultiEdgeRemoval) {
  LLVMContext Context;
  // A nice fully connected (including self-edges) RefSCC.
  std::unique_ptr<Module> M = parseAssembly(
      Context, "define void @a(i8** %ptr) {\n"
               "entry:\n"
               " store i8* bitcast (void(i8**)* @a to i8*), i8** %ptr\n"
               " store i8* bitcast (void(i8**)* @b to i8*), i8** %ptr\n"
               " store i8* bitcast (void(i8**)* @c to i8*), i8** %ptr\n"
               " ret void\n"
               "}\n"
               "define void @b(i8** %ptr) {\n"
               "entry:\n"
               " store i8* bitcast (void(i8**)* @a to i8*), i8** %ptr\n"
               " store i8* bitcast (void(i8**)* @b to i8*), i8** %ptr\n"
               " store i8* bitcast (void(i8**)* @c to i8*), i8** %ptr\n"
               " ret void\n"
               "}\n"
               "define void @c(i8** %ptr) {\n"
               "entry:\n"
               " store i8* bitcast (void(i8**)* @a to i8*), i8** %ptr\n"
               " store i8* bitcast (void(i8**)* @b to i8*), i8** %ptr\n"
               " store i8* bitcast (void(i8**)* @c to i8*), i8** %ptr\n"
               " ret void\n"
               "}\n");
  LazyCallGraph CG = buildCG(*M);

  // Force the graph to be fully expanded.
  CG.buildRefSCCs();
  auto I = CG.postorder_ref_scc_begin(), E = CG.postorder_ref_scc_end();
  LazyCallGraph::RefSCC &RC = *I;
  EXPECT_EQ(E, std::next(I));

  LazyCallGraph::Node &A = *CG.lookup(lookupFunction(*M, "a"));
  LazyCallGraph::Node &B = *CG.lookup(lookupFunction(*M, "b"));
  LazyCallGraph::Node &C = *CG.lookup(lookupFunction(*M, "c"));
  EXPECT_EQ(&RC, CG.lookupRefSCC(A));
  EXPECT_EQ(&RC, CG.lookupRefSCC(B));
  EXPECT_EQ(&RC, CG.lookupRefSCC(C));

  // Increment I before we actually mutate the structure so that it remains
  // a valid iterator.
  ++I;

  // Remove the edges from b -> a and b -> c, leaving b in its own RefSCC.
  SmallVector<LazyCallGraph::RefSCC *, 1> NewRCs =
      RC.removeInternalRefEdge(B, {&A, &C});

  ASSERT_EQ(2u, NewRCs.size());
  LazyCallGraph::RefSCC &BRC = *NewRCs[0];
  LazyCallGraph::RefSCC &ACRC = *NewRCs[1];
  EXPECT_EQ(&BRC, CG.lookupRefSCC(B));
  EXPECT_EQ(1, std::distance(BRC.begin(), BRC.end()));
  EXPECT_EQ(&ACRC, CG.lookupRefSCC(A));
  EXPECT_EQ(&ACRC, CG.lookupRefSCC(C));
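
  // Walk the updated post-order: the RefSCC holding just 'b' should come
  // first, followed by the RefSCC holding 'a' and 'c'.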
  auto J = CG.postorder_ref_scc_begin();
  EXPECT_NE(I, J);
  EXPECT_EQ(&BRC, &*J);
  ++J;
  EXPECT_NE(I, J);
  EXPECT_EQ(&ACRC, &*J);
  ++J;
  EXPECT_EQ(I, J);
  EXPECT_EQ(E, J);
}

TEST(LazyCallGraphTest, InternalNoOpEdgeRemoval) {
  LLVMContext Context;
  // A graph with a single cycle formed both from call and reference edges
  // which makes the reference edges trivial to delete. The graph looks like:
  //
  // Reference edges: a -> c -> b -> a
  // Call edges: a -> b -> c -> a
  std::unique_ptr<Module> M = parseAssembly(
      Context, "define void @a(i8** %ptr) {\n"
               "entry:\n"
               " call void @b(i8** %ptr)\n"
               " store i8* bitcast (void(i8**)* @c to i8*), i8** %ptr\n"
               " ret void\n"
               "}\n"
               "define void @b(i8** %ptr) {\n"
               "entry:\n"
               " store i8* bitcast (void(i8**)* @a to i8*), i8** %ptr\n"
               " call void @c(i8** %ptr)\n"
               " ret void\n"
               "}\n"
               "define void @c(i8** %ptr) {\n"
               "entry:\n"
               " call void @a(i8** %ptr)\n"
               " store i8* bitcast (void(i8**)* @b to i8*), i8** %ptr\n"
               " ret void\n"
               "}\n");
  LazyCallGraph CG = buildCG(*M);

  // Force the graph to be fully expanded.
  CG.buildRefSCCs();
  auto I = CG.postorder_ref_scc_begin(), E = CG.postorder_ref_scc_end();
  LazyCallGraph::RefSCC &RC = *I;
  EXPECT_EQ(E, std::next(I));

  LazyCallGraph::SCC &C = *RC.begin();
  EXPECT_EQ(RC.end(), std::next(RC.begin()));

  LazyCallGraph::Node &AN = *CG.lookup(lookupFunction(*M, "a"));
  LazyCallGraph::Node &BN = *CG.lookup(lookupFunction(*M, "b"));
  LazyCallGraph::Node &CN = *CG.lookup(lookupFunction(*M, "c"));
  EXPECT_EQ(&RC, CG.lookupRefSCC(AN));
  EXPECT_EQ(&RC, CG.lookupRefSCC(BN));
  EXPECT_EQ(&RC, CG.lookupRefSCC(CN));
  EXPECT_EQ(&C, CG.lookupSCC(AN));
  EXPECT_EQ(&C, CG.lookupSCC(BN));
  EXPECT_EQ(&C, CG.lookupSCC(CN));
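
  // Because the call-edge cycle a -> b -> c -> a keeps all three functions
  // strongly connected, deleting the redundant reference edges below should
  // not split the RefSCC or the SCC.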
  // Remove the edge from a -> c which doesn't change anything.
  SmallVector<LazyCallGraph::RefSCC *, 1> NewRCs =
      RC.removeInternalRefEdge(AN, {&CN});
  EXPECT_EQ(0u, NewRCs.size());
  EXPECT_EQ(&RC, CG.lookupRefSCC(AN));
  EXPECT_EQ(&RC, CG.lookupRefSCC(BN));
  EXPECT_EQ(&RC, CG.lookupRefSCC(CN));
  EXPECT_EQ(&C, CG.lookupSCC(AN));
  EXPECT_EQ(&C, CG.lookupSCC(BN));
  EXPECT_EQ(&C, CG.lookupSCC(CN));
  auto J = CG.postorder_ref_scc_begin();
  EXPECT_EQ(I, J);
  EXPECT_EQ(&RC, &*J);
  EXPECT_EQ(E, std::next(J));

  // Remove the edges from b -> a and c -> b; again this doesn't change
  // anything.
  NewRCs = RC.removeInternalRefEdge(BN, {&AN});
  NewRCs = RC.removeInternalRefEdge(CN, {&BN});
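  // As before, the call cycle keeps everything connected, so no new RefSCCs
  // should be returned.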
  EXPECT_EQ(0u, NewRCs.size());
  EXPECT_EQ(&RC, CG.lookupRefSCC(AN));
  EXPECT_EQ(&RC, CG.lookupRefSCC(BN));
  EXPECT_EQ(&RC, CG.lookupRefSCC(CN));
  EXPECT_EQ(&C, CG.lookupSCC(AN));
  EXPECT_EQ(&C, CG.lookupSCC(BN));
  EXPECT_EQ(&C, CG.lookupSCC(CN));
  J = CG.postorder_ref_scc_begin();
  EXPECT_EQ(I, J);
  EXPECT_EQ(&RC, &*J);
  EXPECT_EQ(E, std::next(J));
}

TEST(LazyCallGraphTest, InternalCallEdgeToRef) {
  LLVMContext Context;
  // A nice fully connected (including self-edges) SCC (and RefSCC)
  std::unique_ptr<Module> M = parseAssembly(
      Context, "define void @a() {\n"
               "entry:\n"
               " call void @a()\n"
               " call void @b()\n"
               " call void @c()\n"
               " ret void\n"
               "}\n"
               "define void @b() {\n"
               "entry:\n"
               " call void @a()\n"
               " call void @b()\n"
               " call void @c()\n"
               " ret void\n"
               "}\n"
               "define void @c() {\n"
               "entry:\n"
               " call void @a()\n"
               " call void @b()\n"
               " call void @c()\n"
               " ret void\n"
               "}\n");
  LazyCallGraph CG = buildCG(*M);

  // Force the graph to be fully expanded.
  CG.buildRefSCCs();
  auto I = CG.postorder_ref_scc_begin();
  LazyCallGraph::RefSCC &RC = *I++;
  EXPECT_EQ(CG.postorder_ref_scc_end(), I);
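
  // With every function calling every other (and itself), everything should
  // collapse into one RefSCC containing a single SCC.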
  EXPECT_EQ(1, RC.size());
  LazyCallGraph::SCC &AC = *RC.begin();

  LazyCallGraph::Node &AN = *CG.lookup(lookupFunction(*M, "a"));
  LazyCallGraph::Node &BN = *CG.lookup(lookupFunction(*M, "b"));
  LazyCallGraph::Node &CN = *CG.lookup(lookupFunction(*M, "c"));
  EXPECT_EQ(&AC, CG.lookupSCC(AN));
  EXPECT_EQ(&AC, CG.lookupSCC(BN));
  EXPECT_EQ(&AC, CG.lookupSCC(CN));

  // Switch the call edge from b -> a to a ref edge, which should leave the
  // 3 functions still in a single connected component because of a -> b ->
  // c -> a.
  auto NewCs = RC.switchInternalEdgeToRef(BN, AN);
  EXPECT_EQ(NewCs.begin(), NewCs.end());
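  // The SCC structure should be unchanged: all three functions remain in the
  // single SCC 'AC'.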
  EXPECT_EQ(1, RC.size());
  EXPECT_EQ(&AC, CG.lookupSCC(AN));
  EXPECT_EQ(&AC, CG.lookupSCC(BN));
  EXPECT_EQ(&AC, CG.lookupSCC(CN));

  // Switch the call edge from c -> a to a ref edge, which should leave 'a' in
  // the original SCC and form a new SCC for 'b' and 'c'.
  NewCs = RC.switchInternalEdgeToRef(CN, AN);
  EXPECT_EQ(1, std::distance(NewCs.begin(), NewCs.end()));
  EXPECT_EQ(2, RC.size());
  EXPECT_EQ(&AC, CG.lookupSCC(AN));
  LazyCallGraph::SCC &BC = *CG.lookupSCC(BN);
  EXPECT_NE(&BC, &AC);
  EXPECT_EQ(&BC, CG.lookupSCC(CN));
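
  // Within the RefSCC's post-order of SCCs, the new 'b'/'c' SCC should sit
  // immediately before the SCC that still contains 'a'.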
  auto J = RC.find(AC);
  EXPECT_EQ(&AC, &*J);
  --J;
  EXPECT_EQ(&BC, &*J);
  EXPECT_EQ(RC.begin(), J);
  EXPECT_EQ(J, NewCs.begin());
2016-02-17 08:18:16 +08:00
|
|
|
|
|
|
|
// Remove the edge from c -> b, which should leave 'b' in the original SCC
|
|
|
|
// and form a new SCC for 'c'. It shouldn't change 'a's SCC.
|
2016-12-28 18:34:50 +08:00
|
|
|
NewCs = RC.switchInternalEdgeToRef(CN, BN);
|
|
|
|
EXPECT_EQ(1, std::distance(NewCs.begin(), NewCs.end()));
|
2016-02-17 08:18:16 +08:00
|
|
|
EXPECT_EQ(3, RC.size());
|
2016-12-28 18:34:50 +08:00
|
|
|
EXPECT_EQ(&AC, CG.lookupSCC(AN));
|
|
|
|
EXPECT_EQ(&BC, CG.lookupSCC(BN));
|
|
|
|
LazyCallGraph::SCC &CC = *CG.lookupSCC(CN);
|
|
|
|
EXPECT_NE(&CC, &AC);
|
|
|
|
EXPECT_NE(&CC, &BC);
|
|
|
|
J = RC.find(AC);
|
|
|
|
EXPECT_EQ(&AC, &*J);
|
2016-02-17 08:18:16 +08:00
|
|
|
--J;
|
2016-12-28 18:34:50 +08:00
|
|
|
EXPECT_EQ(&BC, &*J);
|
2016-02-17 08:18:16 +08:00
|
|
|
--J;
|
2016-12-28 18:34:50 +08:00
|
|
|
EXPECT_EQ(&CC, &*J);
|
2016-02-17 08:18:16 +08:00
|
|
|
EXPECT_EQ(RC.begin(), J);
|
2016-12-28 18:34:50 +08:00
|
|
|
EXPECT_EQ(J, NewCs.begin());
|
2016-02-17 08:18:16 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
TEST(LazyCallGraphTest, InternalRefEdgeToCall) {
|
2016-04-15 05:59:01 +08:00
|
|
|
LLVMContext Context;
|
2016-02-17 08:18:16 +08:00
|
|
|
// Basic tests for making a ref edge a call. This hits the basics of the
|
|
|
|
// process only.
|
2016-04-15 05:59:01 +08:00
|
|
|
std::unique_ptr<Module> M =
|
|
|
|
parseAssembly(Context, "define void @a() {\n"
|
|
|
|
"entry:\n"
|
|
|
|
" call void @b()\n"
|
|
|
|
" call void @c()\n"
|
|
|
|
" store void()* @d, void()** undef\n"
|
|
|
|
" ret void\n"
|
|
|
|
"}\n"
|
|
|
|
"define void @b() {\n"
|
|
|
|
"entry:\n"
|
|
|
|
" store void()* @c, void()** undef\n"
|
|
|
|
" call void @d()\n"
|
|
|
|
" ret void\n"
|
|
|
|
"}\n"
|
|
|
|
"define void @c() {\n"
|
|
|
|
"entry:\n"
|
|
|
|
" store void()* @b, void()** undef\n"
|
|
|
|
" call void @d()\n"
|
|
|
|
" ret void\n"
|
|
|
|
"}\n"
|
|
|
|
"define void @d() {\n"
|
|
|
|
"entry:\n"
|
|
|
|
" store void()* @a, void()** undef\n"
|
|
|
|
" ret void\n"
|
|
|
|
"}\n");
|
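// A sketch of the graph the IR above builds: the stores close a reference
// cycle through 'd' back to 'a', so all four functions end up in a single
// RefSCC, while the call edges form an acyclic diamond:
//
//     a
//    / \
//   b   c
//    \ /
//     d
//
// leaving each function in its own call SCC initially.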
2017-07-15 16:08:19 +08:00
|
|
|
LazyCallGraph CG = buildCG(*M);
|
2016-02-17 08:18:16 +08:00
|
|
|
|
|
|
|
// Force the graph to be fully expanded.
|
2017-02-07 03:38:06 +08:00
|
|
|
CG.buildRefSCCs();
|
2016-02-17 08:18:16 +08:00
|
|
|
auto I = CG.postorder_ref_scc_begin();
|
|
|
|
LazyCallGraph::RefSCC &RC = *I++;
|
|
|
|
EXPECT_EQ(CG.postorder_ref_scc_end(), I);
|
|
|
|
|
|
|
|
LazyCallGraph::Node &A = *CG.lookup(lookupFunction(*M, "a"));
|
|
|
|
LazyCallGraph::Node &B = *CG.lookup(lookupFunction(*M, "b"));
|
|
|
|
LazyCallGraph::Node &C = *CG.lookup(lookupFunction(*M, "c"));
|
|
|
|
LazyCallGraph::Node &D = *CG.lookup(lookupFunction(*M, "d"));
|
|
|
|
LazyCallGraph::SCC &AC = *CG.lookupSCC(A);
|
|
|
|
LazyCallGraph::SCC &BC = *CG.lookupSCC(B);
|
|
|
|
LazyCallGraph::SCC &CC = *CG.lookupSCC(C);
|
|
|
|
LazyCallGraph::SCC &DC = *CG.lookupSCC(D);
|
|
|
|
|
|
|
|
// Check the initial post-order. Note that B and C could be flipped here (and
|
|
|
|
// in our mutation) without changing the nature of this test.
|
|
|
|
ASSERT_EQ(4, RC.size());
|
|
|
|
EXPECT_EQ(&DC, &RC[0]);
|
|
|
|
EXPECT_EQ(&BC, &RC[1]);
|
|
|
|
EXPECT_EQ(&CC, &RC[2]);
|
|
|
|
EXPECT_EQ(&AC, &RC[3]);
|
|
|
|
|
|
|
|
// Switch the ref edge from A -> D to a call edge. This should have no
|
|
|
|
// effect as it is already in postorder and no new cycles are formed.
|
2017-07-09 21:45:11 +08:00
|
|
|
EXPECT_FALSE(RC.switchInternalEdgeToCall(A, D));
|
2016-02-17 08:18:16 +08:00
|
|
|
ASSERT_EQ(4, RC.size());
|
|
|
|
EXPECT_EQ(&DC, &RC[0]);
|
|
|
|
EXPECT_EQ(&BC, &RC[1]);
|
|
|
|
EXPECT_EQ(&CC, &RC[2]);
|
|
|
|
EXPECT_EQ(&AC, &RC[3]);
|
|
|
|
|
|
|
|
// Switch B -> C to a call edge. This doesn't form any new cycles but does
|
|
|
|
// require reordering the SCCs.
|
2017-07-09 21:45:11 +08:00
|
|
|
EXPECT_FALSE(RC.switchInternalEdgeToCall(B, C));
|
2016-02-17 08:18:16 +08:00
|
|
|
ASSERT_EQ(4, RC.size());
|
|
|
|
EXPECT_EQ(&DC, &RC[0]);
|
|
|
|
EXPECT_EQ(&CC, &RC[1]);
|
|
|
|
EXPECT_EQ(&BC, &RC[2]);
|
|
|
|
EXPECT_EQ(&AC, &RC[3]);
|
|
|
|
|
|
|
|
// Switch C -> B to a call edge. This forms a cycle and forces merging SCCs.
|
2017-07-09 21:45:11 +08:00
|
|
|
EXPECT_TRUE(RC.switchInternalEdgeToCall(C, B, [&](ArrayRef<LazyCallGraph::SCC *> MergedCs) {
|
|
|
|
ASSERT_EQ(1u, MergedCs.size());
|
|
|
|
EXPECT_EQ(&CC, MergedCs[0]);
|
|
|
|
}));
|
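// Only C's SCC is merged away here: the new C -> B call edge closes a cycle
// with the existing B -> C call edge, so the callback sees CC as the single
// merged-away SCC, and the assertions below check that BC now contains both
// B and C.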
2016-02-17 08:18:16 +08:00
|
|
|
ASSERT_EQ(3, RC.size());
|
|
|
|
EXPECT_EQ(&DC, &RC[0]);
|
|
|
|
EXPECT_EQ(&BC, &RC[1]);
|
|
|
|
EXPECT_EQ(&AC, &RC[2]);
|
|
|
|
EXPECT_EQ(2, BC.size());
|
|
|
|
EXPECT_EQ(&BC, CG.lookupSCC(B));
|
|
|
|
EXPECT_EQ(&BC, CG.lookupSCC(C));
|
|
|
|
}
|
|
|
|
|
|
|
|
TEST(LazyCallGraphTest, InternalRefEdgeToCallNoCycleInterleaved) {
|
2016-04-15 05:59:01 +08:00
|
|
|
LLVMContext Context;
|
2016-02-17 08:18:16 +08:00
|
|
|
// Test for having a post-order prior to changing a ref edge to a call edge
|
|
|
|
// with SCCs connecting to the source and connecting to the target, but not
|
|
|
|
// connecting to both, interleaved between the source and target. This
|
|
|
|
// ensures we correctly partition the range rather than simply moving one or
|
|
|
|
// the other.
|
2016-04-15 05:59:01 +08:00
|
|
|
std::unique_ptr<Module> M =
|
|
|
|
parseAssembly(Context, "define void @a() {\n"
|
|
|
|
"entry:\n"
|
|
|
|
" call void @b1()\n"
|
|
|
|
" call void @c1()\n"
|
|
|
|
" ret void\n"
|
|
|
|
"}\n"
|
|
|
|
"define void @b1() {\n"
|
|
|
|
"entry:\n"
|
|
|
|
" call void @c1()\n"
|
|
|
|
" call void @b2()\n"
|
|
|
|
" ret void\n"
|
|
|
|
"}\n"
|
|
|
|
"define void @c1() {\n"
|
|
|
|
"entry:\n"
|
|
|
|
" call void @b2()\n"
|
|
|
|
" call void @c2()\n"
|
|
|
|
" ret void\n"
|
|
|
|
"}\n"
|
|
|
|
"define void @b2() {\n"
|
|
|
|
"entry:\n"
|
|
|
|
" call void @c2()\n"
|
|
|
|
" call void @b3()\n"
|
|
|
|
" ret void\n"
|
|
|
|
"}\n"
|
|
|
|
"define void @c2() {\n"
|
|
|
|
"entry:\n"
|
|
|
|
" call void @b3()\n"
|
|
|
|
" call void @c3()\n"
|
|
|
|
" ret void\n"
|
|
|
|
"}\n"
|
|
|
|
"define void @b3() {\n"
|
|
|
|
"entry:\n"
|
|
|
|
" call void @c3()\n"
|
|
|
|
" call void @d()\n"
|
|
|
|
" ret void\n"
|
|
|
|
"}\n"
|
|
|
|
"define void @c3() {\n"
|
|
|
|
"entry:\n"
|
|
|
|
" store void()* @b1, void()** undef\n"
|
|
|
|
" call void @d()\n"
|
|
|
|
" ret void\n"
|
|
|
|
"}\n"
|
|
|
|
"define void @d() {\n"
|
|
|
|
"entry:\n"
|
|
|
|
" store void()* @a, void()** undef\n"
|
|
|
|
" ret void\n"
|
|
|
|
"}\n");
|
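// The stores in @c3 and @d close a reference cycle back to @b1 and @a, so the
// whole module forms one RefSCC. The call edges are acyclic, giving each
// function its own call SCC; the crossing edges b1 -> c1, c1 -> b2, b2 -> c2,
// c2 -> b3, and b3 -> c3 exist only to interleave the b* and c* SCCs in the
// initial post-order and are switched back to ref edges below.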
2017-07-15 16:08:19 +08:00
|
|
|
LazyCallGraph CG = buildCG(*M);
|
2016-02-17 08:18:16 +08:00
|
|
|
|
|
|
|
// Force the graph to be fully expanded.
|
2017-02-07 03:38:06 +08:00
|
|
|
CG.buildRefSCCs();
|
2016-02-17 08:18:16 +08:00
|
|
|
auto I = CG.postorder_ref_scc_begin();
|
|
|
|
LazyCallGraph::RefSCC &RC = *I++;
|
|
|
|
EXPECT_EQ(CG.postorder_ref_scc_end(), I);
|
|
|
|
|
|
|
|
LazyCallGraph::Node &A = *CG.lookup(lookupFunction(*M, "a"));
|
|
|
|
LazyCallGraph::Node &B1 = *CG.lookup(lookupFunction(*M, "b1"));
|
|
|
|
LazyCallGraph::Node &B2 = *CG.lookup(lookupFunction(*M, "b2"));
|
|
|
|
LazyCallGraph::Node &B3 = *CG.lookup(lookupFunction(*M, "b3"));
|
|
|
|
LazyCallGraph::Node &C1 = *CG.lookup(lookupFunction(*M, "c1"));
|
|
|
|
LazyCallGraph::Node &C2 = *CG.lookup(lookupFunction(*M, "c2"));
|
|
|
|
LazyCallGraph::Node &C3 = *CG.lookup(lookupFunction(*M, "c3"));
|
|
|
|
LazyCallGraph::Node &D = *CG.lookup(lookupFunction(*M, "d"));
|
|
|
|
LazyCallGraph::SCC &AC = *CG.lookupSCC(A);
|
|
|
|
LazyCallGraph::SCC &B1C = *CG.lookupSCC(B1);
|
|
|
|
LazyCallGraph::SCC &B2C = *CG.lookupSCC(B2);
|
|
|
|
LazyCallGraph::SCC &B3C = *CG.lookupSCC(B3);
|
|
|
|
LazyCallGraph::SCC &C1C = *CG.lookupSCC(C1);
|
|
|
|
LazyCallGraph::SCC &C2C = *CG.lookupSCC(C2);
|
|
|
|
LazyCallGraph::SCC &C3C = *CG.lookupSCC(C3);
|
|
|
|
LazyCallGraph::SCC &DC = *CG.lookupSCC(D);
|
|
|
|
|
|
|
|
// Several call edges are initially present to force a particular post-order.
|
|
|
|
// Remove them now, leaving an interleaved post-order pattern.
|
2016-12-28 18:34:50 +08:00
|
|
|
RC.switchTrivialInternalEdgeToRef(B3, C3);
|
|
|
|
RC.switchTrivialInternalEdgeToRef(C2, B3);
|
|
|
|
RC.switchTrivialInternalEdgeToRef(B2, C2);
|
|
|
|
RC.switchTrivialInternalEdgeToRef(C1, B2);
|
|
|
|
RC.switchTrivialInternalEdgeToRef(B1, C1);
|
2016-02-17 08:18:16 +08:00
|
|
|
|
|
|
|
// Check the initial post-order. We ensure this order with the extra edges
|
|
|
|
// that are nuked above.
|
|
|
|
ASSERT_EQ(8, RC.size());
|
|
|
|
EXPECT_EQ(&DC, &RC[0]);
|
|
|
|
EXPECT_EQ(&C3C, &RC[1]);
|
|
|
|
EXPECT_EQ(&B3C, &RC[2]);
|
|
|
|
EXPECT_EQ(&C2C, &RC[3]);
|
|
|
|
EXPECT_EQ(&B2C, &RC[4]);
|
|
|
|
EXPECT_EQ(&C1C, &RC[5]);
|
|
|
|
EXPECT_EQ(&B1C, &RC[6]);
|
|
|
|
EXPECT_EQ(&AC, &RC[7]);
|
|
|
|
|
|
|
|
// Switch C3 -> B1 to a call edge. This doesn't form any new cycles but does
|
|
|
|
// require reordering the SCCs in the face of tricky internal node
|
|
|
|
// structures.
|
2017-07-09 21:45:11 +08:00
|
|
|
EXPECT_FALSE(RC.switchInternalEdgeToCall(C3, B1));
|
2016-02-17 08:18:16 +08:00
|
|
|
ASSERT_EQ(8, RC.size());
|
|
|
|
EXPECT_EQ(&DC, &RC[0]);
|
|
|
|
EXPECT_EQ(&B3C, &RC[1]);
|
|
|
|
EXPECT_EQ(&B2C, &RC[2]);
|
|
|
|
EXPECT_EQ(&B1C, &RC[3]);
|
|
|
|
EXPECT_EQ(&C3C, &RC[4]);
|
|
|
|
EXPECT_EQ(&C2C, &RC[5]);
|
|
|
|
EXPECT_EQ(&C1C, &RC[6]);
|
|
|
|
EXPECT_EQ(&AC, &RC[7]);
|
|
|
|
}
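For reference, the two result shapes of RefSCC::switchInternalEdgeToCall exercised by the test above and by the partition-and-merge test below can be sketched as follows. This is an illustrative sketch only, not code from the test file: RC stands for the enclosing RefSCC, SourceN and TargetN are placeholder nodes within it, and the callback overload is the same one used later in this file.

bool FormedCycle = RC.switchInternalEdgeToCall(
    SourceN, TargetN, [&](ArrayRef<LazyCallGraph::SCC *> MergedCs) {
      // Invoked only when the new call edge closes a call cycle; MergedCs
      // lists the SCCs that were folded into the target's SCC.
    });
// When FormedCycle is false, no SCCs merged, although the post-order of
// SCCs inside the RefSCC may still have been reshuffled.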
|
|
|
|
|
|
|
|
TEST(LazyCallGraphTest, InternalRefEdgeToCallBothPartitionAndMerge) {
|
2016-04-15 05:59:01 +08:00
|
|
|
LLVMContext Context;
|
2016-02-17 08:18:16 +08:00
|
|
|
// Test for having a postorder where between the source and target are all
|
|
|
|
// three kinds of other SCCs:
|
|
|
|
// 1) One connected to the target only that has to be shifted below the
|
|
|
|
// source.
|
|
|
|
// 2) One connected to the source only that has to be shifted below the
|
|
|
|
// target.
|
|
|
|
// 3) One connected to both source and target that has to remain and get
|
|
|
|
// merged away.
|
|
|
|
//
|
|
|
|
// To achieve this we construct a heavily connected graph to force
|
|
|
|
// a particular post-order. Then we remove the forcing edges and connect
|
|
|
|
// a cycle.
|
|
|
|
//
|
|
|
|
// Diagram for the graph we want on the left and the graph we use to force
|
|
|
|
// the ordering on the right. Edges point down or right.
|
|
|
|
//
|
|
|
|
// A | A |
|
|
|
|
// / \ | / \ |
|
|
|
|
// B E | B \ |
|
|
|
|
// |\ | | |\ | |
|
|
|
|
// | D | | C-D-E |
|
|
|
|
// | \| | | \| |
|
|
|
|
// C F | \ F |
|
|
|
|
// \ / | \ / |
|
|
|
|
// G | G |
|
|
|
|
//
|
|
|
|
// And we form a cycle by connecting F to B.
|
2016-04-15 05:59:01 +08:00
|
|
|
std::unique_ptr<Module> M =
|
|
|
|
parseAssembly(Context, "define void @a() {\n"
|
|
|
|
"entry:\n"
|
|
|
|
" call void @b()\n"
|
|
|
|
" call void @e()\n"
|
|
|
|
" ret void\n"
|
|
|
|
"}\n"
|
|
|
|
"define void @b() {\n"
|
|
|
|
"entry:\n"
|
|
|
|
" call void @c()\n"
|
|
|
|
" call void @d()\n"
|
|
|
|
" ret void\n"
|
|
|
|
"}\n"
|
|
|
|
"define void @c() {\n"
|
|
|
|
"entry:\n"
|
|
|
|
" call void @d()\n"
|
|
|
|
" call void @g()\n"
|
|
|
|
" ret void\n"
|
|
|
|
"}\n"
|
|
|
|
"define void @d() {\n"
|
|
|
|
"entry:\n"
|
|
|
|
" call void @e()\n"
|
|
|
|
" call void @f()\n"
|
|
|
|
" ret void\n"
|
|
|
|
"}\n"
|
|
|
|
"define void @e() {\n"
|
|
|
|
"entry:\n"
|
|
|
|
" call void @f()\n"
|
|
|
|
" ret void\n"
|
|
|
|
"}\n"
|
|
|
|
"define void @f() {\n"
|
|
|
|
"entry:\n"
|
|
|
|
" store void()* @b, void()** undef\n"
|
|
|
|
" call void @g()\n"
|
|
|
|
" ret void\n"
|
|
|
|
"}\n"
|
|
|
|
"define void @g() {\n"
|
|
|
|
"entry:\n"
|
|
|
|
" store void()* @a, void()** undef\n"
|
|
|
|
" ret void\n"
|
|
|
|
"}\n");
|
2017-07-15 16:08:19 +08:00
|
|
|
LazyCallGraph CG = buildCG(*M);
|
2016-02-17 08:18:16 +08:00
|
|
|
|
|
|
|
// Force the graph to be fully expanded.
|
2017-02-07 03:38:06 +08:00
|
|
|
CG.buildRefSCCs();
|
2016-02-17 08:18:16 +08:00
|
|
|
auto I = CG.postorder_ref_scc_begin();
|
|
|
|
LazyCallGraph::RefSCC &RC = *I++;
|
|
|
|
EXPECT_EQ(CG.postorder_ref_scc_end(), I);
|
|
|
|
|
|
|
|
LazyCallGraph::Node &A = *CG.lookup(lookupFunction(*M, "a"));
|
|
|
|
LazyCallGraph::Node &B = *CG.lookup(lookupFunction(*M, "b"));
|
|
|
|
LazyCallGraph::Node &C = *CG.lookup(lookupFunction(*M, "c"));
|
|
|
|
LazyCallGraph::Node &D = *CG.lookup(lookupFunction(*M, "d"));
|
|
|
|
LazyCallGraph::Node &E = *CG.lookup(lookupFunction(*M, "e"));
|
|
|
|
LazyCallGraph::Node &F = *CG.lookup(lookupFunction(*M, "f"));
|
|
|
|
LazyCallGraph::Node &G = *CG.lookup(lookupFunction(*M, "g"));
|
|
|
|
LazyCallGraph::SCC &AC = *CG.lookupSCC(A);
|
|
|
|
LazyCallGraph::SCC &BC = *CG.lookupSCC(B);
|
|
|
|
LazyCallGraph::SCC &CC = *CG.lookupSCC(C);
|
|
|
|
LazyCallGraph::SCC &DC = *CG.lookupSCC(D);
|
|
|
|
LazyCallGraph::SCC &EC = *CG.lookupSCC(E);
|
|
|
|
LazyCallGraph::SCC &FC = *CG.lookupSCC(F);
|
|
|
|
LazyCallGraph::SCC &GC = *CG.lookupSCC(G);
|
|
|
|
|
|
|
|
// Remove the extra edges that were used to force a particular post-order.
|
2016-12-28 18:34:50 +08:00
|
|
|
RC.switchTrivialInternalEdgeToRef(C, D);
|
|
|
|
RC.switchTrivialInternalEdgeToRef(D, E);
|
2016-02-17 08:18:16 +08:00
|
|
|
|
|
|
|
// Check the initial post-order. We ensure this order with the extra edges
|
|
|
|
// that are nuked above.
|
|
|
|
ASSERT_EQ(7, RC.size());
|
|
|
|
EXPECT_EQ(&GC, &RC[0]);
|
|
|
|
EXPECT_EQ(&FC, &RC[1]);
|
|
|
|
EXPECT_EQ(&EC, &RC[2]);
|
|
|
|
EXPECT_EQ(&DC, &RC[3]);
|
|
|
|
EXPECT_EQ(&CC, &RC[4]);
|
|
|
|
EXPECT_EQ(&BC, &RC[5]);
|
|
|
|
EXPECT_EQ(&AC, &RC[6]);
|
|
|
|
|
|
|
|
// Switch F -> B to a call edge. This merges B, D, and F into a single SCC,
|
|
|
|
// and has to place the C and E SCCs on either side of it:
|
|
|
|
// A A |
|
|
|
|
// / \ / \ |
|
|
|
|
// B E | E |
|
|
|
|
// |\ | \ / |
|
|
|
|
// | D | -> B |
|
|
|
|
// | \| / \ |
|
|
|
|
// C F C | |
|
|
|
|
// \ / \ / |
|
|
|
|
// G G |
|
2017-07-09 21:45:11 +08:00
|
|
|
EXPECT_TRUE(RC.switchInternalEdgeToCall(
|
|
|
|
F, B, [&](ArrayRef<LazyCallGraph::SCC *> MergedCs) {
|
|
|
|
ASSERT_EQ(2u, MergedCs.size());
|
|
|
|
EXPECT_EQ(&FC, MergedCs[0]);
|
|
|
|
EXPECT_EQ(&DC, MergedCs[1]);
|
|
|
|
}));
|
2016-02-17 08:18:16 +08:00
|
|
|
EXPECT_EQ(3, BC.size());
|
|
|
|
|
|
|
|
// And make sure the postorder was updated.
|
|
|
|
ASSERT_EQ(5, RC.size());
|
|
|
|
EXPECT_EQ(&GC, &RC[0]);
|
|
|
|
EXPECT_EQ(&CC, &RC[1]);
|
|
|
|
EXPECT_EQ(&BC, &RC[2]);
|
|
|
|
EXPECT_EQ(&EC, &RC[3]);
|
|
|
|
EXPECT_EQ(&AC, &RC[4]);
|
2014-04-23 19:03:03 +08:00
|
|
|
}
|
|
|
|
|
2016-12-27 13:00:45 +08:00
|
|
|
// Test for IR containing constants using blockaddress constant expressions.
|
|
|
|
// These are truly unique constructs: constant expressions with non-constant
|
|
|
|
// operands.
|
|
|
|
TEST(LazyCallGraphTest, HandleBlockAddress) {
|
|
|
|
LLVMContext Context;
|
|
|
|
std::unique_ptr<Module> M =
|
|
|
|
parseAssembly(Context, "define void @f() {\n"
|
|
|
|
"entry:\n"
|
|
|
|
" ret void\n"
|
|
|
|
"bb:\n"
|
|
|
|
" unreachable\n"
|
|
|
|
"}\n"
|
|
|
|
"define void @g(i8** %ptr) {\n"
|
|
|
|
"entry:\n"
|
|
|
|
" store i8* blockaddress(@f, %bb), i8** %ptr\n"
|
|
|
|
" ret void\n"
|
|
|
|
"}\n");
|
2017-07-15 16:08:19 +08:00
|
|
|
LazyCallGraph CG = buildCG(*M);
|
2016-12-27 13:00:45 +08:00
|
|
|
|
2017-02-07 03:38:06 +08:00
|
|
|
CG.buildRefSCCs();
|
2016-12-27 13:00:45 +08:00
|
|
|
auto I = CG.postorder_ref_scc_begin();
|
|
|
|
LazyCallGraph::RefSCC &FRC = *I++;
|
|
|
|
LazyCallGraph::RefSCC &GRC = *I++;
|
|
|
|
EXPECT_EQ(CG.postorder_ref_scc_end(), I);
|
|
|
|
|
|
|
|
LazyCallGraph::Node &F = *CG.lookup(lookupFunction(*M, "f"));
|
|
|
|
LazyCallGraph::Node &G = *CG.lookup(lookupFunction(*M, "g"));
|
|
|
|
EXPECT_EQ(&FRC, CG.lookupRefSCC(F));
|
|
|
|
EXPECT_EQ(&GRC, CG.lookupRefSCC(G));
|
|
|
|
EXPECT_TRUE(GRC.isParentOf(FRC));
|
|
|
|
}
|
|
|
|
|
[INLINER] allow inlining of blockaddresses if sole uses are callbrs
Summary:
It was supposed that Ref LazyCallGraph::Edge's were being inserted by
inlining, but that doesn't seem to be the case. Instead, it seems that
there was no test for a blockaddress Constant in an instruction that
referenced the function that contained the instruction. Ex:
```
define void @f() {
%1 = alloca i8*, align 8
2:
store i8* blockaddress(@f, %2), i8** %1, align 8
ret void
}
```
When iterating blockaddresses, do not add the function they refer to
back to the worklist if the blockaddress is referring to the contained
function (as opposed to an external function).
Because blockaddress has slightly different semantics than GNU C's
address of labels, there are 3 cases that can occur with blockaddress,
where only 1 can happen in GNU C due to C's scoping rules:
* blockaddress is within the function it refers to (possible in GNU C).
* blockaddress is within a different function than the one it refers to
(not possible in GNU C).
* blockaddress is used to declare a global (not possible in GNU C).
The second case is tested in:
```
$ ./llvm/build/unittests/Analysis/AnalysisTests \
--gtest_filter=LazyCallGraphTest.HandleBlockAddress
```
This patch adjusts the iteration of blockaddresses in
LazyCallGraph::visitReferences to not revisit the blockaddress's
function in the first case.
The Linux kernel contains code that's not semantically valid at -O0;
specifically code passed to asm goto. It requires that asm goto be
inline-able. This patch conservatively does not attempt to handle the
more general case of inlining blockaddresses that have non-callbr users
(pr/39560).
https://bugs.llvm.org/show_bug.cgi?id=39560
https://bugs.llvm.org/show_bug.cgi?id=40722
https://github.com/ClangBuiltLinux/linux/issues/6
https://reviews.llvm.org/rL212077
Reviewers: jyknight, eli.friedman, chandlerc
Reviewed By: chandlerc
Subscribers: george.burgess.iv, nathanchance, mgorny, craig.topper, mengxu.gatech, void, mehdi_amini, E5ten, chandlerc, efriedma, eraman, hiraditya, haicheng, pirama, llvm-commits, srhines
Tags: #llvm
Differential Revision: https://reviews.llvm.org/D58260
llvm-svn: 361173
2019-05-21 00:48:09 +08:00
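The guard described in the commit message above can be sketched roughly as follows. This is only an illustration of the idea, not the exact code in LazyCallGraph::visitReferences; `C` (a visited Constant) and `ContainingF` (the function whose instructions are being scanned) are placeholder names.

// Inside the constant-visitation loop: skip blockaddresses that refer back
// into the function being scanned, so no new reference edge is added.
if (auto *BA = dyn_cast<BlockAddress>(C))
  if (BA->getFunction() == &ContainingF)
    continue; // Self-referential blockaddress: add no new ref edge.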
|
|
|
// Test that a blockaddress that refers to itself creates no new RefSCC
|
|
|
|
// connections. https://bugs.llvm.org/show_bug.cgi?id=40722
|
|
|
|
TEST(LazyCallGraphTest, HandleBlockAddress2) {
|
|
|
|
LLVMContext Context;
|
|
|
|
std::unique_ptr<Module> M =
|
|
|
|
parseAssembly(Context, "define void @f() {\n"
|
|
|
|
" ret void\n"
|
|
|
|
"}\n"
|
|
|
|
"define void @g(i8** %ptr) {\n"
|
|
|
|
"bb:\n"
|
|
|
|
" store i8* blockaddress(@g, %bb), i8** %ptr\n"
|
|
|
|
" ret void\n"
|
|
|
|
"}\n");
|
|
|
|
LazyCallGraph CG = buildCG(*M);
|
|
|
|
|
|
|
|
CG.buildRefSCCs();
|
|
|
|
auto I = CG.postorder_ref_scc_begin();
|
|
|
|
LazyCallGraph::RefSCC &GRC = *I++;
|
|
|
|
LazyCallGraph::RefSCC &FRC = *I++;
|
|
|
|
EXPECT_EQ(CG.postorder_ref_scc_end(), I);
|
|
|
|
|
|
|
|
LazyCallGraph::Node &F = *CG.lookup(lookupFunction(*M, "f"));
|
|
|
|
LazyCallGraph::Node &G = *CG.lookup(lookupFunction(*M, "g"));
|
|
|
|
EXPECT_EQ(&FRC, CG.lookupRefSCC(F));
|
|
|
|
EXPECT_EQ(&GRC, CG.lookupRefSCC(G));
|
|
|
|
EXPECT_FALSE(GRC.isParentOf(FRC));
|
|
|
|
EXPECT_FALSE(FRC.isParentOf(GRC));
|
|
|
|
}
|
|
|
|
|
[PM/LCG] Teach the LazyCallGraph how to replace a function without
disturbing the graph or having to update edges.
This is motivated by porting argument promotion to the new pass manager.
Because of how LLVM IR Function objects work, in order to change their
signature a new object needs to be created. This is efficient and
straight forward in the IR but previously was very hard to implement in
LCG. We could easily replace the function a node in the graph
represents. The challenging part is how to handle updating the edges in
the graph.
LCG previously used an edge to a raw function to represent a node that
had not yet been scanned for calls and references. This was the core
of its laziness. However, that model causes this kind of update to be
very hard:
1) The keys to look up an edge need to be `Function*`s that would all
need to be updated when we update the node.
2) There will be some unknown number of edges that haven't transitioned
from `Function*` edges to `Node*` edges.
All of this complexity isn't necessary. Instead, we can always build
a node around any function, always pointing edges at it and always using
it as the key to lookup an edge. To maintain the laziness, we need to
sink the *edges* of a node into a secondary object and explicitly model
transitioning a node from empty to populated by scanning the function.
This design seems much cleaner in a number of ways, but importantly
there is now exactly *one* place where the `Function*` has to be
updated!
Some other cleanups that fall out of this include having something to
model the *entry* edges more accurately. Rather than hand rolling parts
of the node in the graph itself, we have an explicit `EdgeSequence`
object that gives us exactly the functionality needed. We also have
a consistent place to define the edge iterators and can use them for
both the entry edges and the internal edges of the graph.
The API used to model the separation between a node and its edges is
intentionally very thin as most clients are expected to deal with nodes
that have populated edges. We model this exactly as an optional does
with an additional method to populate the edges when that is
a reasonable thing for a client to do. This is based on API design
suggestions from Richard Smith and David Blaikie, credit goes to them
for helping pick how to model this without it being either too explicit
or too implicit.
The patch is somewhat noisy due to shifting around iterator types and
new syntax for walking the edges of a node, but most of the
functionality change is in the `Edge`, `EdgeSequence`, and `Node` types.
Differential Revision: https://reviews.llvm.org/D29577
llvm-svn: 294653
2017-02-10 07:24:13 +08:00
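As a small illustration of the populate-on-demand model described above, a minimal sketch follows. It assumes a parsed Module M and uses only helpers already present in this file (buildCG, lookupFunction); the function name "a" is just a placeholder.

LazyCallGraph CG = buildCG(*M);
// Every function gets a Node up front, but its edges are only materialized
// once the node is explicitly populated (or once RefSCCs are built).
LazyCallGraph::Node &N = CG.get(lookupFunction(*M, "a"));
N.populate();
// Building the RefSCCs walks the populated edge sequences and establishes
// the post-order that the tests below rely on.
CG.buildRefSCCs();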
|
|
|
TEST(LazyCallGraphTest, ReplaceNodeFunction) {
|
|
|
|
LLVMContext Context;
|
|
|
|
// A graph with several different kinds of edges pointing at a particular
|
|
|
|
// function.
|
|
|
|
std::unique_ptr<Module> M =
|
|
|
|
parseAssembly(Context,
|
|
|
|
"define void @a(i8** %ptr) {\n"
|
|
|
|
"entry:\n"
|
|
|
|
" store i8* bitcast (void(i8**)* @d to i8*), i8** %ptr\n"
|
|
|
|
" ret void\n"
|
|
|
|
"}\n"
|
|
|
|
"define void @b(i8** %ptr) {\n"
|
|
|
|
"entry:\n"
|
|
|
|
" store i8* bitcast (void(i8**)* @d to i8*), i8** %ptr\n"
|
|
|
|
" store i8* bitcast (void(i8**)* @d to i8*), i8** %ptr\n"
|
|
|
|
" call void @d(i8** %ptr)"
|
|
|
|
" ret void\n"
|
|
|
|
"}\n"
|
|
|
|
"define void @c(i8** %ptr) {\n"
|
|
|
|
"entry:\n"
|
|
|
|
" call void @d(i8** %ptr)"
|
|
|
|
" call void @d(i8** %ptr)"
|
|
|
|
" store i8* bitcast (void(i8**)* @d to i8*), i8** %ptr\n"
|
|
|
|
" ret void\n"
|
|
|
|
"}\n"
|
|
|
|
"define void @d(i8** %ptr) {\n"
|
|
|
|
"entry:\n"
|
|
|
|
" store i8* bitcast (void(i8**)* @b to i8*), i8** %ptr\n"
|
|
|
|
" call void @c(i8** %ptr)"
|
|
|
|
" call void @d(i8** %ptr)"
|
|
|
|
" store i8* bitcast (void(i8**)* @d to i8*), i8** %ptr\n"
|
|
|
|
" ret void\n"
|
|
|
|
"}\n");
|
2017-07-15 16:08:19 +08:00
|
|
|
LazyCallGraph CG = buildCG(*M);
|
2017-02-10 07:24:13 +08:00
|
|
|
|
|
|
|
// Force the graph to be fully expanded.
|
|
|
|
CG.buildRefSCCs();
|
|
|
|
auto I = CG.postorder_ref_scc_begin();
|
|
|
|
LazyCallGraph::RefSCC &RC1 = *I++;
|
|
|
|
LazyCallGraph::RefSCC &RC2 = *I++;
|
|
|
|
EXPECT_EQ(CG.postorder_ref_scc_end(), I);
|
|
|
|
|
2017-03-11 21:02:31 +08:00
|
|
|
ASSERT_EQ(2, RC1.size());
|
2017-02-10 07:24:13 +08:00
|
|
|
LazyCallGraph::SCC &C1 = RC1[0];
|
|
|
|
LazyCallGraph::SCC &C2 = RC1[1];
|
|
|
|
|
|
|
|
LazyCallGraph::Node &AN = *CG.lookup(lookupFunction(*M, "a"));
|
|
|
|
LazyCallGraph::Node &BN = *CG.lookup(lookupFunction(*M, "b"));
|
|
|
|
LazyCallGraph::Node &CN = *CG.lookup(lookupFunction(*M, "c"));
|
|
|
|
LazyCallGraph::Node &DN = *CG.lookup(lookupFunction(*M, "d"));
|
|
|
|
EXPECT_EQ(&C1, CG.lookupSCC(DN));
|
|
|
|
EXPECT_EQ(&C1, CG.lookupSCC(CN));
|
|
|
|
EXPECT_EQ(&C2, CG.lookupSCC(BN));
|
|
|
|
EXPECT_EQ(&RC1, CG.lookupRefSCC(DN));
|
|
|
|
EXPECT_EQ(&RC1, CG.lookupRefSCC(CN));
|
|
|
|
EXPECT_EQ(&RC1, CG.lookupRefSCC(BN));
|
|
|
|
EXPECT_EQ(&RC2, CG.lookupRefSCC(AN));
|
|
|
|
|
|
|
|
// Now we need to build a new function 'e' with the same signature as 'd'.
|
|
|
|
Function &D = DN.getFunction();
|
|
|
|
Function &E = *Function::Create(D.getFunctionType(), D.getLinkage(), "e");
|
|
|
|
D.getParent()->getFunctionList().insert(D.getIterator(), &E);
|
|
|
|
|
|
|
|
// Change each use of 'd' to use 'e'. This is particularly easy as they have
|
|
|
|
// the same type.
|
|
|
|
D.replaceAllUsesWith(&E);
|
|
|
|
|
|
|
|
// Splice the body of the old function into the new one.
|
|
|
|
E.getBasicBlockList().splice(E.begin(), D.getBasicBlockList());
|
|
|
|
// And fix up the one argument.
|
|
|
|
D.arg_begin()->replaceAllUsesWith(&*E.arg_begin());
|
|
|
|
E.arg_begin()->takeName(&*D.arg_begin());
|
|
|
|
|
|
|
|
// Now replace the function in the graph.
|
|
|
|
RC1.replaceNodeFunction(DN, E);
|
|
|
|
|
|
|
|
EXPECT_EQ(&E, &DN.getFunction());
|
|
|
|
EXPECT_EQ(&DN, &(*CN)[DN].getNode());
|
|
|
|
EXPECT_EQ(&DN, &(*BN)[DN].getNode());
|
|
|
|
}
|
2017-02-10 07:30:14 +08:00
|
|
|
|
|
|
|
TEST(LazyCallGraphTest, RemoveFunctionWithSpurriousRef) {
|
|
|
|
LLVMContext Context;
|
|
|
|
// A graph with a couple of RefSCCs.
|
|
|
|
std::unique_ptr<Module> M =
|
|
|
|
parseAssembly(Context,
|
|
|
|
"define void @a(i8** %ptr) {\n"
|
|
|
|
"entry:\n"
|
|
|
|
" store i8* bitcast (void(i8**)* @d to i8*), i8** %ptr\n"
|
|
|
|
" ret void\n"
|
|
|
|
"}\n"
|
|
|
|
"define void @b(i8** %ptr) {\n"
|
|
|
|
"entry:\n"
|
|
|
|
" store i8* bitcast (void(i8**)* @c to i8*), i8** %ptr\n"
|
|
|
|
" ret void\n"
|
|
|
|
"}\n"
|
|
|
|
"define void @c(i8** %ptr) {\n"
|
|
|
|
"entry:\n"
|
|
|
|
" call void @d(i8** %ptr)"
|
|
|
|
" ret void\n"
|
|
|
|
"}\n"
|
|
|
|
"define void @d(i8** %ptr) {\n"
|
|
|
|
"entry:\n"
|
|
|
|
" call void @c(i8** %ptr)"
|
|
|
|
" store i8* bitcast (void(i8**)* @b to i8*), i8** %ptr\n"
|
|
|
|
" ret void\n"
|
|
|
|
"}\n"
|
|
|
|
"define void @dead() {\n"
|
|
|
|
"entry:\n"
|
|
|
|
" ret void\n"
|
|
|
|
"}\n");
|
2017-07-15 16:08:19 +08:00
|
|
|
LazyCallGraph CG = buildCG(*M);
|
2017-02-10 07:30:14 +08:00
|
|
|
|
|
|
|
// Insert spurious ref edges.
|
|
|
|
LazyCallGraph::Node &AN = CG.get(lookupFunction(*M, "a"));
|
|
|
|
LazyCallGraph::Node &BN = CG.get(lookupFunction(*M, "b"));
|
|
|
|
LazyCallGraph::Node &CN = CG.get(lookupFunction(*M, "c"));
|
|
|
|
LazyCallGraph::Node &DN = CG.get(lookupFunction(*M, "d"));
|
|
|
|
LazyCallGraph::Node &DeadN = CG.get(lookupFunction(*M, "dead"));
|
|
|
|
AN.populate();
|
|
|
|
BN.populate();
|
|
|
|
CN.populate();
|
|
|
|
DN.populate();
|
|
|
|
DeadN.populate();
|
|
|
|
CG.insertEdge(AN, DeadN, LazyCallGraph::Edge::Ref);
|
|
|
|
CG.insertEdge(BN, DeadN, LazyCallGraph::Edge::Ref);
|
|
|
|
CG.insertEdge(CN, DeadN, LazyCallGraph::Edge::Ref);
|
|
|
|
CG.insertEdge(DN, DeadN, LazyCallGraph::Edge::Ref);
|
|
|
|
|
|
|
|
// Force the graph to be fully expanded.
|
|
|
|
CG.buildRefSCCs();
|
|
|
|
auto I = CG.postorder_ref_scc_begin();
|
|
|
|
LazyCallGraph::RefSCC &DeadRC = *I++;
|
|
|
|
LazyCallGraph::RefSCC &RC1 = *I++;
|
|
|
|
LazyCallGraph::RefSCC &RC2 = *I++;
|
|
|
|
EXPECT_EQ(CG.postorder_ref_scc_end(), I);
|
|
|
|
|
2017-03-11 21:02:31 +08:00
|
|
|
ASSERT_EQ(2, RC1.size());
|
2017-02-10 07:30:14 +08:00
|
|
|
LazyCallGraph::SCC &C1 = RC1[0];
|
|
|
|
LazyCallGraph::SCC &C2 = RC1[1];
|
|
|
|
|
|
|
|
EXPECT_EQ(&DeadRC, CG.lookupRefSCC(DeadN));
|
|
|
|
EXPECT_EQ(&C1, CG.lookupSCC(DN));
|
|
|
|
EXPECT_EQ(&C1, CG.lookupSCC(CN));
|
|
|
|
EXPECT_EQ(&C2, CG.lookupSCC(BN));
|
|
|
|
EXPECT_EQ(&RC1, CG.lookupRefSCC(DN));
|
|
|
|
EXPECT_EQ(&RC1, CG.lookupRefSCC(CN));
|
|
|
|
EXPECT_EQ(&RC1, CG.lookupRefSCC(BN));
|
|
|
|
EXPECT_EQ(&RC2, CG.lookupRefSCC(AN));
|
|
|
|
|
|
|
|
// Now delete 'dead'. There are no uses of this function but there are
|
|
|
|
// spurious references.
|
|
|
|
CG.removeDeadFunction(DeadN.getFunction());
|
|
|
|
|
|
|
|
// The only observable change should be that the RefSCC is gone from the
|
|
|
|
// postorder sequence.
|
|
|
|
I = CG.postorder_ref_scc_begin();
|
|
|
|
EXPECT_EQ(&RC1, &*I++);
|
|
|
|
EXPECT_EQ(&RC2, &*I++);
|
|
|
|
EXPECT_EQ(CG.postorder_ref_scc_end(), I);
|
|
|
|
}
|
2014-04-23 16:08:49 +08:00
|
|
|
}
|