//===- xray-converter.cpp: XRay Trace Conversion --------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Implements the trace conversion functions.
//
//===----------------------------------------------------------------------===//
#include "xray-converter.h"

#include "trie-node.h"
#include "xray-registry.h"
#include "llvm/DebugInfo/Symbolize/Symbolize.h"
#include "llvm/Support/EndianStream.h"
#include "llvm/Support/FileSystem.h"
#include "llvm/Support/FormatVariadic.h"
#include "llvm/Support/ScopedPrinter.h"
#include "llvm/Support/YAMLTraits.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/XRay/InstrumentationMap.h"
#include "llvm/XRay/Trace.h"
#include "llvm/XRay/YAMLXRayRecord.h"

using namespace llvm;
using namespace xray;

// llvm-xray convert
// ----------------------------------------------------------------------------
static cl::SubCommand Convert("convert", "Trace Format Conversion");
static cl::opt<std::string> ConvertInput(cl::Positional,
                                         cl::desc("<xray log file>"),
                                         cl::Required, cl::sub(Convert));
enum class ConvertFormats { BINARY, YAML, CHROME_TRACE_EVENT };
static cl::opt<ConvertFormats> ConvertOutputFormat(
    "output-format", cl::desc("output format"),
    cl::values(clEnumValN(ConvertFormats::BINARY, "raw", "output in binary"),
               clEnumValN(ConvertFormats::YAML, "yaml", "output in yaml"),
               clEnumValN(ConvertFormats::CHROME_TRACE_EVENT, "trace_event",
                          "Output in chrome's trace event format. "
                          "May be visualized with the Catapult trace viewer.")),
    cl::sub(Convert));
static cl::alias ConvertOutputFormat2("f", cl::aliasopt(ConvertOutputFormat),
                                      cl::desc("Alias for -output-format"),
                                      cl::sub(Convert));
static cl::opt<std::string>
    ConvertOutput("output", cl::value_desc("output file"), cl::init("-"),
                  cl::desc("output file; use '-' for stdout"),
                  cl::sub(Convert));
static cl::alias ConvertOutput2("o", cl::aliasopt(ConvertOutput),
                                cl::desc("Alias for -output"),
                                cl::sub(Convert));

static cl::opt<bool>
    ConvertSymbolize("symbolize",
                     cl::desc("symbolize function ids from the input log"),
                     cl::init(false), cl::sub(Convert));
static cl::alias ConvertSymbolize2("y", cl::aliasopt(ConvertSymbolize),
                                   cl::desc("Alias for -symbolize"),
                                   cl::sub(Convert));

static cl::opt<std::string>
    ConvertInstrMap("instr_map",
                    cl::desc("binary with the instrumentation map, or "
                             "a separate instrumentation map"),
                    cl::value_desc("binary with xray_instr_map"),
                    cl::sub(Convert), cl::init(""));
static cl::alias ConvertInstrMap2("m", cl::aliasopt(ConvertInstrMap),
                                  cl::desc("Alias for -instr_map"),
                                  cl::sub(Convert));
static cl::opt<bool> ConvertSortInput(
    "sort",
    cl::desc("determines whether to sort input log records by timestamp"),
    cl::sub(Convert), cl::init(true));
static cl::alias ConvertSortInput2("s", cl::aliasopt(ConvertSortInput),
                                   cl::desc("Alias for -sort"),
                                   cl::sub(Convert));

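// Illustrative invocation (file names are placeholders, not from the
// original source):
//
//   llvm-xray convert -symbolize -instr_map=<instrumented binary> \
//       -output-format=trace_event -output=trace.json <xray log file>
//
// -sort is enabled by default; -output-format=yaml or -output-format=raw
// selects the other converters implemented below.
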
using llvm::yaml::Output;
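
// Converts the in-memory Trace into a YAML document: a header block carrying
// the version, type, TSC flags, and cycle frequency, followed by one entry
// per record (the exact key names come from the YAML traits in
// llvm/XRay/YAMLXRayRecord.h).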
void TraceConverter::exportAsYAML(const Trace &Records, raw_ostream &OS) {
  YAMLXRayTrace Trace;
  const auto &FH = Records.getFileHeader();
  Trace.Header = {FH.Version, FH.Type, FH.ConstantTSC, FH.NonstopTSC,
                  FH.CycleFrequency};
  Trace.Records.reserve(Records.size());
  for (const auto &R : Records) {
    Trace.Records.push_back({R.RecordType, R.CPU, R.Type, R.FuncId,
                             Symbolize ? FuncIdHelper.SymbolOrNumber(R.FuncId)
                                       : llvm::to_string(R.FuncId),
                             R.TSC, R.TId, R.PId, R.CallArgs, R.Data});
  }
  Output Out(OS, nullptr, 0);
  Out.setWriteDefaultValues(false);
  Out << Trace;
}

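// Serializes the trace into the v1 binary log format: a fixed-size file
// header (version, type, TSC flag bitfield, cycle frequency, padding)
// followed by one fixed-size record per function event. The 32-byte header
// and 32-byte record sizes stated here summarize the Writer calls below,
// assuming the usual XRay field widths (u16 version/type, i32 function id,
// u64 TSC, u32 thread and process ids).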
void TraceConverter::exportAsRAWv1(const Trace &Records, raw_ostream &OS) {
  // First write out the file header, in the correct endian-appropriate format
  // (XRay currently assumes little endian).
  support::endian::Writer Writer(OS, support::endianness::little);
  const auto &FH = Records.getFileHeader();
  Writer.write(FH.Version);
  Writer.write(FH.Type);
  uint32_t Bitfield{0};
  if (FH.ConstantTSC)
    Bitfield |= 1uL;
  if (FH.NonstopTSC)
    Bitfield |= 1uL << 1;
  Writer.write(Bitfield);
  Writer.write(FH.CycleFrequency);

  // There are 16 bytes of padding at the end of the file header.
  static constexpr uint32_t Padding4B = 0;
  Writer.write(Padding4B);
  Writer.write(Padding4B);
  Writer.write(Padding4B);
  Writer.write(Padding4B);

  // Then write out the rest of the records, still in an endian-appropriate
  // format.
  for (const auto &R : Records) {
    switch (R.Type) {
    case RecordTypes::ENTER:
    case RecordTypes::ENTER_ARG:
      Writer.write(R.RecordType);
      Writer.write(static_cast<uint8_t>(R.CPU));
      Writer.write(uint8_t{0});
      break;
    case RecordTypes::EXIT:
      Writer.write(R.RecordType);
      Writer.write(static_cast<uint8_t>(R.CPU));
      Writer.write(uint8_t{1});
      break;
    case RecordTypes::TAIL_EXIT:
      Writer.write(R.RecordType);
      Writer.write(static_cast<uint8_t>(R.CPU));
      Writer.write(uint8_t{2});
      break;
    case RecordTypes::CUSTOM_EVENT:
    case RecordTypes::TYPED_EVENT:
      // Skip custom and typed event records for v1 logs.
      continue;
    }
    Writer.write(R.FuncId);
    Writer.write(R.TSC);
    Writer.write(R.TId);

    if (FH.Version >= 3)
      Writer.write(R.PId);
    else
      Writer.write(Padding4B);

    Writer.write(Padding4B);
    Writer.write(Padding4B);
  }
}

namespace {

// A structure that allows building a dictionary of stack ids for the Chrome
// trace event format.
struct StackIdData {
  // Each Stack of function calls has a unique ID.
  unsigned id;

  // Bookkeeping so that IDs can be maintained uniquely across threads.
  // Traversal keeps sibling pointers to other threads' stacks. This is helpful
  // to determine when a thread encounters a new stack and should assign a new
  // unique ID.
  SmallVector<TrieNode<StackIdData> *, 4> siblings;
};

using StackTrieNode = TrieNode<StackIdData>;

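// For example (illustrative): if two threads both execute the call chain
// f() -> g(), the node for g in each thread's trie carries the same stack id,
// and the two g nodes reference each other through their siblings lists.
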
// A helper function to find the sibling nodes for an encountered function in a
// thread of execution. Relies on the invariant that each time a new node is
// traversed in a thread, sibling bidirectional pointers are maintained.
SmallVector<StackTrieNode *, 4>
findSiblings(StackTrieNode *parent, int32_t FnId, uint32_t TId,
             const DenseMap<uint32_t, SmallVector<StackTrieNode *, 4>>
                 &StackRootsByThreadId) {

  SmallVector<StackTrieNode *, 4> Siblings{};

  if (parent == nullptr) {
    for (auto map_iter : StackRootsByThreadId) {
      // Only look for siblings in other threads.
      if (map_iter.first != TId)
        for (auto node_iter : map_iter.second) {
          if (node_iter->FuncId == FnId)
            Siblings.push_back(node_iter);
        }
    }
    return Siblings;
  }

  for (auto *ParentSibling : parent->ExtraData.siblings)
    for (auto node_iter : ParentSibling->Callees)
      if (node_iter->FuncId == FnId)
        Siblings.push_back(node_iter);

  return Siblings;
}

// Given a function being invoked in a thread with id TId, finds and returns
// the StackTrie representing the function call stack. If no node exists,
// creates the node. Assigns unique IDs to stacks newly encountered among all
// threads and keeps sibling links up to date when creating new nodes.
StackTrieNode *findOrCreateStackNode(
    StackTrieNode *Parent, int32_t FuncId, uint32_t TId,
    DenseMap<uint32_t, SmallVector<StackTrieNode *, 4>> &StackRootsByThreadId,
    DenseMap<unsigned, StackTrieNode *> &StacksByStackId, unsigned *id_counter,
    std::forward_list<StackTrieNode> &NodeStore) {
  SmallVector<StackTrieNode *, 4> &ParentCallees =
      Parent == nullptr ? StackRootsByThreadId[TId] : Parent->Callees;
  auto match = find_if(ParentCallees, [FuncId](StackTrieNode *ParentCallee) {
    return FuncId == ParentCallee->FuncId;
  });
  if (match != ParentCallees.end())
    return *match;

  SmallVector<StackTrieNode *, 4> siblings =
      findSiblings(Parent, FuncId, TId, StackRootsByThreadId);
  if (siblings.empty()) {
    NodeStore.push_front({FuncId, Parent, {}, {(*id_counter)++, {}}});
    StackTrieNode *CurrentStack = &NodeStore.front();
    StacksByStackId[*id_counter - 1] = CurrentStack;
    ParentCallees.push_back(CurrentStack);
    return CurrentStack;
  }
  unsigned stack_id = siblings[0]->ExtraData.id;
  NodeStore.push_front({FuncId, Parent, {}, {stack_id, std::move(siblings)}});
  StackTrieNode *CurrentStack = &NodeStore.front();
  for (auto *sibling : CurrentStack->ExtraData.siblings)
    sibling->ExtraData.siblings.push_back(CurrentStack);
  ParentCallees.push_back(CurrentStack);
  return CurrentStack;
}

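// A single emitted event looks like (illustrative values):
//   { "name" : "foo", "ph" : "B", "tid" : "84", "pid" : "256",
//     "ts" : "0.4095", "sf" : "7" }
// "ph" is "B" for a function entry and "E" for an exit; "sf" refers to an
// entry in the "stackFrames" dictionary written at the end of the document.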
void writeTraceViewerRecord(uint16_t Version, raw_ostream &OS, int32_t FuncId,
                            uint32_t TId, uint32_t PId, bool Symbolize,
                            const FuncIdConversionHelper &FuncIdHelper,
                            double EventTimestampUs,
                            const StackTrieNode &StackCursor,
                            StringRef FunctionPhenotype) {
  OS << " ";
  if (Version >= 3) {
    OS << llvm::formatv(
        R"({ "name" : "{0}", "ph" : "{1}", "tid" : "{2}", "pid" : "{3}", )"
        R"("ts" : "{4:f4}", "sf" : "{5}" })",
        (Symbolize ? FuncIdHelper.SymbolOrNumber(FuncId)
                   : llvm::to_string(FuncId)),
        FunctionPhenotype, TId, PId, EventTimestampUs,
        StackCursor.ExtraData.id);
  } else {
    OS << llvm::formatv(
        R"({ "name" : "{0}", "ph" : "{1}", "tid" : "{2}", "pid" : "1", )"
        R"("ts" : "{3:f3}", "sf" : "{4}" })",
        (Symbolize ? FuncIdHelper.SymbolOrNumber(FuncId)
                   : llvm::to_string(FuncId)),
        FunctionPhenotype, TId, EventTimestampUs, StackCursor.ExtraData.id);
  }
}

} // namespace

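// Emits a Chrome trace_event JSON document: a "traceEvents" array of "B"
// (begin) and "E" (end) events, a "displayTimeUnit" hint, and a "stackFrames"
// dictionary keyed by the stack ids assigned by findOrCreateStackNode above.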
void TraceConverter::exportAsChromeTraceEventFormat(const Trace &Records,
                                                    raw_ostream &OS) {
  const auto &FH = Records.getFileHeader();
  auto Version = FH.Version;
  auto CycleFreq = FH.CycleFrequency;

  unsigned id_counter = 0;

  OS << "{\n \"traceEvents\": [";
  DenseMap<uint32_t, StackTrieNode *> StackCursorByThreadId{};
  DenseMap<uint32_t, SmallVector<StackTrieNode *, 4>> StackRootsByThreadId{};
  DenseMap<unsigned, StackTrieNode *> StacksByStackId{};
  std::forward_list<StackTrieNode> NodeStore{};
  int loop_count = 0;
  for (const auto &R : Records) {
    if (loop_count++ == 0)
      OS << "\n";
    else
      OS << ",\n";

    // Chrome trace event format always wants data in micros.
    // CyclesPerMicro = CycleHertz / 10^6
    // TSC / CyclesPerMicro == TSC * 10^6 / CycleHertz == MicroTimestamp
    // Could lose some precision here by converting the TSC to a double to
    // multiply by the period in micros. 52 bit mantissa is a good start though.
    // TODO: Make feature request to Chrome Trace viewer to accept ticks and a
    // frequency or do some more involved calculation to avoid dangers of
    // conversion.
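    // For example, with illustrative numbers: CycleFreq = 2,500,000,000
    // (a 2.5 GHz TSC) and R.TSC = 2,500,000 ticks give
    // 2.5e6 * 1e6 / 2.5e9 = 1000 microseconds.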
    double EventTimestampUs = double(1000000) / CycleFreq * double(R.TSC);
    StackTrieNode *&StackCursor = StackCursorByThreadId[R.TId];
    switch (R.Type) {
    case RecordTypes::CUSTOM_EVENT:
    case RecordTypes::TYPED_EVENT:
      // TODO: Support typed and custom event rendering on Chrome Trace Viewer.
      break;
    case RecordTypes::ENTER:
    case RecordTypes::ENTER_ARG:
      StackCursor = findOrCreateStackNode(StackCursor, R.FuncId, R.TId,
                                          StackRootsByThreadId, StacksByStackId,
                                          &id_counter, NodeStore);
      // Each record is represented as a json dictionary with function name,
      // type of B for begin or E for end, thread id, process id,
      // timestamp in microseconds, and a stack frame id. The ids are logged
      // in an id dictionary after the events.
      writeTraceViewerRecord(Version, OS, R.FuncId, R.TId, R.PId, Symbolize,
                             FuncIdHelper, EventTimestampUs, *StackCursor, "B");
      break;
    case RecordTypes::EXIT:
    case RecordTypes::TAIL_EXIT:
      // No entries to record end for.
      if (StackCursor == nullptr)
        break;
      // Should we emit an END record anyway, or account for this condition?
      // (And/Or in loop termination below)
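      // The do/while below pops frames until an "E" event has been emitted
      // for the function named by this EXIT/TAIL_EXIT record (or the stack is
      // exhausted), so a single exit record may close several nested entries.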
      StackTrieNode *PreviousCursor = nullptr;
      do {
        if (PreviousCursor != nullptr) {
          OS << ",\n";
        }
        writeTraceViewerRecord(Version, OS, StackCursor->FuncId, R.TId, R.PId,
                               Symbolize, FuncIdHelper, EventTimestampUs,
                               *StackCursor, "E");
        PreviousCursor = StackCursor;
        StackCursor = StackCursor->Parent;
      } while (PreviousCursor->FuncId != R.FuncId && StackCursor != nullptr);
      break;
    }
  }
  OS << "\n ],\n"; // Close the Trace Events array.
  OS << " "
     << "\"displayTimeUnit\": \"ns\",\n";

  // The stackFrames dictionary substantially reduces the size of the output
  // file by avoiding repeating the entire call stack of function names for
  // each entry.
[XRay][tools] Revert "Use Support/JSON.h in llvm-xray convert"
Summary:
This reverts D50129 / rL338834: [XRay][tools] Use Support/JSON.h in llvm-xray convert
Abstractions are great.
Readable code is great.
JSON support library is a *good* idea.
However unfortunately, there is an internal detail that one needs
to be aware of in `llvm::json::Object` - it uses `llvm::DenseMap`.
So for **every** `llvm::json::Object`, even if you only store a single `int`
entry there, you pay the whole price of `llvm::DenseMap`.
Unfortunately, it matters for `llvm-xray`.
I was trying to analyse the `llvm-exegesis` analysis mode performance,
and for that i wanted to view the LLVM X-Ray log visualization in Chrome
trace viewer. And the `llvm-xray convert` is sluggish, and sometimes
even ended up being killed by OOM.
`xray-log.llvm-exegesis.lwZ0sT` was acquired from `llvm-exegesis`
(compiled with ` -fxray-instruction-threshold=128`)
analysis mode over `-benchmarks-file` with 10099 points (one full
latency measurement set), with normal runtime of 0.387s.
Timings:
Old: (copied from D58580)
```
$ perf stat -r 5 ./bin/llvm-xray convert -sort -symbolize -instr_map=./bin/llvm-exegesis -output-format=trace_event -output=/tmp/trace.yml xray-log.llvm-exegesis.lwZ0sT
Performance counter stats for './bin/llvm-xray convert -sort -symbolize -instr_map=./bin/llvm-exegesis -output-format=trace_event -output=/tmp/trace.yml xray-log.llvm-exegesis.lwZ0sT' (5 runs):
21346.24 msec task-clock # 1.000 CPUs utilized ( +- 0.28% )
314 context-switches # 14.701 M/sec ( +- 59.13% )
1 cpu-migrations # 0.037 M/sec ( +-100.00% )
2181354 page-faults # 102191.251 M/sec ( +- 0.02% )
85477442102 cycles # 4004415.019 GHz ( +- 0.28% ) (83.33%)
14526427066 stalled-cycles-frontend # 16.99% frontend cycles idle ( +- 0.70% ) (83.33%)
32371533721 stalled-cycles-backend # 37.87% backend cycles idle ( +- 0.27% ) (33.34%)
67896890228 instructions # 0.79 insn per cycle
# 0.48 stalled cycles per insn ( +- 0.03% ) (50.00%)
14592654840 branches # 683631198.653 M/sec ( +- 0.02% ) (66.67%)
212207534 branch-misses # 1.45% of all branches ( +- 0.94% ) (83.34%)
21.3502 +- 0.0585 seconds time elapsed ( +- 0.27% )
```
New:
```
$ perf stat -r 9 ./bin/llvm-xray convert -sort -symbolize -instr_map=./bin/llvm-exegesis -output-format=trace_event -output=/tmp/trace.yml xray-log.llvm-exegesis.lwZ0sT
Performance counter stats for './bin/llvm-xray convert -sort -symbolize -instr_map=./bin/llvm-exegesis -output-format=trace_event -output=/tmp/trace.yml xray-log.llvm-exegesis.lwZ0sT' (9 runs):
7178.38 msec task-clock # 1.000 CPUs utilized ( +- 0.26% )
182 context-switches # 25.402 M/sec ( +- 28.84% )
0 cpu-migrations # 0.046 M/sec ( +- 70.71% )
33701 page-faults # 4694.994 M/sec ( +- 0.88% )
28761053971 cycles # 4006833.933 GHz ( +- 0.26% ) (83.32%)
2028297997 stalled-cycles-frontend # 7.05% frontend cycles idle ( +- 1.61% ) (83.32%)
10773154901 stalled-cycles-backend # 37.46% backend cycles idle ( +- 0.38% ) (33.36%)
36199132874 instructions # 1.26 insn per cycle
# 0.30 stalled cycles per insn ( +- 0.03% ) (50.02%)
6434504227 branches # 896420204.421 M/sec ( +- 0.03% ) (66.68%)
73355176 branch-misses # 1.14% of all branches ( +- 1.46% ) (83.33%)
7.1807 +- 0.0190 seconds time elapsed ( +- 0.26% )
```
So using `llvm::json` nearly triples run-time on that test case.
(+3x is times, not percent.)
Memory:
Old:
```
total runtime: 39.88s.
bytes allocated in total (ignoring deallocations): 79.07GB (1.98GB/s)
calls to allocation functions: 33267816 (834135/s)
temporary memory allocations: 5832298 (146235/s)
peak heap memory consumption: 9.21GB
peak RSS (including heaptrack overhead): 147.98GB
total memory leaked: 1.09MB
```
New:
```
total runtime: 17.42s.
bytes allocated in total (ignoring deallocations): 5.12GB (293.86MB/s)
calls to allocation functions: 21382982 (1227284/s)
temporary memory allocations: 232858 (13364/s)
peak heap memory consumption: 350.69MB
peak RSS (including heaptrack overhead): 2.55GB
total memory leaked: 79.95KB
```
Diff:
```
total runtime: -22.46s.
bytes allocated in total (ignoring deallocations): -73.95GB (3.29GB/s)
calls to allocation functions: -11884834 (529155/s)
temporary memory allocations: -5599440 (249307/s)
peak heap memory consumption: -8.86GB
peak RSS (including heaptrack overhead): 0B
total memory leaked: -1.01MB
```
So using `llvm::json` increases *peak* heap consumption on *this* test case by
roughly 27x (9.21GB vs 350.69MB), and the total bytes allocated by roughly 15x
(79.07GB vs 5.12GB). Again, both numbers are factors, *not* percentages.
Note also that memory usage is clearly unbounded with `llvm::json`: it depends
directly on the length of the log, so peak memory consumption grows with the
input. That is not the case with the dumb, hand-written code: nothing
accumulates, so peak memory consumption stays fixed, and it therefore handles
*much* larger logs without OOM'ing (roughly the difference sketched below).
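This is only meant to show the shape of the two approaches; `Record`, `Records`,
and the field names are invented for illustration, and neither function is the
actual code on either side of the revert. The first variant builds the whole
event list as an `llvm::json` tree and keeps it live until a single print at the
end; the second writes each record as soon as it is seen:
```
#include "llvm/ADT/ArrayRef.h"
#include "llvm/Support/JSON.h"
#include "llvm/Support/ScopedPrinter.h"
#include "llvm/Support/raw_ostream.h"

// Hypothetical stand-in for one XRay function-call record.
struct Record {
  int32_t FuncId;
  uint32_t TId;
  uint64_t TSC;
};

// Tree-building variant (the reverted shape): every nested json::Object owns
// a DenseMap, and the whole array stays live until the single print at the end.
void emitAsJsonTree(llvm::ArrayRef<Record> Records, llvm::raw_ostream &OS) {
  llvm::json::Array Events;
  for (const Record &R : Records)
    Events.push_back(llvm::json::Object{{"ph", "X"},
                                        {"tid", int64_t(R.TId)},
                                        {"ts", int64_t(R.TSC)},
                                        {"name", llvm::to_string(R.FuncId)}});
  OS << llvm::json::Value(std::move(Events));
}

// Streaming variant (the restored shape): each record is formatted and written
// immediately, so peak memory does not depend on the trace length.
void emitStreaming(llvm::ArrayRef<Record> Records, llvm::raw_ostream &OS) {
  OS << "[";
  bool First = true;
  for (const Record &R : Records) {
    OS << (First ? "\n" : ",\n");
    First = false;
    OS << "  { \"ph\": \"X\", \"tid\": " << R.TId << ", \"ts\": " << R.TSC
       << ", \"name\": \"" << R.FuncId << "\" }";
  }
  OS << "\n]\n";
}
```
The per-object `DenseMap` and the lifetime of the whole tree are what the
numbers above end up measuring.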
Readability is good, but the price is simply unacceptable here.
Too bad none of this analysis was done as part of the development/review of
D50129 itself.
Reviewers: dberris, kpw, sammccall
Reviewed By: dberris
Subscribers: riccibruno, hans, courbet, jdoerfert, llvm-commits
Tags: #llvm
Differential Revision: https://reviews.llvm.org/D58584
llvm-svn: 354764
2019-02-25 15:39:07 +08:00
|
|
|
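// Emit the trace_event "stackFrames" dictionary by hand: one entry per node
// of the stack trie, keyed by the node's id, carrying the function name
// (symbolized if requested) and a "parent" back-reference for non-root nodes.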
OS << R"( "stackFrames": {)";
|
|
|
|
int stack_frame_count = 0;
|
|
|
|
for (auto map_iter : StacksByStackId) {
|
|
|
|
if (stack_frame_count++ == 0)
|
|
|
|
OS << "\n";
|
|
|
|
else
|
|
|
|
OS << ",\n";
|
|
|
|
OS << " ";
|
|
|
|
OS << llvm::formatv(
|
|
|
|
R"("{0}" : { "name" : "{1}")", map_iter.first,
|
|
|
|
(Symbolize ? FuncIdHelper.SymbolOrNumber(map_iter.second->FuncId)
|
|
|
|
: llvm::to_string(map_iter.second->FuncId)));
|
|
|
|
if (map_iter.second->Parent != nullptr)
|
|
|
|
OS << llvm::formatv(R"(, "parent": "{0}")",
|
|
|
|
map_iter.second->Parent->ExtraData.id);
|
|
|
|
OS << " }";
|
2017-11-07 08:28:28 +08:00
|
|
|
}
|
2019-02-25 15:39:07 +08:00
|
|
|
OS << "\n }\n"; // Close the stack frames map.
|
|
|
|
OS << "}\n"; // Close the JSON entry.
|
2017-11-07 08:28:28 +08:00
|
|
|
}
|
|
|
|
|
2017-01-10 10:38:11 +08:00
|
|
|
namespace llvm {
|
|
|
|
namespace xray {
|
|
|
|
|
|
|
|
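// Register the handler for the "convert" subcommand: load the instrumentation
// map (if one was given), set up symbolization, open the output stream, load
// the input trace, and dispatch on the requested output format.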
static CommandRegistration Unused(&Convert, []() -> Error {
|
|
|
|
// FIXME: Support conversion to BINARY when upgrading XRay trace versions.
|
2017-02-01 08:05:29 +08:00
|
|
|
InstrumentationMap Map;
|
|
|
|
if (!ConvertInstrMap.empty()) {
|
|
|
|
auto InstrumentationMapOrError = loadInstrumentationMap(ConvertInstrMap);
|
|
|
|
if (!InstrumentationMapOrError)
|
|
|
|
return joinErrors(make_error<StringError>(
|
|
|
|
Twine("Cannot open instrumentation map '") +
|
|
|
|
ConvertInstrMap + "'",
|
|
|
|
std::make_error_code(std::errc::invalid_argument)),
|
|
|
|
InstrumentationMapOrError.takeError());
|
|
|
|
Map = std::move(*InstrumentationMapOrError);
|
|
|
|
}
|
2017-01-10 10:38:11 +08:00
|
|
|
|
2017-02-01 08:05:29 +08:00
|
|
|
const auto &FunctionAddresses = Map.getFunctionAddresses();
|
2017-01-10 10:38:11 +08:00
|
|
|
symbolize::LLVMSymbolizer::Options Opts(
|
|
|
|
symbolize::FunctionNameKind::LinkageName, true, true, false, "");
|
|
|
|
symbolize::LLVMSymbolizer Symbolizer(Opts);
|
|
|
|
llvm::xray::FuncIdConversionHelper FuncIdHelper(ConvertInstrMap, Symbolizer,
|
|
|
|
FunctionAddresses);
|
|
|
|
llvm::xray::TraceConverter TC(FuncIdHelper, ConvertSymbolize);
|
2017-02-01 08:05:29 +08:00
|
|
|
std::error_code EC;
|
2017-01-10 10:38:11 +08:00
|
|
|
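// RAW output must not go through text-mode translation, so only the YAML and
// trace_event formats open the file as text.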
raw_fd_ostream OS(ConvertOutput, EC,
|
|
|
|
ConvertOutputFormat == ConvertFormats::BINARY
|
|
|
|
? sys::fs::OpenFlags::F_None
|
|
|
|
: sys::fs::OpenFlags::F_Text);
|
|
|
|
if (EC)
|
|
|
|
return make_error<StringError>(
|
|
|
|
Twine("Cannot open file '") + ConvertOutput + "' for writing.", EC);
|
|
|
|
|
2017-02-01 08:05:29 +08:00
|
|
|
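// Load the input trace, sorting the records by timestamp when -sort is given.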
auto TraceOrErr = loadTraceFile(ConvertInput, ConvertSortInput);
|
|
|
|
if (!TraceOrErr)
|
2017-01-11 14:39:09 +08:00
|
|
|
return joinErrors(
|
|
|
|
make_error<StringError>(
|
|
|
|
Twine("Failed loading input file '") + ConvertInput + "'.",
|
2017-01-13 02:33:14 +08:00
|
|
|
std::make_error_code(std::errc::executable_format_error)),
|
2017-01-11 14:39:09 +08:00
|
|
|
TraceOrErr.takeError());
|
2017-02-01 08:05:29 +08:00
|
|
|
|
|
|
|
auto &T = *TraceOrErr;
|
|
|
|
switch (ConvertOutputFormat) {
|
|
|
|
case ConvertFormats::YAML:
|
|
|
|
TC.exportAsYAML(T, OS);
|
|
|
|
break;
|
|
|
|
case ConvertFormats::BINARY:
|
|
|
|
TC.exportAsRAWv1(T, OS);
|
|
|
|
break;
|
2017-11-07 08:28:28 +08:00
|
|
|
case ConvertFormats::CHROME_TRACE_EVENT:
|
|
|
|
TC.exportAsChromeTraceEventFormat(T, OS);
|
|
|
|
break;
|
2017-01-10 10:38:11 +08:00
|
|
|
}
|
|
|
|
return Error::success();
|
|
|
|
});
|
|
|
|
|
|
|
|
} // namespace xray
|
|
|
|
} // namespace llvm
|