//===-- TraceGDBRemotePackets.cpp -------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
#include "lldb/Utility/TraceGDBRemotePackets.h"
using namespace llvm;
using namespace llvm::json;
namespace lldb_private {
/// jLLDBTraceSupported
/// \{
// Deserializes a TraceSupportedResponse, mapping the "description" and
// "name" fields. Errors are reported through \p path.
bool fromJSON(const json::Value &value, TraceSupportedResponse &packet,
              Path path) {
  ObjectMapper mapper(value, path);
  if (!mapper)
    return false;
  return mapper.map("description", packet.description) &&
         mapper.map("name", packet.name);
}
// Serializes a TraceSupportedResponse into a JSON object with its
// "description" and "name" fields.
json::Value toJSON(const TraceSupportedResponse &packet) {
  Object obj;
  obj["description"] = packet.description;
  obj["name"] = packet.name;
  return std::move(obj);
}
/// \}
/// jLLDBTraceStart
/// \{
// Process-wide tracing is requested when no explicit tid list was provided.
// Use the optional's boolean conversion directly instead of a C-style cast.
bool TraceStartRequest::IsProcessTracing() const { return !tids; }
// Deserializes a TraceStartRequest, mapping "type" and the optional "tids".
bool fromJSON(const json::Value &value, TraceStartRequest &packet, Path path) {
  ObjectMapper mapper(value, path);
  if (!mapper)
    return false;
  return mapper.map("type", packet.type) && mapper.map("tids", packet.tids);
}
// Serializes a TraceStartRequest into a JSON object with "tids" and "type".
json::Value toJSON(const TraceStartRequest &packet) {
  Object obj;
  obj["tids"] = packet.tids;
  obj["type"] = packet.type;
  return std::move(obj);
}
/// \}
/// jLLDBTraceStop
/// \{
// Builds a stop request targeting the given threads for trace plug-in
// \p type. The optional tid list is materialized even if \p tids_ is empty,
// so this request is always thread-scoped (see IsProcessTracing).
TraceStopRequest::TraceStopRequest(llvm::StringRef type,
                                   const std::vector<lldb::tid_t> &tids_)
    : type(type) {
  tids.emplace();
  tids->reserve(tids_.size());
  for (const lldb::tid_t tid : tids_)
    tids->push_back(tid);
}
// Process-wide tracing is being stopped when no explicit tid list is present.
// Use the optional's boolean conversion directly instead of a C-style cast.
bool TraceStopRequest::IsProcessTracing() const { return !tids; }
// Deserializes a TraceStopRequest, mapping "type" and the optional "tids".
bool fromJSON(const json::Value &value, TraceStopRequest &packet, Path path) {
  ObjectMapper mapper(value, path);
  if (!mapper)
    return false;
  return mapper.map("type", packet.type) && mapper.map("tids", packet.tids);
}
// Serializes a TraceStopRequest into a JSON object with "type" and "tids".
json::Value toJSON(const TraceStopRequest &packet) {
  Object obj;
  obj["type"] = packet.type;
  obj["tids"] = packet.tids;
  return std::move(obj);
}
/// \}
/// jLLDBTraceGetState
/// \{
// Deserializes a TraceGetStateRequest; only the "type" field is mapped.
bool fromJSON(const json::Value &value, TraceGetStateRequest &packet,
              Path path) {
  ObjectMapper mapper(value, path);
  if (!mapper)
    return false;
  return mapper.map("type", packet.type);
}
// Serializes a TraceGetStateRequest into a JSON object holding only "type".
json::Value toJSON(const TraceGetStateRequest &packet) {
  Object obj;
  obj["type"] = packet.type;
  return std::move(obj);
}
// Deserializes a TraceBinaryData descriptor, mapping "kind" and "size".
bool fromJSON(const json::Value &value, TraceBinaryData &packet, Path path) {
  ObjectMapper mapper(value, path);
  if (!mapper)
    return false;
  return mapper.map("kind", packet.kind) && mapper.map("size", packet.size);
}
// Serializes a TraceBinaryData descriptor into a JSON object.
json::Value toJSON(const TraceBinaryData &packet) {
  Object obj;
  obj["kind"] = packet.kind;
  obj["size"] = packet.size;
  return std::move(obj);
}
// Deserializes a TraceThreadState, mapping "tid" and "binaryData".
bool fromJSON(const json::Value &value, TraceThreadState &packet, Path path) {
  ObjectMapper mapper(value, path);
  if (!mapper)
    return false;
  return mapper.map("tid", packet.tid) &&
         mapper.map("binaryData", packet.binary_data);
}
// Serializes a TraceThreadState into a JSON object.
json::Value toJSON(const TraceThreadState &packet) {
  Object obj;
  obj["tid"] = packet.tid;
  obj["binaryData"] = packet.binary_data;
  return std::move(obj);
}
// Deserializes a TraceGetStateResponse, mapping the per-thread states, the
// process-level binary data descriptors, the per-cpu states and any warnings.
bool fromJSON(const json::Value &value, TraceGetStateResponse &packet,
              Path path) {
  ObjectMapper mapper(value, path);
  if (!mapper)
    return false;
  return mapper.map("tracedThreads", packet.traced_threads) &&
         mapper.map("processBinaryData", packet.process_binary_data) &&
         mapper.map("cpus", packet.cpus) &&
         mapper.map("warnings", packet.warnings);
}
// Serializes a TraceGetStateResponse into a JSON object.
json::Value toJSON(const TraceGetStateResponse &packet) {
  Object obj;
  obj["tracedThreads"] = packet.traced_threads;
  obj["processBinaryData"] = packet.process_binary_data;
  obj["cpus"] = packet.cpus;
  obj["warnings"] = packet.warnings;
  return std::move(obj);
}
// Appends \p warning to the response, lazily creating the warnings list the
// first time a warning is added.
void TraceGetStateResponse::AddWarning(StringRef warning) {
  if (!warnings)
    warnings.emplace();
  // Use str() to copy exactly the referenced characters. StringRef::data() is
  // not guaranteed to point to a null-terminated buffer, so constructing a
  // std::string from it could read past the end of the referenced data.
  warnings->push_back(warning.str());
}
// Deserializes a TraceCpuState. The cpu "id" is parsed as a uint64_t and then
// narrowed to lldb::cpu_id_t; packet.id is only written on success.
bool fromJSON(const json::Value &value, TraceCpuState &packet,
              json::Path path) {
  ObjectMapper mapper(value, path);
  uint64_t cpu_id;
  const bool ok = mapper && mapper.map("id", cpu_id) &&
                  mapper.map("binaryData", packet.binary_data);
  if (ok)
    packet.id = static_cast<lldb::cpu_id_t>(cpu_id);
  return ok;
}
// Serializes a TraceCpuState into a JSON object.
json::Value toJSON(const TraceCpuState &packet) {
  Object obj;
  obj["id"] = packet.id;
  obj["binaryData"] = packet.binary_data;
  return std::move(obj);
}
/// \}
/// jLLDBTraceGetBinaryData
/// \{
// Serializes a TraceGetBinaryDataRequest into a JSON object with "type",
// "kind", and the optional "tid" and "cpuId" selectors.
json::Value toJSON(const TraceGetBinaryDataRequest &packet) {
  Object obj;
  obj["type"] = packet.type;
  obj["kind"] = packet.kind;
  obj["tid"] = packet.tid;
  obj["cpuId"] = packet.cpu_id;
  return std::move(obj);
}
bool fromJSON(const json::Value &value, TraceGetBinaryDataRequest &packet,
Path path) {
ObjectMapper o(value, path);
Optional<uint64_t> cpu_id;
if (!(o && o.map("type", packet.type) && o.map("kind", packet.kind) &&
o.map("tid", packet.tid) && o.map("cpuId", cpu_id)))
[trace][intelpt] Support system-wide tracing [13] - Add context switch decoding - Add the logic that parses all cpu context switch traces and produces blocks of continuous executions, which will be later used to assign intel pt subtraces to threads and to identify gaps. This logic can also identify if the context switch trace is malformed. - The continuous executions blocks are able to indicate when there were some contention issues when producing the context switch trace. See the inline comments for more information. - Update the 'dump info' command to show information and stats related to the multicore decoding flow, including timing about context switch decoding. - Add the logic to conver nanoseconds to TSCs. - Fix a bug when returning the context switches. Now they data returned makes sense and even empty traces can be returned from lldb-server. - Finish the necessary bits for loading and saving a multi-core trace bundle from disk. - Change some size_t to uint64_t for compatibility with 32 bit systems. Tested by saving a trace session of a program that sleeps 100 times, it was able to produce the following 'dump info' text: ``` (lldb) trace load /tmp/trace3/trace.json (lldb) thread trace dump info Trace technology: intel-pt thread #1: tid = 4192415 Total number of instructions: 1 Memory usage: Total approximate memory usage (excluding raw trace): 2.51 KiB Average memory usage per instruction (excluding raw trace): 2573.00 bytes Timing for this thread: Timing for global tasks: Context switch trace decoding: 0.00s Events: Number of instructions with events: 0 Number of individual events: 0 Multi-core decoding: Total number of continuous executions found: 2499 Number of continuous executions for this thread: 102 Errors: Number of TSC decoding errors: 0 ``` Differential Revision: https://reviews.llvm.org/D126267
2022-05-20 07:39:20 +08:00
return false;
if (cpu_id)
packet.cpu_id = static_cast<lldb::cpu_id_t>(*cpu_id);
[trace][intelpt] Support system-wide tracing [13] - Add context switch decoding - Add the logic that parses all cpu context switch traces and produces blocks of continuous executions, which will be later used to assign intel pt subtraces to threads and to identify gaps. This logic can also identify if the context switch trace is malformed. - The continuous executions blocks are able to indicate when there were some contention issues when producing the context switch trace. See the inline comments for more information. - Update the 'dump info' command to show information and stats related to the multicore decoding flow, including timing about context switch decoding. - Add the logic to conver nanoseconds to TSCs. - Fix a bug when returning the context switches. Now they data returned makes sense and even empty traces can be returned from lldb-server. - Finish the necessary bits for loading and saving a multi-core trace bundle from disk. - Change some size_t to uint64_t for compatibility with 32 bit systems. Tested by saving a trace session of a program that sleeps 100 times, it was able to produce the following 'dump info' text: ``` (lldb) trace load /tmp/trace3/trace.json (lldb) thread trace dump info Trace technology: intel-pt thread #1: tid = 4192415 Total number of instructions: 1 Memory usage: Total approximate memory usage (excluding raw trace): 2.51 KiB Average memory usage per instruction (excluding raw trace): 2573.00 bytes Timing for this thread: Timing for global tasks: Context switch trace decoding: 0.00s Events: Number of instructions with events: 0 Number of individual events: 0 Multi-core decoding: Total number of continuous executions found: 2499 Number of continuous executions for this thread: 102 Errors: Number of TSC decoding errors: 0 ``` Differential Revision: https://reviews.llvm.org/D126267
2022-05-20 07:39:20 +08:00
return true;
}
/// \}
} // namespace lldb_private