//===- RawMemProfReader.cpp - Instrumented memory profiling reader --------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains support for reading MemProf profiling data.
//
//===----------------------------------------------------------------------===//

#include <algorithm>
#include <cstdint>
#include <memory>
#include <type_traits>

#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/Twine.h"
#include "llvm/DebugInfo/DWARF/DWARFContext.h"
#include "llvm/DebugInfo/Symbolize/SymbolizableModule.h"
#include "llvm/DebugInfo/Symbolize/SymbolizableObjectFile.h"
#include "llvm/Object/Binary.h"
#include "llvm/Object/ELFObjectFile.h"
#include "llvm/Object/ObjectFile.h"
#include "llvm/ProfileData/InstrProf.h"
#include "llvm/ProfileData/MemProf.h"
#include "llvm/ProfileData/MemProfData.inc"
#include "llvm/ProfileData/RawMemProfReader.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/Endian.h"
#include "llvm/Support/Format.h"
#include "llvm/Support/Path.h"
#include "llvm/Support/raw_ostream.h"

#define DEBUG_TYPE "memprof"

namespace llvm {
namespace memprof {
namespace {
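// Read a single value of type T from Ptr, asserting that the address is
// suitably aligned for T.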
template <class T = uint64_t> inline T alignedRead(const char *Ptr) {
  static_assert(std::is_pod<T>::value, "Not a pod type.");
  assert(reinterpret_cast<size_t>(Ptr) % sizeof(T) == 0 && "Unaligned Read");
  return *reinterpret_cast<const T *>(Ptr);
}
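
// Sanity check the raw profile buffer: verify the magic and the per-dump
// version, reject empty or truncated buffers, and confirm that the dump sizes
// recorded in the headers sum to the total buffer size.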
Error checkBuffer(const MemoryBuffer &Buffer) {
  if (!RawMemProfReader::hasFormat(Buffer))
    return make_error<InstrProfError>(instrprof_error::bad_magic);

  if (Buffer.getBufferSize() == 0)
    return make_error<InstrProfError>(instrprof_error::empty_raw_profile);

  if (Buffer.getBufferSize() < sizeof(Header)) {
    return make_error<InstrProfError>(instrprof_error::truncated);
  }

  // The size of the buffer can be > header total size since we allow repeated
  // serialization of memprof profiles to the same file.
  uint64_t TotalSize = 0;
  const char *Next = Buffer.getBufferStart();
  while (Next < Buffer.getBufferEnd()) {
    auto *H = reinterpret_cast<const Header *>(Next);
    if (H->Version != MEMPROF_RAW_VERSION) {
      return make_error<InstrProfError>(instrprof_error::unsupported_version);
    }

    TotalSize += H->TotalSize;
    Next += H->TotalSize;
  }

  if (Buffer.getBufferSize() != TotalSize) {
    return make_error<InstrProfError>(instrprof_error::malformed);
  }
  return Error::success();
}
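
// Read a serialized list of SegmentEntry records: a count followed by that
// many fixed-size entries.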
llvm::SmallVector<SegmentEntry> readSegmentEntries(const char *Ptr) {
  using namespace support;

  const uint64_t NumItemsToRead =
      endian::readNext<uint64_t, little, unaligned>(Ptr);
  llvm::SmallVector<SegmentEntry> Items;
  for (uint64_t I = 0; I < NumItemsToRead; I++) {
    Items.push_back(*reinterpret_cast<const SegmentEntry *>(
        Ptr + I * sizeof(SegmentEntry)));
  }
  return Items;
}
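
// Read a serialized list of (stack id, MemInfoBlock) pairs: a count followed
// by the id/block records themselves.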
llvm::SmallVector<std::pair<uint64_t, MemInfoBlock>>
readMemInfoBlocks(const char *Ptr) {
  using namespace support;

  const uint64_t NumItemsToRead =
      endian::readNext<uint64_t, little, unaligned>(Ptr);
  llvm::SmallVector<std::pair<uint64_t, MemInfoBlock>> Items;
  for (uint64_t I = 0; I < NumItemsToRead; I++) {
    const uint64_t Id = endian::readNext<uint64_t, little, unaligned>(Ptr);
    const MemInfoBlock MIB = *reinterpret_cast<const MemInfoBlock *>(Ptr);
    Items.push_back({Id, MIB});
    // Only increment by size of MIB since readNext implicitly increments.
    Ptr += sizeof(MemInfoBlock);
  }
  return Items;
}
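
// Read the serialized callstacks: a count, then for each entry a stack id,
// the number of PCs, and the PC addresses themselves.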
CallStackMap readStackInfo(const char *Ptr) {
  using namespace support;

  const uint64_t NumItemsToRead =
      endian::readNext<uint64_t, little, unaligned>(Ptr);
  CallStackMap Items;

  for (uint64_t I = 0; I < NumItemsToRead; I++) {
    const uint64_t StackId = endian::readNext<uint64_t, little, unaligned>(Ptr);
    const uint64_t NumPCs = endian::readNext<uint64_t, little, unaligned>(Ptr);

    SmallVector<uint64_t> CallStack;
    for (uint64_t J = 0; J < NumPCs; J++) {
      CallStack.push_back(endian::readNext<uint64_t, little, unaligned>(Ptr));
    }

    Items[StackId] = CallStack;
  }
  return Items;
}

// Merges the contents of stack information in \p From to \p To. Returns true
// if any stack ids observed previously map to a different set of program
// counter addresses.
bool mergeStackMap(const CallStackMap &From, CallStackMap &To) {
  for (const auto &IdStack : From) {
    auto I = To.find(IdStack.first);
    if (I == To.end()) {
      To[IdStack.first] = IdStack.second;
    } else {
      // Check that the PCs are the same (in order).
      if (IdStack.second != I->second)
        return true;
    }
  }
  return false;
}
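
// Prepend a string error carrying \p Context to \p E so that failures are
// easier to attribute.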
Error report(Error E, const StringRef Context) {
  return joinErrors(createStringError(inconvertibleErrorCode(), Context),
                    std::move(E));
}
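
// Returns true if \p Path points into the memprof runtime sources; frames
// from the runtime itself are filtered out of the profile.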
bool isRuntimePath(const StringRef Path) {
  return StringRef(llvm::sys::path::convert_to_slash(Path))
      .contains("memprof/memprof_");
}
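
// Render the build id of \p Entry as a hex string.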
std::string getBuildIdString(const SegmentEntry &Entry) {
  constexpr size_t Size = sizeof(Entry.BuildId) / sizeof(uint8_t);
  constexpr uint8_t Zeros[Size] = {0};
  // If the build id is unset print a helpful string instead of all zeros.
  if (memcmp(Entry.BuildId, Zeros, Size) == 0)
    return "<None>";

  std::string Str;
  raw_string_ostream OS(Str);
  for (size_t I = 0; I < Size; I++) {
    OS << format_hex_no_prefix(Entry.BuildId[I], 2);
  }
  return OS.str();
}

} // namespace

Expected<std::unique_ptr<RawMemProfReader>>
RawMemProfReader::create(const Twine &Path, const StringRef ProfiledBinary,
                         bool KeepName) {
  auto BufferOr = MemoryBuffer::getFileOrSTDIN(Path);
  if (std::error_code EC = BufferOr.getError())
    return report(errorCodeToError(EC), Path.getSingleStringRef());

  std::unique_ptr<MemoryBuffer> Buffer(BufferOr.get().release());
  if (Error E = checkBuffer(*Buffer))
    return report(std::move(E), Path.getSingleStringRef());

  if (ProfiledBinary.empty())
    return report(
        errorCodeToError(make_error_code(std::errc::invalid_argument)),
        "Path to profiled binary is empty!");

  auto BinaryOr = llvm::object::createBinary(ProfiledBinary);
  if (!BinaryOr) {
    return report(BinaryOr.takeError(), ProfiledBinary);
  }

  // Use new here since constructor is private.
  std::unique_ptr<RawMemProfReader> Reader(
      new RawMemProfReader(std::move(BinaryOr.get()), KeepName));
  if (Error E = Reader->initialize(std::move(Buffer))) {
    return std::move(E);
  }
  return std::move(Reader);
}

bool RawMemProfReader::hasFormat(const StringRef Path) {
  auto BufferOr = MemoryBuffer::getFileOrSTDIN(Path);
  if (!BufferOr)
    return false;

  std::unique_ptr<MemoryBuffer> Buffer(BufferOr.get().release());
  return hasFormat(*Buffer);
}

bool RawMemProfReader::hasFormat(const MemoryBuffer &Buffer) {
  if (Buffer.getBufferSize() < sizeof(uint64_t))
    return false;
  // Aligned read to sanity check that the buffer was allocated with at least
  // 8-byte alignment.
  const uint64_t Magic = alignedRead(Buffer.getBufferStart());
  return Magic == MEMPROF_RAW_MAGIC_64;
}

void RawMemProfReader::printYAML(raw_ostream &OS) {
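  // Count the functions which have at least one allocation site, along with
  // the total number of allocation contexts (MIBs) recorded.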
  uint64_t NumAllocFunctions = 0, NumMibInfo = 0;
  for (const auto &KV : FunctionProfileData) {
    const size_t NumAllocSites = KV.second.AllocSites.size();
    if (NumAllocSites > 0) {
      NumAllocFunctions++;
      NumMibInfo += NumAllocSites;
    }
  }

  OS << "MemprofProfile:\n";
  OS << "  Summary:\n";
  OS << "    Version: " << MEMPROF_RAW_VERSION << "\n";
  OS << "    NumSegments: " << SegmentInfo.size() << "\n";
  OS << "    NumMibInfo: " << NumMibInfo << "\n";
  OS << "    NumAllocFunctions: " << NumAllocFunctions << "\n";
  OS << "    NumStackOffsets: " << StackMap.size() << "\n";
  // Print out the segment information.
  OS << "  Segments:\n";
  for (const auto &Entry : SegmentInfo) {
    OS << "  -\n";
    OS << "    BuildId: " << getBuildIdString(Entry) << "\n";
    OS << "    Start: 0x" << llvm::utohexstr(Entry.Start) << "\n";
    OS << "    End: 0x" << llvm::utohexstr(Entry.End) << "\n";
    OS << "    Offset: 0x" << llvm::utohexstr(Entry.Offset) << "\n";
  }
  // Print out the merged contents of the profiles.
  OS << "  Records:\n";
  for (const auto &Entry : *this) {
    OS << "  -\n";
    OS << "    FunctionGUID: " << Entry.first << "\n";
    Entry.second.print(OS);
  }
}

Error RawMemProfReader::initialize(std::unique_ptr<MemoryBuffer> DataBuffer) {
  const StringRef FileName = Binary.getBinary()->getFileName();

  auto *ElfObject = dyn_cast<object::ELFObjectFileBase>(Binary.getBinary());
  if (!ElfObject) {
    return report(make_error<StringError>(Twine("Not an ELF file: "),
                                          inconvertibleErrorCode()),
                  FileName);
  }

  auto Triple = ElfObject->makeTriple();
  if (!Triple.isX86())
    return report(make_error<StringError>(Twine("Unsupported target: ") +
                                              Triple.getArchName(),
                                          inconvertibleErrorCode()),
                  FileName);

  auto *Object = cast<object::ObjectFile>(Binary.getBinary());
  std::unique_ptr<DIContext> Context = DWARFContext::create(
      *Object, DWARFContext::ProcessDebugRelocations::Process);

  auto SOFOr = symbolize::SymbolizableObjectFile::create(
      Object, std::move(Context), /*UntagAddresses=*/false);
  if (!SOFOr)
    return report(SOFOr.takeError(), FileName);
  Symbolizer = std::move(SOFOr.get());

  if (Error E = readRawProfile(std::move(DataBuffer)))
    return E;

  if (Error E = symbolizeAndFilterStackFrames())
    return E;

  return mapRawProfileToRecords();
}
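
// Convert the raw, address-based profile into IndexedMemProfRecords keyed by
// function GUID.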
Error RawMemProfReader::mapRawProfileToRecords() {
  // Hold a mapping from function to each callsite location we encounter within
  // it that is part of some dynamic allocation context. The location is stored
  // as a pointer to a symbolized list of inline frames.
  using LocationPtr = const llvm::SmallVector<FrameId> *;
  llvm::DenseMap<GlobalValue::GUID, llvm::SetVector<LocationPtr>>
      PerFunctionCallSites;

  // Convert the raw profile callstack data into memprof records. While doing
  // so keep track of related contexts so that we can fill these in later.
  for (const auto &Entry : CallstackProfileData) {
    const uint64_t StackId = Entry.first;

    auto It = StackMap.find(StackId);
    if (It == StackMap.end())
      return make_error<InstrProfError>(
          instrprof_error::malformed,
          "memprof callstack record does not contain id: " + Twine(StackId));

    // Construct the symbolized callstack.
    llvm::SmallVector<FrameId> Callstack;
    Callstack.reserve(It->getSecond().size());

    llvm::ArrayRef<uint64_t> Addresses = It->getSecond();
    for (size_t I = 0; I < Addresses.size(); I++) {
      const uint64_t Address = Addresses[I];
      assert(SymbolizedFrame.count(Address) > 0 &&
             "Address not found in SymbolizedFrame map");
      const SmallVector<FrameId> &Frames = SymbolizedFrame[Address];

      assert(!idToFrame(Frames.back()).IsInlineFrame &&
             "The last frame should not be inlined");

      // Record the callsites for each function. Skip the first frame of the
      // first address since it is the allocation site itself that is recorded
      // as an alloc site.
      for (size_t J = 0; J < Frames.size(); J++) {
        if (I == 0 && J == 0)
          continue;
        // We attach the entire bottom-up frame here for the callsite even
        // though we only need the frames up to and including the frame for
        // Frames[J].Function. This will enable better deduplication for
        // compression in the future.
        const GlobalValue::GUID Guid = idToFrame(Frames[J]).Function;
        PerFunctionCallSites[Guid].insert(&Frames);
      }

      // Add all the frames to the current allocation callstack.
      Callstack.append(Frames.begin(), Frames.end());
    }

    // We attach the memprof record to each function bottom-up including the
    // first non-inline frame.
    for (size_t I = 0; /*Break out using the condition below*/; I++) {
      const Frame &F = idToFrame(Callstack[I]);
      auto Result =
          FunctionProfileData.insert({F.Function, IndexedMemProfRecord()});
      IndexedMemProfRecord &Record = Result.first->second;
      Record.AllocSites.emplace_back(Callstack, Entry.second);

      if (!F.IsInlineFrame)
        break;
    }
  }

  // Fill in the related callsites per function.
  for (auto I = PerFunctionCallSites.begin(), E = PerFunctionCallSites.end();
       I != E; I++) {
    const GlobalValue::GUID Id = I->first;
    // Some functions may have only callsite data and no allocation data. Here
    // we insert a new entry for callsite data if we need to.
    auto Result = FunctionProfileData.insert({Id, IndexedMemProfRecord()});
    IndexedMemProfRecord &Record = Result.first->second;
    for (LocationPtr Loc : I->getSecond()) {
      Record.CallSites.push_back(*Loc);
    }
  }

  return Error::success();
}
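
// Symbolize each unique address in the stack map, caching the results, and
// drop callstack entries which cannot be symbolized or which belong to the
// memprof runtime.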
Error RawMemProfReader::symbolizeAndFilterStackFrames() {
  // The specifier to use when symbolization is requested.
  const DILineInfoSpecifier Specifier(
      DILineInfoSpecifier::FileLineInfoKind::RawValue,
      DILineInfoSpecifier::FunctionNameKind::LinkageName);

  // For entries where all PCs in the callstack are discarded, we erase the
  // entry from the stack map.
  llvm::SmallVector<uint64_t> EntriesToErase;
  // We keep track of all prior discarded entries so that we can avoid invoking
  // the symbolizer for such entries.
  llvm::DenseSet<uint64_t> AllVAddrsToDiscard;
  for (auto &Entry : StackMap) {
    for (const uint64_t VAddr : Entry.getSecond()) {
      // Check if we have already symbolized and cached the result or if we
      // don't want to attempt symbolization since we know this address is bad.
      // In this case the address is also removed from the current callstack.
      if (SymbolizedFrame.count(VAddr) > 0 ||
          AllVAddrsToDiscard.contains(VAddr))
        continue;

      Expected<DIInliningInfo> DIOr = Symbolizer->symbolizeInlinedCode(
          getModuleOffset(VAddr), Specifier, /*UseSymbolTable=*/false);
      if (!DIOr)
        return DIOr.takeError();
      DIInliningInfo DI = DIOr.get();

      // Drop frames which we can't symbolize or if they belong to the runtime.
      if (DI.getFrame(0).FunctionName == DILineInfo::BadString ||
          isRuntimePath(DI.getFrame(0).FileName)) {
        AllVAddrsToDiscard.insert(VAddr);
        continue;
      }

      for (size_t I = 0, NumFrames = DI.getNumberOfFrames(); I < NumFrames;
           I++) {
        const auto &DIFrame = DI.getFrame(I);
        const uint64_t Guid =
            IndexedMemProfRecord::getGUID(DIFrame.FunctionName);
        const Frame F(Guid, DIFrame.Line - DIFrame.StartLine, DIFrame.Column,
                      // Only the last entry is not an inlined location.
                      I != NumFrames - 1);
        // Here we retain a mapping from the GUID to symbol name instead of
        // adding it to the frame object directly to reduce memory overhead.
        // This is because there can be many unique frames, particularly for
        // callsite frames.
        if (KeepSymbolName)
          GuidToSymbolName.insert({Guid, DIFrame.FunctionName});

        const FrameId Hash = F.hash();
        IdToFrame.insert({Hash, F});
        SymbolizedFrame[VAddr].push_back(Hash);
      }
    }

    auto &CallStack = Entry.getSecond();
    CallStack.erase(std::remove_if(CallStack.begin(), CallStack.end(),
                                   [&AllVAddrsToDiscard](const uint64_t A) {
                                     return AllVAddrsToDiscard.contains(A);
                                   }),
                    CallStack.end());
    if (CallStack.empty())
      EntriesToErase.push_back(Entry.getFirst());
  }

  // Drop the entries where the callstack is empty.
  for (const uint64_t Id : EntriesToErase) {
    StackMap.erase(Id);
    CallstackProfileData.erase(Id);
  }

  if (StackMap.empty())
    return make_error<InstrProfError>(
        instrprof_error::malformed,
        "no entries in callstack map after symbolization");

  return Error::success();
}
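
// Read each of the serialized profile dumps in the buffer, merging their
// segment, MemInfoBlock, and callstack sections.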
Error RawMemProfReader::readRawProfile(
    std::unique_ptr<MemoryBuffer> DataBuffer) {
  const char *Next = DataBuffer->getBufferStart();

  while (Next < DataBuffer->getBufferEnd()) {
    auto *Header = reinterpret_cast<const memprof::Header *>(Next);

    // Read in the segment information and check whether it is the same across
    // all profiles in this binary file.
    const llvm::SmallVector<SegmentEntry> Entries =
        readSegmentEntries(Next + Header->SegmentOffset);
    if (!SegmentInfo.empty() && SegmentInfo != Entries) {
      // We do not expect segment information to change when deserializing from
      // the same binary profile file. This can happen if dynamic libraries are
      // loaded/unloaded between profile dumping.
      return make_error<InstrProfError>(
          instrprof_error::malformed,
          "memprof raw profile has different segment information");
    }
    SegmentInfo.assign(Entries.begin(), Entries.end());

    // Read in the MemInfoBlocks. Merge them based on stack id - we assume that
    // raw profiles in the same binary file are from the same process so the
    // stackdepot ids are the same.
    for (const auto &Value : readMemInfoBlocks(Next + Header->MIBOffset)) {
      if (CallstackProfileData.count(Value.first)) {
        CallstackProfileData[Value.first].Merge(Value.second);
      } else {
        CallstackProfileData[Value.first] = Value.second;
      }
    }

    // Read in the callstack for each id. For multiple raw profiles in the
    // same file, we expect that the callstack is the same for a unique id.
    const CallStackMap CSM = readStackInfo(Next + Header->StackOffset);
    if (StackMap.empty()) {
      StackMap = CSM;
    } else {
      if (mergeStackMap(CSM, StackMap))
        return make_error<InstrProfError>(
            instrprof_error::malformed,
            "memprof raw profile got different call stack for same id");
    }

    Next += Header->TotalSize;
  }

  return Error::success();
}

object::SectionedAddress
RawMemProfReader::getModuleOffset(const uint64_t VirtualAddress) {
  LLVM_DEBUG({
    SegmentEntry *ContainingSegment = nullptr;
    for (auto &SE : SegmentInfo) {
      if (VirtualAddress > SE.Start && VirtualAddress <= SE.End) {
        ContainingSegment = &SE;
      }
    }

    // Ensure that the virtual address is valid.
    assert(ContainingSegment && "Could not find a segment entry");
  });

  // TODO: Compute the file offset based on the maps and program headers. For
  // now this only works for non PIE binaries.
  return object::SectionedAddress{VirtualAddress};
}

Error RawMemProfReader::readNextRecord(GuidMemProfRecordPair &GuidRecord) {
  if (FunctionProfileData.empty())
    return make_error<InstrProfError>(instrprof_error::empty_raw_profile);

  if (Iter == FunctionProfileData.end())
    return make_error<InstrProfError>(instrprof_error::eof);
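
  // Lazily convert frame ids back to Frame objects, attaching the symbol name
  // when KeepSymbolName is set.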
  auto IdToFrameCallback = [this](const FrameId Id) {
    Frame F = this->idToFrame(Id);
    if (!this->KeepSymbolName)
      return F;
    auto Iter = this->GuidToSymbolName.find(F.Function);
    assert(Iter != this->GuidToSymbolName.end());
    F.SymbolName = Iter->getSecond();
    return F;
  };

  const IndexedMemProfRecord &IndexedRecord = Iter->second;
  GuidRecord = {Iter->first, MemProfRecord(IndexedRecord, IdToFrameCallback)};
  Iter++;
  return Error::success();
}
} // namespace memprof
} // namespace llvm