[memprof] Add a raw binary format to serialize memprof profiles.

This change implements the raw binary format discussed in
https://lists.llvm.org/pipermail/llvm-dev/2021-September/153007.html

Summary of changes
* Add a new memprof option to choose binary or text (default) format.
* Add a rawprofile library which serializes the MIB map to a profile.
* Add a unit test for rawprofile.
* Mark sanitizer procmaps methods as virtual to be able to mock them.
* Extend memprof_profile_dump regression test.

Differential Revision: https://reviews.llvm.org/D113317
This commit is contained in:
Snehasish Kumar 2021-10-12 11:30:23 -07:00
parent 1243cef245
commit 545866cb05
10 changed files with 607 additions and 27 deletions

View File

@ -10,6 +10,7 @@ set(MEMPROF_SOURCES
memprof_malloc_linux.cpp
memprof_mibmap.cpp
memprof_posix.cpp
memprof_rawprofile.cpp
memprof_rtl.cpp
memprof_shadow_setup.cpp
memprof_stack.cpp
@ -38,6 +39,7 @@ SET(MEMPROF_HEADERS
memprof_mapping.h
memprof_meminfoblock.h
memprof_mibmap.h
memprof_rawprofile.h
memprof_stack.h
memprof_stats.h
memprof_thread.h
@ -195,3 +197,8 @@ foreach(arch ${MEMPROF_SUPPORTED_ARCH})
add_dependencies(memprof clang_rt.memprof-${arch}-symbols)
endif()
endforeach()
if(COMPILER_RT_INCLUDE_TESTS)
add_subdirectory(tests)
endif()

View File

@ -17,6 +17,7 @@
#include "memprof_mapping.h"
#include "memprof_meminfoblock.h"
#include "memprof_mibmap.h"
#include "memprof_rawprofile.h"
#include "memprof_stack.h"
#include "memprof_thread.h"
#include "sanitizer_common/sanitizer_allocator_checks.h"
@ -27,7 +28,9 @@
#include "sanitizer_common/sanitizer_flags.h"
#include "sanitizer_common/sanitizer_internal_defs.h"
#include "sanitizer_common/sanitizer_list.h"
#include "sanitizer_common/sanitizer_procmaps.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
#include "sanitizer_common/sanitizer_vector.h"
#include <sched.h>
#include <time.h>
@ -220,13 +223,20 @@ struct Allocator {
// Holds the mapping of stack ids to MemInfoBlocks.
MIBMapTy MIBMap;
bool destructing;
bool constructed = false;
atomic_uint8_t destructing;
atomic_uint8_t constructed;
bool print_text;
// ------------------- Initialization ------------------------
explicit Allocator(LinkerInitialized)
: destructing(false), constructed(true) {}
~Allocator() { FinishAndPrint(); }
explicit Allocator(LinkerInitialized) : print_text(flags()->print_text) {
atomic_store_relaxed(&destructing, 0);
atomic_store_relaxed(&constructed, 1);
}
~Allocator() {
atomic_store_relaxed(&destructing, 1);
FinishAndWrite();
}
static void PrintCallback(const uptr Key, LockedMemInfoBlock *const &Value,
void *Arg) {
@ -234,12 +244,36 @@ struct Allocator {
Value->mib.Print(Key, bool(Arg));
}
void FinishAndPrint() {
if (common_flags()->print_module_map)
void FinishAndWrite() {
if (print_text && common_flags()->print_module_map)
DumpProcessMap();
if (!flags()->print_terse)
Printf("Live on exit:\n");
allocator.ForceLock();
InsertLiveBlocks();
if (print_text) {
MIBMap.ForEach(PrintCallback,
reinterpret_cast<void *>(flags()->print_terse));
StackDepotPrintAll();
} else {
// Serialize the contents to a raw profile. Format documented in
// memprof_rawprofile.h.
char *Buffer = nullptr;
MemoryMappingLayout Layout(/*cache_enabled=*/true);
u64 BytesSerialized = SerializeToRawProfile(MIBMap, Layout, Buffer);
CHECK(Buffer && BytesSerialized && "could not serialize to buffer");
report_file.Write(Buffer, BytesSerialized);
}
allocator.ForceUnlock();
}
// Inserts any blocks which have been allocated but not yet deallocated.
void InsertLiveBlocks() {
if (print_text && !flags()->print_terse)
Printf("Live on exit:\n");
allocator.ForEachChunk(
[](uptr chunk, void *alloc) {
u64 user_requested_size;
@ -256,12 +290,6 @@ struct Allocator {
InsertOrMerge(m->alloc_context_id, newMIB, A->MIBMap);
},
this);
destructing = true;
MIBMap.ForEach(PrintCallback,
reinterpret_cast<void *>(flags()->print_terse));
StackDepotPrintAll();
allocator.ForceUnlock();
}
void InitLinkerInitialized() {
@ -393,7 +421,9 @@ struct Allocator {
u64 user_requested_size =
atomic_exchange(&m->user_requested_size, 0, memory_order_acquire);
if (memprof_inited && memprof_init_done && constructed && !destructing) {
if (memprof_inited && memprof_init_done &&
atomic_load_relaxed(&constructed) &&
!atomic_load_relaxed(&destructing)) {
u64 c = GetShadowCount(p, user_requested_size);
long curtime = GetTimestamp();
@ -666,7 +696,7 @@ uptr __sanitizer_get_allocated_size(const void *p) {
}
int __memprof_profile_dump() {
instance.FinishAndPrint();
instance.FinishAndWrite();
// In the future we may want to return non-zero if there are any errors
// detected during the dumping process.
return 0;

View File

@ -35,5 +35,7 @@ MEMPROF_FLAG(bool, allocator_frees_and_returns_null_on_realloc_zero, true,
"realloc(p, 0) is equivalent to free(p) by default (Same as the "
"POSIX standard). If set to false, realloc(p, 0) will return a "
"pointer to an allocated space which can not be used.")
MEMPROF_FLAG(bool, print_text, true,
"If set, prints the heap profile in text format. Else use the raw binary serialization format.")
MEMPROF_FLAG(bool, print_terse, false,
"If set, prints memory profile in a terse format.")
"If set, prints memory profile in a terse format. Only applicable if print_text = true.")

View File

@ -0,0 +1,250 @@
#include "memprof_rawprofile.h"
#include "memprof_meminfoblock.h"
#include "sanitizer_common/sanitizer_allocator_internal.h"
#include "sanitizer_common/sanitizer_linux.h"
#include "sanitizer_common/sanitizer_procmaps.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
#include "sanitizer_common/sanitizer_stackdepotbase.h"
#include "sanitizer_common/sanitizer_stacktrace.h"
#include "sanitizer_common/sanitizer_vector.h"
#include <stdlib.h>
#include <string.h>
namespace __memprof {
using ::__sanitizer::Vector;
namespace {
// On-disk record for one readable+executable segment. Packed so instances
// can be memcpy'd straight into the serialized profile buffer.
typedef struct __attribute__((__packed__)) {
  u64 start;      // Segment start address.
  u64 end;        // Segment end address.
  u64 offset;     // Mapping offset (copied from MemoryMappedSegment::offset).
  u8 buildId[32]; // Module build id, zero-padded to 32 bytes.
} SegmentEntry;
// Raw profile file header. Offsets are absolute from the start of the
// buffer (see SerializeToRawProfile); packed so the header can be written
// with a single WriteBytes call.
typedef struct __attribute__((__packed__)) {
  u64 magic;          // MEMPROF_RAW_MAGIC_64
  u64 version;        // MEMPROF_RAW_VERSION
  u64 total_size;     // Total bytes in the serialized profile.
  u64 segment_offset; // Start of the segment section.
  u64 mib_offset;     // Start of the MIB section.
  u64 stack_offset;   // Start of the stack section.
} Header;
// Copies the bytes of `Pod` to `Buffer` and returns the advanced cursor.
// Uses memcpy rather than a type-punned store: the destination address may
// be unaligned for T (records are written back to back into a packed byte
// stream), and `*(T *)Buffer = Pod` would be undefined behavior there.
template <class T> char *WriteBytes(T Pod, char *&Buffer) {
  memcpy(Buffer, &Pod, sizeof(T));
  return Buffer + sizeof(T);
}
// MIBMap::ForEach callback: accumulates each map key (a stack id) into the
// Vector<u64> passed through Arg. The MIB payload itself is left untouched.
void RecordStackId(const uptr Key, UNUSED LockedMemInfoBlock *const &MIB,
                   void *Arg) {
  reinterpret_cast<Vector<u64> *>(Arg)->PushBack(Key);
}
} // namespace
// Returns the number of bytes the segment section will occupy: an 8 byte
// record count followed by one fixed-size SegmentEntry per segment that is
// both readable and executable (the only segments we record).
u64 SegmentSizeBytes(MemoryMappingLayoutBase &Layout) {
  u64 RecordCount = 0;
  MemoryMappedSegment Seg;
  Layout.Reset();
  while (Layout.Next(&Seg)) {
    if (Seg.IsReadable() && Seg.IsExecutable())
      ++RecordCount;
  }
  return sizeof(u64) + RecordCount * sizeof(SegmentEntry);
}
// The segment section uses the following format:
// ---------- Segment Info
// Num Entries
// ---------- Segment Entry
// Start
// End
// Offset
// BuildID 32B
// ----------
// ...
void SerializeSegmentsToBuffer(MemoryMappingLayoutBase &Layout,
                               const u64 ExpectedNumBytes, char *&Buffer) {
  char *Pos = Buffer;
  // Skip the slot that will hold the record count; it is backpatched once
  // the number of readable+executable segments is known.
  Pos += sizeof(u64);
  u64 RecordCount = 0;
  MemoryMappedSegment Seg;
  Layout.Reset();
  while (Layout.Next(&Seg)) {
    if (!Seg.IsReadable() || !Seg.IsExecutable())
      continue;
    SegmentEntry Entry{};
    Entry.start = Seg.start;
    Entry.end = Seg.end;
    Entry.offset = Seg.offset;
    memcpy(Entry.buildId, Seg.uuid, sizeof(Seg.uuid));
    memcpy(Pos, &Entry, sizeof(SegmentEntry));
    Pos += sizeof(SegmentEntry);
    ++RecordCount;
  }
  // Backpatch the reserved slot with the actual record count.
  *((u64 *)Buffer) = RecordCount;
  CHECK(ExpectedNumBytes == static_cast<u64>(Pos - Buffer) &&
        "Expected num bytes != actual bytes written");
}
// Returns the number of bytes the stack section will occupy: an 8 byte
// total count, then per stack id an 8 byte id, an 8 byte frame count and
// 8 bytes for each non-zero PC in the depot trace.
u64 StackSizeBytes(const Vector<u64> &StackIds) {
  u64 Total = sizeof(u64);
  for (unsigned Idx = 0, E = StackIds.Size(); Idx < E; ++Idx) {
    const StackTrace Trace = StackDepotGet(StackIds[Idx]);
    CHECK(Trace.trace != nullptr && Trace.size > 0 && "Empty stack trace");
    u64 FrameCount = 0;
    for (uptr F = 0; F < Trace.size && Trace.trace[F] != 0; ++F)
      ++FrameCount;
    // One u64 for the id, one for the frame count, then the frames.
    Total += (2 + FrameCount) * sizeof(u64);
  }
  return Total;
}
// The stack info section uses the following format:
//
// ---------- Stack Info
// Num Entries
// ---------- Stack Entry
// Num Stacks
// PC1
// PC2
// ...
// ----------
void SerializeStackToBuffer(const Vector<u64> &StackIds,
const u64 ExpectedNumBytes, char *&Buffer) {
const u64 NumIds = StackIds.Size();
char *Ptr = Buffer;
Ptr = WriteBytes(static_cast<u64>(NumIds), Ptr);
for (unsigned k = 0; k < NumIds; ++k) {
const u64 Id = StackIds[k];
Ptr = WriteBytes(Id, Ptr);
Ptr += sizeof(u64); // Bump it by u64, we will fill this in later.
u64 Count = 0;
const StackTrace St = StackDepotGet(Id);
for (uptr i = 0; i < St.size && St.trace[i] != 0; i++) {
// PCs in stack traces are actually the return addresses, that is,
// addresses of the next instructions after the call.
uptr pc = StackTrace::GetPreviousInstructionPc(St.trace[i]);
Ptr = WriteBytes(static_cast<u64>(pc), Ptr);
++Count;
}
// Store the count in the space we reserved earlier.
*(u64 *)(Ptr - (Count + 1) * sizeof(u64)) = Count;
}
CHECK(ExpectedNumBytes == static_cast<u64>(Ptr - Buffer) &&
"Expected num bytes != actual bytes written");
}
// The MIB section has the following format:
// ---------- MIB Info
// Num Entries
// ---------- MIB Entry 0
// Alloc Count
// ...
// ---------- MIB Entry 1
// Alloc Count
// ...
// ----------
void SerializeMIBInfoToBuffer(MIBMapTy &MIBMap, const Vector<u64> &StackIds,
                              const u64 ExpectedNumBytes, char *&Buffer) {
  char *Pos = Buffer;
  const u64 EntryCount = StackIds.Size();
  Pos = WriteBytes(EntryCount, Pos);
  for (u64 Idx = 0; Idx < EntryCount; ++Idx) {
    const u64 StackId = StackIds[Idx];
    // Each entry is removed from the map as it is serialized.
    MIBMapTy::Handle Entry(&MIBMap, StackId, /*remove=*/true,
                           /*create=*/false);
    CHECK(Entry.exists());
    Pos = WriteBytes(StackId, Pos);
    Pos = WriteBytes((*Entry)->mib, Pos);
  }
  CHECK(ExpectedNumBytes == static_cast<u64>(Pos - Buffer) &&
        "Expected num bytes != actual bytes written");
}
// Format
// ---------- Header
// Magic
// Version
// Total Size
// Segment Offset
// MIB Info Offset
// Stack Offset
// ---------- Segment Info
// Num Entries
// ---------- Segment Entry
// Start
// End
// Offset
// BuildID 32B
// ----------
// ...
// ---------- MIB Info
// Num Entries
// ---------- MIB Entry
// Alloc Count
// ...
// ---------- Stack Info
// Num Entries
// ---------- Stack Entry
// Num Stacks
// PC1
// PC2
// ...
// ----------
// ...
//
// Serializes the profile to a single buffer allocated with InternalAlloc.
// On return `Buffer` points at the serialized profile (ownership passes to
// the caller) and the return value is its total size in bytes.
u64 SerializeToRawProfile(MIBMapTy &MIBMap, MemoryMappingLayoutBase &Layout,
                          char *&Buffer) {
  // Each section size is computed up front so one buffer of exactly the
  // right size can be allocated before any bytes are written.
  const u64 NumSegmentBytes = SegmentSizeBytes(Layout);
  // Collect the stack ids (map keys); they determine both the MIB section
  // and the stack section contents.
  Vector<u64> StackIds;
  MIBMap.ForEach(RecordStackId, reinterpret_cast<void *>(&StackIds));
  // The first 8b are for the total number of MIB records. Each MIB record is
  // preceded by a 8b stack id which is associated with stack frames in the next
  // section.
  const u64 NumMIBInfoBytes =
      sizeof(u64) + StackIds.Size() * (sizeof(u64) + sizeof(MemInfoBlock));
  const u64 NumStackBytes = StackSizeBytes(StackIds);
  const u64 TotalSizeBytes =
      sizeof(Header) + NumSegmentBytes + NumStackBytes + NumMIBInfoBytes;
  // Allocate the memory for the entire buffer incl. info blocks.
  Buffer = (char *)InternalAlloc(TotalSizeBytes);
  char *Ptr = Buffer;
  // The offsets recorded here must match the serialization order below:
  // header, segments, MIBs, stacks.
  Header header{MEMPROF_RAW_MAGIC_64,
                MEMPROF_RAW_VERSION,
                static_cast<u64>(TotalSizeBytes),
                sizeof(Header),
                sizeof(Header) + NumSegmentBytes,
                sizeof(Header) + NumSegmentBytes + NumMIBInfoBytes};
  Ptr = WriteBytes(header, Ptr);
  SerializeSegmentsToBuffer(Layout, NumSegmentBytes, Ptr);
  Ptr += NumSegmentBytes;
  SerializeMIBInfoToBuffer(MIBMap, StackIds, NumMIBInfoBytes, Ptr);
  Ptr += NumMIBInfoBytes;
  SerializeStackToBuffer(StackIds, NumStackBytes, Ptr);
  return TotalSizeBytes;
}
} // namespace __memprof

View File

@ -0,0 +1,21 @@
#ifndef MEMPROF_RAWPROFILE_H_
#define MEMPROF_RAWPROFILE_H_
#include "memprof_mibmap.h"
#include "sanitizer_common/sanitizer_procmaps.h"
namespace __memprof {
// TODO: pull these in from MemProfData.inc
// Fully parenthesized so the macro behaves as a single value inside larger
// expressions: `|` binds more loosely than `==`, so the unparenthesized
// form would mis-parse in e.g. `x == MEMPROF_RAW_MAGIC_64`.
#define MEMPROF_RAW_MAGIC_64                                                   \
  ((u64)255 << 56 | (u64)'m' << 48 | (u64)'p' << 40 | (u64)'r' << 32 |         \
   (u64)'o' << 24 | (u64)'f' << 16 | (u64)'r' << 8 | (u64)129)
#define MEMPROF_RAW_VERSION 1ULL
// Serializes the MIB map plus the process mappings to the raw binary
// profile format. Allocates Buffer (ownership passes to the caller) and
// returns the number of bytes serialized.
u64 SerializeToRawProfile(MIBMapTy &BlockCache, MemoryMappingLayoutBase &Layout,
                          char *&Buffer);
} // namespace __memprof
#endif // MEMPROF_RAWPROFILE_H_

View File

@ -0,0 +1,52 @@
include(CheckCXXCompilerFlag)
include(CompilerRTCompile)
include(CompilerRTLink)
# Compile flags for the unit tests: inherit the common unittest/gtest/gmock
# flags and add -I so "memprof/..." and "sanitizer_common/..." includes work.
set(MEMPROF_UNITTEST_CFLAGS
${COMPILER_RT_UNITTEST_CFLAGS}
${COMPILER_RT_GTEST_CFLAGS}
${COMPILER_RT_GMOCK_CFLAGS}
-I${COMPILER_RT_SOURCE_DIR}/lib/
-O2
-g
-fno-rtti
-Wno-gnu-zero-variadic-macro-arguments
-fno-omit-frame-pointer)
file(GLOB MEMPROF_HEADERS ../*.h)
# Runtime sources compiled directly into the test binary (only the pieces
# under test, not the whole memprof runtime).
set(MEMPROF_SOURCES
../memprof_mibmap.cpp
../memprof_rawprofile.cpp)
set(MEMPROF_UNITTESTS
rawprofile.cpp
driver.cpp)
set(MEMPROF_UNIT_TEST_HEADERS
${MEMPROF_HEADERS})
if(NOT WIN32)
list(APPEND MEMPROF_UNITTEST_LINK_FLAGS -pthread)
endif()
if(COMPILER_RT_DEFAULT_TARGET_ARCH IN_LIST MEMPROF_SUPPORTED_ARCH)
# MemProf unit tests are only run on the host machine.
set(arch ${COMPILER_RT_DEFAULT_TARGET_ARCH})
# Link the tests against the sanitizer_common object libraries that the
# compiled runtime sources depend on.
add_executable(MemProfUnitTests
${MEMPROF_UNITTESTS}
${COMPILER_RT_GTEST_SOURCE}
${COMPILER_RT_GMOCK_SOURCE}
${MEMPROF_SOURCES}
$<TARGET_OBJECTS:RTSanitizerCommon.${arch}>
$<TARGET_OBJECTS:RTSanitizerCommonCoverage.${arch}>
$<TARGET_OBJECTS:RTSanitizerCommonLibc.${arch}>
$<TARGET_OBJECTS:RTSanitizerCommonSymbolizer.${arch}>)
set_target_compile_flags(MemProfUnitTests ${MEMPROF_UNITTEST_CFLAGS})
set_target_link_flags(MemProfUnitTests ${MEMPROF_UNITTEST_LINK_FLAGS})
target_link_libraries(MemProfUnitTests dl)
set_target_properties(MemProfUnitTests PROPERTIES
RUNTIME_OUTPUT_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR})
endif()

View File

@ -0,0 +1,14 @@
//===-- driver.cpp ----------------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
#include "gtest/gtest.h"
// Entry point for the MemProf unit test binary: runs every registered
// gtest/gmock test and returns non-zero on failure.
int main(int argc, char **argv) {
  testing::InitGoogleTest(&argc, argv);
  return RUN_ALL_TESTS();
}

View File

@ -0,0 +1,188 @@
#include "memprof/memprof_rawprofile.h"
#include "memprof/memprof_meminfoblock.h"
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_procmaps.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
#include "sanitizer_common/sanitizer_stacktrace.h"
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include <memory>
namespace {
using ::__memprof::MemInfoBlock;
using ::__memprof::MIBMapTy;
using ::__memprof::SerializeToRawProfile;
using ::__sanitizer::MemoryMappedSegment;
using ::__sanitizer::MemoryMappingLayoutBase;
using ::__sanitizer::StackDepotPut;
using ::__sanitizer::StackTrace;
using ::testing::_;
using ::testing::Action;
using ::testing::DoAll;
using ::testing::Return;
using ::testing::SetArgPointee;
// gMock stand-in for the real MemoryMappingLayout so the test can feed
// arbitrary fake segments to the serializer instead of reading the actual
// process mappings.
class MockMemoryMappingLayout final : public MemoryMappingLayoutBase {
public:
  MOCK_METHOD(bool, Next, (MemoryMappedSegment *), (override));
  MOCK_METHOD(void, Reset, (), (override));
};
// Inserts FakeMIB into FakeMap keyed by a synthetic 5-frame stack trace
// whose PCs start at StackPCBegin. Returns the stack depot id used as key.
u64 PopulateFakeMap(const MemInfoBlock &FakeMIB, uptr StackPCBegin,
                    MIBMapTy &FakeMap) {
  constexpr int kNumFrames = 5;
  uptr Frames[kNumFrames];
  for (int I = 0; I < kNumFrames; I++)
    Frames[I] = StackPCBegin + I;
  StackTrace Trace(Frames, kNumFrames);
  u32 Id = StackDepotPut(Trace);
  InsertOrMerge(Id, FakeMIB, FakeMap);
  return Id;
}
// Reads a T (default u64) from the cursor and advances it by sizeof(T);
// used to walk the serialized profile during verification.
template <class T = u64> T Read(char *&Buffer) {
  static_assert(std::is_pod<T>::value, "Must be a POD type.");
  T t = *reinterpret_cast<T *>(Buffer);
  Buffer += sizeof(T);
  return t;
}
// End-to-end check of SerializeToRawProfile: one fake segment and two fake
// MIB entries go in, then the header, sizes and raw bytes of every section
// are verified.
TEST(MemProf, Basic) {
  MockMemoryMappingLayout Layout;
  MemoryMappedSegment FakeSegment;
  memset(&FakeSegment, 0, sizeof(FakeSegment));
  FakeSegment.start = 0x10;
  FakeSegment.end = 0x20;
  FakeSegment.offset = 0x10;
  uint8_t uuid[__sanitizer::kModuleUUIDSize] = {0xC, 0x0, 0xF, 0xF, 0xE, 0xE};
  memcpy(FakeSegment.uuid, uuid, __sanitizer::kModuleUUIDSize);
  FakeSegment.protection =
      __sanitizer::kProtectionExecute | __sanitizer::kProtectionRead;
  // The serializer iterates the layout twice (once to size the section, once
  // to write it), hence two Reset() calls and one segment per pass.
  const Action<bool(MemoryMappedSegment *)> SetSegment =
      DoAll(SetArgPointee<0>(FakeSegment), Return(true));
  EXPECT_CALL(Layout, Next(_))
      .WillOnce(SetSegment)
      .WillOnce(Return(false))
      .WillOnce(SetSegment)
      .WillRepeatedly(Return(false));
  EXPECT_CALL(Layout, Reset).Times(2);
  MIBMapTy FakeMap;
  MemInfoBlock FakeMIB;
  // Zero the block and set a couple of known fields so the constructor's
  // values don't interfere with the byte-level comparisons below.
  memset(&FakeMIB, 0, sizeof(MemInfoBlock));
  FakeMIB.alloc_count = 0x1;
  FakeMIB.total_access_count = 0x2;
  u64 FakeIds[2];
  FakeIds[0] = PopulateFakeMap(FakeMIB, /*StackPCBegin=*/2, FakeMap);
  FakeIds[1] = PopulateFakeMap(FakeMIB, /*StackPCBegin=*/3, FakeMap);
  char *Ptr = nullptr;
  u64 NumBytes = SerializeToRawProfile(FakeMap, Layout, Ptr);
  const char *Buffer = Ptr;
  ASSERT_GT(NumBytes, 0ULL);
  ASSERT_TRUE(Ptr);
  // Check the header.
  EXPECT_THAT(Read(Ptr), MEMPROF_RAW_MAGIC_64);
  EXPECT_THAT(Read(Ptr), MEMPROF_RAW_VERSION);
  const u64 TotalSize = Read(Ptr);
  const u64 SegmentOffset = Read(Ptr);
  const u64 MIBOffset = Read(Ptr);
  const u64 StackOffset = Read(Ptr);
  // ============= Check sizes.
  EXPECT_EQ(TotalSize, NumBytes);
  // Should be equal to the size of the raw profile header (6 u64 fields).
  EXPECT_EQ(SegmentOffset, 48ULL);
  // We expect only 1 segment entry: 8b for the count and 56b for the packed
  // SegmentEntry in memprof_rawprofile.cpp.
  EXPECT_EQ(MIBOffset - SegmentOffset, 64ULL);
  EXPECT_EQ(MIBOffset, 112ULL);
  // We expect 2 MIB entries: 8b for the count, then per entry an 8b stack id
  // followed by the MemInfoBlock payload.
  EXPECT_EQ(StackOffset - MIBOffset, 8 + 2 * (8 + sizeof(MemInfoBlock)));
  EXPECT_EQ(StackOffset, 336ULL);
  // We expect 2 stack entries, with 5 frames - 8b for total count,
  // 2 * (8b for id, 8b for frame count and 5*8b for fake frames)
  EXPECT_EQ(TotalSize - StackOffset, 8ULL + 2 * (8 + 8 + 5 * 8));
  // ============= Check contents.
  unsigned char ExpectedSegmentBytes[64] = {
      0x01, 0,   0,   0,   0,   0,   0, 0, // Number of entries
      0x10, 0,   0,   0,   0,   0,   0, 0, // Start
      0x20, 0,   0,   0,   0,   0,   0, 0, // End
      0x10, 0,   0,   0,   0,   0,   0, 0, // Offset
      0x0C, 0x0, 0xF, 0xF, 0xE, 0xE,       // Uuid
  };
  EXPECT_EQ(memcmp(Buffer + SegmentOffset, ExpectedSegmentBytes, 64), 0);
  // Check that the number of entries is 2.
  EXPECT_EQ(*reinterpret_cast<const u64 *>(Buffer + MIBOffset), 2ULL);
  // Check that stack id is set.
  EXPECT_EQ(*reinterpret_cast<const u64 *>(Buffer + MIBOffset + 8), FakeIds[0]);
  // Only check a few fields of the first MemInfoBlock.
  unsigned char ExpectedMIBBytes[sizeof(MemInfoBlock)] = {
      0x01, 0, 0, 0, // Alloc count
      0x02, 0, 0, 0, // Total access count
  };
  // Compare contents of 1st MIB after skipping count and stack id.
  EXPECT_EQ(
      memcmp(Buffer + MIBOffset + 16, ExpectedMIBBytes, sizeof(MemInfoBlock)),
      0);
  // Compare contents of 2nd MIB after skipping count and stack id for the first
  // and only the id for the second.
  EXPECT_EQ(memcmp(Buffer + MIBOffset + 16 + sizeof(MemInfoBlock) + 8,
                   ExpectedMIBBytes, sizeof(MemInfoBlock)),
            0);
  // Check that the number of entries is 2.
  EXPECT_EQ(*reinterpret_cast<const u64 *>(Buffer + StackOffset), 2ULL);
  // Check that the 1st stack id is set.
  EXPECT_EQ(*reinterpret_cast<const u64 *>(Buffer + StackOffset + 8),
            FakeIds[0]);
  // Contents are num pcs, then each fake pc - 1 (the serializer stores
  // GetPreviousInstructionPc of every frame).
  unsigned char ExpectedStackBytes[2][6 * 8] = {
      {
          0x5, 0, 0, 0, 0, 0, 0, 0, // Number of PCs
          0x1, 0, 0, 0, 0, 0, 0, 0, // PC ...
          0x2, 0, 0, 0, 0, 0, 0, 0, 0x3, 0, 0, 0, 0, 0, 0, 0,
          0x4, 0, 0, 0, 0, 0, 0, 0, 0x5, 0, 0, 0, 0, 0, 0, 0,
      },
      {
          0x5, 0, 0, 0, 0, 0, 0, 0, // Number of PCs
          0x2, 0, 0, 0, 0, 0, 0, 0, // PC ...
          0x3, 0, 0, 0, 0, 0, 0, 0, 0x4, 0, 0, 0, 0, 0, 0, 0,
          0x5, 0, 0, 0, 0, 0, 0, 0, 0x6, 0, 0, 0, 0, 0, 0, 0,
      },
  };
  EXPECT_EQ(memcmp(Buffer + StackOffset + 16, ExpectedStackBytes[0],
                   sizeof(ExpectedStackBytes[0])),
            0);
  // Check that the 2nd stack id is set.
  EXPECT_EQ(
      *reinterpret_cast<const u64 *>(Buffer + StackOffset + 8 + 6 * 8 + 8),
      FakeIds[1]);
  EXPECT_EQ(memcmp(Buffer + StackOffset + 16 + 6 * 8 + 8, ExpectedStackBytes[1],
                   sizeof(ExpectedStackBytes[1])),
            0);
}
} // namespace

View File

@ -65,13 +65,23 @@ class MemoryMappedSegment {
MemoryMappedSegmentData *data_;
};
class MemoryMappingLayout {
// Abstract interface over the platform memory-mapping iterator. The methods
// are virtual (with UNIMPLEMENTED defaults) so unit tests can mock them.
class MemoryMappingLayoutBase {
public:
  virtual bool Next(MemoryMappedSegment *segment) { UNIMPLEMENTED(); }
  virtual bool Error() const { UNIMPLEMENTED(); };
  virtual void Reset() { UNIMPLEMENTED(); }

protected:
  // Protected non-virtual destructor: instances are never deleted through a
  // base pointer, so no virtual destructor overhead is needed.
  ~MemoryMappingLayoutBase() {}
};
class MemoryMappingLayout final : public MemoryMappingLayoutBase {
public:
explicit MemoryMappingLayout(bool cache_enabled);
~MemoryMappingLayout();
bool Next(MemoryMappedSegment *segment);
bool Error() const;
void Reset();
virtual bool Next(MemoryMappedSegment *segment) override;
virtual bool Error() const override;
virtual void Reset() override;
// In some cases, e.g. when running under a sandbox on Linux, ASan is unable
// to obtain the memory mappings. It should fall back to pre-cached data
// instead of aborting.

View File

@ -1,6 +1,8 @@
// RUN: %clangxx_memprof %s -o %t
// RUN: %env_memprof_opts=log_path=stdout %run %t | FileCheck %s
// RUN: %env_memprof_opts=log_path=stdout %run %t | FileCheck --check-prefix=CHECK-TEXT %s
// RUN: %env_memprof_opts=log_path=stdout,print_text=false %run %t > %t.memprofraw
// RUN: od -c -N 8 %t.memprofraw | FileCheck --check-prefix=CHECK-RAW %s
#include <sanitizer/memprof_interface.h>
#include <stdlib.h>
@ -17,7 +19,11 @@ int main(int argc, char **argv) {
}
// We should get 2 rounds of profile info, one from the explicit dump request,
// and one at exit.
// CHECK: Memory allocation stack id
// CHECK: Stack for id
// CHECK: Memory allocation stack id
// CHECK: Stack for id
// CHECK-TEXT: Memory allocation stack id
// CHECK-TEXT: Stack for id
// CHECK-TEXT: Memory allocation stack id
// CHECK-TEXT: Stack for id
//
// For the raw profile just check the header magic. The following check assumes that memprof
// runs on little endian architectures.
// CHECK-RAW: 0000000 201 r f o r p m 377