[libsanitizer] Unmapping the old cache partially invalidates the memory layout, so add
a flag to skip cache update for cases when that's unacceptable (e.g. lsan).

Patch by Sergey Matveev (earthdok@google.com)

llvm-svn: 178000
Alexander Potapenko 2013-03-26 10:34:37 +00:00
parent 90b45124cd
commit f8109dd0f8
7 changed files with 23 additions and 20 deletions
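
As a usage illustration (not part of this commit; the helper name below is made up): a client for which invalidating the memory layout is unacceptable, such as lsan per the commit message, would pass cache_enabled = false. The constructor then insists on a fresh, successful read of /proc/self/maps instead of falling back to the cache, and it skips CacheMemoryMappings(), so the existing cached snapshot is never unmapped.

    // Sketch only: assumes the sanitizer_common headers; VisitMappingsNoCache
    // is a hypothetical helper, not code from this patch.
    #include "sanitizer_common/sanitizer_procmaps.h"

    namespace __sanitizer {

    static void VisitMappingsNoCache() {
      // cache_enabled == false: no LoadFromCache() fallback and no
      // CacheMemoryMappings(), so the cached snapshot stays mapped.
      MemoryMappingLayout proc_maps(/*cache_enabled*/false);
      uptr start, end;
      while (proc_maps.Next(&start, &end, /*offset*/0, /*filename*/0,
                            /*filename_size*/0, /*protection*/0)) {
        // ... inspect the mapping [start, end) ...
      }
    }

    }  // namespace __sanitizer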

@@ -70,7 +70,7 @@ namespace __msan {
 static bool IsRunningUnderDr() {
   bool result = false;
-  MemoryMappingLayout proc_maps;
+  MemoryMappingLayout proc_maps(/*cache_enabled*/true);
   const sptr kBufSize = 4095;
   char *filename = (char*)MmapOrDie(kBufSize, __FUNCTION__);
   while (proc_maps.Next(/* start */0, /* end */0, /* file_offset */0,

@@ -200,7 +200,7 @@ void GetThreadStackTopAndBottom(bool at_initialization, uptr *stack_top,
     CHECK_EQ(getrlimit(RLIMIT_STACK, &rl), 0);
     // Find the mapping that contains a stack variable.
-    MemoryMappingLayout proc_maps;
+    MemoryMappingLayout proc_maps(/*cache_enabled*/true);
     uptr start, end, offset;
     uptr prev_end = 0;
     while (proc_maps.Next(&start, &end, &offset, 0, 0, /* protection */0)) {
@@ -341,18 +341,22 @@ void PrepareForSandboxing() {
 ProcSelfMapsBuff MemoryMappingLayout::cached_proc_self_maps_;
 StaticSpinMutex MemoryMappingLayout::cache_lock_;  // Linker initialized.
-MemoryMappingLayout::MemoryMappingLayout() {
+MemoryMappingLayout::MemoryMappingLayout(bool cache_enabled) {
   proc_self_maps_.len =
       ReadFileToBuffer("/proc/self/maps", &proc_self_maps_.data,
                        &proc_self_maps_.mmaped_size, 1 << 26);
-  if (proc_self_maps_.mmaped_size == 0) {
-    LoadFromCache();
-    CHECK_GT(proc_self_maps_.len, 0);
+  if (cache_enabled) {
+    if (proc_self_maps_.mmaped_size == 0) {
+      LoadFromCache();
+      CHECK_GT(proc_self_maps_.len, 0);
+    }
+  } else {
+    CHECK_GT(proc_self_maps_.mmaped_size, 0);
   }
   // internal_write(2, proc_self_maps_.data, proc_self_maps_.len);
   Reset();
   // FIXME: in the future we may want to cache the mappings on demand only.
-  CacheMemoryMappings();
+  if (cache_enabled)
+    CacheMemoryMappings();
 }
 
 MemoryMappingLayout::~MemoryMappingLayout() {
@@ -643,7 +647,6 @@ int internal_sigaltstack(const struct sigaltstack *ss,
   return syscall(__NR_sigaltstack, ss, oss);
 }
 
 // ThreadLister implementation.
 ThreadLister::ThreadLister(int pid)
   : pid_(pid),

@@ -165,7 +165,7 @@ void PrepareForSandboxing() {
 // ----------------- sanitizer_procmaps.h
 
-MemoryMappingLayout::MemoryMappingLayout() {
+MemoryMappingLayout::MemoryMappingLayout(bool cache_enabled) {
   Reset();
 }

@@ -151,11 +151,11 @@ static inline bool IntervalsAreSeparate(uptr start1, uptr end1,
 // several worker threads on Mac, which aren't expected to map big chunks of
 // memory).
 bool MemoryRangeIsAvailable(uptr range_start, uptr range_end) {
-  MemoryMappingLayout procmaps;
+  MemoryMappingLayout proc_maps(/*cache_enabled*/true);
   uptr start, end;
-  while (procmaps.Next(&start, &end,
-                       /*offset*/0, /*filename*/0, /*filename_size*/0,
-                       /*protection*/0)) {
+  while (proc_maps.Next(&start, &end,
+                        /*offset*/0, /*filename*/0, /*filename_size*/0,
+                        /*protection*/0)) {
     if (!IntervalsAreSeparate(start, end, range_start, range_end))
       return false;
   }
@@ -163,7 +163,7 @@ bool MemoryRangeIsAvailable(uptr range_start, uptr range_end) {
 }
 
 void DumpProcessMap() {
-  MemoryMappingLayout proc_maps;
+  MemoryMappingLayout proc_maps(/*cache_enabled*/true);
   uptr start, end;
   const sptr kBufSize = 4095;
   char *filename = (char*)MmapOrDie(kBufSize, __FUNCTION__);

@@ -41,7 +41,7 @@ struct ProcSelfMapsBuff {
 class MemoryMappingLayout {
  public:
-  MemoryMappingLayout();
+  explicit MemoryMappingLayout(bool cache_enabled);
   bool Next(uptr *start, uptr *end, uptr *offset,
             char filename[], uptr filename_size, uptr *protection);
   void Reset();

@@ -64,7 +64,7 @@ static void PrintModuleAndOffset(const char *module, uptr offset,
 void StackTrace::PrintStack(const uptr *addr, uptr size,
                             bool symbolize, const char *strip_file_prefix,
                             SymbolizeCallback symbolize_callback) {
-  MemoryMappingLayout proc_maps;
+  MemoryMappingLayout proc_maps(/*cache_enabled*/true);
   InternalScopedBuffer<char> buff(GetPageSizeCached() * 2);
   InternalScopedBuffer<AddressInfo> addr_frames(64);
   uptr frame_num = 0;

@@ -198,7 +198,7 @@ static void MapRodata() {
     return;
   }
   // Map the file into shadow of .rodata sections.
-  MemoryMappingLayout proc_maps;
+  MemoryMappingLayout proc_maps(/*cache_enabled*/true);
   uptr start, end, offset, prot;
   char name[128];
   while (proc_maps.Next(&start, &end, &offset, name, ARRAY_SIZE(name), &prot)) {
@@ -257,7 +257,7 @@ static uptr g_data_end;
 #ifndef TSAN_GO
 static void CheckPIE() {
   // Ensure that the binary is indeed compiled with -pie.
-  MemoryMappingLayout proc_maps;
+  MemoryMappingLayout proc_maps(true);
   uptr start, end;
   if (proc_maps.Next(&start, &end,
                      /*offset*/0, /*filename*/0, /*filename_size*/0,
@@ -274,7 +274,7 @@ static void CheckPIE() {
 }
 
 static void InitDataSeg() {
-  MemoryMappingLayout proc_maps;
+  MemoryMappingLayout proc_maps(true);
   uptr start, end, offset;
   char name[128];
   bool prev_is_data = false;