tsan: capture shadow map start/end on init and reuse in reset

Capture the computed shadow begin/end values at the point where the
shadow is first created and reuse those values on reset. Introduce new
windows-specific function "ZeroMmapFixedRegion" for zeroing out an
address space region previously returned by one of the MmapFixed*
routines; call this function (on windows) from DoResetImpl in
tsan_rtl.cpp instead of MmapFixedSuperNoReserve.

See https://github.com/golang/go/issues/53539#issuecomment-1168778740
for context; intended to help with updating the syso for Go's
windows/amd64 race detector.

Differential Revision: https://reviews.llvm.org/D128909
This commit is contained in:
Than McIntosh 2022-06-30 09:31:17 -04:00
parent f2b7f18e63
commit b6374437af
6 changed files with 76 additions and 7 deletions

View File

@ -120,6 +120,11 @@ bool MprotectReadOnly(uptr addr, uptr size);
void MprotectMallocZones(void *addr, int prot);
#if SANITIZER_WINDOWS
// Zero previously mmap'd memory. Currently used only on Windows.
bool ZeroMmapFixedRegion(uptr fixed_addr, uptr size) WARN_UNUSED_RESULT;
#endif
#if SANITIZER_LINUX
// Unmap memory. Currently only used on Linux.
void UnmapFromTo(uptr from, uptr to);

View File

@ -234,6 +234,17 @@ void *MmapAlignedOrDieOnFatalError(uptr size, uptr alignment,
return (void *)mapped_addr;
}
// ZeroMmapFixedRegion zeros out a region of memory previously returned from a
// call to one of the MmapFixed* helpers. On non-windows systems this would be
// done with another mmap, but on windows remapping is not an option.
// VirtualFree(DECOMMIT)+VirtualAlloc(RECOMMIT) would also be a way to zero the
// memory, but we can't do this atomically, so instead we fall back to using
// internal_memset.
bool ZeroMmapFixedRegion(uptr fixed_addr, uptr size) {
  internal_memset((void*) fixed_addr, 0, size);
  return true;
}
bool MmapFixedNoReserve(uptr fixed_addr, uptr size, const char *name) {
// FIXME: is this really "NoReserve"? On Win32 this does not matter much,
// but on Win64 it does.

View File

@ -57,6 +57,7 @@ gcc ^
-Wno-format ^
-Wno-maybe-uninitialized ^
-DSANITIZER_DEBUG=0 ^
-DSANITIZER_WINDOWS=1 ^
-O3 ^
-fomit-frame-pointer ^
-msse3 ^

View File

@ -394,6 +394,7 @@ struct MappingGo48 {
0300 0000 0000 - 0700 0000 0000: -
0700 0000 0000 - 0770 0000 0000: metainfo (memory blocks and sync objects)
07d0 0000 0000 - 8000 0000 0000: -
PIE binaries currently not supported, but it should be theoretically possible.
*/
struct MappingGoWindows {

View File

@ -197,8 +197,24 @@ static void DoResetImpl(uptr epoch) {
}
DPrintf("Resetting shadow...\n");
if (!MmapFixedSuperNoReserve(ShadowBeg(), ShadowEnd() - ShadowBeg(),
"shadow")) {
auto shadow_begin = ShadowBeg();
auto shadow_end = ShadowEnd();
#if SANITIZER_GO
CHECK_NE(0, ctx->mapped_shadow_begin);
shadow_begin = ctx->mapped_shadow_begin;
shadow_end = ctx->mapped_shadow_end;
VPrintf(2, "shadow_begin-shadow_end: (0x%zx-0x%zx)\n",
shadow_begin, shadow_end);
#endif
#if SANITIZER_WINDOWS
auto resetFailed =
!ZeroMmapFixedRegion(shadow_begin, shadow_end - shadow_begin);
#else
auto resetFailed =
!MmapFixedSuperNoReserve(shadow_begin, shadow_end-shadow_begin, "shadow");
#endif
if (resetFailed) {
Printf("failed to reset shadow memory\n");
Die();
}
@ -557,18 +573,50 @@ void UnmapShadow(ThreadState *thr, uptr addr, uptr size) {
#endif
void MapShadow(uptr addr, uptr size) {
// Ensure thread registry lock held, so as to synchronize
// with DoReset, which also accesses the mapped_shadow_* ctx fields.
ThreadRegistryLock lock0(&ctx->thread_registry);
static bool data_mapped = false;
#if !SANITIZER_GO
// Global data is not 64K aligned, but there are no adjacent mappings,
// so we can get away with unaligned mapping.
// CHECK_EQ(addr, addr & ~((64 << 10) - 1)); // windows wants 64K alignment
const uptr kPageSize = GetPageSizeCached();
uptr shadow_begin = RoundDownTo((uptr)MemToShadow(addr), kPageSize);
uptr shadow_end = RoundUpTo((uptr)MemToShadow(addr + size), kPageSize);
if (!MmapFixedSuperNoReserve(shadow_begin, shadow_end - shadow_begin,
"shadow"))
if (!MmapFixedNoReserve(shadow_begin, shadow_end - shadow_begin, "shadow"))
Die();
#else
uptr shadow_begin = RoundDownTo((uptr)MemToShadow(addr), (64 << 10));
uptr shadow_end = RoundUpTo((uptr)MemToShadow(addr + size), (64 << 10));
VPrintf(2, "MapShadow for (0x%zx-0x%zx), begin/end: (0x%zx-0x%zx)\n",
addr, addr + size, shadow_begin, shadow_end);
if (!data_mapped) {
// First call maps data+bss.
if (!MmapFixedSuperNoReserve(shadow_begin, shadow_end - shadow_begin, "shadow"))
Die();
} else {
VPrintf(2, "ctx->mapped_shadow_{begin,end} = (0x%zx-0x%zx)\n",
ctx->mapped_shadow_begin, ctx->mapped_shadow_end);
// Second and subsequent calls map heap.
if (shadow_end <= ctx->mapped_shadow_end)
return;
if (ctx->mapped_shadow_begin < shadow_begin)
ctx->mapped_shadow_begin = shadow_begin;
if (shadow_begin < ctx->mapped_shadow_end)
shadow_begin = ctx->mapped_shadow_end;
VPrintf(2, "MapShadow begin/end = (0x%zx-0x%zx)\n",
shadow_begin, shadow_end);
if (!MmapFixedSuperNoReserve(shadow_begin, shadow_end - shadow_begin,
"shadow"))
Die();
ctx->mapped_shadow_end = shadow_end;
}
#endif
// Meta shadow is 2:1, so tread carefully.
static bool data_mapped = false;
static uptr mapped_meta_end = 0;
uptr meta_begin = (uptr)MemToMeta(addr);
uptr meta_end = (uptr)MemToMeta(addr + size);
@ -585,8 +633,7 @@ void MapShadow(uptr addr, uptr size) {
// Windows wants 64K alignment.
meta_begin = RoundDownTo(meta_begin, 64 << 10);
meta_end = RoundUpTo(meta_end, 64 << 10);
if (meta_end <= mapped_meta_end)
return;
CHECK_GT(meta_end, mapped_meta_end);
if (meta_begin < mapped_meta_end)
meta_begin = mapped_meta_end;
if (!MmapFixedSuperNoReserve(meta_begin, meta_end - meta_begin,

View File

@ -372,6 +372,10 @@ struct Context {
uptr trace_part_total_allocated SANITIZER_GUARDED_BY(slot_mtx);
uptr trace_part_recycle_finished SANITIZER_GUARDED_BY(slot_mtx);
uptr trace_part_finished_excess SANITIZER_GUARDED_BY(slot_mtx);
#if SANITIZER_GO
uptr mapped_shadow_begin;
uptr mapped_shadow_end;
#endif
};
extern Context *ctx; // The one and the only global runtime context.