[sanitizer] Avoid possible deadlock in child process after fork

This patch addresses https://github.com/google/sanitizers/issues/774. When we
fork a multi-threaded process, it is possible to deadlock if some thread acquired
the StackDepot or an allocator-internal lock just before the fork. In that case the
lock is never released in the child process, and the next memory alloc/dealloc
routine there deadlocks. While calling alloc/dealloc routines after a multi-threaded
fork is technically not allowed, most modern allocators (glibc, tcmalloc, jemalloc)
are in fact fork-safe. Let's do the same for the sanitizers, except TSan, which has
complex locking rules.
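
As an illustration of the hazard (not part of the patch), here is a minimal
sketch using a plain pthread mutex in place of the sanitizer-internal locks;
pthread_atfork() plays the role of the fork interceptors added below, and all
names are illustrative:

  // Sketch only: the child of a multi-threaded fork inherits the state of
  // every lock but only the forking thread itself. If another thread held
  // `lock` at fork time, nobody in the child can ever release it, and the
  // child's next lock acquisition blocks forever.
  #include <pthread.h>
  #include <unistd.h>

  static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

  static void BeforeForkSketch() { pthread_mutex_lock(&lock); }
  static void AfterForkSketch() { pthread_mutex_unlock(&lock); }

  int main() {
    // Take the lock before fork() and release it in both parent and child
    // afterwards -- the same protocol the interceptors below implement for
    // the allocator and StackDepot locks.
    pthread_atfork(BeforeForkSketch, AfterForkSketch, AfterForkSketch);
    if (fork() == 0) {
      pthread_mutex_lock(&lock); // safe: the atfork child handler ran
      pthread_mutex_unlock(&lock);
      _exit(0);
    }
    return 0;
  }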

Differential Revision: https://reviews.llvm.org/D33325

llvm-svn: 304285
Maxim Ostapenko 2017-05-31 07:28:09 +00:00
parent 043fa4b3d6
commit 62a0f55930
8 changed files with 255 additions and 97 deletions

lib/asan/asan_allocator.cc

@@ -47,8 +47,6 @@ static u32 RZSize2Log(u32 rz_size) {
return res;
}
static AsanAllocator &get_allocator();
// The memory chunk allocated from the underlying allocator looks like this:
// L L L L L L H H U U U U U U R R
// L -- left redzone words (0 or more bytes)
@@ -719,7 +717,7 @@ struct Allocator {
static Allocator instance(LINKER_INITIALIZED);
static AsanAllocator &get_allocator() {
AsanAllocator &get_allocator() {
return instance.allocator;
}

lib/asan/asan_allocator.h

@@ -213,5 +213,7 @@ void asan_mz_force_unlock();
void PrintInternalAllocatorStats();
void AsanSoftRssLimitExceededCallback(bool exceeded);
AsanAllocator &get_allocator();
} // namespace __asan
#endif // ASAN_ALLOCATOR_H

lib/asan/asan_interceptors.cc

@@ -22,6 +22,7 @@
#include "asan_stats.h"
#include "asan_suppressions.h"
#include "lsan/lsan_common.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
#include "sanitizer_common/sanitizer_libc.h"
#if SANITIZER_POSIX
@@ -705,11 +706,23 @@ INTERCEPTOR(int, __cxa_atexit, void (*func)(void *), void *arg,
#endif // ASAN_INTERCEPT___CXA_ATEXIT
#if ASAN_INTERCEPT_FORK
static void BeforeFork() {
get_allocator().ForceLock();
StackDepotLockAll();
}
static void AfterFork() {
StackDepotUnlockAll();
get_allocator().ForceUnlock();
}
INTERCEPTOR(int, fork, void) {
ENSURE_ASAN_INITED();
BeforeFork();
if (common_flags()->coverage) CovBeforeFork();
int pid = REAL(fork)();
if (common_flags()->coverage) CovAfterFork(pid);
AfterFork();
return pid;
}
#endif // ASAN_INTERCEPT_FORK
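
Note the symmetry in the ASan hooks above: AfterFork releases in the exact
reverse of BeforeFork's acquisition order. A generic sketch of that
discipline, with std::mutex standing in for the sanitizer locks (names
illustrative):

  #include <mutex>

  // Acquire in a fixed global order, release in exact reverse order;
  // BeforeFork/AfterFork follow this with the allocator lock first and
  // the StackDepot locks second. Violating the order can itself deadlock.
  void LockForFork(std::mutex &allocator_mu, std::mutex &depot_mu) {
    allocator_mu.lock();
    depot_mu.lock();
  }
  void UnlockAfterFork(std::mutex &allocator_mu, std::mutex &depot_mu) {
    depot_mu.unlock();
    allocator_mu.unlock();
  }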

lib/lsan/lsan_interceptors.cc

@@ -22,6 +22,7 @@
#include "sanitizer_common/sanitizer_platform_interceptors.h"
#include "sanitizer_common/sanitizer_platform_limits_posix.h"
#include "sanitizer_common/sanitizer_posix.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
#include "sanitizer_common/sanitizer_tls_get_addr.h"
#include "lsan.h"
#include "lsan_allocator.h"
@@ -97,6 +98,24 @@ INTERCEPTOR(void*, valloc, uptr size) {
}
#endif
static void BeforeFork() {
LockAllocator();
StackDepotLockAll();
}
static void AfterFork() {
StackDepotUnlockAll();
UnlockAllocator();
}
INTERCEPTOR(int, fork, void) {
ENSURE_LSAN_INITED;
BeforeFork();
int pid = REAL(fork)();
AfterFork();
return pid;
}
#if SANITIZER_INTERCEPT_MEMALIGN
INTERCEPTOR(void*, memalign, uptr alignment, uptr size) {
ENSURE_LSAN_INITED;
@@ -336,6 +355,7 @@ void InitializeInterceptors() {
LSAN_MAYBE_INTERCEPT_MALLOPT;
INTERCEPT_FUNCTION(pthread_create);
INTERCEPT_FUNCTION(pthread_join);
INTERCEPT_FUNCTION(fork);
if (pthread_key_create(&g_thread_finalize_key, &thread_finalize)) {
Report("LeakSanitizer: failed to create thread key.\n");

lib/msan/msan_allocator.cc

@@ -12,8 +12,6 @@
// MemorySanitizer allocator.
//===----------------------------------------------------------------------===//
#include "sanitizer_common/sanitizer_allocator.h"
#include "sanitizer_common/sanitizer_allocator_interface.h"
#include "msan.h"
#include "msan_allocator.h"
#include "msan_origin.h"
@@ -22,102 +20,12 @@
namespace __msan {
struct Metadata {
uptr requested_size;
};
struct MsanMapUnmapCallback {
void OnMap(uptr p, uptr size) const {}
void OnUnmap(uptr p, uptr size) const {
__msan_unpoison((void *)p, size);
// We are about to unmap a chunk of user memory.
// Mark the corresponding shadow memory as not needed.
uptr shadow_p = MEM_TO_SHADOW(p);
ReleaseMemoryPagesToOS(shadow_p, shadow_p + size);
if (__msan_get_track_origins()) {
uptr origin_p = MEM_TO_ORIGIN(p);
ReleaseMemoryPagesToOS(origin_p, origin_p + size);
}
}
};
#if defined(__mips64)
static const uptr kMaxAllowedMallocSize = 2UL << 30;
static const uptr kRegionSizeLog = 20;
static const uptr kNumRegions = SANITIZER_MMAP_RANGE_SIZE >> kRegionSizeLog;
typedef TwoLevelByteMap<(kNumRegions >> 12), 1 << 12> ByteMap;
struct AP32 {
static const uptr kSpaceBeg = 0;
static const u64 kSpaceSize = SANITIZER_MMAP_RANGE_SIZE;
static const uptr kMetadataSize = sizeof(Metadata);
typedef __sanitizer::CompactSizeClassMap SizeClassMap;
static const uptr kRegionSizeLog = __msan::kRegionSizeLog;
typedef __msan::ByteMap ByteMap;
typedef MsanMapUnmapCallback MapUnmapCallback;
static const uptr kFlags = 0;
};
typedef SizeClassAllocator32<AP32> PrimaryAllocator;
#elif defined(__x86_64__)
#if SANITIZER_LINUX && !defined(MSAN_LINUX_X86_64_OLD_MAPPING)
static const uptr kAllocatorSpace = 0x700000000000ULL;
#else
static const uptr kAllocatorSpace = 0x600000000000ULL;
#endif
static const uptr kMaxAllowedMallocSize = 8UL << 30;
struct AP64 { // Allocator64 parameters. Deliberately using a short name.
static const uptr kSpaceBeg = kAllocatorSpace;
static const uptr kSpaceSize = 0x40000000000; // 4T.
static const uptr kMetadataSize = sizeof(Metadata);
typedef DefaultSizeClassMap SizeClassMap;
typedef MsanMapUnmapCallback MapUnmapCallback;
static const uptr kFlags = 0;
};
typedef SizeClassAllocator64<AP64> PrimaryAllocator;
#elif defined(__powerpc64__)
static const uptr kMaxAllowedMallocSize = 2UL << 30; // 2G
struct AP64 { // Allocator64 parameters. Deliberately using a short name.
static const uptr kSpaceBeg = 0x300000000000;
static const uptr kSpaceSize = 0x020000000000; // 2T.
static const uptr kMetadataSize = sizeof(Metadata);
typedef DefaultSizeClassMap SizeClassMap;
typedef MsanMapUnmapCallback MapUnmapCallback;
static const uptr kFlags = 0;
};
typedef SizeClassAllocator64<AP64> PrimaryAllocator;
#elif defined(__aarch64__)
static const uptr kMaxAllowedMallocSize = 2UL << 30; // 2G
static const uptr kRegionSizeLog = 20;
static const uptr kNumRegions = SANITIZER_MMAP_RANGE_SIZE >> kRegionSizeLog;
typedef TwoLevelByteMap<(kNumRegions >> 12), 1 << 12> ByteMap;
struct AP32 {
static const uptr kSpaceBeg = 0;
static const u64 kSpaceSize = SANITIZER_MMAP_RANGE_SIZE;
static const uptr kMetadataSize = sizeof(Metadata);
typedef __sanitizer::CompactSizeClassMap SizeClassMap;
static const uptr kRegionSizeLog = __msan::kRegionSizeLog;
typedef __msan::ByteMap ByteMap;
typedef MsanMapUnmapCallback MapUnmapCallback;
static const uptr kFlags = 0;
};
typedef SizeClassAllocator32<AP32> PrimaryAllocator;
#endif
typedef SizeClassAllocatorLocalCache<PrimaryAllocator> AllocatorCache;
typedef LargeMmapAllocator<MsanMapUnmapCallback> SecondaryAllocator;
typedef CombinedAllocator<PrimaryAllocator, AllocatorCache,
SecondaryAllocator> Allocator;
static Allocator allocator;
static AllocatorCache fallback_allocator_cache;
static SpinMutex fallback_mutex;
Allocator &get_allocator() { return allocator; }
void MsanAllocatorInit() {
allocator.Init(
common_flags()->allocator_may_return_null,

lib/msan/msan_allocator.h

@@ -15,9 +15,106 @@
#define MSAN_ALLOCATOR_H
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_allocator.h"
#include "sanitizer_common/sanitizer_allocator_interface.h"
namespace __msan {
struct Metadata {
uptr requested_size;
};
struct MsanMapUnmapCallback {
void OnMap(uptr p, uptr size) const {}
void OnUnmap(uptr p, uptr size) const {
__msan_unpoison((void *)p, size);
// We are about to unmap a chunk of user memory.
// Mark the corresponding shadow memory as not needed.
uptr shadow_p = MEM_TO_SHADOW(p);
ReleaseMemoryPagesToOS(shadow_p, shadow_p + size);
if (__msan_get_track_origins()) {
uptr origin_p = MEM_TO_ORIGIN(p);
ReleaseMemoryPagesToOS(origin_p, origin_p + size);
}
}
};
#if defined(__mips64)
static const uptr kMaxAllowedMallocSize = 2UL << 30;
static const uptr kRegionSizeLog = 20;
static const uptr kNumRegions = SANITIZER_MMAP_RANGE_SIZE >> kRegionSizeLog;
typedef TwoLevelByteMap<(kNumRegions >> 12), 1 << 12> ByteMap;
struct AP32 {
static const uptr kSpaceBeg = 0;
static const u64 kSpaceSize = SANITIZER_MMAP_RANGE_SIZE;
static const uptr kMetadataSize = sizeof(Metadata);
typedef __sanitizer::CompactSizeClassMap SizeClassMap;
static const uptr kRegionSizeLog = __msan::kRegionSizeLog;
typedef __msan::ByteMap ByteMap;
typedef MsanMapUnmapCallback MapUnmapCallback;
static const uptr kFlags = 0;
};
typedef SizeClassAllocator32<AP32> PrimaryAllocator;
#elif defined(__x86_64__)
#if SANITIZER_LINUX && !defined(MSAN_LINUX_X86_64_OLD_MAPPING)
static const uptr kAllocatorSpace = 0x700000000000ULL;
#else
static const uptr kAllocatorSpace = 0x600000000000ULL;
#endif
static const uptr kMaxAllowedMallocSize = 8UL << 30;
struct AP64 { // Allocator64 parameters. Deliberately using a short name.
static const uptr kSpaceBeg = kAllocatorSpace;
static const uptr kSpaceSize = 0x40000000000; // 4T.
static const uptr kMetadataSize = sizeof(Metadata);
typedef DefaultSizeClassMap SizeClassMap;
typedef MsanMapUnmapCallback MapUnmapCallback;
static const uptr kFlags = 0;
};
typedef SizeClassAllocator64<AP64> PrimaryAllocator;
#elif defined(__powerpc64__)
static const uptr kMaxAllowedMallocSize = 2UL << 30; // 2G
struct AP64 { // Allocator64 parameters. Deliberately using a short name.
static const uptr kSpaceBeg = 0x300000000000;
static const uptr kSpaceSize = 0x020000000000; // 2T.
static const uptr kMetadataSize = sizeof(Metadata);
typedef DefaultSizeClassMap SizeClassMap;
typedef MsanMapUnmapCallback MapUnmapCallback;
static const uptr kFlags = 0;
};
typedef SizeClassAllocator64<AP64> PrimaryAllocator;
#elif defined(__aarch64__)
static const uptr kMaxAllowedMallocSize = 2UL << 30; // 2G
static const uptr kRegionSizeLog = 20;
static const uptr kNumRegions = SANITIZER_MMAP_RANGE_SIZE >> kRegionSizeLog;
typedef TwoLevelByteMap<(kNumRegions >> 12), 1 << 12> ByteMap;
struct AP32 {
static const uptr kSpaceBeg = 0;
static const u64 kSpaceSize = SANITIZER_MMAP_RANGE_SIZE;
static const uptr kMetadataSize = sizeof(Metadata);
typedef __sanitizer::CompactSizeClassMap SizeClassMap;
static const uptr kRegionSizeLog = __msan::kRegionSizeLog;
typedef __msan::ByteMap ByteMap;
typedef MsanMapUnmapCallback MapUnmapCallback;
static const uptr kFlags = 0;
};
typedef SizeClassAllocator32<AP32> PrimaryAllocator;
#endif
typedef SizeClassAllocatorLocalCache<PrimaryAllocator> AllocatorCache;
typedef LargeMmapAllocator<MsanMapUnmapCallback> SecondaryAllocator;
typedef CombinedAllocator<PrimaryAllocator, AllocatorCache,
SecondaryAllocator> Allocator;
Allocator &get_allocator();
struct MsanThreadLocalMallocStorage {
uptr quarantine_cache[16];
// Allocator cache contains atomic_uint64_t which must be 8-byte aligned.
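
Most of the msan_allocator.cc/msan_allocator.h churn above is a mechanical
move: the allocator types migrate into the header so that the interceptors
file can name the Allocator type and call get_allocator(), while the
singleton instance stays private to the implementation file. Schematically
(a reduced sketch with illustrative names):

  // sketch_allocator.h -- what the header now provides:
  struct Allocator {
    void ForceLock() { /* ... */ }
    void ForceUnlock() { /* ... */ }
  };
  Allocator &get_allocator();

  // sketch_allocator.cc -- the singleton keeps internal linkage:
  static Allocator allocator;
  Allocator &get_allocator() { return allocator; }

  // Any other translation unit (e.g. the interceptors) can now call:
  //   get_allocator().ForceLock();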

lib/msan/msan_interceptors.cc

@@ -1228,6 +1228,7 @@ INTERCEPTOR(void *, shmat, int shmid, const void *shmaddr, int shmflg) {
}
static void BeforeFork() {
get_allocator().ForceLock();
StackDepotLockAll();
ChainedOriginDepotLockAll();
}
@@ -1235,6 +1236,7 @@ static void BeforeFork() {
static void AfterFork() {
ChainedOriginDepotUnlockAll();
StackDepotUnlockAll();
get_allocator().ForceUnlock();
}
INTERCEPTOR(int, fork, void) {
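
MSan's hooks take one more lock than ASan's because origin tracking
allocates chained-origin-depot entries. The invariant, summarized as
comments (same calls as in the hunks above):

  // BeforeFork must quiesce every lock a post-fork malloc/free could need,
  // in a fixed order:
  //   get_allocator().ForceLock();   // allocator metadata
  //   StackDepotLockAll();           // allocation stack traces
  //   ChainedOriginDepotLockAll();   // origin chains (MSan-specific)
  // AfterFork releases the same set in reverse; missing any one of them
  // reintroduces the child-side deadlock this patch fixes.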

test/sanitizer_common/TestCases/Posix/fork_threaded.cc

@@ -0,0 +1,118 @@
// https://github.com/google/sanitizers/issues/774
// Test that sanitizer allocator is fork-safe.
// Run a number of threads that perform memory allocation/deallocation, then fork
// and verify that malloc/free do not deadlock in the child process.
// RUN: %clangxx -std=c++11 -O0 %s -o %t
// RUN: ASAN_OPTIONS=detect_leaks=0 %run %t 2>&1 | FileCheck %s
// Fun fact: if test output is redirected to a file (as opposed to
// being piped directly to FileCheck), we may lose some "done"s due to
// a kernel bug:
// https://lkml.org/lkml/2014/2/17/324
// UNSUPPORTED: tsan
// Flaky on PPC64.
// UNSUPPORTED: powerpc64-target-arch
// UNSUPPORTED: powerpc64le-target-arch
#include <pthread.h>
#include <unistd.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <sys/time.h>
#include <signal.h>
#include <errno.h>
int done;
void *worker(void *arg) {
while (true) {
void *p = malloc(4);
if (__atomic_load_n(&done, __ATOMIC_RELAXED))
return 0;
}
return 0;
}
// Run through malloc/free in the child process.
// This can deadlock on allocator cache refilling.
void child() {
for (int i = 0; i < 10000; ++i) {
void *p = malloc(4);
}
write(2, "done\n", 5);
}
void test() {
const int kThreads = 10;
pthread_t t[kThreads];
for (int i = 0; i < kThreads; ++i)
pthread_create(&t[i], NULL, worker, (void*)(long)i);
usleep(100000);
pid_t pid = fork();
if (pid) {
// parent
__atomic_store_n(&done, 1, __ATOMIC_RELAXED);
pid_t p;
while ((p = wait(NULL)) == -1) { }
} else {
// child
child();
}
}
int main() {
const int kChildren = 30;
for (int i = 0; i < kChildren; ++i) {
pid_t pid = fork();
if (pid) {
// parent
} else {
test();
exit(0);
}
}
for (int i = 0; i < kChildren; ++i) {
pid_t p;
while ((p = wait(NULL)) == -1) { }
}
return 0;
}
// Expect 30 (== kChildren) "done" messages.
// CHECK: done
// CHECK: done
// CHECK: done
// CHECK: done
// CHECK: done
// CHECK: done
// CHECK: done
// CHECK: done
// CHECK: done
// CHECK: done
// CHECK: done
// CHECK: done
// CHECK: done
// CHECK: done
// CHECK: done
// CHECK: done
// CHECK: done
// CHECK: done
// CHECK: done
// CHECK: done
// CHECK: done
// CHECK: done
// CHECK: done
// CHECK: done
// CHECK: done
// CHECK: done
// CHECK: done
// CHECK: done
// CHECK: done
// CHECK: done