forked from OSchip/llvm-project
Use pthreads for thread-local lsan allocator cache on darwin
Summary: This patch allows us to move away from using __thread on darwin, which is required for building lsan for darwin on ios version 7 and on iossim i386. Reviewers: kubamracek, kcc Subscribers: llvm-commits Differential Revision: https://reviews.llvm.org/D29994 llvm-svn: 295413
This commit is contained in:
parent
32f0ec51d1
commit
01159f5c58
|
@ -24,53 +24,18 @@
|
|||
extern "C" void *memset(void *ptr, int value, uptr num);
|
||||
|
||||
namespace __lsan {
|
||||
|
||||
struct ChunkMetadata {
|
||||
u8 allocated : 8; // Must be first.
|
||||
ChunkTag tag : 2;
|
||||
#if SANITIZER_WORDSIZE == 64
|
||||
uptr requested_size : 54;
|
||||
#else
|
||||
uptr requested_size : 32;
|
||||
uptr padding : 22;
|
||||
#endif
|
||||
u32 stack_trace_id;
|
||||
};
|
||||
|
||||
#if defined(__mips64) || defined(__aarch64__) || defined(__i386__)
|
||||
#if defined(__i386__)
|
||||
static const uptr kMaxAllowedMallocSize = 1UL << 30;
|
||||
#else
|
||||
#elif defined(__mips64) || defined(__aarch64__)
|
||||
static const uptr kMaxAllowedMallocSize = 4UL << 30;
|
||||
#endif
|
||||
static const uptr kRegionSizeLog = 20;
|
||||
static const uptr kNumRegions = SANITIZER_MMAP_RANGE_SIZE >> kRegionSizeLog;
|
||||
typedef TwoLevelByteMap<(kNumRegions >> 12), 1 << 12> ByteMap;
|
||||
typedef CompactSizeClassMap SizeClassMap;
|
||||
typedef SizeClassAllocator32<0, SANITIZER_MMAP_RANGE_SIZE,
|
||||
sizeof(ChunkMetadata), SizeClassMap, kRegionSizeLog, ByteMap>
|
||||
PrimaryAllocator;
|
||||
#else
|
||||
static const uptr kMaxAllowedMallocSize = 8UL << 30;
|
||||
|
||||
struct AP64 { // Allocator64 parameters. Deliberately using a short name.
|
||||
static const uptr kSpaceBeg = 0x600000000000ULL;
|
||||
static const uptr kSpaceSize = 0x40000000000ULL; // 4T.
|
||||
static const uptr kMetadataSize = sizeof(ChunkMetadata);
|
||||
typedef DefaultSizeClassMap SizeClassMap;
|
||||
typedef NoOpMapUnmapCallback MapUnmapCallback;
|
||||
static const uptr kFlags = 0;
|
||||
};
|
||||
|
||||
typedef SizeClassAllocator64<AP64> PrimaryAllocator;
|
||||
#endif
|
||||
typedef SizeClassAllocatorLocalCache<PrimaryAllocator> AllocatorCache;
|
||||
typedef LargeMmapAllocator<> SecondaryAllocator;
|
||||
typedef CombinedAllocator<PrimaryAllocator, AllocatorCache,
|
||||
SecondaryAllocator> Allocator;
|
||||
|
||||
static Allocator allocator;
|
||||
static THREADLOCAL AllocatorCache cache;
|
||||
|
||||
void InitializeAllocator() {
|
||||
allocator.InitLinkerInitialized(
|
||||
|
@ -79,7 +44,7 @@ void InitializeAllocator() {
|
|||
}
|
||||
|
||||
void AllocatorThreadFinish() {
|
||||
allocator.SwallowCache(&cache);
|
||||
allocator.SwallowCache(GetAllocatorCache());
|
||||
}
|
||||
|
||||
static ChunkMetadata *Metadata(const void *p) {
|
||||
|
@ -111,7 +76,7 @@ void *Allocate(const StackTrace &stack, uptr size, uptr alignment,
|
|||
Report("WARNING: LeakSanitizer failed to allocate %zu bytes\n", size);
|
||||
return nullptr;
|
||||
}
|
||||
void *p = allocator.Allocate(&cache, size, alignment, false);
|
||||
void *p = allocator.Allocate(GetAllocatorCache(), size, alignment, false);
|
||||
// Do not rely on the allocator to clear the memory (it's slow).
|
||||
if (cleared && allocator.FromPrimary(p))
|
||||
memset(p, 0, size);
|
||||
|
@ -125,7 +90,7 @@ void Deallocate(void *p) {
|
|||
if (&__sanitizer_free_hook) __sanitizer_free_hook(p);
|
||||
RunFreeHooks(p);
|
||||
RegisterDeallocation(p);
|
||||
allocator.Deallocate(&cache, p);
|
||||
allocator.Deallocate(GetAllocatorCache(), p);
|
||||
}
|
||||
|
||||
void *Reallocate(const StackTrace &stack, void *p, uptr new_size,
|
||||
|
@ -133,17 +98,17 @@ void *Reallocate(const StackTrace &stack, void *p, uptr new_size,
|
|||
RegisterDeallocation(p);
|
||||
if (new_size > kMaxAllowedMallocSize) {
|
||||
Report("WARNING: LeakSanitizer failed to allocate %zu bytes\n", new_size);
|
||||
allocator.Deallocate(&cache, p);
|
||||
allocator.Deallocate(GetAllocatorCache(), p);
|
||||
return nullptr;
|
||||
}
|
||||
p = allocator.Reallocate(&cache, p, new_size, alignment);
|
||||
p = allocator.Reallocate(GetAllocatorCache(), p, new_size, alignment);
|
||||
RegisterAllocation(stack, p, new_size);
|
||||
return p;
|
||||
}
|
||||
|
||||
void GetAllocatorCacheRange(uptr *begin, uptr *end) {
|
||||
*begin = (uptr)&cache;
|
||||
*end = *begin + sizeof(cache);
|
||||
*begin = (uptr)GetAllocatorCache();
|
||||
*end = *begin + sizeof(AllocatorCache);
|
||||
}
|
||||
|
||||
uptr GetMallocUsableSize(const void *p) {
|
||||
|
|
|
@ -15,8 +15,10 @@
|
|||
#ifndef LSAN_ALLOCATOR_H
|
||||
#define LSAN_ALLOCATOR_H
|
||||
|
||||
#include "sanitizer_common/sanitizer_allocator.h"
|
||||
#include "sanitizer_common/sanitizer_common.h"
|
||||
#include "sanitizer_common/sanitizer_internal_defs.h"
|
||||
#include "lsan_common.h"
|
||||
|
||||
namespace __lsan {
|
||||
|
||||
|
@ -34,6 +36,41 @@ void GetAllocatorCacheRange(uptr *begin, uptr *end);
|
|||
void AllocatorThreadFinish();
|
||||
void InitializeAllocator();
|
||||
|
||||
struct ChunkMetadata {
|
||||
u8 allocated : 8; // Must be first.
|
||||
ChunkTag tag : 2;
|
||||
#if SANITIZER_WORDSIZE == 64
|
||||
uptr requested_size : 54;
|
||||
#else
|
||||
uptr requested_size : 32;
|
||||
uptr padding : 22;
|
||||
#endif
|
||||
u32 stack_trace_id;
|
||||
};
|
||||
|
||||
#if defined(__mips64) || defined(__aarch64__) || defined(__i386__)
|
||||
static const uptr kRegionSizeLog = 20;
|
||||
static const uptr kNumRegions = SANITIZER_MMAP_RANGE_SIZE >> kRegionSizeLog;
|
||||
typedef TwoLevelByteMap<(kNumRegions >> 12), 1 << 12> ByteMap;
|
||||
typedef CompactSizeClassMap SizeClassMap;
|
||||
typedef SizeClassAllocator32<0, SANITIZER_MMAP_RANGE_SIZE,
|
||||
sizeof(ChunkMetadata), SizeClassMap, kRegionSizeLog, ByteMap>
|
||||
PrimaryAllocator;
|
||||
#elif defined(__x86_64__)
|
||||
struct AP64 { // Allocator64 parameters. Deliberately using a short name.
|
||||
static const uptr kSpaceBeg = 0x600000000000ULL;
|
||||
static const uptr kSpaceSize = 0x40000000000ULL; // 4T.
|
||||
static const uptr kMetadataSize = sizeof(ChunkMetadata);
|
||||
typedef DefaultSizeClassMap SizeClassMap;
|
||||
typedef NoOpMapUnmapCallback MapUnmapCallback;
|
||||
static const uptr kFlags = 0;
|
||||
};
|
||||
|
||||
typedef SizeClassAllocator64<AP64> PrimaryAllocator;
|
||||
#endif
|
||||
typedef SizeClassAllocatorLocalCache<PrimaryAllocator> AllocatorCache;
|
||||
|
||||
AllocatorCache *GetAllocatorCache();
|
||||
} // namespace __lsan
|
||||
|
||||
#endif // LSAN_ALLOCATOR_H
|
||||
|
|
|
@ -22,6 +22,7 @@
|
|||
#include "sanitizer_common/sanitizer_flags.h"
|
||||
#include "sanitizer_common/sanitizer_linux.h"
|
||||
#include "sanitizer_common/sanitizer_stackdepot.h"
|
||||
#include "lsan_allocator.h"
|
||||
|
||||
namespace __lsan {
|
||||
|
||||
|
@ -38,6 +39,9 @@ static THREADLOCAL u32 current_thread_tid = kInvalidTid;
|
|||
u32 GetCurrentThread() { return current_thread_tid; }
|
||||
void SetCurrentThread(u32 tid) { current_thread_tid = tid; }
|
||||
|
||||
static THREADLOCAL AllocatorCache allocator_cache;
|
||||
AllocatorCache *GetAllocatorCache() { return &allocator_cache; }
|
||||
|
||||
__attribute__((tls_model("initial-exec")))
|
||||
THREADLOCAL int disable_counter;
|
||||
bool DisabledInThisThread() { return disable_counter > 0; }
|
||||
|
|
|
@ -12,12 +12,14 @@
|
|||
//
|
||||
//===----------------------------------------------------------------------===//
|
||||
|
||||
#include "sanitizer_common/sanitizer_allocator_internal.h"
|
||||
#include "sanitizer_common/sanitizer_platform.h"
|
||||
#include "lsan_common.h"
|
||||
|
||||
#if CAN_SANITIZE_LEAKS && SANITIZER_MAC
|
||||
|
||||
#include "sanitizer_common/sanitizer_allocator_internal.h"
|
||||
#include "lsan_allocator.h"
|
||||
|
||||
#include <pthread.h>
|
||||
|
||||
namespace __lsan {
|
||||
|
@ -25,6 +27,7 @@ namespace __lsan {
|
|||
typedef struct {
|
||||
int disable_counter;
|
||||
u32 current_thread_id;
|
||||
AllocatorCache cache;
|
||||
} thread_local_data_t;
|
||||
|
||||
static pthread_key_t key;
|
||||
|
@ -40,6 +43,7 @@ static thread_local_data_t *get_tls_val() {
|
|||
ptr = (thread_local_data_t *)InternalAlloc(sizeof(*ptr));
|
||||
ptr->disable_counter = 0;
|
||||
ptr->current_thread_id = kInvalidTid;
|
||||
ptr->cache = AllocatorCache();
|
||||
pthread_setspecific(key, ptr);
|
||||
}
|
||||
|
||||
|
@ -62,6 +66,8 @@ u32 GetCurrentThread() { return get_tls_val()->current_thread_id; }
|
|||
|
||||
void SetCurrentThread(u32 tid) { get_tls_val()->current_thread_id = tid; }
|
||||
|
||||
AllocatorCache *GetAllocatorCache() { return &get_tls_val()->cache; }
|
||||
|
||||
void InitializePlatformSpecificModules() {
|
||||
CHECK(0 && "unimplemented");
|
||||
}
|
||||
|
|
Loading…
Reference in New Issue