[Sanitizers] TSan allocator set errno on failure.

Summary:
Set the proper errno code on allocation failures and change the realloc,
pvalloc, aligned_alloc, memalign, and posix_memalign implementations to
satisfy their man-page-specified requirements.
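
For illustration, a minimal hypothetical demo (not part of this commit) of the
man-page semantics these changes implement. It assumes a 64-bit Linux host, a
build with -fsanitize=thread, and TSAN_OPTIONS=allocator_may_return_null=1 so
that bad requests return null instead of terminating the process:

// Hypothetical demo, not part of this commit: exercises the errno and
// return-value behavior the new user_* implementations provide.
// Build: clang -fsanitize=thread demo.c   (demo.c is a made-up name)
// Run:   TSAN_OPTIONS=allocator_may_return_null=1 ./a.out
#include <assert.h>
#include <errno.h>
#include <malloc.h>   // memalign, pvalloc, malloc_usable_size (glibc)
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

int main(void) {
  // Allocation failure: null is returned and errno is set to ENOMEM
  // (the request exceeds the 1ULL << 40 limit in user_alloc_internal).
  errno = 0;
  void *p = malloc((1ULL << 40) + 1);
  assert(p == NULL && errno == ENOMEM);

  // memalign with a non-power-of-two alignment: null and errno == EINVAL.
  errno = 0;
  p = memalign(7, 100);
  assert(p == NULL && errno == EINVAL);

  // posix_memalign reports failure through its return value, not errno:
  // EINVAL here because 3 * sizeof(void *) is not a power of two.
  void *q = NULL;
  int rc = posix_memalign(&q, 3 * sizeof(void *), 100);
  assert(rc == EINVAL);

  // pvalloc rounds the size up to a whole page; pvalloc(0) yields one page.
  p = pvalloc(0);
  assert(p != NULL && malloc_usable_size(p) >= (size_t)sysconf(_SC_PAGESIZE));
  free(p);

  // realloc(p, 0) acts like free(p) and returns null (glibc-style semantics).
  p = malloc(64);
  void *r = realloc(p, 0);
  assert(r == NULL);

  fprintf(stderr, "allocation failures reported via errno as expected\n");
  return 0;
}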

Modify the allocator API implementation to bring it closer to the other
sanitizers' allocators.

Reviewers: dvyukov

Subscribers: llvm-commits, kubamracek

Differential Revision: https://reviews.llvm.org/D35690

llvm-svn: 308929
Alex Shlyapnikov 2017-07-24 21:22:59 +00:00
parent e0ba415740
commit 132689243e
8 changed files with 181 additions and 44 deletions


@@ -48,8 +48,8 @@ static bool bogusfd(int fd) {
}
static FdSync *allocsync(ThreadState *thr, uptr pc) {
FdSync *s = (FdSync*)user_alloc(thr, pc, sizeof(FdSync), kDefaultAlignment,
false);
FdSync *s = (FdSync*)user_alloc_internal(thr, pc, sizeof(FdSync),
kDefaultAlignment, false);
atomic_store(&s->rc, 1, memory_order_relaxed);
return s;
}
@@ -79,7 +79,7 @@ static FdDesc *fddesc(ThreadState *thr, uptr pc, int fd) {
if (l1 == 0) {
uptr size = kTableSizeL2 * sizeof(FdDesc);
// We need this to reside in user memory to properly catch races on it.
void *p = user_alloc(thr, pc, size, kDefaultAlignment, false);
void *p = user_alloc_internal(thr, pc, size, kDefaultAlignment, false);
internal_memset(p, 0, size);
MemoryResetRange(thr, (uptr)&fddesc, (uptr)p, size);
if (atomic_compare_exchange_strong(pl1, &l1, (uptr)p, memory_order_acq_rel))


@@ -584,7 +584,7 @@ TSAN_INTERCEPTOR(void*, malloc, uptr size) {
TSAN_INTERCEPTOR(void*, __libc_memalign, uptr align, uptr sz) {
SCOPED_TSAN_INTERCEPTOR(__libc_memalign, align, sz);
return user_alloc(thr, pc, sz, align);
return user_memalign(thr, pc, align, sz);
}
TSAN_INTERCEPTOR(void*, calloc, uptr size, uptr n) {
@@ -730,7 +730,7 @@ TSAN_INTERCEPTOR(int, munmap, void *addr, long_t sz) {
#if SANITIZER_LINUX
TSAN_INTERCEPTOR(void*, memalign, uptr align, uptr sz) {
SCOPED_INTERCEPTOR_RAW(memalign, align, sz);
return user_alloc(thr, pc, sz, align);
return user_memalign(thr, pc, align, sz);
}
#define TSAN_MAYBE_INTERCEPT_MEMALIGN TSAN_INTERCEPT(memalign)
#else
@@ -739,21 +739,20 @@ TSAN_INTERCEPTOR(void*, memalign, uptr align, uptr sz) {
#if !SANITIZER_MAC
TSAN_INTERCEPTOR(void*, aligned_alloc, uptr align, uptr sz) {
SCOPED_INTERCEPTOR_RAW(memalign, align, sz);
return user_alloc(thr, pc, sz, align);
SCOPED_INTERCEPTOR_RAW(aligned_alloc, align, sz);
return user_aligned_alloc(thr, pc, align, sz);
}
TSAN_INTERCEPTOR(void*, valloc, uptr sz) {
SCOPED_INTERCEPTOR_RAW(valloc, sz);
return user_alloc(thr, pc, sz, GetPageSizeCached());
return user_valloc(thr, pc, sz);
}
#endif
#if SANITIZER_LINUX
TSAN_INTERCEPTOR(void*, pvalloc, uptr sz) {
SCOPED_INTERCEPTOR_RAW(pvalloc, sz);
sz = RoundUp(sz, GetPageSizeCached());
return user_alloc(thr, pc, sz, GetPageSizeCached());
return user_pvalloc(thr, pc, sz);
}
#define TSAN_MAYBE_INTERCEPT_PVALLOC TSAN_INTERCEPT(pvalloc)
#else
@@ -763,8 +762,7 @@ TSAN_INTERCEPTOR(void*, pvalloc, uptr sz) {
#if !SANITIZER_MAC
TSAN_INTERCEPTOR(int, posix_memalign, void **memptr, uptr align, uptr sz) {
SCOPED_INTERCEPTOR_RAW(posix_memalign, memptr, align, sz);
*memptr = user_alloc(thr, pc, sz, align);
return 0;
return user_posix_memalign(thr, pc, memptr, align, sz);
}
#endif


@@ -86,7 +86,8 @@ static tsan_block_context_t *AllocContext(ThreadState *thr, uptr pc,
void *orig_context,
dispatch_function_t orig_work) {
tsan_block_context_t *new_context =
(tsan_block_context_t *)user_alloc(thr, pc, sizeof(tsan_block_context_t));
(tsan_block_context_t *)user_alloc_internal(thr, pc,
sizeof(tsan_block_context_t));
new_context->queue = queue;
new_context->orig_context = orig_context;
new_context->orig_work = orig_work;


@@ -26,7 +26,7 @@ using namespace __tsan;
#define COMMON_MALLOC_FORCE_UNLOCK()
#define COMMON_MALLOC_MEMALIGN(alignment, size) \
void *p = \
user_alloc(cur_thread(), StackTrace::GetCurrentPc(), size, alignment)
user_memalign(cur_thread(), StackTrace::GetCurrentPc(), alignment, size)
#define COMMON_MALLOC_MALLOC(size) \
if (cur_thread()->in_symbolizer) return InternalAlloc(size); \
SCOPED_INTERCEPTOR_RAW(malloc, size); \
@@ -43,7 +43,7 @@ using namespace __tsan;
if (cur_thread()->in_symbolizer) \
return InternalAlloc(size, nullptr, GetPageSizeCached()); \
SCOPED_INTERCEPTOR_RAW(valloc, size); \
void *p = user_alloc(thr, pc, size, GetPageSizeCached())
void *p = user_valloc(thr, pc, size)
#define COMMON_MALLOC_FREE(ptr) \
if (cur_thread()->in_symbolizer) return InternalFree(ptr); \
SCOPED_INTERCEPTOR_RAW(free, ptr); \


@@ -149,11 +149,12 @@ static void SignalUnsafeCall(ThreadState *thr, uptr pc) {
OutputReport(thr, rep);
}
void *user_alloc(ThreadState *thr, uptr pc, uptr sz, uptr align, bool signal) {
void *user_alloc_internal(ThreadState *thr, uptr pc, uptr sz, uptr align,
bool signal) {
if ((sz >= (1ull << 40)) || (align >= (1ull << 40)))
return Allocator::FailureHandler::OnBadRequest();
void *p = allocator()->Allocate(&thr->proc()->alloc_cache, sz, align);
if (p == 0)
if (UNLIKELY(p == 0))
return 0;
if (ctx && ctx->initialized)
OnUserAlloc(thr, pc, (uptr)p, sz, true);
@@ -162,15 +163,6 @@ void *user_alloc(ThreadState *thr, uptr pc, uptr sz, uptr align, bool signal) {
return p;
}
void *user_calloc(ThreadState *thr, uptr pc, uptr size, uptr n) {
if (CheckForCallocOverflow(size, n))
return Allocator::FailureHandler::OnBadRequest();
void *p = user_alloc(thr, pc, n * size);
if (p)
internal_memset(p, 0, n * size);
return p;
}
void user_free(ThreadState *thr, uptr pc, void *p, bool signal) {
ScopedGlobalProcessor sgp;
if (ctx && ctx->initialized)
@@ -180,6 +172,19 @@ void user_free(ThreadState *thr, uptr pc, void *p, bool signal) {
SignalUnsafeCall(thr, pc);
}
void *user_alloc(ThreadState *thr, uptr pc, uptr sz) {
return SetErrnoOnNull(user_alloc_internal(thr, pc, sz, kDefaultAlignment));
}
void *user_calloc(ThreadState *thr, uptr pc, uptr size, uptr n) {
if (UNLIKELY(CheckForCallocOverflow(size, n)))
return SetErrnoOnNull(Allocator::FailureHandler::OnBadRequest());
void *p = user_alloc_internal(thr, pc, n * size);
if (p)
internal_memset(p, 0, n * size);
return SetErrnoOnNull(p);
}
void OnUserAlloc(ThreadState *thr, uptr pc, uptr p, uptr sz, bool write) {
DPrintf("#%d: alloc(%zu) = %p\n", thr->tid, sz, p);
ctx->metamap.AllocBlock(thr, pc, p, sz);
@@ -200,15 +205,60 @@ void OnUserFree(ThreadState *thr, uptr pc, uptr p, bool write) {
void *user_realloc(ThreadState *thr, uptr pc, void *p, uptr sz) {
// FIXME: Handle "shrinking" more efficiently,
// it seems that some software actually does this.
void *p2 = user_alloc(thr, pc, sz);
if (p2 == 0)
return 0;
if (p) {
uptr oldsz = user_alloc_usable_size(p);
internal_memcpy(p2, p, min(oldsz, sz));
if (!p)
return SetErrnoOnNull(user_alloc_internal(thr, pc, sz));
if (!sz) {
user_free(thr, pc, p);
return nullptr;
}
void *new_p = user_alloc_internal(thr, pc, sz);
if (new_p) {
uptr old_sz = user_alloc_usable_size(p);
internal_memcpy(new_p, p, min(old_sz, sz));
user_free(thr, pc, p);
}
return p2;
return SetErrnoOnNull(new_p);
}
void *user_memalign(ThreadState *thr, uptr pc, uptr align, uptr sz) {
if (UNLIKELY(!IsPowerOfTwo(align))) {
errno = errno_EINVAL;
return Allocator::FailureHandler::OnBadRequest();
}
return SetErrnoOnNull(user_alloc_internal(thr, pc, sz, align));
}
int user_posix_memalign(ThreadState *thr, uptr pc, void **memptr, uptr align,
uptr sz) {
if (UNLIKELY(!CheckPosixMemalignAlignment(align))) {
Allocator::FailureHandler::OnBadRequest();
return errno_EINVAL;
}
void *ptr = user_alloc_internal(thr, pc, sz, align);
if (UNLIKELY(!ptr))
return errno_ENOMEM;
CHECK(IsAligned((uptr)ptr, align));
*memptr = ptr;
return 0;
}
void *user_aligned_alloc(ThreadState *thr, uptr pc, uptr align, uptr sz) {
if (UNLIKELY(!CheckAlignedAllocAlignmentAndSize(align, sz))) {
errno = errno_EINVAL;
return Allocator::FailureHandler::OnBadRequest();
}
return SetErrnoOnNull(user_alloc_internal(thr, pc, sz, align));
}
void *user_valloc(ThreadState *thr, uptr pc, uptr sz) {
return SetErrnoOnNull(user_alloc_internal(thr, pc, sz, GetPageSizeCached()));
}
void *user_pvalloc(ThreadState *thr, uptr pc, uptr sz) {
uptr PageSize = GetPageSizeCached();
// pvalloc(0) should allocate one page.
sz = sz ? RoundUpTo(sz, PageSize) : PageSize;
return SetErrnoOnNull(user_alloc_internal(thr, pc, sz, PageSize));
}
uptr user_alloc_usable_size(const void *p) {


@@ -27,13 +27,20 @@ void AllocatorProcFinish(Processor *proc);
void AllocatorPrintStats();
// For user allocations.
void *user_alloc(ThreadState *thr, uptr pc, uptr sz,
void *user_alloc_internal(ThreadState *thr, uptr pc, uptr sz,
uptr align = kDefaultAlignment, bool signal = true);
void *user_calloc(ThreadState *thr, uptr pc, uptr sz, uptr n);
// Does not accept NULL.
void user_free(ThreadState *thr, uptr pc, void *p, bool signal = true);
// Interceptor implementations.
void *user_alloc(ThreadState *thr, uptr pc, uptr sz);
void *user_calloc(ThreadState *thr, uptr pc, uptr sz, uptr n);
void *user_realloc(ThreadState *thr, uptr pc, void *p, uptr sz);
void *user_alloc_aligned(ThreadState *thr, uptr pc, uptr sz, uptr align);
void *user_memalign(ThreadState *thr, uptr pc, uptr align, uptr sz);
int user_posix_memalign(ThreadState *thr, uptr pc, void **memptr, uptr align,
uptr sz);
void *user_aligned_alloc(ThreadState *thr, uptr pc, uptr align, uptr sz);
void *user_valloc(ThreadState *thr, uptr pc, uptr sz);
void *user_pvalloc(ThreadState *thr, uptr pc, uptr sz);
uptr user_alloc_usable_size(const void *p);
// Invoking malloc/free hooks that may be installed by the user.


@@ -56,6 +56,7 @@ TEST(Mman, UserRealloc) {
// Realloc(NULL, N) is equivalent to malloc(N), thus must return
// non-NULL pointer.
EXPECT_NE(p, (void*)0);
user_free(thr, pc, p);
}
{
void *p = user_realloc(thr, pc, 0, 100);
@@ -67,8 +68,9 @@
void *p = user_alloc(thr, pc, 100);
EXPECT_NE(p, (void*)0);
memset(p, 0xde, 100);
// Realloc(P, 0) is equivalent to free(P) and returns NULL.
void *p2 = user_realloc(thr, pc, p, 0);
EXPECT_NE(p2, (void*)0);
EXPECT_EQ(p2, (void*)0);
}
{
void *p = user_realloc(thr, pc, 0, 100);
@@ -135,12 +137,28 @@ TEST(Mman, Stats) {
EXPECT_EQ(unmapped0, __sanitizer_get_unmapped_bytes());
}
TEST(Mman, Valloc) {
ThreadState *thr = cur_thread();
void *p = user_valloc(thr, 0, 100);
EXPECT_NE(p, (void*)0);
user_free(thr, 0, p);
p = user_pvalloc(thr, 0, 100);
EXPECT_NE(p, (void*)0);
user_free(thr, 0, p);
p = user_pvalloc(thr, 0, 0);
EXPECT_NE(p, (void*)0);
EXPECT_EQ(GetPageSizeCached(), __sanitizer_get_allocated_size(p));
user_free(thr, 0, p);
}
#if !SANITIZER_DEBUG
// EXPECT_DEATH clones a thread with 4K stack,
// which is overflown by tsan memory accesses functions in debug mode.
TEST(Mman, CallocOverflow) {
#if SANITIZER_DEBUG
// EXPECT_DEATH clones a thread with 4K stack,
// which is overflown by tsan memory accesses functions in debug mode.
return;
#endif
ThreadState *thr = cur_thread();
uptr pc = 0;
size_t kArraySize = 4096;
@@ -152,4 +170,57 @@ TEST(Mman, CallocOverflow) {
EXPECT_EQ(0L, p);
}
TEST(Mman, Memalign) {
ThreadState *thr = cur_thread();
void *p = user_memalign(thr, 0, 8, 100);
EXPECT_NE(p, (void*)0);
user_free(thr, 0, p);
p = NULL;
EXPECT_DEATH(p = user_memalign(thr, 0, 7, 100),
"allocator is terminating the process instead of returning 0");
EXPECT_EQ(0L, p);
}
TEST(Mman, PosixMemalign) {
ThreadState *thr = cur_thread();
void *p = NULL;
int res = user_posix_memalign(thr, 0, &p, 8, 100);
EXPECT_NE(p, (void*)0);
EXPECT_EQ(res, 0);
user_free(thr, 0, p);
p = NULL;
// Alignment is not a power of two, although is a multiple of sizeof(void*).
EXPECT_DEATH(res = user_posix_memalign(thr, 0, &p, 3 * sizeof(p), 100),
"allocator is terminating the process instead of returning 0");
EXPECT_EQ(0L, p);
// Alignment is not a multiple of sizeof(void*), although is a power of 2.
EXPECT_DEATH(res = user_posix_memalign(thr, 0, &p, 2, 100),
"allocator is terminating the process instead of returning 0");
EXPECT_EQ(0L, p);
}
TEST(Mman, AlignedAlloc) {
ThreadState *thr = cur_thread();
void *p = user_aligned_alloc(thr, 0, 8, 64);
EXPECT_NE(p, (void*)0);
user_free(thr, 0, p);
p = NULL;
// Alignment is not a power of 2.
EXPECT_DEATH(p = user_aligned_alloc(thr, 0, 7, 100),
"allocator is terminating the process instead of returning 0");
EXPECT_EQ(0L, p);
// Size is not a multiple of alignment.
EXPECT_DEATH(p = user_aligned_alloc(thr, 0, 8, 100),
"allocator is terminating the process instead of returning 0");
EXPECT_EQ(0L, p);
}
#endif
} // namespace __tsan


@@ -37,9 +37,10 @@
// RUN: | FileCheck %s --check-prefix=CHECK-nnNULL
#include <assert.h>
#include <string.h>
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <limits>
#include <new>
@@ -51,6 +52,7 @@ int main(int argc, char **argv) {
const char *action = argv[1];
fprintf(stderr, "%s:\n", action);
// The limit enforced in tsan_mman.cc, user_alloc_internal function.
static const size_t kMaxAllowedMallocSizePlusOne = (1ULL << 40) + 1;
void *x = 0;
@@ -78,10 +80,13 @@ int main(int argc, char **argv) {
assert(0);
}
fprintf(stderr, "errno: %d\n", errno);
// The NULL pointer is printed differently on different systems, while (long)0
// is always the same.
fprintf(stderr, "x: %lx\n", (long)x);
free(x);
return x != 0;
}
@@ -101,14 +106,19 @@ int main(int argc, char **argv) {
// CHECK-nnCRASH: ThreadSanitizer's allocator is terminating the process
// CHECK-mNULL: malloc:
// CHECK-mNULL: errno: 12
// CHECK-mNULL: x: 0
// CHECK-cNULL: calloc:
// CHECK-cNULL: errno: 12
// CHECK-cNULL: x: 0
// CHECK-coNULL: calloc-overflow:
// CHECK-coNULL: errno: 12
// CHECK-coNULL: x: 0
// CHECK-rNULL: realloc:
// CHECK-rNULL: errno: 12
// CHECK-rNULL: x: 0
// CHECK-mrNULL: realloc-after-malloc:
// CHECK-mrNULL: errno: 12
// CHECK-mrNULL: x: 0
// CHECK-nnNULL: new-nothrow:
// CHECK-nnNULL: x: 0