2019-02-27 23:44:03 +08:00
|
|
|
//===-- hwasan_allocator.cpp ------------------------ ---------------------===//
|
2017-12-09 09:31:51 +08:00
|
|
|
//
|
2019-01-19 16:50:56 +08:00
|
|
|
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
|
|
|
|
// See https://llvm.org/LICENSE.txt for license information.
|
|
|
|
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
|
2017-12-09 09:31:51 +08:00
|
|
|
//
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
//
|
|
|
|
// This file is a part of HWAddressSanitizer.
|
|
|
|
//
|
|
|
|
// HWAddressSanitizer allocator.
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
|
|
|
|
#include "sanitizer_common/sanitizer_atomic.h"
|
|
|
|
#include "sanitizer_common/sanitizer_errno.h"
|
|
|
|
#include "sanitizer_common/sanitizer_stackdepot.h"
|
|
|
|
#include "hwasan.h"
|
|
|
|
#include "hwasan_allocator.h"
|
2019-07-10 04:22:36 +08:00
|
|
|
#include "hwasan_checks.h"
|
2018-04-24 02:19:23 +08:00
|
|
|
#include "hwasan_mapping.h"
|
2019-02-16 02:38:14 +08:00
|
|
|
#include "hwasan_malloc_bisect.h"
|
2017-12-09 09:31:51 +08:00
|
|
|
#include "hwasan_thread.h"
|
2018-08-24 09:12:26 +08:00
|
|
|
#include "hwasan_report.h"
|
2017-12-09 09:31:51 +08:00
|
|
|
|
|
|
|
namespace __hwasan {
|
|
|
|
|
2018-10-11 06:24:44 +08:00
|
|
|
// Global allocator instance shared by all threads.
static Allocator allocator;
// Cache used when the calling thread has no Thread object yet (early startup
// or teardown); guarded by fallback_mutex.
static AllocatorCache fallback_allocator_cache;
// Protects fallback_allocator_cache.
static SpinMutex fallback_mutex;
// Non-zero when allocation/deallocation tagging is active; toggled via
// __hwasan_enable_allocator_tagging / __hwasan_disable_allocator_tagging.
static atomic_uint8_t hwasan_allocator_tagging_enabled;

// Fixed tags used when no Thread object is available to generate a random
// tag (the fallback allocator path).
static constexpr tag_t kFallbackAllocTag = 0xBB & kTagMask;
static constexpr tag_t kFallbackFreeTag = 0xBC;
|
2018-10-11 06:24:44 +08:00
|
|
|
|
2018-11-17 03:38:48 +08:00
|
|
|
// Policy for right-aligning allocations inside their granule.
enum RightAlignMode {
  kRightAlignNever,
  kRightAlignSometimes,
  kRightAlignAlways
};
|
|
|
|
|
2018-11-17 08:25:17 +08:00
|
|
|
// Initialized in HwasanAllocatorInit, and never changed afterwards. Magic
// bytes copied after the user data of allocations whose requested size is not
// a granule multiple; checked on free to detect tail overwrites.
static ALIGNED(16) u8 tail_magic[kShadowAlignment - 1];
|
2018-11-17 03:38:48 +08:00
|
|
|
|
2017-12-09 09:31:51 +08:00
|
|
|
bool HwasanChunkView::IsAllocated() const {
|
2020-10-20 07:38:03 +08:00
|
|
|
return metadata_ && metadata_->alloc_context_id &&
|
|
|
|
metadata_->get_requested_size();
|
2017-12-09 09:31:51 +08:00
|
|
|
}
|
2018-08-31 13:55:18 +08:00
|
|
|
|
2018-11-17 03:38:48 +08:00
|
|
|
// Aligns the 'addr' right to the granule boundary.
|
|
|
|
static uptr AlignRight(uptr addr, uptr requested_size) {
|
|
|
|
uptr tail_size = requested_size % kShadowAlignment;
|
|
|
|
if (!tail_size) return addr;
|
|
|
|
return addr + kShadowAlignment - tail_size;
|
|
|
|
}
|
|
|
|
|
2017-12-09 09:31:51 +08:00
|
|
|
// First byte of user memory. Right-aligned chunks start past the block
// beginning so that the end of the user data is granule-aligned.
uptr HwasanChunkView::Beg() const {
  const bool right = metadata_ && metadata_->right_aligned;
  return right ? AlignRight(block_, metadata_->get_requested_size()) : block_;
}
|
|
|
|
// One past the last byte of user memory.
uptr HwasanChunkView::End() const {
  return Beg() + UsedSize();
}
|
|
|
|
// Size originally requested by the user (not rounded to a granule).
uptr HwasanChunkView::UsedSize() const {
  return metadata_->get_requested_size();
}
|
|
|
|
// StackDepot id of the allocation stack trace recorded at malloc time.
u32 HwasanChunkView::GetAllocStackId() const {
  return metadata_->alloc_context_id;
}
|
|
|
|
|
2018-10-11 06:24:44 +08:00
|
|
|
// Size the underlying allocator actually reserved for this block (may exceed
// the requested size due to size-class rounding).
uptr HwasanChunkView::ActualSize() const {
  return allocator.GetActuallyAllocatedSize(reinterpret_cast<void *>(block_));
}
|
2017-12-09 09:31:51 +08:00
|
|
|
|
2018-10-11 06:24:44 +08:00
|
|
|
// True when the block was served by the primary (small-size) allocator.
bool HwasanChunkView::FromSmallHeap() const {
  return allocator.FromPrimary(reinterpret_cast<void *>(block_));
}
|
2018-01-04 05:42:28 +08:00
|
|
|
|
2018-09-07 06:08:41 +08:00
|
|
|
// Copies the global allocator's statistic counters into 's'.
void GetAllocatorStats(AllocatorStatCounters s) {
  allocator.GetStats(s);
}
|
|
|
|
|
2021-07-10 04:41:48 +08:00
|
|
|
// Returns the base address of the alias region used in aliasing mode, or 0
// when aliasing mode is compiled out.
uptr GetAliasRegionStart() {
#if defined(HWASAN_ALIASING_MODE)
  constexpr uptr kAliasRegionOffset = 1ULL << (kTaggableRegionCheckShift - 1);
  uptr AliasRegionStart =
      __hwasan_shadow_memory_dynamic_address + kAliasRegionOffset;

  // Both ends of the alias region must fall in the same taggable-region
  // bucket as the shadow base, otherwise tag checks would misclassify
  // aliased pointers.
  CHECK_EQ(AliasRegionStart >> kTaggableRegionCheckShift,
           __hwasan_shadow_memory_dynamic_address >> kTaggableRegionCheckShift);
  CHECK_EQ(
      (AliasRegionStart + kAliasRegionOffset - 1) >> kTaggableRegionCheckShift,
      __hwasan_shadow_memory_dynamic_address >> kTaggableRegionCheckShift);
  return AliasRegionStart;
#else
  return 0;
#endif
}
|
|
|
|
|
2017-12-09 09:31:51 +08:00
|
|
|
// One-time allocator setup: applies the relevant flags, initializes the
// underlying combined allocator, and fills tail_magic with random bytes.
void HwasanAllocatorInit() {
  atomic_store_relaxed(&hwasan_allocator_tagging_enabled,
                       !flags()->disable_allocator_tagging);
  SetAllocatorMayReturnNull(common_flags()->allocator_may_return_null);
  allocator.Init(common_flags()->allocator_release_to_os_interval_ms,
                 GetAliasRegionStart());
  // Randomize the tail magic once per process so overwrite detection cannot
  // be defeated by a predictable pattern.
  for (uptr i = 0; i < sizeof(tail_magic); i++)
    tail_magic[i] = GetCurrentThread()->GenerateRandomTag();
}
|
|
|
|
|
2021-08-14 08:09:10 +08:00
|
|
|
// Acquires the allocator's internal locks (e.g. around fork()).
void HwasanAllocatorLock() { allocator.ForceLock(); }
|
|
|
|
|
|
|
|
// Releases the locks taken by HwasanAllocatorLock.
void HwasanAllocatorUnlock() { allocator.ForceUnlock(); }
|
|
|
|
|
2018-09-06 07:22:38 +08:00
|
|
|
// Returns a dying thread's cached chunks to the global allocator.
void AllocatorSwallowThreadLocalCache(AllocatorCache *cache) {
  allocator.SwallowCache(cache);
}
|
|
|
|
|
2018-09-01 01:49:49 +08:00
|
|
|
// Rounds a requested size up to a whole number of shadow granules; a
// zero-byte request still occupies one granule.
static uptr TaggedSize(uptr size) {
  const uptr effective = size ? size : 1;
  const uptr rounded = RoundUpTo(effective, kShadowAlignment);
  CHECK_GE(rounded, effective);  // guard against overflow in RoundUpTo
  return rounded;
}
|
|
|
|
|
2018-08-31 11:18:31 +08:00
|
|
|
// Core allocation path shared by all malloc-family entry points.
// Allocates 'orig_size' bytes (rounded to a granule), records metadata,
// optionally zeroes/fills the memory, writes the tail magic for short
// allocations, tags the memory, and returns the tagged user pointer.
// Returns nullptr only when allocator_may_return_null is set.
static void *HwasanAllocate(StackTrace *stack, uptr orig_size, uptr alignment,
                            bool zeroise) {
  if (orig_size > kMaxAllowedMallocSize) {
    if (AllocatorMayReturnNull()) {
      Report("WARNING: HWAddressSanitizer failed to allocate 0x%zx bytes\n",
             orig_size);
      return nullptr;
    }
    // Does not return.
    ReportAllocationSizeTooBig(orig_size, kMaxAllowedMallocSize, stack);
  }

  alignment = Max(alignment, kShadowAlignment);
  uptr size = TaggedSize(orig_size);
  Thread *t = GetCurrentThread();
  void *allocated;
  if (t) {
    allocated = allocator.Allocate(t->allocator_cache(), size, alignment);
  } else {
    // No Thread object yet: use the shared fallback cache under its mutex.
    SpinMutexLock l(&fallback_mutex);
    AllocatorCache *cache = &fallback_allocator_cache;
    allocated = allocator.Allocate(cache, size, alignment);
  }
  if (UNLIKELY(!allocated)) {
    SetAllocatorOutOfMemory();
    if (AllocatorMayReturnNull())
      return nullptr;
    // Does not return.
    ReportOutOfMemory(size, stack);
  }
  // Record the requested size and allocation stack in the chunk metadata.
  Metadata *meta =
      reinterpret_cast<Metadata *>(allocator.GetMetaData(allocated));
  meta->set_requested_size(orig_size);
  meta->alloc_context_id = StackDepotPut(*stack);
  meta->right_aligned = false;
  if (zeroise) {
    internal_memset(allocated, 0, size);
  } else if (flags()->max_malloc_fill_size > 0) {
    uptr fill_size = Min(size, (uptr)flags()->max_malloc_fill_size);
    internal_memset(allocated, flags()->malloc_fill_byte, fill_size);
  }
  if (size != orig_size) {
    // Short allocation: copy the magic bytes after the user data. The last
    // byte of the granule is reserved for the short granule tag, hence -1.
    internal_memcpy(reinterpret_cast<u8 *>(allocated) + orig_size, tail_magic,
                    size - orig_size - 1);
  }

  void *user_ptr = allocated;
  // Tagging can only be skipped when both tag_in_malloc and tag_in_free are
  // false. When tag_in_malloc = false and tag_in_free = true malloc needs to
  // retag to 0.
  if (InTaggableRegion(reinterpret_cast<uptr>(user_ptr)) &&
      (flags()->tag_in_malloc || flags()->tag_in_free) &&
      atomic_load_relaxed(&hwasan_allocator_tagging_enabled)) {
    if (flags()->tag_in_malloc && malloc_bisect(stack, orig_size)) {
      tag_t tag = t ? t->GenerateRandomTag() : kFallbackAllocTag;
      uptr tag_size = orig_size ? orig_size : 1;
      uptr full_granule_size = RoundDownTo(tag_size, kShadowAlignment);
      // Tag all fully used granules with the pointer tag.
      user_ptr =
          (void *)TagMemoryAligned((uptr)user_ptr, full_granule_size, tag);
      if (full_granule_size != tag_size) {
        // Short granule: shadow holds the used-byte count, and the real tag
        // is stashed in the granule's last byte.
        u8 *short_granule =
            reinterpret_cast<u8 *>(allocated) + full_granule_size;
        TagMemoryAligned((uptr)short_granule, kShadowAlignment,
                         tag_size % kShadowAlignment);
        short_granule[kShadowAlignment - 1] = tag;
      }
    } else {
      // Retag to 0 so stale tags from a previous use don't cause false
      // positives when only tag_in_free is active.
      user_ptr = (void *)TagMemoryAligned((uptr)user_ptr, size, 0);
    }
  }

  HWASAN_MALLOC_HOOK(user_ptr, size);
  return user_ptr;
}
|
|
|
|
|
2018-08-30 06:21:22 +08:00
|
|
|
// Returns true when the pointer's tag matches the shadow tag of the granule
// it points into (treating short granules as a possible match). Pointers
// outside the taggable region are considered to always match.
static bool PointerAndMemoryTagsMatch(void *tagged_ptr) {
  CHECK(tagged_ptr);
  uptr tagged_uptr = reinterpret_cast<uptr>(tagged_ptr);
  if (!InTaggableRegion(tagged_uptr))
    return true;
  tag_t mem_tag = *reinterpret_cast<tag_t *>(
      MemToShadow(reinterpret_cast<uptr>(UntagPtr(tagged_ptr))));
  return PossiblyShortTagMatches(mem_tag, tagged_uptr, 1);
}
|
|
|
|
|
2021-08-06 00:03:09 +08:00
|
|
|
// Reports an invalid free when the pointer and memory tags disagree.
// Returns true when the free is invalid (caller should bail out); this can
// happen only when halt_on_error is false, since otherwise the report aborts.
static bool CheckInvalidFree(StackTrace *stack, void *untagged_ptr,
                             void *tagged_ptr) {
  // This function can return true if halt_on_error is false.
  if (!PointerAndMemoryTagsMatch(tagged_ptr)) {
    ReportInvalidFree(stack, reinterpret_cast<uptr>(tagged_ptr));
    return true;
  }
  return false;
}
|
|
|
|
|
hwasan: Use system allocator to realloc and free untagged pointers in interceptor mode.
The Android dynamic loader has a non-standard feature that allows
libraries such as the hwasan runtime to interpose symbols even after
the symbol already has a value. The new value of the symbol is used to
relocate libraries loaded after the interposing library, but existing
libraries keep the old value. This behaviour is activated by the
DF_1_GLOBAL flag in DT_FLAGS_1, which is set by passing -z global to
the linker, which is what we already do to link the hwasan runtime.
What this means in practice is that if we have .so files that depend
on interceptor-mode hwasan without the main executable depending on
it, some of the libraries in the process will be using the hwasan
allocator and some will be using the system allocator, and these
allocators need to interact somehow. For example, if an instrumented
library calls a function such as strdup that allocates memory on
behalf of the caller, the instrumented library can reasonably expect
to be able to call free to deallocate the memory.
We can handle that relatively easily with hwasan by using tag 0 to
represent allocations from the system allocator. If hwasan's realloc
or free functions are passed a pointer with tag 0, the system allocator
is called.
One limitation is that this scheme doesn't work in reverse: if an
instrumented library allocates memory, it must free the memory itself
and cannot pass ownership to a system library. In a future change,
we may want to expose an API for calling the system allocator so
that instrumented libraries can safely transfer ownership of memory
to system libraries.
Differential Revision: https://reviews.llvm.org/D55986
llvm-svn: 350427
2019-01-05 03:21:51 +08:00
|
|
|
// Core deallocation path shared by free/realloc: validates the pointer tag,
// verifies the tail magic of short allocations, clears metadata, optionally
// fills and retags the memory, records the free in the thread's ring buffer,
// and returns the block to the underlying allocator.
static void HwasanDeallocate(StackTrace *stack, void *tagged_ptr) {
  CHECK(tagged_ptr);
  HWASAN_FREE_HOOK(tagged_ptr);

  bool in_taggable_region =
      InTaggableRegion(reinterpret_cast<uptr>(tagged_ptr));
  void *untagged_ptr = in_taggable_region ? UntagPtr(tagged_ptr) : tagged_ptr;

  if (CheckInvalidFree(stack, untagged_ptr, tagged_ptr))
    return;

  // The user pointer of a right-aligned chunk may sit inside the granule;
  // round down to recover the block the allocator knows about.
  void *aligned_ptr = reinterpret_cast<void *>(
      RoundDownTo(reinterpret_cast<uptr>(untagged_ptr), kShadowAlignment));
  tag_t pointer_tag = GetTagFromPointer(reinterpret_cast<uptr>(tagged_ptr));
  Metadata *meta =
      reinterpret_cast<Metadata *>(allocator.GetMetaData(aligned_ptr));
  if (!meta) {
    // Not a chunk this allocator owns.
    ReportInvalidFree(stack, reinterpret_cast<uptr>(tagged_ptr));
    return;
  }
  uptr orig_size = meta->get_requested_size();
  u32 free_context_id = StackDepotPut(*stack);
  u32 alloc_context_id = meta->alloc_context_id;

  // Check tail magic.
  uptr tagged_size = TaggedSize(orig_size);
  if (flags()->free_checks_tail_magic && orig_size &&
      tagged_size != orig_size) {
    uptr tail_size = tagged_size - orig_size - 1;
    CHECK_LT(tail_size, kShadowAlignment);
    void *tail_beg = reinterpret_cast<void *>(
        reinterpret_cast<uptr>(aligned_ptr) + orig_size);
    // Last byte of the granule holds the short granule tag; it must still
    // equal the pointer tag written at allocation time.
    tag_t short_granule_memtag = *(reinterpret_cast<tag_t *>(
        reinterpret_cast<uptr>(tail_beg) + tail_size));
    if (tail_size &&
        (internal_memcmp(tail_beg, tail_magic, tail_size) ||
         (in_taggable_region && pointer_tag != short_granule_memtag)))
      ReportTailOverwritten(stack, reinterpret_cast<uptr>(tagged_ptr),
                            orig_size, tail_magic);
  }

  // Mark the chunk as dead; IsAllocated() relies on both fields being zero.
  meta->set_requested_size(0);
  meta->alloc_context_id = 0;
  // This memory will not be reused by anyone else, so we are free to keep it
  // poisoned.
  Thread *t = GetCurrentThread();
  if (flags()->max_free_fill_size > 0) {
    uptr fill_size =
        Min(TaggedSize(orig_size), (uptr)flags()->max_free_fill_size);
    internal_memset(aligned_ptr, flags()->free_fill_byte, fill_size);
  }
  if (in_taggable_region && flags()->tag_in_free && malloc_bisect(stack, 0) &&
      atomic_load_relaxed(&hwasan_allocator_tagging_enabled)) {
    // Always store full 8-bit tags on free to maximize UAF detection.
    tag_t tag;
    if (t) {
      // Make sure we are not using a short granule tag as a poison tag. This
      // would make us attempt to read the memory on a UaF.
      // The tag can be zero if tagging is disabled on this thread.
      do {
        tag = t->GenerateRandomTag(/*num_bits=*/8);
      } while (
          UNLIKELY((tag < kShadowAlignment || tag == pointer_tag) && tag != 0));
    } else {
      static_assert(kFallbackFreeTag >= kShadowAlignment,
                    "fallback tag must not be a short granule tag.");
      tag = kFallbackFreeTag;
    }
    TagMemoryAligned(reinterpret_cast<uptr>(aligned_ptr), TaggedSize(orig_size),
                     tag);
  }
  if (t) {
    allocator.Deallocate(t->allocator_cache(), aligned_ptr);
    // Record the free in the ring buffer used for use-after-free reports.
    if (auto *ha = t->heap_allocations())
      ha->push({reinterpret_cast<uptr>(tagged_ptr), alloc_context_id,
                free_context_id, static_cast<u32>(orig_size)});
  } else {
    SpinMutexLock l(&fallback_mutex);
    AllocatorCache *cache = &fallback_allocator_cache;
    allocator.Deallocate(cache, aligned_ptr);
  }
}
|
|
|
|
|
hwasan: Use system allocator to realloc and free untagged pointers in interceptor mode.
The Android dynamic loader has a non-standard feature that allows
libraries such as the hwasan runtime to interpose symbols even after
the symbol already has a value. The new value of the symbol is used to
relocate libraries loaded after the interposing library, but existing
libraries keep the old value. This behaviour is activated by the
DF_1_GLOBAL flag in DT_FLAGS_1, which is set by passing -z global to
the linker, which is what we already do to link the hwasan runtime.
What this means in practice is that if we have .so files that depend
on interceptor-mode hwasan without the main executable depending on
it, some of the libraries in the process will be using the hwasan
allocator and some will be using the system allocator, and these
allocators need to interact somehow. For example, if an instrumented
library calls a function such as strdup that allocates memory on
behalf of the caller, the instrumented library can reasonably expect
to be able to call free to deallocate the memory.
We can handle that relatively easily with hwasan by using tag 0 to
represent allocations from the system allocator. If hwasan's realloc
or free functions are passed a pointer with tag 0, the system allocator
is called.
One limitation is that this scheme doesn't work in reverse: if an
instrumented library allocates memory, it must free the memory itself
and cannot pass ownership to a system library. In a future change,
we may want to expose an API for calling the system allocator so
that instrumented libraries can safely transfer ownership of memory
to system libraries.
Differential Revision: https://reviews.llvm.org/D55986
llvm-svn: 350427
2019-01-05 03:21:51 +08:00
|
|
|
// Reallocation path: validates the old pointer, allocates a fresh tagged
// block, copies min(old requested size, new_size) bytes, then frees the old
// block. Returns nullptr when the new allocation fails (old block is kept).
static void *HwasanReallocate(StackTrace *stack, void *tagged_ptr_old,
                              uptr new_size, uptr alignment) {
  void *untagged_ptr_old =
      InTaggableRegion(reinterpret_cast<uptr>(tagged_ptr_old))
          ? UntagPtr(tagged_ptr_old)
          : tagged_ptr_old;
  if (CheckInvalidFree(stack, untagged_ptr_old, tagged_ptr_old))
    return nullptr;
  void *tagged_ptr_new =
      HwasanAllocate(stack, new_size, alignment, false /*zeroise*/);
  if (tagged_ptr_old && tagged_ptr_new) {
    Metadata *meta =
        reinterpret_cast<Metadata *>(allocator.GetMetaData(untagged_ptr_old));
    internal_memcpy(
        UntagPtr(tagged_ptr_new), untagged_ptr_old,
        Min(new_size, static_cast<uptr>(meta->get_requested_size())));
    HwasanDeallocate(stack, tagged_ptr_old);
  }
  return tagged_ptr_new;
}
|
|
|
|
|
hwasan: Use system allocator to realloc and free untagged pointers in interceptor mode.
The Android dynamic loader has a non-standard feature that allows
libraries such as the hwasan runtime to interpose symbols even after
the symbol already has a value. The new value of the symbol is used to
relocate libraries loaded after the interposing library, but existing
libraries keep the old value. This behaviour is activated by the
DF_1_GLOBAL flag in DT_FLAGS_1, which is set by passing -z global to
the linker, which is what we already do to link the hwasan runtime.
What this means in practice is that if we have .so files that depend
on interceptor-mode hwasan without the main executable depending on
it, some of the libraries in the process will be using the hwasan
allocator and some will be using the system allocator, and these
allocators need to interact somehow. For example, if an instrumented
library calls a function such as strdup that allocates memory on
behalf of the caller, the instrumented library can reasonably expect
to be able to call free to deallocate the memory.
We can handle that relatively easily with hwasan by using tag 0 to
represent allocations from the system allocator. If hwasan's realloc
or free functions are passed a pointer with tag 0, the system allocator
is called.
One limitation is that this scheme doesn't work in reverse: if an
instrumented library allocates memory, it must free the memory itself
and cannot pass ownership to a system library. In a future change,
we may want to expose an API for calling the system allocator so
that instrumented libraries can safely transfer ownership of memory
to system libraries.
Differential Revision: https://reviews.llvm.org/D55986
llvm-svn: 350427
2019-01-05 03:21:51 +08:00
|
|
|
// calloc path: checks nmemb * size for overflow, then allocates zeroed
// memory. The overflow check guarantees the multiplication below is safe.
static void *HwasanCalloc(StackTrace *stack, uptr nmemb, uptr size) {
  if (UNLIKELY(CheckForCallocOverflow(size, nmemb))) {
    if (AllocatorMayReturnNull())
      return nullptr;
    // Does not return.
    ReportCallocOverflow(nmemb, size, stack);
  }
  return HwasanAllocate(stack, nmemb * size, sizeof(u64), true);
}
|
|
|
|
|
2017-12-09 09:31:51 +08:00
|
|
|
// Returns a view of the heap chunk containing 'address', or a null view when
// the address does not belong to this allocator.
HwasanChunkView FindHeapChunkByAddress(uptr address) {
  if (!allocator.PointerIsMine(reinterpret_cast<void *>(address)))
    return HwasanChunkView();
  void *block = allocator.GetBlockBegin(reinterpret_cast<void*>(address));
  if (!block)
    return HwasanChunkView();
  Metadata *metadata =
      reinterpret_cast<Metadata*>(allocator.GetMetaData(block));
  return HwasanChunkView(reinterpret_cast<uptr>(block), metadata);
}
|
|
|
|
|
2018-08-30 06:21:22 +08:00
|
|
|
// Returns the requested size of the live allocation starting exactly at
// 'tagged_ptr', or 0 when the pointer is null or not a chunk start (taking
// right-aligned chunks into account).
static uptr AllocationSize(const void *tagged_ptr) {
  const void *untagged_ptr = UntagPtr(tagged_ptr);
  if (!untagged_ptr) return 0;
  const void *beg = allocator.GetBlockBegin(untagged_ptr);
  Metadata *b = (Metadata *)allocator.GetMetaData(untagged_ptr);
  if (b->right_aligned) {
    // For right-aligned chunks the user pointer sits inside the granule;
    // it is valid only if its granule start is the block beginning.
    if (beg != reinterpret_cast<void *>(RoundDownTo(
                   reinterpret_cast<uptr>(untagged_ptr), kShadowAlignment)))
      return 0;
  } else {
    if (beg != untagged_ptr) return 0;
  }
  return b->get_requested_size();
}
|
|
|
|
|
|
|
|
// malloc() entry point; sets errno on failure.
void *hwasan_malloc(uptr size, StackTrace *stack) {
  return SetErrnoOnNull(HwasanAllocate(stack, size, sizeof(u64), false));
}
|
|
|
|
|
|
|
|
// calloc() entry point; sets errno on failure.
void *hwasan_calloc(uptr nmemb, uptr size, StackTrace *stack) {
  return SetErrnoOnNull(HwasanCalloc(stack, nmemb, size));
}
|
|
|
|
|
|
|
|
// realloc() entry point with standard C semantics:
// realloc(nullptr, n) behaves like malloc(n); realloc(p, 0) frees p and
// returns nullptr. Sets errno on failure.
void *hwasan_realloc(void *ptr, uptr size, StackTrace *stack) {
  if (!ptr)
    return SetErrnoOnNull(HwasanAllocate(stack, size, sizeof(u64), false));
  if (size == 0) {
    HwasanDeallocate(stack, ptr);
    return nullptr;
  }
  return SetErrnoOnNull(HwasanReallocate(stack, ptr, size, sizeof(u64)));
}
|
|
|
|
|
2019-05-02 01:33:01 +08:00
|
|
|
// reallocarray() entry point: like realloc(ptr, nmemb * size) but with an
// explicit overflow check on the multiplication.
void *hwasan_reallocarray(void *ptr, uptr nmemb, uptr size, StackTrace *stack) {
  if (UNLIKELY(CheckForCallocOverflow(size, nmemb))) {
    errno = errno_ENOMEM;
    if (AllocatorMayReturnNull())
      return nullptr;
    // Does not return.
    ReportReallocArrayOverflow(nmemb, size, stack);
  }
  return hwasan_realloc(ptr, nmemb * size, stack);
}
|
|
|
|
|
2017-12-09 09:31:51 +08:00
|
|
|
// valloc() entry point: page-aligned allocation; sets errno on failure.
void *hwasan_valloc(uptr size, StackTrace *stack) {
  return SetErrnoOnNull(
      HwasanAllocate(stack, size, GetPageSizeCached(), false));
}
|
|
|
|
|
|
|
|
// pvalloc() entry point: page-aligned allocation with size rounded up to a
// whole number of pages; checks the rounding for overflow.
void *hwasan_pvalloc(uptr size, StackTrace *stack) {
  uptr PageSize = GetPageSizeCached();
  if (UNLIKELY(CheckForPvallocOverflow(size, PageSize))) {
    errno = errno_ENOMEM;
    if (AllocatorMayReturnNull())
      return nullptr;
    // Does not return.
    ReportPvallocOverflow(size, stack);
  }
  // pvalloc(0) should allocate one page.
  size = size ? RoundUpTo(size, PageSize) : PageSize;
  return SetErrnoOnNull(HwasanAllocate(stack, size, PageSize, false));
}
|
|
|
|
|
|
|
|
// aligned_alloc() entry point: validates that alignment is a power of two
// and size is a multiple of alignment, per the C11 contract.
void *hwasan_aligned_alloc(uptr alignment, uptr size, StackTrace *stack) {
  if (UNLIKELY(!CheckAlignedAllocAlignmentAndSize(alignment, size))) {
    errno = errno_EINVAL;
    if (AllocatorMayReturnNull())
      return nullptr;
    // Does not return.
    ReportInvalidAlignedAllocAlignment(size, alignment, stack);
  }
  return SetErrnoOnNull(HwasanAllocate(stack, size, alignment, false));
}
|
|
|
|
|
|
|
|
// memalign() entry point: alignment must be a power of two.
void *hwasan_memalign(uptr alignment, uptr size, StackTrace *stack) {
  if (UNLIKELY(!IsPowerOfTwo(alignment))) {
    errno = errno_EINVAL;
    if (AllocatorMayReturnNull())
      return nullptr;
    // Does not return.
    ReportInvalidAllocationAlignment(alignment, stack);
  }
  return SetErrnoOnNull(HwasanAllocate(stack, size, alignment, false));
}
|
|
|
|
|
|
|
|
// posix_memalign() entry point. Returns 0 on success or an errno value;
// *memptr is written only on success, per the POSIX contract.
int hwasan_posix_memalign(void **memptr, uptr alignment, uptr size,
                          StackTrace *stack) {
  if (UNLIKELY(!CheckPosixMemalignAlignment(alignment))) {
    if (AllocatorMayReturnNull())
      return errno_EINVAL;
    // Does not return.
    ReportInvalidPosixMemalignAlignment(alignment, stack);
  }
  void *ptr = HwasanAllocate(stack, size, alignment, false);
  if (UNLIKELY(!ptr))
    // OOM error is already taken care of by HwasanAllocate.
    return errno_ENOMEM;
  CHECK(IsAligned((uptr)ptr, alignment));
  *memptr = ptr;
  return 0;
}
|
|
|
|
|
hwasan: Use system allocator to realloc and free untagged pointers in interceptor mode.
The Android dynamic loader has a non-standard feature that allows
libraries such as the hwasan runtime to interpose symbols even after
the symbol already has a value. The new value of the symbol is used to
relocate libraries loaded after the interposing library, but existing
libraries keep the old value. This behaviour is activated by the
DF_1_GLOBAL flag in DT_FLAGS_1, which is set by passing -z global to
the linker, which is what we already do to link the hwasan runtime.
What this means in practice is that if we have .so files that depend
on interceptor-mode hwasan without the main executable depending on
it, some of the libraries in the process will be using the hwasan
allocator and some will be using the system allocator, and these
allocators need to interact somehow. For example, if an instrumented
library calls a function such as strdup that allocates memory on
behalf of the caller, the instrumented library can reasonably expect
to be able to call free to deallocate the memory.
We can handle that relatively easily with hwasan by using tag 0 to
represent allocations from the system allocator. If hwasan's realloc
or free functions are passed a pointer with tag 0, the system allocator
is called.
One limitation is that this scheme doesn't work in reverse: if an
instrumented library allocates memory, it must free the memory itself
and cannot pass ownership to a system library. In a future change,
we may want to expose an API for calling the system allocator so
that instrumented libraries can safely transfer ownership of memory
to system libraries.
Differential Revision: https://reviews.llvm.org/D55986
llvm-svn: 350427
2019-01-05 03:21:51 +08:00
|
|
|
// free() entry point.
void hwasan_free(void *ptr, StackTrace *stack) {
  return HwasanDeallocate(stack, ptr);
}
|
|
|
|
|
2018-06-08 07:33:33 +08:00
|
|
|
} // namespace __hwasan
|
2017-12-09 09:31:51 +08:00
|
|
|
|
|
|
|
using namespace __hwasan;
|
|
|
|
|
|
|
|
// Public API: turns allocator tagging on at runtime.
void __hwasan_enable_allocator_tagging() {
  atomic_store_relaxed(&hwasan_allocator_tagging_enabled, 1);
}
|
|
|
|
|
|
|
|
// Public API: turns allocator tagging off at runtime.
void __hwasan_disable_allocator_tagging() {
  atomic_store_relaxed(&hwasan_allocator_tagging_enabled, 0);
}
|
|
|
|
|
|
|
|
// Sanitizer API: bytes currently allocated by user code.
uptr __sanitizer_get_current_allocated_bytes() {
  uptr stats[AllocatorStatCount];
  allocator.GetStats(stats);
  return stats[AllocatorStatAllocated];
}
|
|
|
|
|
|
|
|
// Sanitizer API: bytes mapped by the allocator for the heap.
uptr __sanitizer_get_heap_size() {
  uptr stats[AllocatorStatCount];
  allocator.GetStats(stats);
  return stats[AllocatorStatMapped];
}
|
|
|
|
|
|
|
|
// Sanitizer API stub: not meaningfully tracked by hwasan; returns 1.
uptr __sanitizer_get_free_bytes() { return 1; }
|
|
|
|
|
|
|
|
// Sanitizer API stub: not meaningfully tracked by hwasan; returns 1.
uptr __sanitizer_get_unmapped_bytes() { return 1; }
|
|
|
|
|
|
|
|
// Sanitizer API: estimate of the size a request would actually consume.
uptr __sanitizer_get_estimated_allocated_size(uptr size) { return size; }
|
|
|
|
|
|
|
|
// Sanitizer API: non-zero iff 'p' is the start of a live hwasan allocation.
int __sanitizer_get_ownership(const void *p) { return AllocationSize(p) != 0; }
|
|
|
|
|
|
|
|
// Sanitizer API: requested size of the allocation at 'p', or 0 if not owned.
uptr __sanitizer_get_allocated_size(const void *p) { return AllocationSize(p); }
|