[hwasan] optionally right-align heap allocations
Summary:
... so that we can find intra-granule buffer overflows. The default is still
to always align left. It remains to be seen whether we can enable this mode
at scale.

Reviewers: eugenis

Reviewed By: eugenis

Subscribers: jfb, dvyukov, kubamracek, delcypher, #sanitizers, llvm-commits

Differential Revision: https://reviews.llvm.org/D53789

llvm-svn: 347082
parent f2f39be9ed
commit ba5c7169c5
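To make the motivation concrete: hwasan tags memory in 16-byte granules, so a left-aligned allocation whose size is not a multiple of 16 leaves a tail inside its last granule that carries the allocation's own tag, and a small overflow into that tail goes unnoticed. A minimal sketch of such a bug (not part of the patch; assumes an aarch64 build with -fsanitize=hwaddress):

// overflow-in-granule.cc (hypothetical file name)
#include <cstdlib>

int main() {
  // 30 bytes rounds up to a 32-byte, two-granule chunk.
  char * volatile p = (char *)malloc(30);
  // Left-aligned (the default): p[30] still lies inside the chunk's second
  // granule, so the tag matches and the overflow is missed.
  // With malloc_align_right=2 the allocation ends exactly on the granule
  // boundary, so the same store lands in a differently tagged granule and
  // produces a tag-mismatch report.
  p[30] = 1;
  free(p);
}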
@@ -31,11 +31,34 @@ static atomic_uint8_t hwasan_allocator_tagging_enabled;
 
 static const tag_t kFallbackAllocTag = 0xBB;
 static const tag_t kFallbackFreeTag = 0xBC;
 
+enum RightAlignMode {
+  kRightAlignNever,
+  kRightAlignSometimes,
+  kRightAlignAlways
+};
+
+// These two variables are initialized from flags()->malloc_align_right
+// in HwasanAllocatorInit and are never changed afterwards.
+static RightAlignMode right_align_mode = kRightAlignNever;
+static bool right_align_8 = false;
+
 bool HwasanChunkView::IsAllocated() const {
   return metadata_ && metadata_->alloc_context_id && metadata_->requested_size;
 }
 
+// Aligns the 'addr' right to the granule boundary.
+static uptr AlignRight(uptr addr, uptr requested_size) {
+  uptr tail_size = requested_size % kShadowAlignment;
+  if (!tail_size) return addr;
+  if (right_align_8)
+    return tail_size > 8 ? addr : addr + 8;
+  return addr + kShadowAlignment - tail_size;
+}
+
 uptr HwasanChunkView::Beg() const {
+  if (metadata_ && metadata_->right_aligned)
+    return AlignRight(block_, metadata_->requested_size);
   return block_;
 }
 uptr HwasanChunkView::End() const {
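A stand-alone restatement of the arithmetic above, useful for checking offsets by hand (a sketch, not the runtime code; it assumes the usual 16-byte hwasan granule, i.e. kShadowAlignment == 16):

#include <cstdint>
#include <cstdio>

// Mirrors AlignRight from the patch, with the granule size spelled out.
static uintptr_t AlignRightSketch(uintptr_t addr, uintptr_t requested_size,
                                  bool align_to_8) {
  uintptr_t tail_size = requested_size % 16;
  if (!tail_size) return addr;               // granule-aligned size: no shift
  if (align_to_8)
    return tail_size > 8 ? addr : addr + 8;  // keep 8-byte alignment
  return addr + 16 - tail_size;              // push the end to the boundary
}

int main() {
  const uintptr_t base = 0x1000;
  // malloc(20) occupies one full granule plus a 4-byte tail.
  std::printf("%d\n", (int)(AlignRightSketch(base, 20, false) - base));  // 12
  std::printf("%d\n", (int)(AlignRightSketch(base, 20, true) - base));   // 8
  // malloc(32) is already granule-aligned and is never shifted.
  std::printf("%d\n", (int)(AlignRightSketch(base, 32, false) - base));  // 0
}

With the full right shift the 20-byte block ends exactly at the granule boundary; with the 8-byte variant its end stays 4 bytes short of the boundary, which is why modes 8/9 can miss overflows of fewer than 5 bytes but never break malloc's 8-byte alignment guarantee.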
@@ -65,6 +88,30 @@ void HwasanAllocatorInit() {
                              !flags()->disable_allocator_tagging);
   SetAllocatorMayReturnNull(common_flags()->allocator_may_return_null);
   allocator.Init(common_flags()->allocator_release_to_os_interval_ms);
+  switch (flags()->malloc_align_right) {
+    case 0: break;
+    case 1:
+      right_align_mode = kRightAlignSometimes;
+      right_align_8 = false;
+      break;
+    case 2:
+      right_align_mode = kRightAlignAlways;
+      right_align_8 = false;
+      break;
+    case 8:
+      right_align_mode = kRightAlignSometimes;
+      right_align_8 = true;
+      break;
+    case 9:
+      right_align_mode = kRightAlignAlways;
+      right_align_8 = true;
+      break;
+    default:
+      Report("ERROR: unsupported value of malloc_align_right flag: %d\n",
+             flags()->malloc_align_right);
+      Die();
+  }
 }
 
 void AllocatorSwallowThreadLocalCache(AllocatorCache *cache) {
@@ -110,6 +157,7 @@ static void *HwasanAllocate(StackTrace *stack, uptr orig_size, uptr alignment,
       reinterpret_cast<Metadata *>(allocator.GetMetaData(allocated));
   meta->requested_size = static_cast<u32>(orig_size);
   meta->alloc_context_id = StackDepotPut(*stack);
+  meta->right_aligned = false;
   if (zeroise) {
     internal_memset(allocated, 0, size);
   } else if (flags()->max_malloc_fill_size > 0) {
@@ -123,6 +171,16 @@ static void *HwasanAllocate(StackTrace *stack, uptr orig_size, uptr alignment,
     user_ptr = (void *)TagMemoryAligned(
         (uptr)user_ptr, size, t ? t->GenerateRandomTag() : kFallbackAllocTag);
 
+  if ((orig_size % kShadowAlignment) && (alignment <= kShadowAlignment) &&
+      right_align_mode) {
+    uptr as_uptr = reinterpret_cast<uptr>(user_ptr);
+    if (right_align_mode == kRightAlignAlways ||
+        GetTagFromPointer(as_uptr) & 1) {  // use a tag bit as a random bit.
+      user_ptr = reinterpret_cast<void *>(AlignRight(as_uptr, orig_size));
+      meta->right_aligned = 1;
+    }
+  }
+
   HWASAN_MALLOC_HOOK(user_ptr, size);
   return user_ptr;
 }
@@ -143,8 +201,10 @@ void HwasanDeallocate(StackTrace *stack, void *tagged_ptr) {
     ReportInvalidFree(stack, reinterpret_cast<uptr>(tagged_ptr));
 
   void *untagged_ptr = UntagPtr(tagged_ptr);
+  void *aligned_ptr = reinterpret_cast<void *>(
+      RoundDownTo(reinterpret_cast<uptr>(untagged_ptr), kShadowAlignment));
   Metadata *meta =
-      reinterpret_cast<Metadata *>(allocator.GetMetaData(untagged_ptr));
+      reinterpret_cast<Metadata *>(allocator.GetMetaData(aligned_ptr));
   uptr orig_size = meta->requested_size;
   u32 free_context_id = StackDepotPut(*stack);
   u32 alloc_context_id = meta->alloc_context_id;
@@ -154,22 +214,23 @@ void HwasanDeallocate(StackTrace *stack, void *tagged_ptr) {
   // poisoned.
   Thread *t = GetCurrentThread();
   if (flags()->max_free_fill_size > 0) {
-    uptr fill_size = Min(orig_size, (uptr)flags()->max_free_fill_size);
-    internal_memset(untagged_ptr, flags()->free_fill_byte, fill_size);
+    uptr fill_size =
+        Min(TaggedSize(orig_size), (uptr)flags()->max_free_fill_size);
+    internal_memset(aligned_ptr, flags()->free_fill_byte, fill_size);
   }
   if (flags()->tag_in_free &&
       atomic_load_relaxed(&hwasan_allocator_tagging_enabled))
-    TagMemoryAligned((uptr)untagged_ptr, TaggedSize(orig_size),
+    TagMemoryAligned(reinterpret_cast<uptr>(aligned_ptr), TaggedSize(orig_size),
                      t ? t->GenerateRandomTag() : kFallbackFreeTag);
   if (t) {
-    allocator.Deallocate(t->allocator_cache(), untagged_ptr);
+    allocator.Deallocate(t->allocator_cache(), aligned_ptr);
     if (auto *ha = t->heap_allocations())
       ha->push({reinterpret_cast<uptr>(tagged_ptr), alloc_context_id,
                 free_context_id, static_cast<u32>(orig_size)});
   } else {
    SpinMutexLock l(&fallback_mutex);
    AllocatorCache *cache = &fallback_allocator_cache;
-    allocator.Deallocate(cache, untagged_ptr);
+    allocator.Deallocate(cache, aligned_ptr);
   }
 }
@@ -213,8 +274,14 @@ static uptr AllocationSize(const void *tagged_ptr) {
   const void *untagged_ptr = UntagPtr(tagged_ptr);
   if (!untagged_ptr) return 0;
   const void *beg = allocator.GetBlockBegin(untagged_ptr);
-  if (beg != untagged_ptr) return 0;
   Metadata *b = (Metadata *)allocator.GetMetaData(untagged_ptr);
+  if (b->right_aligned) {
+    if (beg != reinterpret_cast<void *>(RoundDownTo(
+            reinterpret_cast<uptr>(untagged_ptr), kShadowAlignment)))
+      return 0;
+  } else {
+    if (beg != untagged_ptr) return 0;
+  }
   return b->requested_size;
 }
@@ -29,7 +29,8 @@
 namespace __hwasan {
 
 struct Metadata {
-  u32 requested_size;  // sizes are < 4G.
+  u32 requested_size : 31;  // sizes are < 2G.
+  u32 right_aligned : 1;
   u32 alloc_context_id;
 };
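A quick way to convince yourself the widened struct costs nothing (a sketch, assuming u32 is a plain 32-bit unsigned type and the target packs adjacent bit-fields, as the usual aarch64 ABI does): the 31-bit size and the 1-bit flag share one word, so the per-chunk metadata stays at 8 bytes while the recordable size drops from just under 4G to just under 2G.

#include <cassert>
#include <cstdint>

using u32 = uint32_t;  // assumption: hwasan's u32 is a 32-bit unsigned type

struct Metadata {
  u32 requested_size : 31;  // sizes are < 2G.
  u32 right_aligned : 1;
  u32 alloc_context_id;
};

int main() {
  // Both bit-fields pack into the first 32-bit word on common ABIs,
  // so adding the flag does not grow the per-chunk metadata.
  static_assert(sizeof(Metadata) == 8, "bit-fields should share one word");
  Metadata m = {};
  m.requested_size = (1u << 31) - 1;  // largest representable request
  m.right_aligned = 1;
  assert(m.requested_size == 0x7fffffffu && m.right_aligned == 1);
}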
@@ -37,6 +37,33 @@ HWASAN_FLAG(
     int, max_malloc_fill_size, 0x1000,  // By default, fill only the first 4K.
     "HWASan allocator flag. max_malloc_fill_size is the maximal amount of "
     "bytes that will be filled with malloc_fill_byte on malloc.")
+
+// Rules for malloc alignment on aarch64:
+//   * If the size is 16-aligned, then malloc should return 16-aligned memory.
+//   * Otherwise, malloc should return 8-alignment memory.
+// So,
+//   * If the size is 16-aligned, we don't need to do anything.
+//   * Otherwise we don't have to obey 16-alignment, just the 8-alignment.
+//   * We may want to break the 8-alignment rule to catch more buffer overflows
+//     but this will break valid code in some rare cases, like this:
+//     struct Foo {
+//       // accessed via atomic instructions that require 8-alignment.
+//       std::atomic<int64_t> atomic_stuff;
+//       ...
+//       char vla[1];  // the actual size of vla could be anything.
+//     }
+// Which means that the safe values for malloc_align_right are 0, 8, 9,
+// and the values 1 and 2 may require changes in otherwise valid code.
+HWASAN_FLAG(
+    int, malloc_align_right, 0,  // off by default
+    "HWASan allocator flag. "
+    "0 (default): allocations are always aligned left to 16-byte boundary; "
+    "1: allocations are sometimes aligned right to 1-byte boundary (risky); "
+    "2: allocations are always aligned right to 1-byte boundary (risky); "
+    "8: allocations are sometimes aligned right to 8-byte boundary; "
+    "9: allocations are always aligned right to 8-byte boundary."
+)
 HWASAN_FLAG(
     int, max_free_fill_size, 0,
     "HWASan allocator flag. max_free_fill_size is the maximal amount of "
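As a usage note, and an assumption about the lit harness rather than something stated in this patch: the %env_hwasan_opts prefix in the tests below expands to an HWASAN_OPTIONS environment setting, so the new modes can be tried by hand roughly like this (./app stands for a hypothetical hwasan-instrumented binary):

HWASAN_OPTIONS=malloc_align_right=9 ./app   # always right-align, keep 8-byte alignment
HWASAN_OPTIONS=malloc_align_right=8 ./app   # right-align a random subset, keep 8-byte alignment
HWASAN_OPTIONS=malloc_align_right=2 ./app   # always right-align to 1 byte (risky)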
@@ -1,10 +1,22 @@
 // RUN: %clang_hwasan %s -o %t
-// RUN: not %run %t 40 2>&1 | FileCheck %s --check-prefix=CHECK40
-// RUN: not %run %t 80 2>&1 | FileCheck %s --check-prefix=CHECK80
+// RUN: not %run %t 40 2>&1 | FileCheck %s --check-prefix=CHECK40-LEFT
+// RUN: %env_hwasan_opts=malloc_align_right=2 not %run %t 40 2>&1 | FileCheck %s --check-prefix=CHECK40-RIGHT
+// RUN: not %run %t 80 2>&1 | FileCheck %s --check-prefix=CHECK80-LEFT
+// RUN: %env_hwasan_opts=malloc_align_right=2 not %run %t 80 2>&1 | FileCheck %s --check-prefix=CHECK80-RIGHT
 // RUN: not %run %t -30 2>&1 | FileCheck %s --check-prefix=CHECKm30
 // RUN: not %run %t -30 1000000 2>&1 | FileCheck %s --check-prefix=CHECKMm30
 // RUN: not %run %t 1000000 1000000 2>&1 | FileCheck %s --check-prefix=CHECKM
+
+// Test OOB within the granule.
+// Misses the bug when malloc is left-aligned, catches it otherwise.
+// RUN: %run %t 31
+// RUN: %env_hwasan_opts=malloc_align_right=2 not %run %t 31 2>&1 | FileCheck %s --check-prefix=CHECK31
+
+// RUN: %run %t 30 20
+// RUN: %env_hwasan_opts=malloc_align_right=9 not %run %t 30 20 2>&1 | FileCheck %s --check-prefix=CHECK20-RIGHT8
+
+// RUN: %env_hwasan_opts=malloc_align_right=42 not %run %t 2>&1 | FileCheck %s --check-prefix=CHECK-WRONG-FLAG
 
 // REQUIRES: stable-runtime
 
 #include <stdlib.h>
@@ -16,12 +28,18 @@ int main(int argc, char **argv) {
   int offset = argc < 2 ? 40 : atoi(argv[1]);
   int size = argc < 3 ? 30 : atoi(argv[2]);
   char * volatile x = (char*)malloc(size);
   fprintf(stderr, "base: %p access: %p\n", x, &x[offset]);
   x[offset] = 42;
-// CHECK40: allocated heap chunk; size: 32 offset: 8
-// CHECK40: is located 10 bytes to the right of 30-byte region
+// CHECK40-LEFT: allocated heap chunk; size: 32 offset: 8
+// CHECK40-LEFT: is located 10 bytes to the right of 30-byte region
+// CHECK40-RIGHT: allocated heap chunk; size: 32 offset: 10
+// CHECK40-RIGHT: is located 10 bytes to the right of 30-byte region
+//
-// CHECK80: allocated heap chunk; size: 32 offset: 16
-// CHECK80: is located 50 bytes to the right of 30-byte region
+// CHECK80-LEFT: allocated heap chunk; size: 32 offset: 16
+// CHECK80-LEFT: is located 50 bytes to the right of 30-byte region
+// CHECK80-RIGHT: allocated heap chunk; size: 32 offset: 18
+// CHECK80-RIGHT: is located 50 bytes to the right of 30-byte region
+//
 // CHECKm30: allocated heap chunk; size: 32 offset: 2
 // CHECKm30: is located 30 bytes to the left of 30-byte region
@@ -31,5 +49,11 @@ int main(int argc, char **argv) {
 //
 // CHECKM: is a large allocated heap chunk; size: 1003520 offset: 1000000
 // CHECKM: is located 0 bytes to the right of 1000000-byte region
+//
+// CHECK31: is located 1 bytes to the right of 30-byte region
+//
+// CHECK20-RIGHT8: is located 10 bytes to the right of 20-byte region [0x{{.*}}8,0x{{.*}}c)
+//
+// CHECK-WRONG-FLAG: ERROR: unsupported value of malloc_align_right flag: 42
   free(x);
 }
@@ -0,0 +1,35 @@
+// Tests malloc_align_right=1 and 8 (randomly aligning right).
+// RUN: %clang_hwasan %s -o %t
+//
+// RUN: %run %t
+// RUN: %env_hwasan_opts=malloc_align_right=1 not %run %t 2>&1 | FileCheck %s --check-prefix=CHECK1
+// RUN: %env_hwasan_opts=malloc_align_right=8 not %run %t 2>&1 | FileCheck %s --check-prefix=CHECK8
+
+// REQUIRES: stable-runtime
+
+#include <stdlib.h>
+#include <stdio.h>
+#include <sanitizer/hwasan_interface.h>
+
+static volatile void *sink;
+
+int main(int argc, char **argv) {
+  __hwasan_enable_allocator_tagging();
+
+  // Perform 1000 buffer overflows within the 16-byte granule,
+  // so that random right-alignment has a very high chance of
+  // catching at least one of them.
+  for (int i = 0; i < 1000; i++) {
+    char *p = (char*)malloc(20);
+    sink = p;
+    fprintf(stderr, "[%d] p: %p; accessing p[20]:\n", i, p);
+    p[20 * argc] = 0;  // requires malloc_align_right=1 to catch
+    fprintf(stderr, "[%d] p: %p; accessing p[30]:\n", i, p);
+    p[30 * argc] = 0;  // requires malloc_align_right={1,8} to catch
+    // CHECK1: accessing p[20]
+    // CHECK1-NEXT: HWAddressSanitizer: tag-mismatch
+    // CHECK8: accessing p[30]:
+    // CHECK8-NEXT: HWAddressSanitizer: tag-mismatch
+  }
+}
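A short aside on why 1000 iterations suffice (assuming the tag bit used as the coin flip in HwasanAllocate is roughly uniform): in modes 1 and 8 each allocation is right-aligned with probability about 1/2, so the probability that none of the 1000 overflowing allocations gets right-aligned, and the test misses the bug, is about 0.5^1000 ≈ 10^-301.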
@@ -22,8 +22,8 @@ int main() {
   if (ISREAD) r = x[5]; else x[5] = 42;  // should be on the same line.
 // CHECK: [[TYPE]] of size 1 at {{.*}} tags: [[PTR_TAG:[0-9a-f][0-9a-f]]]/[[MEM_TAG:[0-9a-f][0-9a-f]]] (ptr/mem)
 // CHECK: #0 {{.*}} in main {{.*}}use-after-free.c:[[@LINE-2]]
 
-// CHECK: is a small unallocated heap chunk; size: 16 offset: 5
+// Offset is 5 or 11 depending on left/right alignment.
+// CHECK: is a small unallocated heap chunk; size: 16 offset: {{5|11}}
 // CHECK: is located 5 bytes inside of 10-byte region
 //
 // CHECK: freed by thread {{.*}} here: