[ASan] Change activation strategy.

Now ASan deactivation doesn't modify common or ASan-specific runtime
flags. Flags stay constant after initialization, and "deactivation"
instead stashes initialized runtime state, and deactivates the
runtime. Activation then just restores the original state (possibly,
overridden by some activation flags provided in a system property on
Android).

llvm-svn: 224614
This commit is contained in:
Alexey Samsonov 2014-12-19 20:35:53 +00:00
parent 30f330b39b
commit 04eeec32e6
7 changed files with 58 additions and 35 deletions

View File

@ -27,23 +27,12 @@ static struct AsanDeactivatedFlags {
int malloc_context_size;
bool poison_heap;
void CopyFrom(const Flags *f, const CommonFlags *cf) {
allocator_options.SetFrom(f, cf);
malloc_context_size = cf->malloc_context_size;
poison_heap = f->poison_heap;
}
void OverrideFromActivationFlags() {
Flags f;
CommonFlags cf;
// Copy the current activation flags.
f.quarantine_size = allocator_options.quarantine_size_mb << 20;
f.redzone = allocator_options.min_redzone;
f.max_redzone = allocator_options.max_redzone;
cf.allocator_may_return_null = allocator_options.may_return_null;
f.alloc_dealloc_mismatch = allocator_options.alloc_dealloc_mismatch;
allocator_options.CopyTo(&f, &cf);
cf.malloc_context_size = malloc_context_size;
f.poison_heap = poison_heap;
@ -55,7 +44,9 @@ static struct AsanDeactivatedFlags {
ParseCommonFlagsFromString(&cf, buf);
ParseFlagsFromString(&f, buf);
CopyFrom(&f, &cf);
allocator_options.SetFrom(&f, &cf);
malloc_context_size = cf.malloc_context_size;
poison_heap = f.poison_heap;
}
void Print() {
@ -71,20 +62,25 @@ static struct AsanDeactivatedFlags {
static bool asan_is_deactivated;
void AsanStartDeactivated() {
void AsanDeactivate() {
CHECK(!asan_is_deactivated);
VReport(1, "Deactivating ASan\n");
// Save flag values.
asan_deactivated_flags.CopyFrom(flags(), common_flags());
// FIXME: Don't overwrite commandline flags. Instead, make the flags store
// the original values calculated during flag parsing, and re-initialize
// the necessary runtime objects.
flags()->quarantine_size = 0;
flags()->max_redzone = 16;
flags()->poison_heap = false;
common_flags()->malloc_context_size = 0;
flags()->alloc_dealloc_mismatch = false;
common_flags()->allocator_may_return_null = true;
// Stash runtime state.
GetAllocatorOptions(&asan_deactivated_flags.allocator_options);
asan_deactivated_flags.malloc_context_size = GetMallocContextSize();
asan_deactivated_flags.poison_heap = CanPoisonMemory();
// Deactivate the runtime.
SetCanPoisonMemory(false);
SetMallocContextSize(1);
AllocatorOptions disabled = asan_deactivated_flags.allocator_options;
disabled.quarantine_size_mb = 0;
disabled.min_redzone = 16; // Redzone must be at least 16 bytes long.
disabled.max_redzone = 16;
disabled.alloc_dealloc_mismatch = false;
disabled.may_return_null = true;
ReInitializeAllocator(disabled);
asan_is_deactivated = true;
}

View File

@ -16,7 +16,7 @@
#define ASAN_ACTIVATION_H
namespace __asan {
void AsanStartDeactivated();
void AsanDeactivate();
void AsanActivate();
} // namespace __asan

View File

@ -213,6 +213,14 @@ void AllocatorOptions::SetFrom(const Flags *f, const CommonFlags *cf) {
alloc_dealloc_mismatch = f->alloc_dealloc_mismatch;
}
void AllocatorOptions::CopyTo(Flags *f, CommonFlags *cf) {
f->quarantine_size = (int)quarantine_size_mb << 20;
f->redzone = min_redzone;
f->max_redzone = max_redzone;
cf->allocator_may_return_null = may_return_null;
f->alloc_dealloc_mismatch = alloc_dealloc_mismatch;
}
struct Allocator {
static const uptr kMaxAllowedMallocSize =
FIRST_32_SECOND_64(3UL << 30, 64UL << 30);
@ -263,6 +271,15 @@ struct Allocator {
SharedInitCode(options);
}
void GetOptions(AllocatorOptions *options) const {
options->quarantine_size_mb = quarantine.GetSize() >> 20;
options->min_redzone = atomic_load(&min_redzone, memory_order_acquire);
options->max_redzone = atomic_load(&max_redzone, memory_order_acquire);
options->may_return_null = allocator.MayReturnNull();
options->alloc_dealloc_mismatch =
atomic_load(&alloc_dealloc_mismatch, memory_order_acquire);
}
// -------------------- Helper methods. -------------------------
uptr ComputeRZLog(uptr user_requested_size) {
u32 rz_log =
@ -662,6 +679,10 @@ void ReInitializeAllocator(const AllocatorOptions &options) {
instance.ReInitialize(options);
}
void GetAllocatorOptions(AllocatorOptions *options) {
instance.GetOptions(options);
}
AsanChunkView FindHeapChunkByAddress(uptr addr) {
return instance.FindHeapChunkByAddress(addr);
}

View File

@ -40,10 +40,12 @@ struct AllocatorOptions {
u8 alloc_dealloc_mismatch;
void SetFrom(const Flags *f, const CommonFlags *cf);
void CopyTo(Flags *f, CommonFlags *cf);
};
void InitializeAllocator(const AllocatorOptions &options);
void ReInitializeAllocator(const AllocatorOptions &options);
void GetAllocatorOptions(AllocatorOptions *options);
class AsanChunkView {
public:

View File

@ -247,14 +247,9 @@ void InitializeFlags(Flags *f) {
VReport(1, "Parsed ASAN_OPTIONS: %s\n", env);
}
// If ASan starts in deactivated state, stash and clear some flags.
// Otherwise, let activation flags override current settings.
if (flags()->start_deactivated) {
AsanStartDeactivated();
} else {
// Parse flags that may change between startup and activation.
// On Android they come from a system property.
// On other platforms this is no-op.
// Let activation flags override current settings. On Android they come
// from a system property. On other platforms this is no-op.
if (!flags()->start_deactivated) {
char buf[100];
GetExtraActivationFlags(buf, sizeof(buf));
ParseCommonFlagsFromString(cf, buf);

View File

@ -397,6 +397,11 @@ static void AsanInitInternal() {
MaybeStartBackgroudThread();
// Now that ASan runtime is (mostly) initialized, deactivate it if
// necessary, so that it can be re-activated when requested.
if (flags()->start_deactivated)
AsanDeactivate();
// On Linux AsanThread::ThreadStart() calls malloc() that's why asan_inited
// should be set to 1 prior to initializing the threads.
asan_inited = 1;

View File

@ -1300,8 +1300,12 @@ class CombinedAllocator {
return res;
}
bool MayReturnNull() const {
return atomic_load(&may_return_null_, memory_order_acquire);
}
void *ReturnNullOrDie() {
if (atomic_load(&may_return_null_, memory_order_acquire))
if (MayReturnNull())
return 0;
ReportAllocatorCannotReturnNull();
}