[scudo] Modify Scudo to use its own Secondary Allocator

Summary:
The Sanitizer Secondary Allocator was not entirely ideal for Scudo for several
reasons: a decent amount of unneeded code, redundant checks already performed
by the frontend, unneeded data structures, and the difficulty of properly
protecting the secondary chunks' headers.

Given that the Secondary allocator is pretty straightforward, Scudo will use
its own, trimming all the unneeded code off of the Sanitizer one. A significant
difference in terms of security is that each secondary chunk is now preceded
and followed by a guard page, thus mitigating overflows into and out of the
chunk.
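
Schematically, each Secondary-backed allocation now lives in its own memory
mapping, laid out as follows (a sketch of the layout implemented in
scudo_allocator_secondary.h below; names match the code):

  Header->MapBeg   MapBeg            Ptr                          MapEnd
  v                v                 v                            v
  [  guard page  | SecondaryHeader | user chunk ............... |  guard page  ]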

A test was added as well to illustrate the overflow and underflow situations
hitting the guard pages.

Reviewers: kcc

Subscribers: llvm-commits

Differential Revision: https://reviews.llvm.org/D24737

llvm-svn: 281938
commit 3beafffcca (parent 233374c4d1)
Author: Kostya Kortchinsky
Date:   2016-09-19 21:11:55 +00:00
3 changed files with 195 additions and 2 deletions

lib/scudo/scudo_allocator.cpp:

@@ -16,6 +16,7 @@
 #include "scudo_allocator.h"
 #include "scudo_utils.h"
+#include "scudo_allocator_secondary.h"
 #include "sanitizer_common/sanitizer_allocator_interface.h"
 #include "sanitizer_common/sanitizer_quarantine.h"
@@ -44,7 +45,7 @@ struct AP {
 typedef SizeClassAllocator64<AP> PrimaryAllocator;
 typedef SizeClassAllocatorLocalCache<PrimaryAllocator> AllocatorCache;
-typedef LargeMmapAllocator<> SecondaryAllocator;
+typedef ScudoLargeMmapAllocator SecondaryAllocator;
 typedef CombinedAllocator<PrimaryAllocator, AllocatorCache, SecondaryAllocator>
   ScudoAllocator;
@@ -348,7 +349,7 @@ struct Allocator {
     } else {
       SpinMutexLock l(&FallbackMutex);
       Ptr = BackendAllocator.Allocate(&FallbackAllocatorCache, NeededSize,
                                       MinAlignment);
     }
     if (!Ptr)
       return BackendAllocator.ReturnNullOrDie();
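
Note that swapping ScudoLargeMmapAllocator in for LargeMmapAllocator<> works
because CombinedAllocator only interacts with the secondary through this
interface (Allocate, Deallocate, ReturnNullOrDie, GetActuallyAllocatedSize,
and so on); accordingly, only the entry points Scudo actually exercises are
implemented in the new header, and the rest are stubbed out with
UNIMPLEMENTED().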

lib/scudo/scudo_allocator_secondary.h (new file):

@@ -0,0 +1,138 @@
//===-- scudo_allocator_secondary.h -----------------------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
///
/// Scudo Secondary Allocator.
/// This services allocations that are too large to be serviced by the Primary
/// Allocator. It is directly backed by the memory mapping functions of the
/// operating system.
///
//===----------------------------------------------------------------------===//
#ifndef SCUDO_ALLOCATOR_SECONDARY_H_
#define SCUDO_ALLOCATOR_SECONDARY_H_
namespace __scudo {
class ScudoLargeMmapAllocator {
public:
void Init(bool AllocatorMayReturnNull) {
PageSize = GetPageSizeCached();
atomic_store(&MayReturnNull, AllocatorMayReturnNull, memory_order_relaxed);
}
void *Allocate(AllocatorStats *Stats, uptr Size, uptr Alignment) {
// The Scudo frontend prevents us from allocating more than
// MaxAllowedMallocSize, so integer overflow checks would be superfluous.
uptr MapSize = RoundUpTo(Size + sizeof(SecondaryHeader), PageSize);
// Account for 2 guard pages, one before and one after the chunk.
uptr MapBeg = reinterpret_cast<uptr>(MmapNoAccess(MapSize + 2 * PageSize));
CHECK_NE(MapBeg, ~static_cast<uptr>(0));
// A page-aligned pointer is assumed after that, so check it now.
CHECK(IsAligned(MapBeg, PageSize));
MapBeg += PageSize;
CHECK_EQ(MapBeg, reinterpret_cast<uptr>(MmapFixedOrDie(MapBeg, MapSize)));
uptr MapEnd = MapBeg + MapSize;
uptr Ptr = MapBeg + sizeof(SecondaryHeader);
// TODO(kostyak): add a random offset to Ptr.
CHECK_GT(Ptr + Size, MapBeg);
CHECK_LE(Ptr + Size, MapEnd);
SecondaryHeader *Header = getHeader(Ptr);
Header->MapBeg = MapBeg - PageSize;
Header->MapSize = MapSize + 2 * PageSize;
Stats->Add(AllocatorStatAllocated, MapSize);
Stats->Add(AllocatorStatMapped, MapSize);
return reinterpret_cast<void *>(Ptr);
}
void *ReturnNullOrDie() {
if (atomic_load(&MayReturnNull, memory_order_acquire))
return nullptr;
ReportAllocatorCannotReturnNull();
}
void SetMayReturnNull(bool AllocatorMayReturnNull) {
atomic_store(&MayReturnNull, AllocatorMayReturnNull, memory_order_release);
}
void Deallocate(AllocatorStats *Stats, void *Ptr) {
SecondaryHeader *Header = getHeader(Ptr);
Stats->Sub(AllocatorStatAllocated, Header->MapSize);
Stats->Sub(AllocatorStatMapped, Header->MapSize);
UnmapOrDie(reinterpret_cast<void *>(Header->MapBeg), Header->MapSize);
}
uptr TotalMemoryUsed() {
UNIMPLEMENTED();
}
bool PointerIsMine(const void *Ptr) {
UNIMPLEMENTED();
}
uptr GetActuallyAllocatedSize(void *Ptr) {
SecondaryHeader *Header = getHeader(Ptr);
uptr MapEnd = Header->MapBeg + Header->MapSize;
return MapEnd - reinterpret_cast<uptr>(Ptr);
}
void *GetMetaData(const void *Ptr) {
UNIMPLEMENTED();
}
void *GetBlockBegin(const void *Ptr) {
UNIMPLEMENTED();
}
void *GetBlockBeginFastLocked(void *Ptr) {
UNIMPLEMENTED();
}
void PrintStats() {
UNIMPLEMENTED();
}
void ForceLock() {
UNIMPLEMENTED();
}
void ForceUnlock() {
UNIMPLEMENTED();
}
void ForEachChunk(ForEachChunkCallback Callback, void *Arg) {
UNIMPLEMENTED();
}
private:
// A Secondary allocated chunk header contains the base of the mapping and
// its size. Currently the base is always a page before the header, but we
// might want to use more than one guard page in the future, based on the
// size of the allocation.
struct SecondaryHeader {
uptr MapBeg;
uptr MapSize;
};
// Check that sizeof(SecondaryHeader) is a multiple of 16.
COMPILER_CHECK((sizeof(SecondaryHeader) & 0xf) == 0);
SecondaryHeader *getHeader(uptr Ptr) {
return reinterpret_cast<SecondaryHeader*>(Ptr - sizeof(SecondaryHeader));
}
SecondaryHeader *getHeader(const void *Ptr) {
return getHeader(reinterpret_cast<uptr>(Ptr));
}
uptr PageSize;
atomic_uint8_t MayReturnNull;
};
} // namespace __scudo
#endif // SCUDO_ALLOCATOR_SECONDARY_H_
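
For illustration, here is a minimal standalone sketch of the address
arithmetic performed by Allocate() above; the page size and the mapping base
are made-up constants for the example, not values the allocator guarantees:

#include <cassert>
#include <cstdint>
#include <cstdio>

int main() {
  const uintptr_t PageSize = 4096;                     // assumed page size
  const uintptr_t HeaderSize = 2 * sizeof(uintptr_t);  // sizeof(SecondaryHeader)
  const uintptr_t Size = (1UL << 17) + 42;             // too large for the Primary

  // RoundUpTo(Size + sizeof(SecondaryHeader), PageSize), as in Allocate().
  uintptr_t MapSize = (Size + HeaderSize + PageSize - 1) & ~(PageSize - 1);

  uintptr_t MapBeg = 0x40000000;      // hypothetical MmapNoAccess() result
  MapBeg += PageSize;                 // skip the leading guard page
  const uintptr_t MapEnd = MapBeg + MapSize;
  const uintptr_t Ptr = MapBeg + HeaderSize;  // the header sits right below Ptr

  assert(Ptr + Size <= MapEnd);  // the chunk ends before the trailing guard page
  printf("mapping %#lx-%#lx, user pointer %#lx\n",
         (unsigned long)(MapBeg - PageSize), (unsigned long)(MapEnd + PageSize),
         (unsigned long)Ptr);
  return 0;
}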

test/scudo/secondary.cpp (new file):

@@ -0,0 +1,54 @@
// RUN: %clang_scudo %s -o %t
// RUN: %run %t after 2>&1 | FileCheck %s
// RUN: %run %t before 2>&1 | FileCheck %s
// Test that we hit a guard page when writing past the end of a chunk
// allocated by the Secondary allocator, or writing too far in front of it.
#include <assert.h>
#include <malloc.h>
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
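
// A write to a mapped but PROT_NONE guard page raises SIGSEGV with si_code
// SEGV_ACCERR, whereas an access to unmapped memory reports SEGV_MAPERR;
// matching on SEGV_ACCERR below thus checks that the fault hit an actual
// (protected) guard page.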
void handler(int signo, siginfo_t *info, void *uctx) {
if (info->si_code == SEGV_ACCERR) {
fprintf(stderr, "SCUDO SIGSEGV\n");
exit(0);
}
exit(1);
}
int main(int argc, char **argv) {
// The size must be large enough to be serviced by the secondary allocator.
long page_size = sysconf(_SC_PAGESIZE);
size_t size = (1U << 17) + page_size;
struct sigaction a;
assert(argc == 2);
memset(&a, 0, sizeof(a));
a.sa_sigaction = handler;
a.sa_flags = SA_SIGINFO;
char *p = (char *)malloc(size);
if (!p)
return 1;
memset(p, 'A', size); // This should not trigger anything.
// Set up the SIGSEGV handler now, as the rest should trigger an AV.
sigaction(SIGSEGV, &a, nullptr);
if (!strcmp(argv[1], "after")) {
for (int i = 0; i < page_size; i++)
p[size + i] = 'A';
}
if (!strcmp(argv[1], "before")) {
for (int i = 1; i < page_size; i++)
p[-i] = 'A';
}
free(p);
return 1; // A successful test means we shouldn't reach this.
}
// CHECK: SCUDO SIGSEGV
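
Each faulting loop walks up to a full page beyond the chunk (or in front of
it), which guarantees reaching a guard page regardless of the page-rounding
padding; the handler then prints SCUDO SIGSEGV and exits with 0, which the
CHECK line above matches. Reaching free() and the final return 1 would mean
no guard page was hit, failing the test.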