[sanitizer] Decorate /proc/self/maps better.

Summary:
Refactor the way /proc/self/maps entries are annotated so that the
mechanism works on most (all?) POSIX platforms, with a special
implementation for Android. Extend the set of decorated Mmap* calls.

Replace shm_open with internal_open("/dev/shm/%s"). shm_open is
problematic because it calls libc open(), which may be intercepted.
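
For illustration only, a minimal standalone sketch of the same trick with
plain libc calls (the demo path, size, and name below are made up; the
runtime goes through internal_open/internal_mmap precisely so that no
intercepted libc entry point is involved):

// Hypothetical demo, not part of this patch: back an otherwise anonymous
// mapping with an unlinked file under /dev/shm so the kernel prints a
// readable name for it in /proc/self/maps.
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/mman.h>
#include <unistd.h>

int main() {
  const size_t kSize = 1 << 20;
  char path[128];
  snprintf(path, sizeof(path), "/dev/shm/%d [demo mapping]", (int)getpid());
  int fd = open(path, O_RDWR | O_CREAT | O_TRUNC | O_CLOEXEC, 0700);
  if (fd < 0) return 1;
  if (ftruncate(fd, kSize) != 0) return 1;
  unlink(path);  // the path survives only as the mapping's name ("(deleted)")
  // File-backed instead of MAP_ANON: /proc/self/maps now shows the /dev/shm
  // path for this range.
  void *p = mmap(nullptr, kSize, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
  if (p == MAP_FAILED) return 1;
  char cmd[64];
  snprintf(cmd, sizeof(cmd), "grep demo /proc/%d/maps", (int)getpid());
  return system(cmd) == 0 ? 0 : 1;
}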

The generic implementation has limits (the maximum number of files under
/dev/shm is 64K on my machine), which could conceivably be reached when
sanitizing multiple programs at once. The Android implementation is
essentially free and is enabled by default.
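
For reference, the Android path boils down to one prctl on an already
mapped anonymous region. A hedged sketch follows (the helper name and
demo string are hypothetical; the PR_SET_VMA constants are the same ones
this patch defines, since older headers do not expose them):

// Hypothetical sketch of anonymous-VMA naming on Android. The kernel keeps
// only the pointer, so the name string must outlive the mapping.
#include <stddef.h>
#include <stdint.h>
#include <sys/mman.h>
#include <sys/prctl.h>

#ifndef PR_SET_VMA
#define PR_SET_VMA 0x53564d41
#define PR_SET_VMA_ANON_NAME 0
#endif

static const char kName[] = "demo anon mapping";

void *MapNamedAnon(size_t size) {
  void *p = mmap(nullptr, size, PROT_READ | PROT_WRITE,
                 MAP_PRIVATE | MAP_ANON, -1, 0);
  if (p == MAP_FAILED) return nullptr;
  // Best effort: kernels without anonymous-VMA naming reject this with EINVAL.
  prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME, (uintptr_t)p, size, (uintptr_t)kName);
  return p;
}

Such a region then shows up as "[anon:demo anon mapping]" in
/proc/self/maps, with no extra file descriptors or /dev/shm entries,
which is why this path costs essentially nothing and is on by default.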

The test in sanitizer_common is copied into hwasan rather than reused
directly, because hwasan currently fails far too many of the common tests.

Reviewers: pcc, vitalybuka

Subscribers: srhines, kubamracek, jfb, llvm-commits, kcc

Differential Revision: https://reviews.llvm.org/D57720

llvm-svn: 353255
Author: Evgeniy Stepanov
Date:   2019-02-06 01:14:50 +00:00
Commit: 443c034391 (parent edd2e05851)

10 changed files with 174 additions and 93 deletions

@@ -186,7 +186,7 @@ void ShadowBuilder::Install() {
GetShadowSize(), MAP_FIXED);
CHECK(res != MAP_FAILED);
#else
void *res = MmapFixedOrDie(shadow_, GetShadowSize());
void *res = MmapFixedOrDie(shadow_, GetShadowSize(), "cfi shadow");
CHECK(res != MAP_FAILED);
::memcpy(&shadow_, &main_shadow, GetShadowSize());
#endif

@@ -190,17 +190,13 @@ static void HwasanFormatMemoryUsage(InternalScopedString &s) {
#if SANITIZER_ANDROID
static char *memory_usage_buffer = nullptr;
#define PR_SET_VMA 0x53564d41
#define PR_SET_VMA_ANON_NAME 0
static void InitMemoryUsage() {
memory_usage_buffer =
(char *)MmapOrDie(kMemoryUsageBufferSize, "memory usage string");
CHECK(memory_usage_buffer);
memory_usage_buffer[0] = '\0';
CHECK(internal_prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME,
(uptr)memory_usage_buffer, kMemoryUsageBufferSize,
(uptr)memory_usage_buffer) == 0);
DecorateMapping((uptr)memory_usage_buffer, kMemoryUsageBufferSize,
memory_usage_buffer);
}
void UpdateMemoryUsage() {

@@ -80,7 +80,8 @@ class SizeClassAllocator64 {
CHECK_NE(NonConstSpaceBeg, ~(uptr)0);
}
SetReleaseToOSIntervalMs(release_to_os_interval_ms);
MapWithCallbackOrDie(SpaceEnd(), AdditionalSize());
MapWithCallbackOrDie(SpaceEnd(), AdditionalSize(),
"SizeClassAllocator: region info");
// Check that the RegionInfo array is aligned on the CacheLine size.
DCHECK_EQ(SpaceEnd() % kCacheLineSize, 0);
}
@@ -633,8 +634,8 @@ class SizeClassAllocator64 {
return reinterpret_cast<CompactPtrT *>(GetMetadataEnd(region_beg));
}
bool MapWithCallback(uptr beg, uptr size) {
uptr mapped = address_range.Map(beg, size);
bool MapWithCallback(uptr beg, uptr size, const char *name) {
uptr mapped = address_range.Map(beg, size, name);
if (UNLIKELY(!mapped))
return false;
CHECK_EQ(beg, mapped);
@@ -642,8 +643,8 @@ class SizeClassAllocator64 {
return true;
}
void MapWithCallbackOrDie(uptr beg, uptr size) {
CHECK_EQ(beg, address_range.MapOrDie(beg, size));
void MapWithCallbackOrDie(uptr beg, uptr size, const char *name) {
CHECK_EQ(beg, address_range.MapOrDie(beg, size, name));
MapUnmapCallback().OnMap(beg, size);
}
@@ -661,7 +662,8 @@ class SizeClassAllocator64 {
uptr current_map_end = reinterpret_cast<uptr>(GetFreeArray(region_beg)) +
region->mapped_free_array;
uptr new_map_size = new_mapped_free_array - region->mapped_free_array;
if (UNLIKELY(!MapWithCallback(current_map_end, new_map_size)))
if (UNLIKELY(!MapWithCallback(current_map_end, new_map_size,
"SizeClassAllocator: freearray")))
return false;
region->mapped_free_array = new_mapped_free_array;
}
@@ -712,7 +714,8 @@ class SizeClassAllocator64 {
if (UNLIKELY(IsRegionExhausted(region, class_id, user_map_size)))
return false;
if (UNLIKELY(!MapWithCallback(region_beg + region->mapped_user,
user_map_size)))
user_map_size,
"SizeClassAllocator: region data")))
return false;
stat->Add(AllocatorStatMapped, user_map_size);
region->mapped_user += user_map_size;
@@ -732,7 +735,7 @@ class SizeClassAllocator64 {
return false;
if (UNLIKELY(!MapWithCallback(
GetMetadataEnd(region_beg) - region->mapped_meta - meta_map_size,
meta_map_size)))
meta_map_size, "SizeClassAllocator: region metadata")))
return false;
region->mapped_meta += meta_map_size;
}

@@ -101,10 +101,11 @@ void *MmapOrDieOnFatalError(uptr size, const char *mem_type);
bool MmapFixedNoReserve(uptr fixed_addr, uptr size, const char *name = nullptr)
WARN_UNUSED_RESULT;
void *MmapNoReserveOrDie(uptr size, const char *mem_type);
void *MmapFixedOrDie(uptr fixed_addr, uptr size);
void *MmapFixedOrDie(uptr fixed_addr, uptr size, const char *name = nullptr);
// Behaves just like MmapFixedOrDie, but tolerates out of memory condition, in
// that case returns nullptr.
void *MmapFixedOrDieOnFatalError(uptr fixed_addr, uptr size);
void *MmapFixedOrDieOnFatalError(uptr fixed_addr, uptr size,
const char *name = nullptr);
void *MmapFixedNoAccess(uptr fixed_addr, uptr size, const char *name = nullptr);
void *MmapNoAccess(uptr size);
// Map aligned chunk of address space; size and alignment are powers of two.
@@ -140,8 +141,8 @@ void RunFreeHooks(const void *ptr);
class ReservedAddressRange {
public:
uptr Init(uptr size, const char *name = nullptr, uptr fixed_addr = 0);
uptr Map(uptr fixed_addr, uptr size);
uptr MapOrDie(uptr fixed_addr, uptr size);
uptr Map(uptr fixed_addr, uptr size, const char *name = nullptr);
uptr MapOrDie(uptr fixed_addr, uptr size, const char *name = nullptr);
void Unmap(uptr addr, uptr size);
void *base() const { return base_; }
uptr size() const { return size_; }

@@ -218,9 +218,9 @@ COMMON_FLAG(bool, intercept_stat, true,
COMMON_FLAG(bool, intercept_send, true,
"If set, uses custom wrappers for send* functions "
"to find more errors.")
COMMON_FLAG(bool, decorate_proc_maps, false, "If set, decorate sanitizer "
"mappings in /proc/self/maps with "
"user-readable names")
COMMON_FLAG(bool, decorate_proc_maps, (bool)SANITIZER_ANDROID,
"If set, decorate sanitizer mappings in /proc/self/maps with "
"user-readable names")
COMMON_FLAG(int, exitcode, 1, "Override the program exit status if the tool "
"found an error")
COMMON_FLAG(

@@ -43,9 +43,8 @@ uptr GetMmapGranularity() {
void *MmapOrDie(uptr size, const char *mem_type, bool raw_report) {
size = RoundUpTo(size, GetPageSizeCached());
uptr res = internal_mmap(nullptr, size,
PROT_READ | PROT_WRITE,
MAP_PRIVATE | MAP_ANON, -1, 0);
uptr res = MmapNamed(nullptr, size, PROT_READ | PROT_WRITE,
MAP_PRIVATE | MAP_ANON, mem_type);
int reserrno;
if (UNLIKELY(internal_iserror(res, &reserrno)))
ReportMmapFailureAndDie(size, mem_type, "allocate", reserrno, raw_report);
@@ -66,9 +65,8 @@ void UnmapOrDie(void *addr, uptr size) {
void *MmapOrDieOnFatalError(uptr size, const char *mem_type) {
size = RoundUpTo(size, GetPageSizeCached());
uptr res = internal_mmap(nullptr, size,
PROT_READ | PROT_WRITE,
MAP_PRIVATE | MAP_ANON, -1, 0);
uptr res = MmapNamed(nullptr, size, PROT_READ | PROT_WRITE,
MAP_PRIVATE | MAP_ANON, mem_type);
int reserrno;
if (UNLIKELY(internal_iserror(res, &reserrno))) {
if (reserrno == ENOMEM)
@@ -103,12 +101,9 @@ void *MmapAlignedOrDieOnFatalError(uptr size, uptr alignment,
}
void *MmapNoReserveOrDie(uptr size, const char *mem_type) {
uptr PageSize = GetPageSizeCached();
uptr p = internal_mmap(nullptr,
RoundUpTo(size, PageSize),
PROT_READ | PROT_WRITE,
MAP_PRIVATE | MAP_ANON | MAP_NORESERVE,
-1, 0);
size = RoundUpTo(size, GetPageSizeCached());
uptr p = MmapNamed(nullptr, size, PROT_READ | PROT_WRITE,
MAP_PRIVATE | MAP_ANON | MAP_NORESERVE, mem_type);
int reserrno;
if (UNLIKELY(internal_iserror(p, &reserrno)))
ReportMmapFailureAndDie(size, mem_type, "allocate noreserve", reserrno);
@@ -116,13 +111,12 @@ void *MmapNoReserveOrDie(uptr size, const char *mem_type) {
return (void *)p;
}
void *MmapFixedImpl(uptr fixed_addr, uptr size, bool tolerate_enomem) {
uptr PageSize = GetPageSizeCached();
uptr p = internal_mmap((void*)(fixed_addr & ~(PageSize - 1)),
RoundUpTo(size, PageSize),
PROT_READ | PROT_WRITE,
MAP_PRIVATE | MAP_ANON | MAP_FIXED,
-1, 0);
static void *MmapFixedImpl(uptr fixed_addr, uptr size, bool tolerate_enomem,
const char *name) {
size = RoundUpTo(size, GetPageSizeCached());
fixed_addr = RoundDownTo(fixed_addr, GetPageSizeCached());
uptr p = MmapNamed((void *)fixed_addr, size, PROT_READ | PROT_WRITE,
MAP_PRIVATE | MAP_ANON | MAP_FIXED, name);
int reserrno;
if (UNLIKELY(internal_iserror(p, &reserrno))) {
if (tolerate_enomem && reserrno == ENOMEM)
@@ -136,12 +130,12 @@ void *MmapFixedImpl(uptr fixed_addr, uptr size, bool tolerate_enomem) {
return (void *)p;
}
void *MmapFixedOrDie(uptr fixed_addr, uptr size) {
return MmapFixedImpl(fixed_addr, size, false /*tolerate_enomem*/);
void *MmapFixedOrDie(uptr fixed_addr, uptr size, const char *name) {
return MmapFixedImpl(fixed_addr, size, false /*tolerate_enomem*/, name);
}
void *MmapFixedOrDieOnFatalError(uptr fixed_addr, uptr size) {
return MmapFixedImpl(fixed_addr, size, true /*tolerate_enomem*/);
void *MmapFixedOrDieOnFatalError(uptr fixed_addr, uptr size, const char *name) {
return MmapFixedImpl(fixed_addr, size, true /*tolerate_enomem*/, name);
}
bool MprotectNoAccess(uptr addr, uptr size) {
@@ -343,6 +337,53 @@ bool ShouldMockFailureToOpen(const char *path) {
internal_strncmp(path, "/proc/", 6) == 0;
}
#if SANITIZER_LINUX && !SANITIZER_ANDROID && !SANITIZER_GO
int GetNamedMappingFd(const char *name, uptr size, int *flags) {
if (!common_flags()->decorate_proc_maps || !name)
return -1;
char shmname[200];
CHECK(internal_strlen(name) < sizeof(shmname) - 10);
internal_snprintf(shmname, sizeof(shmname), "/dev/shm/%zu [%s]",
internal_getpid(), name);
int fd = ReserveStandardFds(
internal_open(shmname, O_RDWR | O_CREAT | O_TRUNC | O_CLOEXEC, S_IRWXU));
CHECK_GE(fd, 0);
int res = internal_ftruncate(fd, size);
CHECK_EQ(0, res);
res = internal_unlink(shmname);
CHECK_EQ(0, res);
*flags &= ~(MAP_ANON | MAP_ANONYMOUS);
return fd;
}
#else
int GetNamedMappingFd(const char *name, uptr size, int *flags) {
return -1;
}
#endif
#if SANITIZER_ANDROID
#define PR_SET_VMA 0x53564d41
#define PR_SET_VMA_ANON_NAME 0
void DecorateMapping(uptr addr, uptr size, const char *name) {
if (!common_flags()->decorate_proc_maps || !name)
return;
CHECK(internal_prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME, addr, size,
(uptr)name) == 0);
}
#else
void DecorateMapping(uptr addr, uptr size, const char *name) {
}
#endif
uptr MmapNamed(void *addr, uptr length, int prot, int flags, const char *name) {
int fd = GetNamedMappingFd(name, length, &flags);
uptr res = internal_mmap(addr, length, prot, flags, fd, 0);
if (!internal_iserror(res))
DecorateMapping(res, length, name);
return res;
}
} // namespace __sanitizer
#endif // SANITIZER_POSIX

@@ -104,6 +104,18 @@ fd_t ReserveStandardFds(fd_t fd);
bool ShouldMockFailureToOpen(const char *path);
// Create a non-file mapping with a given /proc/self/maps name.
uptr MmapNamed(void *addr, uptr length, int prot, int flags, const char *name);
// Platforms should implement at most one of these.
// 1. Provide a pre-decorated file descriptor to use instead of an anonymous
// mapping.
int GetNamedMappingFd(const char *name, uptr size, int *flags);
// 2. Add name to an existing anonymous mapping. The caller must keep *name
// alive at least as long as the mapping exists.
void DecorateMapping(uptr addr, uptr size, const char *name);
} // namespace __sanitizer
#endif // SANITIZER_POSIX_H

@@ -307,37 +307,11 @@ void PlatformPrepareForSandboxing(__sanitizer_sandbox_arguments *args) {
MemoryMappingLayout::CacheMemoryMappings();
}
#if SANITIZER_ANDROID || SANITIZER_GO
int GetNamedMappingFd(const char *name, uptr size) {
return -1;
}
#else
int GetNamedMappingFd(const char *name, uptr size) {
if (!common_flags()->decorate_proc_maps)
return -1;
char shmname[200];
CHECK(internal_strlen(name) < sizeof(shmname) - 10);
internal_snprintf(shmname, sizeof(shmname), "%zu [%s]", internal_getpid(),
name);
int fd = shm_open(shmname, O_RDWR | O_CREAT | O_TRUNC, S_IRWXU);
CHECK_GE(fd, 0);
int res = internal_ftruncate(fd, size);
CHECK_EQ(0, res);
res = shm_unlink(shmname);
CHECK_EQ(0, res);
return fd;
}
#endif
bool MmapFixedNoReserve(uptr fixed_addr, uptr size, const char *name) {
int fd = name ? GetNamedMappingFd(name, size) : -1;
unsigned flags = MAP_PRIVATE | MAP_FIXED | MAP_NORESERVE;
if (fd == -1) flags |= MAP_ANON;
uptr PageSize = GetPageSizeCached();
uptr p = internal_mmap((void *)(fixed_addr & ~(PageSize - 1)),
RoundUpTo(size, PageSize), PROT_READ | PROT_WRITE,
flags, fd, 0);
size = RoundUpTo(size, GetPageSizeCached());
fixed_addr = RoundDownTo(fixed_addr, GetPageSizeCached());
uptr p = MmapNamed((void *)fixed_addr, size, PROT_READ | PROT_WRITE,
MAP_PRIVATE | MAP_FIXED | MAP_NORESERVE | MAP_ANON, name);
int reserrno;
if (internal_iserror(p, &reserrno)) {
Report("ERROR: %s failed to "
@@ -350,12 +324,8 @@ bool MmapFixedNoReserve(uptr fixed_addr, uptr size, const char *name) {
}
uptr ReservedAddressRange::Init(uptr size, const char *name, uptr fixed_addr) {
// We don't pass `name` along because, when you enable `decorate_proc_maps`
// AND actually use a named mapping AND are using a sanitizer intercepting
// `open` (e.g. TSAN, ESAN), then you'll get a failure during initialization.
// TODO(flowerhack): Fix the implementation of GetNamedMappingFd to solve
// this problem.
base_ = fixed_addr ? MmapFixedNoAccess(fixed_addr, size) : MmapNoAccess(size);
base_ = fixed_addr ? MmapFixedNoAccess(fixed_addr, size, name)
: MmapNoAccess(size);
size_ = size;
name_ = name;
(void)os_handle_; // unsupported
@@ -364,12 +334,14 @@ uptr ReservedAddressRange::Init(uptr size, const char *name, uptr fixed_addr) {
// Uses fixed_addr for now.
// Will use offset instead once we've implemented this function for real.
uptr ReservedAddressRange::Map(uptr fixed_addr, uptr size) {
return reinterpret_cast<uptr>(MmapFixedOrDieOnFatalError(fixed_addr, size));
uptr ReservedAddressRange::Map(uptr fixed_addr, uptr size, const char *name) {
return reinterpret_cast<uptr>(
MmapFixedOrDieOnFatalError(fixed_addr, size, name));
}
uptr ReservedAddressRange::MapOrDie(uptr fixed_addr, uptr size) {
return reinterpret_cast<uptr>(MmapFixedOrDie(fixed_addr, size));
uptr ReservedAddressRange::MapOrDie(uptr fixed_addr, uptr size,
const char *name) {
return reinterpret_cast<uptr>(MmapFixedOrDie(fixed_addr, size, name));
}
void ReservedAddressRange::Unmap(uptr addr, uptr size) {
@@ -384,12 +356,9 @@ void ReservedAddressRange::Unmap(uptr addr, uptr size) {
}
void *MmapFixedNoAccess(uptr fixed_addr, uptr size, const char *name) {
int fd = name ? GetNamedMappingFd(name, size) : -1;
unsigned flags = MAP_PRIVATE | MAP_FIXED | MAP_NORESERVE;
if (fd == -1) flags |= MAP_ANON;
return (void *)internal_mmap((void *)fixed_addr, size, PROT_NONE, flags, fd,
0);
return (void *)MmapNamed((void *)fixed_addr, size, PROT_NONE,
MAP_PRIVATE | MAP_FIXED | MAP_NORESERVE | MAP_ANON,
name);
}
void *MmapNoAccess(uptr size) {

@@ -229,7 +229,7 @@ bool MmapFixedNoReserve(uptr fixed_addr, uptr size, const char *name) {
// Memory space mapped by 'MmapFixedOrDie' must have been reserved by
// 'MmapFixedNoAccess'.
void *MmapFixedOrDie(uptr fixed_addr, uptr size) {
void *MmapFixedOrDie(uptr fixed_addr, uptr size, const char *name) {
void *p = VirtualAlloc((LPVOID)fixed_addr, size,
MEM_COMMIT, PAGE_READWRITE);
if (p == 0) {
@@ -260,7 +260,7 @@ void ReservedAddressRange::Unmap(uptr addr, uptr size) {
UnmapOrDie(reinterpret_cast<void*>(addr), size);
}
void *MmapFixedOrDieOnFatalError(uptr fixed_addr, uptr size) {
void *MmapFixedOrDieOnFatalError(uptr fixed_addr, uptr size, const char *name) {
void *p = VirtualAlloc((LPVOID)fixed_addr, size,
MEM_COMMIT, PAGE_READWRITE);
if (p == 0) {

@@ -0,0 +1,59 @@
// RUN: %clang_hwasan -g %s -o %t
// RUN: %env_hwasan_opts=decorate_proc_maps=1 %run %t 2>&1 | FileCheck %s --check-prefix=A
// RUN: %env_hwasan_opts=decorate_proc_maps=1 %run %t 2>&1 | FileCheck %s --check-prefix=B
// A: rw-p {{.*}}hwasan threads]
// A-NEXT: ---p {{.*}}shadow gap]
// A-NEXT: rw-p {{.*}}low shadow]
// A-NEXT: ---p {{.*}}shadow gap]
// A-NEXT: rw-p {{.*}}high shadow]
// B-DAG: rw-p {{.*}}SizeClassAllocator: region data]
// B-DAG: rw-p {{.*}}SizeClassAllocator: region metadata]
// B-DAG: rw-p {{.*}}SizeClassAllocator: freearray]
// B-DAG: rw-p {{.*}}SizeClassAllocator: region info]
// B-DAG: rw-p {{.*}}LargeMmapAllocator]
// B-DAG: rw-p {{.*}}stack depot]
#include <errno.h>
#include <fcntl.h>
#include <pthread.h>
#include <stdio.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <unistd.h>
#include <pthread.h>
#include <stdlib.h>
void CopyFdToFd(int in_fd, int out_fd) {
const size_t kBufSize = 0x10000;
static char buf[kBufSize];
while (1) {
ssize_t got = read(in_fd, buf, kBufSize);
if (got > 0) {
write(out_fd, buf, got);
} else if (got == 0) {
break;
} else if (errno != EAGAIN || errno != EWOULDBLOCK || errno != EINTR) {
fprintf(stderr, "error reading file, errno %d\n", errno);
abort();
}
}
}
void *ThreadFn(void *arg) {
(void)arg;
int fd = open("/proc/self/maps", O_RDONLY);
CopyFdToFd(fd, 2);
close(fd);
return NULL;
}
int main(void) {
pthread_t t;
void * volatile res = malloc(100);
void * volatile res2 = malloc(100000);
pthread_create(&t, 0, ThreadFn, 0);
pthread_join(t, 0);
return (int)(size_t)res;
}