//===-- sanitizer_linux.cc ------------------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is shared between AddressSanitizer and ThreadSanitizer
// run-time libraries and implements linux-specific functions from
// sanitizer_libc.h.
//===----------------------------------------------------------------------===//

#ifdef __linux__

#include "sanitizer_common.h"
#include "sanitizer_internal_defs.h"
#include "sanitizer_libc.h"
#include "sanitizer_linux.h"
#include "sanitizer_mutex.h"
#include "sanitizer_placement_new.h"
#include "sanitizer_procmaps.h"
#include "sanitizer_stacktrace.h"

#include <errno.h>
#include <fcntl.h>
#include <pthread.h>
#include <sched.h>
#include <sys/mman.h>
#include <sys/ptrace.h>
#include <sys/resource.h>
#include <sys/stat.h>
#include <sys/syscall.h>
#include <sys/time.h>
#include <sys/types.h>
#include <sys/prctl.h>
#include <unistd.h>
#include <unwind.h>

#if !defined(__ANDROID__) && !defined(ANDROID)
#include <sys/signal.h>
#endif

// <linux/futex.h> is broken on some linux distributions.
const int FUTEX_WAIT = 0;
const int FUTEX_WAKE = 1;

// Are we using 32-bit or 64-bit syscalls?
// x32 (which defines __x86_64__) has SANITIZER_WORDSIZE == 32
// but it still needs to use 64-bit syscalls.
#if defined(__x86_64__) || SANITIZER_WORDSIZE == 64
# define SANITIZER_LINUX_USES_64BIT_SYSCALLS 1
#else
# define SANITIZER_LINUX_USES_64BIT_SYSCALLS 0
#endif

namespace __sanitizer {

// --------------- sanitizer_libc.h
void *internal_mmap(void *addr, uptr length, int prot, int flags,
                    int fd, u64 offset) {
#if SANITIZER_LINUX_USES_64BIT_SYSCALLS
  return (void *)syscall(__NR_mmap, addr, length, prot, flags, fd, offset);
#else
  return (void *)syscall(__NR_mmap2, addr, length, prot, flags, fd, offset);
#endif
}

int internal_munmap(void *addr, uptr length) {
  return syscall(__NR_munmap, addr, length);
}

int internal_close(fd_t fd) {
  return syscall(__NR_close, fd);
}

fd_t internal_open(const char *filename, int flags) {
  return syscall(__NR_open, filename, flags);
}

fd_t internal_open(const char *filename, int flags, u32 mode) {
  return syscall(__NR_open, filename, flags, mode);
}

fd_t OpenFile(const char *filename, bool write) {
  return internal_open(filename,
      write ? O_WRONLY | O_CREAT /*| O_CLOEXEC*/ : O_RDONLY, 0660);
}

uptr internal_read(fd_t fd, void *buf, uptr count) {
  sptr res;
  HANDLE_EINTR(res, (sptr)syscall(__NR_read, fd, buf, count));
  return res;
}

uptr internal_write(fd_t fd, const void *buf, uptr count) {
  sptr res;
  HANDLE_EINTR(res, (sptr)syscall(__NR_write, fd, buf, count));
  return res;
}
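
// Note: HANDLE_EINTR (see sanitizer_libc.h) is expected to retry the wrapped
// syscall while it fails with EINTR, so callers of internal_read() and
// internal_write() should only observe EOF, short transfers, or real errors.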

int internal_stat(const char *path, void *buf) {
#if SANITIZER_LINUX_USES_64BIT_SYSCALLS
  return syscall(__NR_stat, path, buf);
#else
  return syscall(__NR_stat64, path, buf);
#endif
}

int internal_lstat(const char *path, void *buf) {
#if SANITIZER_LINUX_USES_64BIT_SYSCALLS
  return syscall(__NR_lstat, path, buf);
#else
  return syscall(__NR_lstat64, path, buf);
#endif
}

int internal_fstat(fd_t fd, void *buf) {
#if SANITIZER_LINUX_USES_64BIT_SYSCALLS
  return syscall(__NR_fstat, fd, buf);
#else
  return syscall(__NR_fstat64, fd, buf);
#endif
}

uptr internal_filesize(fd_t fd) {
#if SANITIZER_LINUX_USES_64BIT_SYSCALLS
  struct stat st;
#else
  struct stat64 st;
#endif
  if (internal_fstat(fd, &st))
    return -1;
  return (uptr)st.st_size;
}

int internal_dup2(int oldfd, int newfd) {
  return syscall(__NR_dup2, oldfd, newfd);
}

uptr internal_readlink(const char *path, char *buf, uptr bufsize) {
  return (uptr)syscall(__NR_readlink, path, buf, bufsize);
}

int internal_sched_yield() {
  return syscall(__NR_sched_yield);
}

void internal__exit(int exitcode) {
  syscall(__NR_exit_group, exitcode);
  Die();  // Unreachable.
}

// ----------------- sanitizer_common.h
bool FileExists(const char *filename) {
#if SANITIZER_LINUX_USES_64BIT_SYSCALLS
  struct stat st;
  if (syscall(__NR_stat, filename, &st))
    return false;
#else
  struct stat64 st;
  if (syscall(__NR_stat64, filename, &st))
    return false;
#endif
  // Sanity check: filename is a regular file.
  return S_ISREG(st.st_mode);
}

uptr GetTid() {
  return syscall(__NR_gettid);
}

void GetThreadStackTopAndBottom(bool at_initialization, uptr *stack_top,
                                uptr *stack_bottom) {
  static const uptr kMaxThreadStackSize = 256 * (1 << 20);  // 256M
  CHECK(stack_top);
  CHECK(stack_bottom);
  if (at_initialization) {
    // This is the main thread. Libpthread may not be initialized yet.
    struct rlimit rl;
    CHECK_EQ(getrlimit(RLIMIT_STACK, &rl), 0);

    // Find the mapping that contains a stack variable.
    MemoryMappingLayout proc_maps;
    uptr start, end, offset;
    uptr prev_end = 0;
    while (proc_maps.Next(&start, &end, &offset, 0, 0, /* protection */0)) {
      if ((uptr)&rl < end)
        break;
      prev_end = end;
    }
    CHECK((uptr)&rl >= start && (uptr)&rl < end);

    // Get stacksize from rlimit, but clip it so that it does not overlap
    // with other mappings.
    uptr stacksize = rl.rlim_cur;
    if (stacksize > end - prev_end)
      stacksize = end - prev_end;
    // When running with unlimited stack size, we still want to set some limit.
    // The unlimited stack size is caused by 'ulimit -s unlimited'.
    // Also, for some reason, GNU make spawns subprocesses with unlimited stack.
    if (stacksize > kMaxThreadStackSize)
      stacksize = kMaxThreadStackSize;
    *stack_top = end;
    *stack_bottom = end - stacksize;
    return;
  }
  pthread_attr_t attr;
  CHECK_EQ(pthread_getattr_np(pthread_self(), &attr), 0);
  uptr stacksize = 0;
  void *stackaddr = 0;
  pthread_attr_getstack(&attr, &stackaddr, (size_t*)&stacksize);
  pthread_attr_destroy(&attr);

  *stack_top = (uptr)stackaddr + stacksize;
  *stack_bottom = (uptr)stackaddr;
  CHECK(stacksize < kMaxThreadStackSize);  // Sanity check.
}

// Like getenv, but reads env directly from /proc and does not use libc.
// This function should be called first inside __asan_init.
const char *GetEnv(const char *name) {
  static char *environ;
  static uptr len;
  static bool inited;
  if (!inited) {
    inited = true;
    uptr environ_size;
    len = ReadFileToBuffer("/proc/self/environ",
                           &environ, &environ_size, 1 << 26);
  }
  if (!environ || len == 0) return 0;
  uptr namelen = internal_strlen(name);
  const char *p = environ;
  while (*p != '\0') {  // will happen at the \0\0 that terminates the buffer
    // proc file has the format NAME=value\0NAME=value\0NAME=value\0...
    const char* endp =
        (char*)internal_memchr(p, '\0', len - (p - environ));
    if (endp == 0)  // this entry isn't NUL terminated
      return 0;
    else if (!internal_memcmp(p, name, namelen) && p[namelen] == '=')  // Match.
      return p + namelen + 1;  // point after =
    p = endp + 1;
  }
  return 0;  // Not found.
}
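
// Illustrative (hypothetical) usage from a tool's early init code, before
// libc-based getenv() can be trusted:
//   if (const char *options = GetEnv("ASAN_OPTIONS")) {
//     // Parse the option string here; the returned pointer aliases the
//     // mmap'ed /proc/self/environ buffer and must not be freed.
//   }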

#ifdef __GLIBC__

extern "C" {
extern void *__libc_stack_end;
}

static void GetArgsAndEnv(char ***argv, char ***envp) {
  uptr *stack_end = (uptr *)__libc_stack_end;
  int argc = *stack_end;
  *argv = (char**)(stack_end + 1);
  *envp = (char**)(stack_end + argc + 2);
}
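
// The code above relies on the layout of the initial process stack on Linux:
// __libc_stack_end points at argc, which is followed by argv[0..argc-1], a
// terminating NULL, and then the environment pointers. Hence argv starts at
// stack_end + 1 and envp at stack_end + argc + 2 (skipping argv's NULL).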

#else  // __GLIBC__

static void ReadNullSepFileToArray(const char *path, char ***arr,
                                   int arr_size) {
  char *buff;
  uptr buff_size = 0;
  *arr = (char **)MmapOrDie(arr_size * sizeof(char *), "NullSepFileArray");
  ReadFileToBuffer(path, &buff, &buff_size, 1024 * 1024);
  (*arr)[0] = buff;
  int count, i;
  for (count = 1, i = 1; ; i++) {
    if (buff[i] == 0) {
      if (buff[i+1] == 0) break;
      (*arr)[count] = &buff[i+1];
      CHECK_LE(count, arr_size - 1);  // FIXME: make this more flexible.
      count++;
    }
  }
  (*arr)[count] = 0;
}

static void GetArgsAndEnv(char ***argv, char ***envp) {
  static const int kMaxArgv = 2000, kMaxEnvp = 2000;
  ReadNullSepFileToArray("/proc/self/cmdline", argv, kMaxArgv);
  ReadNullSepFileToArray("/proc/self/environ", envp, kMaxEnvp);
}

#endif  // __GLIBC__

void ReExec() {
  char **argv, **envp;
  GetArgsAndEnv(&argv, &envp);
  execve("/proc/self/exe", argv, envp);
  Printf("execve failed, errno %d\n", errno);
  Die();
}

void PrepareForSandboxing() {
  // Some kinds of sandboxes may forbid filesystem access, so we won't be able
  // to read the file mappings from /proc/self/maps. Luckily, such a sandboxed
  // process will not be able to load additional libraries either, so it's fine
  // to use the cached mappings.
  MemoryMappingLayout::CacheMemoryMappings();
}

// ----------------- sanitizer_procmaps.h
// Linker initialized.
ProcSelfMapsBuff MemoryMappingLayout::cached_proc_self_maps_;
StaticSpinMutex MemoryMappingLayout::cache_lock_;  // Linker initialized.

MemoryMappingLayout::MemoryMappingLayout() {
  proc_self_maps_.len =
      ReadFileToBuffer("/proc/self/maps", &proc_self_maps_.data,
                       &proc_self_maps_.mmaped_size, 1 << 26);
  if (proc_self_maps_.mmaped_size == 0) {
    LoadFromCache();
    CHECK_GT(proc_self_maps_.len, 0);
  }
  // internal_write(2, proc_self_maps_.data, proc_self_maps_.len);
  Reset();
  // FIXME: in the future we may want to cache the mappings on demand only.
  CacheMemoryMappings();
}

MemoryMappingLayout::~MemoryMappingLayout() {
  // Only unmap the buffer if it is different from the cached one. Otherwise
  // it will be unmapped when the cache is refreshed.
  if (proc_self_maps_.data != cached_proc_self_maps_.data) {
    UnmapOrDie(proc_self_maps_.data, proc_self_maps_.mmaped_size);
  }
}

void MemoryMappingLayout::Reset() {
  current_ = proc_self_maps_.data;
}

// static
void MemoryMappingLayout::CacheMemoryMappings() {
  SpinMutexLock l(&cache_lock_);
  // Don't invalidate the cache if the mappings are unavailable.
  ProcSelfMapsBuff old_proc_self_maps;
  old_proc_self_maps = cached_proc_self_maps_;
  cached_proc_self_maps_.len =
      ReadFileToBuffer("/proc/self/maps", &cached_proc_self_maps_.data,
                       &cached_proc_self_maps_.mmaped_size, 1 << 26);
  if (cached_proc_self_maps_.mmaped_size == 0) {
    cached_proc_self_maps_ = old_proc_self_maps;
  } else {
    if (old_proc_self_maps.mmaped_size) {
      UnmapOrDie(old_proc_self_maps.data,
                 old_proc_self_maps.mmaped_size);
    }
  }
}

void MemoryMappingLayout::LoadFromCache() {
  SpinMutexLock l(&cache_lock_);
  if (cached_proc_self_maps_.data) {
    proc_self_maps_ = cached_proc_self_maps_;
  }
}

// Parse a hex value in str and update str.
static uptr ParseHex(char **str) {
  uptr x = 0;
  char *s;
  for (s = *str; ; s++) {
    char c = *s;
    uptr v = 0;
    if (c >= '0' && c <= '9')
      v = c - '0';
    else if (c >= 'a' && c <= 'f')
      v = c - 'a' + 10;
    else if (c >= 'A' && c <= 'F')
      v = c - 'A' + 10;
    else
      break;
    x = x * 16 + v;
  }
  *str = s;
  return x;
}
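
// ParseHex() stops at the first non-hex character and leaves *str pointing at
// it. For example, given "08048000-08056000 r-xp ...", the first call returns
// 0x08048000 and advances *str to the '-'.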

static bool IsOneOf(char c, char c1, char c2) {
  return c == c1 || c == c2;
}

static bool IsDecimal(char c) {
  return c >= '0' && c <= '9';
}

bool MemoryMappingLayout::Next(uptr *start, uptr *end, uptr *offset,
                               char filename[], uptr filename_size,
                               uptr *protection) {
  char *last = proc_self_maps_.data + proc_self_maps_.len;
  if (current_ >= last) return false;
  uptr dummy;
  if (!start) start = &dummy;
  if (!end) end = &dummy;
  if (!offset) offset = &dummy;
  char *next_line = (char*)internal_memchr(current_, '\n', last - current_);
  if (next_line == 0)
    next_line = last;
  // Example: 08048000-08056000 r-xp 00000000 03:0c 64593   /foo/bar
  *start = ParseHex(&current_);
  CHECK_EQ(*current_++, '-');
  *end = ParseHex(&current_);
  CHECK_EQ(*current_++, ' ');
  uptr local_protection = 0;
  CHECK(IsOneOf(*current_, '-', 'r'));
  if (*current_++ == 'r')
    local_protection |= kProtectionRead;
  CHECK(IsOneOf(*current_, '-', 'w'));
  if (*current_++ == 'w')
    local_protection |= kProtectionWrite;
  CHECK(IsOneOf(*current_, '-', 'x'));
  if (*current_++ == 'x')
    local_protection |= kProtectionExecute;
  CHECK(IsOneOf(*current_, 's', 'p'));
  if (*current_++ == 's')
    local_protection |= kProtectionShared;
  if (protection) {
    *protection = local_protection;
  }
  CHECK_EQ(*current_++, ' ');
  *offset = ParseHex(&current_);
  CHECK_EQ(*current_++, ' ');
  ParseHex(&current_);
  CHECK_EQ(*current_++, ':');
  ParseHex(&current_);
  CHECK_EQ(*current_++, ' ');
  while (IsDecimal(*current_))
    current_++;
  CHECK_EQ(*current_++, ' ');
  // Skip spaces.
  while (current_ < next_line && *current_ == ' ')
    current_++;
  // Fill in the filename.
  uptr i = 0;
  while (current_ < next_line) {
    if (filename && i < filename_size - 1)
      filename[i++] = *current_;
    current_++;
  }
  if (filename && i < filename_size)
    filename[i] = 0;
  current_ = next_line + 1;
  return true;
}

// Gets the object name and the offset by walking MemoryMappingLayout.
bool MemoryMappingLayout::GetObjectNameAndOffset(uptr addr, uptr *offset,
                                                 char filename[],
                                                 uptr filename_size,
                                                 uptr *protection) {
  return IterateForObjectNameAndOffset(addr, offset, filename, filename_size,
                                       protection);
}

bool SanitizerSetThreadName(const char *name) {
#ifdef PR_SET_NAME
  return 0 == prctl(PR_SET_NAME, (unsigned long)name, 0, 0, 0);  // NOLINT
#else
  return false;
#endif
}

bool SanitizerGetThreadName(char *name, int max_len) {
#ifdef PR_GET_NAME
  char buff[17];
  if (prctl(PR_GET_NAME, (unsigned long)buff, 0, 0, 0))  // NOLINT
    return false;
  internal_strncpy(name, buff, max_len);
  name[max_len] = 0;
  return true;
#else
  return false;
#endif
}
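
// PR_SET_NAME/PR_GET_NAME operate on the kernel's 16-byte thread comm field
// (including the terminating NUL), so longer names are silently truncated;
// the 17-byte local buffer above simply leaves room for a defensive
// terminator.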

#ifndef SANITIZER_GO
//------------------------- SlowUnwindStack -----------------------------------
#ifdef __arm__
#define UNWIND_STOP _URC_END_OF_STACK
#define UNWIND_CONTINUE _URC_NO_REASON
#else
#define UNWIND_STOP _URC_NORMAL_STOP
#define UNWIND_CONTINUE _URC_NO_REASON
#endif

uptr Unwind_GetIP(struct _Unwind_Context *ctx) {
#ifdef __arm__
  uptr val;
  _Unwind_VRS_Result res = _Unwind_VRS_Get(ctx, _UVRSC_CORE,
      15 /* r15 = PC */, _UVRSD_UINT32, &val);
  CHECK(res == _UVRSR_OK && "_Unwind_VRS_Get failed");
  // Clear the Thumb bit.
  return val & ~(uptr)1;
#else
  return _Unwind_GetIP(ctx);
#endif
}

_Unwind_Reason_Code Unwind_Trace(struct _Unwind_Context *ctx, void *param) {
  StackTrace *b = (StackTrace*)param;
  CHECK(b->size < b->max_size);
  uptr pc = Unwind_GetIP(ctx);
  b->trace[b->size++] = pc;
  if (b->size == b->max_size) return UNWIND_STOP;
  return UNWIND_CONTINUE;
}

static bool MatchPc(uptr cur_pc, uptr trace_pc) {
  return cur_pc - trace_pc <= 64 || trace_pc - cur_pc <= 64;
}
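
// Both operands are unsigned, so when the two PCs differ by more than 64 both
// differences wrap around to huge values; the check above is therefore
// equivalent to |cur_pc - trace_pc| <= 64, i.e. "close enough to be the same
// call site".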

void StackTrace::SlowUnwindStack(uptr pc, uptr max_depth) {
  this->size = 0;
  this->max_size = max_depth;
  if (max_depth > 1) {
    _Unwind_Backtrace(Unwind_Trace, this);
    // We need to pop a few frames so that pc is on top.
    // trace[0] belongs to the current function so we always pop it.
    int to_pop = 1;
    /**/ if (size > 1 && MatchPc(pc, trace[1])) to_pop = 1;
    else if (size > 2 && MatchPc(pc, trace[2])) to_pop = 2;
    else if (size > 3 && MatchPc(pc, trace[3])) to_pop = 3;
    else if (size > 4 && MatchPc(pc, trace[4])) to_pop = 4;
    else if (size > 5 && MatchPc(pc, trace[5])) to_pop = 5;
    this->PopStackFrames(to_pop);
  }
  this->trace[0] = pc;
}

#endif  // #ifndef SANITIZER_GO

enum MutexState {
  MtxUnlocked = 0,
  MtxLocked = 1,
  MtxSleeping = 2
};
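
// BlockingMutex below is a simple futex-based lock. Lock() first tries to
// grab the mutex with a single atomic exchange to MtxLocked; on contention it
// switches the state to MtxSleeping and futex-waits until the owner releases
// it. Unlock() resets the state to MtxUnlocked and issues FUTEX_WAKE only if
// some thread was (or may have been) sleeping, which keeps the uncontended
// path free of syscalls.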

BlockingMutex::BlockingMutex(LinkerInitialized) {
  CHECK_EQ(owner_, 0);
}

void BlockingMutex::Lock() {
  atomic_uint32_t *m = reinterpret_cast<atomic_uint32_t *>(&opaque_storage_);
  if (atomic_exchange(m, MtxLocked, memory_order_acquire) == MtxUnlocked)
    return;
  while (atomic_exchange(m, MtxSleeping, memory_order_acquire) != MtxUnlocked)
    syscall(__NR_futex, m, FUTEX_WAIT, MtxSleeping, 0, 0, 0);
}

void BlockingMutex::Unlock() {
  atomic_uint32_t *m = reinterpret_cast<atomic_uint32_t *>(&opaque_storage_);
  u32 v = atomic_exchange(m, MtxUnlocked, memory_order_relaxed);
  CHECK_NE(v, MtxUnlocked);
  if (v == MtxSleeping)
    syscall(__NR_futex, m, FUTEX_WAKE, 1, 0, 0, 0);
}

void BlockingMutex::CheckLocked() {
  atomic_uint32_t *m = reinterpret_cast<atomic_uint32_t *>(&opaque_storage_);
  CHECK_NE(MtxUnlocked, atomic_load(m, memory_order_relaxed));
}

// ----------------- sanitizer_linux.h
// The actual size of this structure is specified by d_reclen.
// Note that getdents64 uses a different structure format. We only provide the
// 32-bit syscall here.
struct linux_dirent {
  unsigned long d_ino;
  unsigned long d_off;
  unsigned short d_reclen;
  char d_name[256];
};
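
// getdents() packs variable-sized entries back to back, so iteration must
// advance by d_reclen rather than sizeof(linux_dirent). A minimal sketch of
// the pattern used by ThreadLister below:
//   for (char *p = buffer; p < buffer + bytes_read; ) {
//     linux_dirent *entry = (linux_dirent *)p;
//     // ... inspect entry->d_name ...
//     p += entry->d_reclen;
//   }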

// Syscall wrappers.
long internal_ptrace(int request, int pid, void *addr, void *data) {
  return syscall(__NR_ptrace, request, pid, addr, data);
}

int internal_waitpid(int pid, int *status, int options) {
  return syscall(__NR_wait4, pid, status, options, NULL /* rusage */);
}

int internal_getppid() {
  return syscall(__NR_getppid);
}

int internal_getdents(fd_t fd, struct linux_dirent *dirp, unsigned int count) {
  return syscall(__NR_getdents, fd, dirp, count);
}

OFF_T internal_lseek(fd_t fd, OFF_T offset, int whence) {
  return syscall(__NR_lseek, fd, offset, whence);
}

int internal_prctl(int option, uptr arg2, uptr arg3, uptr arg4, uptr arg5) {
  return syscall(__NR_prctl, option, arg2, arg3, arg4, arg5);
}

int internal_sigaltstack(const struct sigaltstack *ss,
                         struct sigaltstack *oss) {
  return syscall(__NR_sigaltstack, ss, oss);
}

// ThreadLister implementation.
ThreadLister::ThreadLister(int pid)
  : pid_(pid),
    descriptor_(-1),
    error_(true),
    entry_((linux_dirent *)buffer_),
    bytes_read_(0) {
  char task_directory_path[80];
  internal_snprintf(task_directory_path, sizeof(task_directory_path),
                    "/proc/%d/task/", pid);
  descriptor_ = internal_open(task_directory_path, O_RDONLY | O_DIRECTORY);
  if (descriptor_ < 0) {
    error_ = true;
    Report("Can't open /proc/%d/task for reading.\n", pid);
  } else {
    error_ = false;
  }
}

int ThreadLister::GetNextTID() {
  int tid = -1;
  do {
    if (error_)
      return -1;
    if ((char *)entry_ >= &buffer_[bytes_read_] && !GetDirectoryEntries())
      return -1;
    if (entry_->d_ino != 0 && entry_->d_name[0] >= '0' &&
        entry_->d_name[0] <= '9') {
      // Found a valid tid.
      tid = (int)internal_atoll(entry_->d_name);
    }
    entry_ = (struct linux_dirent *)(((char *)entry_) + entry_->d_reclen);
  } while (tid < 0);
  return tid;
}

void ThreadLister::Reset() {
  if (error_ || descriptor_ < 0)
    return;
  internal_lseek(descriptor_, 0, SEEK_SET);
}

ThreadLister::~ThreadLister() {
  if (descriptor_ >= 0)
    internal_close(descriptor_);
}

bool ThreadLister::error() { return error_; }

bool ThreadLister::GetDirectoryEntries() {
  CHECK_GE(descriptor_, 0);
  CHECK_NE(error_, true);
  bytes_read_ = internal_getdents(descriptor_,
                                  (struct linux_dirent *)buffer_,
                                  sizeof(buffer_));
  if (bytes_read_ < 0) {
    Report("Can't read directory entries from /proc/%d/task.\n", pid_);
    error_ = true;
    return false;
  } else if (bytes_read_ == 0) {
    return false;
  }
  entry_ = (struct linux_dirent *)buffer_;
  return true;
}

}  // namespace __sanitizer

#endif  // __linux__