//===-- sanitizer_win.cpp -------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is shared between AddressSanitizer and ThreadSanitizer
// run-time libraries and implements windows-specific functions from
// sanitizer_libc.h.
//===----------------------------------------------------------------------===//

#include "sanitizer_platform.h"
#if SANITIZER_WINDOWS

#define WIN32_LEAN_AND_MEAN
#define NOGDI
#include <windows.h>
#include <io.h>
#include <psapi.h>
#include <stdlib.h>

#include "sanitizer_common.h"
#include "sanitizer_file.h"
#include "sanitizer_libc.h"
#include "sanitizer_mutex.h"
#include "sanitizer_placement_new.h"
#include "sanitizer_win_defs.h"

#if defined(PSAPI_VERSION) && PSAPI_VERSION == 1
#pragma comment(lib, "psapi")
#endif
#if SANITIZER_WIN_TRACE
#include <traceloggingprovider.h>
// Windows trace logging provider init
#pragma comment(lib, "advapi32.lib")
TRACELOGGING_DECLARE_PROVIDER(g_asan_provider);
// GUID must be the same in utils/AddressSanitizerLoggingProvider.wprp
TRACELOGGING_DEFINE_PROVIDER(g_asan_provider, "AddressSanitizerLoggingProvider",
                             (0x6c6c766d, 0x3846, 0x4e6a, 0xa4, 0xfb, 0x5b,
                              0x53, 0x0b, 0xd0, 0xf3, 0xfa));
#else
#define TraceLoggingUnregister(x)
#endif

// A macro to tell the compiler that this part of the code cannot be reached,
// if the compiler supports this feature. Since we're using this in
// code that is called when terminating the process, the expansion of the
// macro should not terminate the process to avoid infinite recursion.
#if defined(__clang__)
# define BUILTIN_UNREACHABLE() __builtin_unreachable()
#elif defined(__GNUC__) && \
    (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 5))
# define BUILTIN_UNREACHABLE() __builtin_unreachable()
#elif defined(_MSC_VER)
# define BUILTIN_UNREACHABLE() __assume(0)
#else
# define BUILTIN_UNREACHABLE()
#endif
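
// Illustrative use (mirrors internal__exit() below): place the macro after a
// call that never returns, e.g.
//   TerminateProcess(GetCurrentProcess(), exitcode);
//   BUILTIN_UNREACHABLE();
// so the compiler can drop the dead epilogue. Where the hint is unsupported,
// the macro expands to nothing, which is safe here because it never re-enters
// the termination path.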

namespace __sanitizer {

#include "sanitizer_syscall_generic.inc"

// --------------------- sanitizer_common.h
uptr GetPageSize() {
  SYSTEM_INFO si;
  GetSystemInfo(&si);
  return si.dwPageSize;
}

uptr GetMmapGranularity() {
  SYSTEM_INFO si;
  GetSystemInfo(&si);
  return si.dwAllocationGranularity;
}

uptr GetMaxUserVirtualAddress() {
  SYSTEM_INFO si;
  GetSystemInfo(&si);
  return (uptr)si.lpMaximumApplicationAddress;
}

uptr GetMaxVirtualAddress() {
  return GetMaxUserVirtualAddress();
}

bool FileExists(const char *filename) {
  return ::GetFileAttributesA(filename) != INVALID_FILE_ATTRIBUTES;
}

uptr internal_getpid() {
  return GetProcessId(GetCurrentProcess());
}

int internal_dlinfo(void *handle, int request, void *p) {
  UNIMPLEMENTED();
}

// In contrast to POSIX, on Windows GetCurrentThreadId()
// returns a system-unique identifier.
tid_t GetTid() {
  return GetCurrentThreadId();
}

uptr GetThreadSelf() {
  return GetTid();
}

#if !SANITIZER_GO
void GetThreadStackTopAndBottom(bool at_initialization, uptr *stack_top,
                                uptr *stack_bottom) {
  CHECK(stack_top);
  CHECK(stack_bottom);
  MEMORY_BASIC_INFORMATION mbi;
  CHECK_NE(VirtualQuery(&mbi /* on stack */, &mbi, sizeof(mbi)), 0);
  // FIXME: is it possible for the stack to not be a single allocation?
  // Are these values what ASan expects to get (reserved, not committed,
  // including the stack guard page)?
  *stack_top = (uptr)mbi.BaseAddress + mbi.RegionSize;
  *stack_bottom = (uptr)mbi.AllocationBase;
}
#endif  // #if !SANITIZER_GO

void *MmapOrDie(uptr size, const char *mem_type, bool raw_report) {
  void *rv = VirtualAlloc(0, size, MEM_RESERVE | MEM_COMMIT, PAGE_READWRITE);
  if (rv == 0)
    ReportMmapFailureAndDie(size, mem_type, "allocate",
                            GetLastError(), raw_report);
  return rv;
}

void UnmapOrDie(void *addr, uptr size) {
  if (!size || !addr)
    return;

  MEMORY_BASIC_INFORMATION mbi;
  CHECK(VirtualQuery(addr, &mbi, sizeof(mbi)));

  // MEM_RELEASE can only be used to unmap whole regions previously mapped with
  // VirtualAlloc. So we first try MEM_RELEASE since it is better, and if that
  // fails try MEM_DECOMMIT.
  if (VirtualFree(addr, 0, MEM_RELEASE) == 0) {
    if (VirtualFree(addr, size, MEM_DECOMMIT) == 0) {
      Report("ERROR: %s failed to "
             "deallocate 0x%zx (%zd) bytes at address %p (error code: %d)\n",
             SanitizerToolName, size, size, addr, GetLastError());
      CHECK("unable to unmap" && 0);
    }
  }
}
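
// Note on the two VirtualFree modes above: MEM_RELEASE takes size == 0 and
// requires the exact base address returned by VirtualAlloc, freeing the whole
// reservation; MEM_DECOMMIT takes an explicit size and only returns the pages
// to the reserved-but-uncommitted state. That is why MEM_DECOMMIT is the
// fallback when addr is not an allocation base.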

static void *ReturnNullptrOnOOMOrDie(uptr size, const char *mem_type,
                                     const char *mmap_type) {
  error_t last_error = GetLastError();
  if (last_error == ERROR_NOT_ENOUGH_MEMORY)
    return nullptr;
  ReportMmapFailureAndDie(size, mem_type, mmap_type, last_error);
}

void *MmapOrDieOnFatalError(uptr size, const char *mem_type) {
  void *rv = VirtualAlloc(0, size, MEM_RESERVE | MEM_COMMIT, PAGE_READWRITE);
  if (rv == 0)
    return ReturnNullptrOnOOMOrDie(size, mem_type, "allocate");
  return rv;
}

// We want to map a chunk of address space aligned to 'alignment'.
void *MmapAlignedOrDieOnFatalError(uptr size, uptr alignment,
                                   const char *mem_type) {
  CHECK(IsPowerOfTwo(size));
  CHECK(IsPowerOfTwo(alignment));

  // Windows will align our allocations to at least 64K.
  alignment = Max(alignment, GetMmapGranularity());

  uptr mapped_addr =
      (uptr)VirtualAlloc(0, size, MEM_RESERVE | MEM_COMMIT, PAGE_READWRITE);
  if (!mapped_addr)
    return ReturnNullptrOnOOMOrDie(size, mem_type, "allocate aligned");

  // If we got it right on the first try, return. Otherwise, unmap it and go to
  // the slow path.
  if (IsAligned(mapped_addr, alignment))
    return (void*)mapped_addr;
  if (VirtualFree((void *)mapped_addr, 0, MEM_RELEASE) == 0)
    ReportMmapFailureAndDie(size, mem_type, "deallocate", GetLastError());

  // If we didn't get an aligned address, overallocate, find an aligned address,
  // unmap, and try to allocate at that aligned address.
  int retries = 0;
  const int kMaxRetries = 10;
  for (; retries < kMaxRetries &&
         (mapped_addr == 0 || !IsAligned(mapped_addr, alignment));
       retries++) {
    // Overallocate size + alignment bytes.
    mapped_addr =
        (uptr)VirtualAlloc(0, size + alignment, MEM_RESERVE, PAGE_NOACCESS);
    if (!mapped_addr)
      return ReturnNullptrOnOOMOrDie(size, mem_type, "allocate aligned");

    // Find the aligned address.
    uptr aligned_addr = RoundUpTo(mapped_addr, alignment);

    // Free the overallocation.
    if (VirtualFree((void *)mapped_addr, 0, MEM_RELEASE) == 0)
      ReportMmapFailureAndDie(size, mem_type, "deallocate", GetLastError());

    // Attempt to allocate exactly the number of bytes we need at the aligned
    // address. This may fail for a number of reasons, in which case we continue
    // the loop.
    mapped_addr = (uptr)VirtualAlloc((void *)aligned_addr, size,
                                     MEM_RESERVE | MEM_COMMIT, PAGE_READWRITE);
  }

  // Fail if we can't make this work quickly.
  if (retries == kMaxRetries && mapped_addr == 0)
    return ReturnNullptrOnOOMOrDie(size, mem_type, "allocate aligned");

  return (void *)mapped_addr;
}
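
// Worked example (illustrative numbers): with size = 0x20000 and
// alignment = 0x40000, the first VirtualAlloc may return, say, 0x7ffe0000,
// which is 64K-aligned but not 256K-aligned. The loop then reserves
// size + alignment = 0x60000 bytes, rounds the reservation base up to the
// next 0x40000 boundary, releases the oversized reservation, and re-allocates
// exactly 0x20000 bytes at that boundary.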

bool MmapFixedNoReserve(uptr fixed_addr, uptr size, const char *name) {
  // FIXME: is this really "NoReserve"? On Win32 this does not matter much,
  // but on Win64 it does.
  (void)name;  // unsupported
#if !SANITIZER_GO && SANITIZER_WINDOWS64
  // On asan/Windows64, using MEM_COMMIT would result in error
  // 1455:ERROR_COMMITMENT_LIMIT.
  // ASan uses an exception handler to commit pages on demand.
  void *p = VirtualAlloc((LPVOID)fixed_addr, size, MEM_RESERVE, PAGE_READWRITE);
#else
  void *p = VirtualAlloc((LPVOID)fixed_addr, size, MEM_RESERVE | MEM_COMMIT,
                         PAGE_READWRITE);
#endif
  if (p == 0) {
    Report("ERROR: %s failed to "
           "allocate 0x%zx (%zd) bytes at %p (error code: %d)\n",
           SanitizerToolName, size, size, fixed_addr, GetLastError());
    return false;
  }
  return true;
}

bool MmapFixedSuperNoReserve(uptr fixed_addr, uptr size, const char *name) {
  // FIXME: Windows supports large pages too; might be worth checking.
  return MmapFixedNoReserve(fixed_addr, size, name);
}

// Memory space mapped by 'MmapFixedOrDie' must have been reserved by
// 'MmapFixedNoAccess'.
void *MmapFixedOrDie(uptr fixed_addr, uptr size, const char *name) {
  void *p = VirtualAlloc((LPVOID)fixed_addr, size,
                         MEM_COMMIT, PAGE_READWRITE);
  if (p == 0) {
    char mem_type[30];
    internal_snprintf(mem_type, sizeof(mem_type), "memory at address 0x%zx",
                      fixed_addr);
    ReportMmapFailureAndDie(size, mem_type, "allocate", GetLastError());
  }
  return p;
}

// Uses fixed_addr for now.
// Will use offset instead once we've implemented this function for real.
uptr ReservedAddressRange::Map(uptr fixed_addr, uptr size, const char *name) {
  return reinterpret_cast<uptr>(MmapFixedOrDieOnFatalError(fixed_addr, size));
}

uptr ReservedAddressRange::MapOrDie(uptr fixed_addr, uptr size,
                                    const char *name) {
  return reinterpret_cast<uptr>(MmapFixedOrDie(fixed_addr, size));
}

void ReservedAddressRange::Unmap(uptr addr, uptr size) {
  // Only unmap if it covers the entire range.
  CHECK((addr == reinterpret_cast<uptr>(base_)) && (size == size_));
  // We unmap the whole range, just null out the base.
  base_ = nullptr;
  size_ = 0;
  UnmapOrDie(reinterpret_cast<void*>(addr), size);
}

void *MmapFixedOrDieOnFatalError(uptr fixed_addr, uptr size, const char *name) {
  void *p = VirtualAlloc((LPVOID)fixed_addr, size,
                         MEM_COMMIT, PAGE_READWRITE);
  if (p == 0) {
    char mem_type[30];
    internal_snprintf(mem_type, sizeof(mem_type), "memory at address 0x%zx",
                      fixed_addr);
    return ReturnNullptrOnOOMOrDie(size, mem_type, "allocate");
  }
  return p;
}

void *MmapNoReserveOrDie(uptr size, const char *mem_type) {
  // FIXME: make this really NoReserve?
  return MmapOrDie(size, mem_type);
}

uptr ReservedAddressRange::Init(uptr size, const char *name, uptr fixed_addr) {
  base_ = fixed_addr ? MmapFixedNoAccess(fixed_addr, size) : MmapNoAccess(size);
  size_ = size;
  name_ = name;
  (void)os_handle_;  // unsupported
  return reinterpret_cast<uptr>(base_);
}

void *MmapFixedNoAccess(uptr fixed_addr, uptr size, const char *name) {
  (void)name; // unsupported
  void *res = VirtualAlloc((LPVOID)fixed_addr, size,
                           MEM_RESERVE, PAGE_NOACCESS);
  if (res == 0)
    Report("WARNING: %s failed to "
           "mprotect 0x%zx (%zd) bytes at %p (error code: %d)\n",
           SanitizerToolName, size, size, fixed_addr, GetLastError());
  return res;
}

void *MmapNoAccess(uptr size) {
  void *res = VirtualAlloc(nullptr, size, MEM_RESERVE, PAGE_NOACCESS);
  if (res == 0)
    Report("WARNING: %s failed to "
           "mprotect 0x%zx (%zd) bytes (error code: %d)\n",
           SanitizerToolName, size, size, GetLastError());
  return res;
}

bool MprotectNoAccess(uptr addr, uptr size) {
  DWORD old_protection;
  return VirtualProtect((LPVOID)addr, size, PAGE_NOACCESS, &old_protection);
}

void ReleaseMemoryPagesToOS(uptr beg, uptr end) {
  // This is almost useless on 32-bits.
  // FIXME: add madvise-analog when we move to 64-bits.
}

void SetShadowRegionHugePageMode(uptr addr, uptr size) {
  // FIXME: probably similar to ReleaseMemoryToOS.
}

bool DontDumpShadowMemory(uptr addr, uptr length) {
  // This is almost useless on 32-bits.
  // FIXME: add madvise-analog when we move to 64-bits.
  return true;
}

uptr MapDynamicShadow(uptr shadow_size_bytes, uptr shadow_scale,
                      uptr min_shadow_base_alignment,
                      UNUSED uptr &high_mem_end) {
  const uptr granularity = GetMmapGranularity();
  const uptr alignment =
      Max<uptr>(granularity << shadow_scale, 1ULL << min_shadow_base_alignment);
  const uptr left_padding =
      Max<uptr>(granularity, 1ULL << min_shadow_base_alignment);
  uptr space_size = shadow_size_bytes + left_padding;
  uptr shadow_start = FindAvailableMemoryRange(space_size, alignment,
                                               granularity, nullptr, nullptr);
  CHECK_NE((uptr)0, shadow_start);
  CHECK(IsAligned(shadow_start, alignment));
  return shadow_start;
}
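
// Illustrative sizing: with the usual 64K allocation granularity and asan's
// default shadow_scale of 3, alignment = 64K << 3 = 512K (unless
// min_shadow_base_alignment demands more), and left_padding is one 64K
// granule.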

uptr FindAvailableMemoryRange(uptr size, uptr alignment, uptr left_padding,
                              uptr *largest_gap_found,
                              uptr *max_occupied_addr) {
  uptr address = 0;
  while (true) {
    MEMORY_BASIC_INFORMATION info;
    if (!::VirtualQuery((void*)address, &info, sizeof(info)))
      return 0;

    if (info.State == MEM_FREE) {
      uptr shadow_address = RoundUpTo((uptr)info.BaseAddress + left_padding,
                                      alignment);
      if (shadow_address + size < (uptr)info.BaseAddress + info.RegionSize)
        return shadow_address;
    }

    // Move to the next region.
    address = (uptr)info.BaseAddress + info.RegionSize;
  }
  return 0;
}

bool MemoryRangeIsAvailable(uptr range_start, uptr range_end) {
  MEMORY_BASIC_INFORMATION mbi;
  CHECK(VirtualQuery((void *)range_start, &mbi, sizeof(mbi)));
  return mbi.Protect == PAGE_NOACCESS &&
         (uptr)mbi.BaseAddress + mbi.RegionSize >= range_end;
}

void *MapFileToMemory(const char *file_name, uptr *buff_size) {
  UNIMPLEMENTED();
}

void *MapWritableFileToMemory(void *addr, uptr size, fd_t fd, OFF_T offset) {
  UNIMPLEMENTED();
}

static const int kMaxEnvNameLength = 128;
static const DWORD kMaxEnvValueLength = 32767;

namespace {

struct EnvVariable {
  char name[kMaxEnvNameLength];
  char value[kMaxEnvValueLength];
};

}  // namespace

static const int kEnvVariables = 5;
static EnvVariable env_vars[kEnvVariables];
static int num_env_vars;
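
// Sizing note: the cache above occupies kEnvVariables * sizeof(EnvVariable),
// i.e. 5 * (128 + 32767) bytes -- roughly 160 KB of static storage. 32767 is
// the documented maximum length of a Windows environment variable value.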

const char *GetEnv(const char *name) {
  // Note: this implementation caches the values of the environment variables
  // and limits their quantity.
  for (int i = 0; i < num_env_vars; i++) {
    if (0 == internal_strcmp(name, env_vars[i].name))
      return env_vars[i].value;
  }
  CHECK_LT(num_env_vars, kEnvVariables);
  DWORD rv = GetEnvironmentVariableA(name, env_vars[num_env_vars].value,
                                     kMaxEnvValueLength);
  if (rv > 0 && rv < kMaxEnvValueLength) {
    CHECK_LT(internal_strlen(name), kMaxEnvNameLength);
    internal_strncpy(env_vars[num_env_vars].name, name, kMaxEnvNameLength);
    num_env_vars++;
    return env_vars[num_env_vars - 1].value;
  }
  return 0;
}
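
// Usage sketch (illustrative): because values are cached on first lookup, the
// returned pointer remains valid for the life of the process:
//   const char *opts = GetEnv("SOME_TOOL_OPTIONS");  // hypothetical variable
//   if (opts && opts[0]) Printf("options: %s\n", opts);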

const char *GetPwd() {
  UNIMPLEMENTED();
}

u32 GetUid() {
  UNIMPLEMENTED();
}

namespace {
struct ModuleInfo {
  const char *filepath;
  uptr base_address;
  uptr end_address;
};

#if !SANITIZER_GO
int CompareModulesBase(const void *pl, const void *pr) {
  const ModuleInfo *l = (const ModuleInfo *)pl, *r = (const ModuleInfo *)pr;
  if (l->base_address < r->base_address)
    return -1;
  return l->base_address > r->base_address;
}
#endif
}  // namespace

#if !SANITIZER_GO
void DumpProcessMap() {
  Report("Dumping process modules:\n");
  ListOfModules modules;
  modules.init();
  uptr num_modules = modules.size();

  InternalMmapVector<ModuleInfo> module_infos(num_modules);
  for (size_t i = 0; i < num_modules; ++i) {
    module_infos[i].filepath = modules[i].full_name();
    module_infos[i].base_address = modules[i].ranges().front()->beg;
    module_infos[i].end_address = modules[i].ranges().back()->end;
  }
  qsort(module_infos.data(), num_modules, sizeof(ModuleInfo),
        CompareModulesBase);

  for (size_t i = 0; i < num_modules; ++i) {
    const ModuleInfo &mi = module_infos[i];
    if (mi.end_address != 0) {
      Printf("\t%p-%p %s\n", mi.base_address, mi.end_address,
             mi.filepath[0] ? mi.filepath : "[no name]");
    } else if (mi.filepath[0]) {
      Printf("\t??\?-??? %s\n", mi.filepath);
    } else {
      Printf("\t???\n");
    }
  }
}
#endif

void DisableCoreDumperIfNecessary() {
  // Do nothing.
}

void ReExec() {
  UNIMPLEMENTED();
}

void PlatformPrepareForSandboxing(__sanitizer_sandbox_arguments *args) {}

bool StackSizeIsUnlimited() {
  UNIMPLEMENTED();
}

void SetStackSizeLimitInBytes(uptr limit) {
  UNIMPLEMENTED();
}

bool AddressSpaceIsUnlimited() {
  UNIMPLEMENTED();
}

void SetAddressSpaceUnlimited() {
  UNIMPLEMENTED();
}

bool IsPathSeparator(const char c) {
  return c == '\\' || c == '/';
}

static bool IsAlpha(char c) {
  c = ToLower(c);
  return c >= 'a' && c <= 'z';
}

bool IsAbsolutePath(const char *path) {
  return path != nullptr && IsAlpha(path[0]) && path[1] == ':' &&
         IsPathSeparator(path[2]);
}

void SleepForSeconds(int seconds) {
  Sleep(seconds * 1000);
}

void SleepForMillis(int millis) {
  Sleep(millis);
}

u64 NanoTime() {
  static LARGE_INTEGER frequency = {};
  LARGE_INTEGER counter;
  if (UNLIKELY(frequency.QuadPart == 0)) {
    QueryPerformanceFrequency(&frequency);
    CHECK_NE(frequency.QuadPart, 0);
  }
  QueryPerformanceCounter(&counter);
  counter.QuadPart *= 1000ULL * 1000000ULL;
  counter.QuadPart /= frequency.QuadPart;
  return counter.QuadPart;
}
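
// The two QuadPart operations above compute ticks * 10^9 / ticks_per_second.
// Illustrative arithmetic: with a 10 MHz QueryPerformanceFrequency, a counter
// of 12345 becomes 12345 * 1000000000 / 10000000 = 1234500 ns. Multiplying
// before dividing preserves precision, at the cost of overflowing the signed
// 64-bit QuadPart once the raw tick count grows large enough.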

u64 MonotonicNanoTime() { return NanoTime(); }

void Abort() {
  internal__exit(3);
}

#if !SANITIZER_GO
// Read the file to extract the ImageBase field from the PE header. If ASLR is
// disabled and this virtual address is available, the loader will typically
// load the image at this address. Therefore, we call it the preferred base. Any
// addresses in the DWARF typically assume that the object has been loaded at
// this address.
static uptr GetPreferredBase(const char *modname) {
  fd_t fd = OpenFile(modname, RdOnly, nullptr);
  if (fd == kInvalidFd)
    return 0;
  FileCloser closer(fd);

  // Read just the DOS header.
  IMAGE_DOS_HEADER dos_header;
  uptr bytes_read;
  if (!ReadFromFile(fd, &dos_header, sizeof(dos_header), &bytes_read) ||
      bytes_read != sizeof(dos_header))
    return 0;

  // The file should start with the right signature.
  if (dos_header.e_magic != IMAGE_DOS_SIGNATURE)
    return 0;

  // The layout at e_lfanew is:
  // "PE\0\0"
  // IMAGE_FILE_HEADER
  // IMAGE_OPTIONAL_HEADER
  // Seek to e_lfanew and read all that data.
  char buf[4 + sizeof(IMAGE_FILE_HEADER) + sizeof(IMAGE_OPTIONAL_HEADER)];
  if (::SetFilePointer(fd, dos_header.e_lfanew, nullptr, FILE_BEGIN) ==
      INVALID_SET_FILE_POINTER)
    return 0;
  if (!ReadFromFile(fd, &buf[0], sizeof(buf), &bytes_read) ||
      bytes_read != sizeof(buf))
    return 0;

  // Check for "PE\0\0" before the PE header.
  char *pe_sig = &buf[0];
  if (internal_memcmp(pe_sig, "PE\0\0", 4) != 0)
    return 0;

  // Skip over IMAGE_FILE_HEADER. We could do more validation here if we wanted.
  IMAGE_OPTIONAL_HEADER *pe_header =
      (IMAGE_OPTIONAL_HEADER *)(pe_sig + 4 + sizeof(IMAGE_FILE_HEADER));

  // Check for more magic in the PE header.
  if (pe_header->Magic != IMAGE_NT_OPTIONAL_HDR_MAGIC)
    return 0;

  // Finally, return the ImageBase.
  return (uptr)pe_header->ImageBase;
}

void ListOfModules::init() {
  clearOrInit();
  HANDLE cur_process = GetCurrentProcess();

  // Query the list of modules. Start by assuming there are no more than 256
  // modules and retry if that's not sufficient.
  HMODULE *hmodules = 0;
  uptr modules_buffer_size = sizeof(HMODULE) * 256;
  DWORD bytes_required;
  while (!hmodules) {
    hmodules = (HMODULE *)MmapOrDie(modules_buffer_size, __FUNCTION__);
    CHECK(EnumProcessModules(cur_process, hmodules, modules_buffer_size,
                             &bytes_required));
    if (bytes_required > modules_buffer_size) {
      // Either there turned out to be more than 256 hmodules, or new hmodules
      // could have loaded since the last try. Retry.
      UnmapOrDie(hmodules, modules_buffer_size);
      hmodules = 0;
      modules_buffer_size = bytes_required;
    }
  }

  // |num_modules| is the number of modules actually present.
  size_t num_modules = bytes_required / sizeof(HMODULE);
  for (size_t i = 0; i < num_modules; ++i) {
    HMODULE handle = hmodules[i];
    MODULEINFO mi;
    if (!GetModuleInformation(cur_process, handle, &mi, sizeof(mi)))
      continue;

    // Get the UTF-16 path and convert to UTF-8.
    wchar_t modname_utf16[kMaxPathLength];
    int modname_utf16_len =
        GetModuleFileNameW(handle, modname_utf16, kMaxPathLength);
    if (modname_utf16_len == 0)
      modname_utf16[0] = '\0';
    char module_name[kMaxPathLength];
    int module_name_len =
        ::WideCharToMultiByte(CP_UTF8, 0, modname_utf16, modname_utf16_len + 1,
                              &module_name[0], kMaxPathLength, NULL, NULL);
    module_name[module_name_len] = '\0';

    uptr base_address = (uptr)mi.lpBaseOfDll;
    uptr end_address = (uptr)mi.lpBaseOfDll + mi.SizeOfImage;

    // Adjust the base address of the module so that we get a VA instead of an
    // RVA when computing the module offset. This helps llvm-symbolizer find the
    // right DWARF CU. In the common case that the image is loaded at its
    // preferred address, we will now print normal virtual addresses.
    uptr preferred_base = GetPreferredBase(&module_name[0]);
    uptr adjusted_base = base_address - preferred_base;

    LoadedModule cur_module;
    cur_module.set(module_name, adjusted_base);
    // We add the whole module as one single address range.
    cur_module.addAddressRange(base_address, end_address, /*executable*/ true,
                               /*writable*/ true);
    modules_.push_back(cur_module);
  }
  UnmapOrDie(hmodules, modules_buffer_size);
}

void ListOfModules::fallbackInit() { clear(); }

// We can't use atexit() directly at __asan_init time as the CRT is not fully
// initialized at this point. Place the functions into a vector and use
// atexit() as soon as it is ready for use (i.e. after .CRT$XIC initializers).
InternalMmapVectorNoCtor<void (*)(void)> atexit_functions;

int Atexit(void (*function)(void)) {
  atexit_functions.push_back(function);
  return 0;
}

static int RunAtexit() {
  TraceLoggingUnregister(g_asan_provider);
  int ret = 0;
  for (uptr i = 0; i < atexit_functions.size(); ++i) {
    ret |= atexit(atexit_functions[i]);
  }
  return ret;
}

#pragma section(".CRT$XID", long, read)
__declspec(allocate(".CRT$XID")) int (*__run_atexit)() = RunAtexit;
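
// CRT initializer sections run in lexicographic order, so an entry placed in
// .CRT$XID (between .CRT$XIA and .CRT$XIZ) runs during C initialization,
// after the .CRT$XIC initializers mentioned above have made atexit() usable.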
#endif

// ------------------ sanitizer_libc.h
fd_t OpenFile(const char *filename, FileAccessMode mode, error_t *last_error) {
  // FIXME: Use the wide variants to handle Unicode filenames.
  fd_t res;
  if (mode == RdOnly) {
    res = CreateFileA(filename, GENERIC_READ,
                      FILE_SHARE_READ | FILE_SHARE_WRITE | FILE_SHARE_DELETE,
                      nullptr, OPEN_EXISTING, FILE_ATTRIBUTE_NORMAL, nullptr);
  } else if (mode == WrOnly) {
    res = CreateFileA(filename, GENERIC_WRITE, 0, nullptr, CREATE_ALWAYS,
                      FILE_ATTRIBUTE_NORMAL, nullptr);
  } else {
    UNIMPLEMENTED();
  }
  CHECK(res != kStdoutFd || kStdoutFd == kInvalidFd);
  CHECK(res != kStderrFd || kStderrFd == kInvalidFd);
  if (res == kInvalidFd && last_error)
    *last_error = GetLastError();
  return res;
}

void CloseFile(fd_t fd) {
  CloseHandle(fd);
}

bool ReadFromFile(fd_t fd, void *buff, uptr buff_size, uptr *bytes_read,
                  error_t *error_p) {
  CHECK(fd != kInvalidFd);

  // bytes_read can't be passed directly to ReadFile:
  // uptr is unsigned long long on 64-bit Windows.
  unsigned long num_read_long;

  bool success = ::ReadFile(fd, buff, buff_size, &num_read_long, nullptr);
  if (!success && error_p)
    *error_p = GetLastError();
  if (bytes_read)
    *bytes_read = num_read_long;
  return success;
}

bool SupportsColoredOutput(fd_t fd) {
  // FIXME: support colored output.
  return false;
}

bool WriteToFile(fd_t fd, const void *buff, uptr buff_size, uptr *bytes_written,
                 error_t *error_p) {
  CHECK(fd != kInvalidFd);

  // Handle null optional parameters.
  error_t dummy_error;
  error_p = error_p ? error_p : &dummy_error;
  uptr dummy_bytes_written;
  bytes_written = bytes_written ? bytes_written : &dummy_bytes_written;

  // Initialize output parameters in case we fail.
  *error_p = 0;
  *bytes_written = 0;

  // Map the conventional Unix fds 1 and 2 to Windows handles. They might be
  // closed, in which case this will fail.
  if (fd == kStdoutFd || fd == kStderrFd) {
    fd = GetStdHandle(fd == kStdoutFd ? STD_OUTPUT_HANDLE : STD_ERROR_HANDLE);
    if (fd == 0) {
      *error_p = ERROR_INVALID_HANDLE;
      return false;
    }
  }

  DWORD bytes_written_32;
  if (!WriteFile(fd, buff, buff_size, &bytes_written_32, 0)) {
    *error_p = GetLastError();
    return false;
  } else {
    *bytes_written = bytes_written_32;
    return true;
  }
}

uptr internal_sched_yield() {
  Sleep(0);
  return 0;
}

void internal__exit(int exitcode) {
  TraceLoggingUnregister(g_asan_provider);
  // ExitProcess runs some finalizers, so use TerminateProcess to avoid that.
  // The debugger doesn't stop on TerminateProcess like it does on ExitProcess,
  // so add our own breakpoint here.
  if (::IsDebuggerPresent())
    __debugbreak();
  TerminateProcess(GetCurrentProcess(), exitcode);
  BUILTIN_UNREACHABLE();
}

uptr internal_ftruncate(fd_t fd, uptr size) {
  UNIMPLEMENTED();
}

uptr GetRSS() {
  PROCESS_MEMORY_COUNTERS counters;
  if (!GetProcessMemoryInfo(GetCurrentProcess(), &counters, sizeof(counters)))
    return 0;
  return counters.WorkingSetSize;
}

void *internal_start_thread(void *(*func)(void *arg), void *arg) { return 0; }
void internal_join_thread(void *th) { }

// ---------------------- BlockingMutex ---------------- {{{1

BlockingMutex::BlockingMutex() {
  CHECK(sizeof(SRWLOCK) <= sizeof(opaque_storage_));
  internal_memset(this, 0, sizeof(*this));
}

void BlockingMutex::Lock() {
  AcquireSRWLockExclusive((PSRWLOCK)opaque_storage_);
  CHECK_EQ(owner_, 0);
  owner_ = GetThreadSelf();
}

void BlockingMutex::Unlock() {
  CheckLocked();
  owner_ = 0;
  ReleaseSRWLockExclusive((PSRWLOCK)opaque_storage_);
}

void BlockingMutex::CheckLocked() {
  CHECK_EQ(owner_, GetThreadSelf());
}
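
// Why the memset in the constructor is sufficient: SRWLOCK's documented
// static initializer SRWLOCK_INIT is all zero bits, so zero-filled
// opaque_storage_ is already a valid unlocked lock. Illustrative equivalence:
//   SRWLOCK lock = SRWLOCK_INIT;                      // static initialization
//   SRWLOCK lock2; memset(&lock2, 0, sizeof(lock2));  // same unlocked state
// This property is what lets the primary allocator reinterpret
// zero-initialized memory as a ready-to-use BlockingMutex.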

uptr GetTlsSize() {
  return 0;
}

void InitTlsSize() {
}

void GetThreadStackAndTls(bool main, uptr *stk_addr, uptr *stk_size,
                          uptr *tls_addr, uptr *tls_size) {
#if SANITIZER_GO
  *stk_addr = 0;
  *stk_size = 0;
  *tls_addr = 0;
  *tls_size = 0;
#else
  uptr stack_top, stack_bottom;
  GetThreadStackTopAndBottom(main, &stack_top, &stack_bottom);
  *stk_addr = stack_bottom;
  *stk_size = stack_top - stack_bottom;
  *tls_addr = 0;
  *tls_size = 0;
#endif
}

void ReportFile::Write(const char *buffer, uptr length) {
  SpinMutexLock l(mu);
  ReopenIfNecessary();
  if (!WriteToFile(fd, buffer, length)) {
    // stderr may be closed, but we may be able to print to the debugger
    // instead. This is the case when launching a program from Visual Studio,
    // and the following routine should write to its console.
    OutputDebugStringA(buffer);
  }
}

void SetAlternateSignalStack() {
  // FIXME: Decide what to do on Windows.
}

void UnsetAlternateSignalStack() {
  // FIXME: Decide what to do on Windows.
}

void InstallDeadlySignalHandlers(SignalHandlerType handler) {
  (void)handler;
  // FIXME: Decide what to do on Windows.
}

HandleSignalMode GetHandleSignalMode(int signum) {
  // FIXME: Decide what to do on Windows.
  return kHandleSignalNo;
}

// Check based on flags if we should handle this exception.
bool IsHandledDeadlyException(DWORD exceptionCode) {
  switch (exceptionCode) {
    case EXCEPTION_ACCESS_VIOLATION:
    case EXCEPTION_ARRAY_BOUNDS_EXCEEDED:
    case EXCEPTION_STACK_OVERFLOW:
    case EXCEPTION_DATATYPE_MISALIGNMENT:
    case EXCEPTION_IN_PAGE_ERROR:
      return common_flags()->handle_segv;
    case EXCEPTION_ILLEGAL_INSTRUCTION:
    case EXCEPTION_PRIV_INSTRUCTION:
    case EXCEPTION_BREAKPOINT:
      return common_flags()->handle_sigill;
    case EXCEPTION_FLT_DENORMAL_OPERAND:
    case EXCEPTION_FLT_DIVIDE_BY_ZERO:
    case EXCEPTION_FLT_INEXACT_RESULT:
    case EXCEPTION_FLT_INVALID_OPERATION:
    case EXCEPTION_FLT_OVERFLOW:
    case EXCEPTION_FLT_STACK_CHECK:
    case EXCEPTION_FLT_UNDERFLOW:
    case EXCEPTION_INT_DIVIDE_BY_ZERO:
    case EXCEPTION_INT_OVERFLOW:
      return common_flags()->handle_sigfpe;
  }
  return false;
}

bool IsAccessibleMemoryRange(uptr beg, uptr size) {
  SYSTEM_INFO si;
  GetNativeSystemInfo(&si);
  uptr page_size = si.dwPageSize;
  uptr page_mask = ~(page_size - 1);

  for (uptr page = beg & page_mask, end = (beg + size - 1) & page_mask;
       page <= end;) {
    MEMORY_BASIC_INFORMATION info;
    if (VirtualQuery((LPCVOID)page, &info, sizeof(info)) != sizeof(info))
      return false;

    if (info.Protect == 0 || info.Protect == PAGE_NOACCESS ||
        info.Protect == PAGE_EXECUTE)
      return false;

    if (info.RegionSize == 0)
      return false;

    page += info.RegionSize;
  }

  return true;
}
|
|
|
|
|
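// Usage sketch (illustrative only): probing a candidate buffer before a raw
// memory access. The page walk above relies on VirtualQuery reporting one
// MEMORY_BASIC_INFORMATION per run of pages with identical protection.
//
//   char stack_buf[128];
//   bool ok = IsAccessibleMemoryRange((uptr)stack_buf, sizeof(stack_buf));
//   // ok is true: a live stack page is committed and readable/writable.
//   bool bad = IsAccessibleMemoryRange((uptr)0, 1);
//   // bad is false: there are no committed, accessible pages at address 0.
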
bool SignalContext::IsStackOverflow() const {
  return (DWORD)GetType() == EXCEPTION_STACK_OVERFLOW;
}

void SignalContext::InitPcSpBp() {
  EXCEPTION_RECORD *exception_record = (EXCEPTION_RECORD *)siginfo;
  CONTEXT *context_record = (CONTEXT *)context;

  pc = (uptr)exception_record->ExceptionAddress;
#ifdef _WIN64
  bp = (uptr)context_record->Rbp;
  sp = (uptr)context_record->Rsp;
#else
  bp = (uptr)context_record->Ebp;
  sp = (uptr)context_record->Esp;
#endif
}

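// Note: on Windows the faulting pc is taken from the EXCEPTION_RECORD rather
// than from the CONTEXT, while bp/sp come from the per-architecture CONTEXT
// fields (Rbp/Rsp on x64, Ebp/Esp on x86). Illustratively, for a SignalContext
// built from an EXCEPTION_POINTERS pair (siginfo = ExceptionRecord,
// context = ContextRecord), pc/sp/bp can seed the unwinder that produces the
// report's stack trace.
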
uptr SignalContext::GetAddress() const {
  EXCEPTION_RECORD *exception_record = (EXCEPTION_RECORD *)siginfo;
  if (exception_record->ExceptionCode == EXCEPTION_ACCESS_VIOLATION)
    return exception_record->ExceptionInformation[1];
  return (uptr)exception_record->ExceptionAddress;
}

bool SignalContext::IsMemoryAccess() const {
  return ((EXCEPTION_RECORD *)siginfo)->ExceptionCode ==
         EXCEPTION_ACCESS_VIOLATION;
}

// On Windows, an access-violation EXCEPTION_RECORD carries the exact faulting
// address in ExceptionInformation[1], so the reported address is never an
// approximation.
bool SignalContext::IsTrueFaultingAddress() const { return true; }

SignalContext::WriteFlag SignalContext::GetWriteFlag() const {
  EXCEPTION_RECORD *exception_record = (EXCEPTION_RECORD *)siginfo;

  // The write flag is only available for access violation exceptions.
  if (exception_record->ExceptionCode != EXCEPTION_ACCESS_VIOLATION)
    return SignalContext::UNKNOWN;

  // The contents of this array are documented at
  // https://docs.microsoft.com/en-us/windows/win32/api/winnt/ns-winnt-exception_record
  // The first element indicates read as 0, write as 1, or execute as 8. The
  // second element is the faulting address.
  switch (exception_record->ExceptionInformation[0]) {
    case 0:
      return SignalContext::READ;
    case 1:
      return SignalContext::WRITE;
    case 8:
      return SignalContext::UNKNOWN;
  }
  return SignalContext::UNKNOWN;
}

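// Worked example (illustrative only): a store through a null pointer, e.g.
// `*(volatile int *)0 = 1;`, raises EXCEPTION_ACCESS_VIOLATION with
// ExceptionInformation[0] == 1 and ExceptionInformation[1] == 0, so
// GetWriteFlag() returns WRITE and GetAddress() returns 0. A DEP violation
// (executing a non-executable page) reports 8 and maps to UNKNOWN here.
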
void SignalContext::DumpAllRegisters(void *context) {
  // FIXME: Implement this.
}

int SignalContext::GetType() const {
  return static_cast<const EXCEPTION_RECORD *>(siginfo)->ExceptionCode;
}

const char *SignalContext::Describe() const {
  unsigned code = GetType();
  // Get the string description of the exception if this is a known deadly
  // exception.
  switch (code) {
    case EXCEPTION_ACCESS_VIOLATION:
      return "access-violation";
    case EXCEPTION_ARRAY_BOUNDS_EXCEEDED:
      return "array-bounds-exceeded";
    case EXCEPTION_STACK_OVERFLOW:
      return "stack-overflow";
    case EXCEPTION_DATATYPE_MISALIGNMENT:
      return "datatype-misalignment";
    case EXCEPTION_IN_PAGE_ERROR:
      return "in-page-error";
    case EXCEPTION_ILLEGAL_INSTRUCTION:
      return "illegal-instruction";
    case EXCEPTION_PRIV_INSTRUCTION:
      return "priv-instruction";
    case EXCEPTION_BREAKPOINT:
      return "breakpoint";
    case EXCEPTION_FLT_DENORMAL_OPERAND:
      return "flt-denormal-operand";
    case EXCEPTION_FLT_DIVIDE_BY_ZERO:
      return "flt-divide-by-zero";
    case EXCEPTION_FLT_INEXACT_RESULT:
      return "flt-inexact-result";
    case EXCEPTION_FLT_INVALID_OPERATION:
      return "flt-invalid-operation";
    case EXCEPTION_FLT_OVERFLOW:
      return "flt-overflow";
    case EXCEPTION_FLT_STACK_CHECK:
      return "flt-stack-check";
    case EXCEPTION_FLT_UNDERFLOW:
      return "flt-underflow";
    case EXCEPTION_INT_DIVIDE_BY_ZERO:
      return "int-divide-by-zero";
    case EXCEPTION_INT_OVERFLOW:
      return "int-overflow";
  }
  return "unknown exception";
}

uptr ReadBinaryName(/*out*/char *buf, uptr buf_len) {
  // FIXME: Actually implement this function.
  CHECK_GT(buf_len, 0);
  buf[0] = 0;
  return 0;
}

uptr ReadLongProcessName(/*out*/char *buf, uptr buf_len) {
  return ReadBinaryName(buf, buf_len);
}

void CheckVMASize() {
  // Do nothing.
}

void InitializePlatformEarly() {
  // Do nothing.
}

void MaybeReexec() {
  // No need to re-exec on Windows.
}

void CheckASLR() {
  // Do nothing.
}

void CheckMPROTECT() {
  // Do nothing.
}

char **GetArgv() {
  // FIXME: Actually implement this function.
  return 0;
}

char **GetEnviron() {
  // FIXME: Actually implement this function.
  return 0;
}

pid_t StartSubprocess(const char *program, const char *const argv[],
                      const char *const envp[], fd_t stdin_fd, fd_t stdout_fd,
                      fd_t stderr_fd) {
  // FIXME: implement on this platform.
  // Should be implemented based on
  // SymbolizerProcess::StartSymbolizerSubprocess
  // from lib/sanitizer_common/sanitizer_symbolizer_win.cpp.
  return -1;
}

bool IsProcessRunning(pid_t pid) {
  // FIXME: implement on this platform.
  return false;
}

int WaitForProcess(pid_t pid) { return -1; }

// FIXME: implement on this platform.
void GetMemoryProfile(fill_profile_f cb, uptr *stats, uptr stats_size) { }

void CheckNoDeepBind(const char *filename, int flag) {
  // Do nothing.
}

// FIXME: implement on this platform.
bool GetRandom(void *buffer, uptr length, bool blocking) {
  UNIMPLEMENTED();
}

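// A minimal sketch of one possible Windows implementation, assuming the
// documented RtlGenRandom entry point (exported from advapi32.dll as
// SystemFunction036); this is illustrative only, not the committed
// implementation. The 256-byte cap mirrors the POSIX GetRandom guard.
//
//   extern "C" BOOLEAN NTAPI SystemFunction036(PVOID buf, ULONG len);
//   bool GetRandom(void *buffer, uptr length, bool blocking) {
//     if (!buffer || !length || length > 256)
//       return false;
//     return SystemFunction036(buffer, (ULONG)length) != FALSE;
//   }
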
u32 GetNumberOfCPUs() {
  SYSTEM_INFO sysinfo = {};
  GetNativeSystemInfo(&sysinfo);
  return sysinfo.dwNumberOfProcessors;
}

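// Note: GetNativeSystemInfo counts processors in the caller's processor
// group, so on machines with more than 64 logical processors this may
// understate the total. An illustrative alternative (not used here) that
// counts across all groups:
//
//   u32 all = (u32)GetActiveProcessorCount(ALL_PROCESSOR_GROUPS);
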
#if SANITIZER_WIN_TRACE
// TODO(mcgov): Rename this project-wide to PlatformLogInit
void AndroidLogInit(void) {
  HRESULT hr = TraceLoggingRegister(g_asan_provider);
  if (!SUCCEEDED(hr))
    return;
}

void SetAbortMessage(const char *) {}

void LogFullErrorReport(const char *buffer) {
  if (common_flags()->log_to_syslog) {
    InternalMmapVector<wchar_t> filename;
    DWORD filename_length = 0;
    // GetModuleFileNameW returns the buffer size when the path is truncated,
    // so keep growing the buffer until the result fits.
    do {
      filename.resize(filename.size() + 0x100);
      filename_length =
          GetModuleFileNameW(NULL, filename.begin(), filename.size());
    } while (filename_length >= filename.size());
    TraceLoggingWrite(g_asan_provider, "AsanReportEvent",
                      TraceLoggingValue(filename.begin(), "ExecutableName"),
                      TraceLoggingValue(buffer, "AsanReportContents"));
  }
}
#endif // SANITIZER_WIN_TRACE

void InitializePlatformCommonFlags(CommonFlags *cf) {}

} // namespace __sanitizer

#endif // SANITIZER_WINDOWS