//===-- hwasan_linux.cpp ----------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
/// \file
/// This file is a part of HWAddressSanitizer and contains Linux-, NetBSD- and
/// FreeBSD-specific code.
///
//===----------------------------------------------------------------------===//

#include "sanitizer_common/sanitizer_platform.h"
#if SANITIZER_FREEBSD || SANITIZER_LINUX || SANITIZER_NETBSD

#  include <dlfcn.h>
#  include <elf.h>
#  include <errno.h>
#  include <link.h>
#  include <pthread.h>
#  include <signal.h>
#  include <stdio.h>
#  include <stdlib.h>
#  include <sys/prctl.h>
#  include <sys/resource.h>
#  include <sys/time.h>
#  include <unistd.h>
#  include <unwind.h>

#  include "hwasan.h"
#  include "hwasan_dynamic_shadow.h"
#  include "hwasan_interface_internal.h"
#  include "hwasan_mapping.h"
#  include "hwasan_report.h"
#  include "hwasan_thread.h"
#  include "hwasan_thread_list.h"
#  include "sanitizer_common/sanitizer_common.h"
#  include "sanitizer_common/sanitizer_procmaps.h"
#  include "sanitizer_common/sanitizer_stackdepot.h"

// Configurations of HWASAN_WITH_INTERCEPTORS and SANITIZER_ANDROID.
//
// HWASAN_WITH_INTERCEPTORS=OFF, SANITIZER_ANDROID=OFF
//   Not currently tested.
// HWASAN_WITH_INTERCEPTORS=OFF, SANITIZER_ANDROID=ON
//   Integration tests downstream exist.
// HWASAN_WITH_INTERCEPTORS=ON, SANITIZER_ANDROID=OFF
//   Tested with check-hwasan on x86_64-linux.
// HWASAN_WITH_INTERCEPTORS=ON, SANITIZER_ANDROID=ON
//   Tested with check-hwasan on aarch64-linux-android.
#  if !SANITIZER_ANDROID
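// When interceptors are used on non-Android platforms, the current thread's
// hwasan state is reached through this thread-local slot (see
// GetCurrentThreadLongPtr() below); Android uses a dedicated sanitizer TLS
// slot instead.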
SANITIZER_INTERFACE_ATTRIBUTE
THREADLOCAL uptr __hwasan_tls;
#  endif

namespace __hwasan {

// With the zero shadow base we can not actually map pages starting from 0.
// This constant is somewhat arbitrary.
constexpr uptr kZeroBaseShadowStart = 0;
constexpr uptr kZeroBaseMaxShadowStart = 1 << 18;
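
// Delegates to the common __sanitizer::ProtectGap(), passing hwasan's
// zero-based shadow constants defined above.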
static void ProtectGap(uptr addr, uptr size) {
  __sanitizer::ProtectGap(addr, size, kZeroBaseShadowStart,
                          kZeroBaseMaxShadowStart);
}
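
// Boundaries of the application memory ranges; filled in by InitShadow()
// below.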
uptr kLowMemStart;
uptr kLowMemEnd;
uptr kHighMemStart;
uptr kHighMemEnd;

static void PrintRange(uptr start, uptr end, const char *name) {
  Printf("|| [%p, %p] || %.*s ||\n", (void *)start, (void *)end, 10, name);
}

static void PrintAddressSpaceLayout() {
  PrintRange(kHighMemStart, kHighMemEnd, "HighMem");
  if (kHighShadowEnd + 1 < kHighMemStart)
    PrintRange(kHighShadowEnd + 1, kHighMemStart - 1, "ShadowGap");
  else
    CHECK_EQ(kHighShadowEnd + 1, kHighMemStart);
  PrintRange(kHighShadowStart, kHighShadowEnd, "HighShadow");
  if (kLowShadowEnd + 1 < kHighShadowStart)
    PrintRange(kLowShadowEnd + 1, kHighShadowStart - 1, "ShadowGap");
  else
    CHECK_EQ(kLowMemEnd + 1, kHighShadowStart);
  PrintRange(kLowShadowStart, kLowShadowEnd, "LowShadow");
  if (kLowMemEnd + 1 < kLowShadowStart)
    PrintRange(kLowMemEnd + 1, kLowShadowStart - 1, "ShadowGap");
  else
    CHECK_EQ(kLowMemEnd + 1, kLowShadowStart);
  PrintRange(kLowMemStart, kLowMemEnd, "LowMem");
  CHECK_EQ(0, kLowMemStart);
}

static uptr GetHighMemEnd() {
  // HighMem covers the upper part of the address space.
  uptr max_address = GetMaxUserVirtualAddress();
  // Adjust max address to make sure that kHighMemEnd and kHighMemStart are
  // properly aligned:
  max_address |= (GetMmapGranularity() << kShadowScale) - 1;
  return max_address;
}
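
// Finds an address range large enough to hold the shadow and records its
// start as the dynamic shadow base.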
static void InitializeShadowBaseAddress(uptr shadow_size_bytes) {
  __hwasan_shadow_memory_dynamic_address =
      FindDynamicShadowStart(shadow_size_bytes);
}
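
// Enables the kernel's tagged-address ABI via prctl() so that tagged pointers
// can be passed to system calls; dies when fail_without_syscall_abi is set and
// the ABI cannot be enabled.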
void InitializeOsSupport() {
#  define PR_SET_TAGGED_ADDR_CTRL 55
#  define PR_GET_TAGGED_ADDR_CTRL 56
#  define PR_TAGGED_ADDR_ENABLE (1UL << 0)
  // Check we're running on a kernel that can use the tagged address ABI.
  int local_errno = 0;
  if (internal_iserror(internal_prctl(PR_GET_TAGGED_ADDR_CTRL, 0, 0, 0, 0),
                       &local_errno) &&
      local_errno == EINVAL) {
#  if SANITIZER_ANDROID || defined(HWASAN_ALIASING_MODE)
    // Some older Android kernels have the tagged pointer ABI on
    // unconditionally, and hence don't have the tagged-addr prctl while still
    // allowing the ABI.
    // If targeting Android and the prctl is not around we assume this is the
    // case.
    return;
#  else
    if (flags()->fail_without_syscall_abi) {
      Printf(
          "FATAL: "
          "HWAddressSanitizer requires a kernel with tagged address ABI.\n");
      Die();
    }
#  endif
  }

  // Turn on the tagged address ABI.
  if ((internal_iserror(internal_prctl(PR_SET_TAGGED_ADDR_CTRL,
                                       PR_TAGGED_ADDR_ENABLE, 0, 0, 0)) ||
       !internal_prctl(PR_GET_TAGGED_ADDR_CTRL, 0, 0, 0, 0))) {
#  if defined(__x86_64__) && !defined(HWASAN_ALIASING_MODE)
    // Try the new prctl API for Intel LAM. The API is based on a currently
    // unsubmitted patch to the Linux kernel (as of May 2021) and is thus
    // subject to change. Patch is here:
    // https://lore.kernel.org/linux-mm/20210205151631.43511-12-kirill.shutemov@linux.intel.com/
    int tag_bits = kTagBits;
    int tag_shift = kAddressTagShift;
    if (!internal_iserror(
            internal_prctl(PR_SET_TAGGED_ADDR_CTRL, PR_TAGGED_ADDR_ENABLE,
                           reinterpret_cast<unsigned long>(&tag_bits),
                           reinterpret_cast<unsigned long>(&tag_shift), 0))) {
      CHECK_EQ(tag_bits, kTagBits);
      CHECK_EQ(tag_shift, kAddressTagShift);
      return;
    }
#  endif  // defined(__x86_64__) && !defined(HWASAN_ALIASING_MODE)
    if (flags()->fail_without_syscall_abi) {
      Printf(
          "FATAL: HWAddressSanitizer failed to enable tagged address syscall "
          "ABI.\nSuggest check `sysctl abi.tagged_addr_disabled` "
          "configuration.\n");
      Die();
    }
  }
#  undef PR_SET_TAGGED_ADDR_CTRL
#  undef PR_GET_TAGGED_ADDR_CTRL
#  undef PR_TAGGED_ADDR_ENABLE
}
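
// Computes the address-space layout used by hwasan,
//   [LowMem][LowShadow][ShadowGap][HighShadow][HighMem],
// then reserves the shadow regions and protects the gaps between the ranges.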
bool InitShadow() {
  // Define the entire memory range.
  kHighMemEnd = GetHighMemEnd();

  // Determine shadow memory base offset.
  InitializeShadowBaseAddress(MemToShadowSize(kHighMemEnd));

  // Place the low memory first.
  kLowMemEnd = __hwasan_shadow_memory_dynamic_address - 1;
  kLowMemStart = 0;

  // Define the low shadow based on the already placed low memory.
  kLowShadowEnd = MemToShadow(kLowMemEnd);
  kLowShadowStart = __hwasan_shadow_memory_dynamic_address;

  // High shadow takes whatever memory is left up there (making sure it is not
  // interfering with low memory in the fixed case).
  kHighShadowEnd = MemToShadow(kHighMemEnd);
  kHighShadowStart = Max(kLowMemEnd, MemToShadow(kHighShadowEnd)) + 1;

  // High memory starts where allocated shadow allows.
  kHighMemStart = ShadowToMem(kHighShadowStart);

  // Check the sanity of the defined memory ranges (there might be gaps).
  CHECK_EQ(kHighMemStart % GetMmapGranularity(), 0);
  CHECK_GT(kHighMemStart, kHighShadowEnd);
  CHECK_GT(kHighShadowEnd, kHighShadowStart);
  CHECK_GT(kHighShadowStart, kLowMemEnd);
  CHECK_GT(kLowMemEnd, kLowMemStart);
  CHECK_GT(kLowShadowEnd, kLowShadowStart);
  CHECK_GT(kLowShadowStart, kLowMemEnd);

  if (Verbosity())
    PrintAddressSpaceLayout();

  // Reserve shadow memory.
  ReserveShadowMemoryRange(kLowShadowStart, kLowShadowEnd, "low shadow");
  ReserveShadowMemoryRange(kHighShadowStart, kHighShadowEnd, "high shadow");

  // Protect all the gaps.
  ProtectGap(0, Min(kLowMemStart, kLowShadowStart));
  if (kLowMemEnd + 1 < kLowShadowStart)
    ProtectGap(kLowMemEnd + 1, kLowShadowStart - kLowMemEnd - 1);
  if (kLowShadowEnd + 1 < kHighShadowStart)
    ProtectGap(kLowShadowEnd + 1, kHighShadowStart - kLowShadowEnd - 1);
  if (kHighShadowEnd + 1 < kHighMemStart)
    ProtectGap(kHighShadowEnd + 1, kHighMemStart - kHighShadowEnd - 1);

  return true;
}
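
// Reserves the region that holds per-thread hwasan state directly below the
// dynamic shadow base, leaves a guard page between the two, and registers the
// current (main) thread.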
void InitThreads() {
  CHECK(__hwasan_shadow_memory_dynamic_address);
  uptr guard_page_size = GetMmapGranularity();
  uptr thread_space_start =
      __hwasan_shadow_memory_dynamic_address - (1ULL << kShadowBaseAlignment);
  uptr thread_space_end =
      __hwasan_shadow_memory_dynamic_address - guard_page_size;
  ReserveShadowMemoryRange(thread_space_start, thread_space_end - 1,
                           "hwasan threads", /*madvise_shadow*/ false);
  ProtectGap(thread_space_end,
             __hwasan_shadow_memory_dynamic_address - thread_space_end);
  InitThreadList(thread_space_start, thread_space_end - thread_space_start);
  hwasanThreadList().CreateCurrentThread();
}
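
// Returns true iff p points into one of the application memory ranges
// (LowMem or HighMem) rather than into shadow or a protected gap.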
bool MemIsApp(uptr p) {
// Memory outside the alias range has non-zero tags.
#  if !defined(HWASAN_ALIASING_MODE)
  CHECK(GetTagFromPointer(p) == 0);
#  endif

  return (p >= kHighMemStart && p <= kHighMemEnd) ||
         (p >= kLowMemStart && p <= kLowMemEnd);
}

void InstallAtExitHandler() { atexit(HwasanAtExit); }

// ---------------------- TSD ---------------- {{{1
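
// Interface used by an instrumented libc (or by the pthread_create
// interceptor) to notify hwasan about thread creation and destruction.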
extern "C" void __hwasan_thread_enter() {
  hwasanThreadList().CreateCurrentThread()->EnsureRandomStateInited();
}

extern "C" void __hwasan_thread_exit() {
  Thread *t = GetCurrentThread();
  // Make sure that signal handler can not see a stale current thread pointer.
  atomic_signal_fence(memory_order_seq_cst);
  if (t)
    hwasanThreadList().ReleaseThread(t);
}

#  if HWASAN_WITH_INTERCEPTORS
static pthread_key_t tsd_key;
static bool tsd_key_inited = false;

void HwasanTSDThreadInit() {
  if (tsd_key_inited)
    CHECK_EQ(0, pthread_setspecific(tsd_key,
                                    (void *)GetPthreadDestructorIterations()));
}
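
// The TSD destructor re-arms itself for a few pthread destructor iterations so
// that hwasan's per-thread state outlives other keys' destructors; the thread
// is only torn down on the final iteration.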
void HwasanTSDDtor(void *tsd) {
  uptr iterations = (uptr)tsd;
  if (iterations > 1) {
    CHECK_EQ(0, pthread_setspecific(tsd_key, (void *)(iterations - 1)));
    return;
  }
  __hwasan_thread_exit();
}

void HwasanTSDInit() {
  CHECK(!tsd_key_inited);
  tsd_key_inited = true;
  CHECK_EQ(0, pthread_key_create(&tsd_key, HwasanTSDDtor));
}
#  else
void HwasanTSDInit() {}
void HwasanTSDThreadInit() {}
#  endif

#  if SANITIZER_ANDROID
uptr *GetCurrentThreadLongPtr() { return (uptr *)get_android_tls_ptr(); }
#  else
uptr *GetCurrentThreadLongPtr() { return &__hwasan_tls; }
#  endif
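
// On Android, checks that TLS_SLOT_SANITIZER is really reserved for sanitizer
// use: a magic value written into the slot must survive a call to dlerror().
// On other platforms this is a no-op.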
#  if SANITIZER_ANDROID
void AndroidTestTlsSlot() {
  uptr kMagicValue = 0x010203040A0B0C0D;
  uptr *tls_ptr = GetCurrentThreadLongPtr();
  uptr old_value = *tls_ptr;
  *tls_ptr = kMagicValue;
  dlerror();
  if (*(uptr *)get_android_tls_ptr() != kMagicValue) {
    Printf(
        "ERROR: Incompatible version of Android: TLS_SLOT_SANITIZER(6) is used "
        "for dlerror().\n");
    Die();
  }
  *tls_ptr = old_value;
}
#  else
void AndroidTestTlsSlot() {}
#  endif
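
// Decodes the trap information recorded by instrumented code into an
// AccessInfo (address, size, load/store, recoverability). Returns an empty
// AccessInfo if the trap was not produced by a hwasan check.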
static AccessInfo GetAccessInfo(siginfo_t *info, ucontext_t *uc) {
  // Access type is passed in a platform dependent way (see below) and encoded
  // as 0xXY, where X&1 is 1 for store, 0 for load, and X&2 is 1 if the error is
  // recoverable. Valid values of Y are 0 to 4, which are interpreted as
  // log2(access_size), and 0xF, which means that access size is passed via
  // platform dependent register (see below).
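  // For example, 0x12 encodes a non-recoverable 4-byte store, and 0x2F encodes
  // a recoverable load whose size is passed in a register.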
#  if defined(__aarch64__)
  // Access type is encoded in BRK immediate as 0x900 + 0xXY. For Y == 0xF,
  // access size is stored in X1 register. Access address is always in X0
  // register.
  uptr pc = (uptr)info->si_addr;
  const unsigned code = ((*(u32 *)pc) >> 5) & 0xffff;
  if ((code & 0xff00) != 0x900)
    return AccessInfo{};  // Not ours.

  const bool is_store = code & 0x10;
  const bool recover = code & 0x20;
  const uptr addr = uc->uc_mcontext.regs[0];
  const unsigned size_log = code & 0xf;
  if (size_log > 4 && size_log != 0xf)
    return AccessInfo{};  // Not ours.
  const uptr size = size_log == 0xf ? uc->uc_mcontext.regs[1] : 1U << size_log;

#  elif defined(__x86_64__)
  // Access type is encoded in the instruction following INT3 as
  // NOP DWORD ptr [EAX + 0x40 + 0xXY]. For Y == 0xF, access size is stored in
  // RSI register. Access address is always in RDI register.
  uptr pc = (uptr)uc->uc_mcontext.gregs[REG_RIP];
  uint8_t *nop = (uint8_t *)pc;
  if (*nop != 0x0f || *(nop + 1) != 0x1f || *(nop + 2) != 0x40 ||
      *(nop + 3) < 0x40)
    return AccessInfo{};  // Not ours.
  const unsigned code = *(nop + 3);

  const bool is_store = code & 0x10;
  const bool recover = code & 0x20;
  const uptr addr = uc->uc_mcontext.gregs[REG_RDI];
  const unsigned size_log = code & 0xf;
  if (size_log > 4 && size_log != 0xf)
    return AccessInfo{};  // Not ours.
  const uptr size =
      size_log == 0xf ? uc->uc_mcontext.gregs[REG_RSI] : 1U << size_log;

#  else
#    error Unsupported architecture
#  endif

  return AccessInfo{addr, size, is_store, !is_store, recover};
}
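
// Handles the SIGTRAP raised by an instrumented check: reports the tag
// mismatch and, on AArch64, advances the PC past the BRK instruction so that
// recoverable checks can resume. Returns false if the trap did not come from a
// hwasan check.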
static bool HwasanOnSIGTRAP(int signo, siginfo_t *info, ucontext_t *uc) {
  AccessInfo ai = GetAccessInfo(info, uc);
  if (!ai.is_store && !ai.is_load)
    return false;

  SignalContext sig{info, uc};
  HandleTagMismatch(ai, StackTrace::GetNextInstructionPc(sig.pc), sig.bp, uc);

#  if defined(__aarch64__)
  uc->uc_mcontext.pc += 4;
#  elif defined(__x86_64__)
#  else
#    error Unsupported architecture
#  endif
  return true;
}

static void OnStackUnwind(const SignalContext &sig, const void *,
                          BufferedStackTrace *stack) {
  stack->Unwind(StackTrace::GetNextInstructionPc(sig.pc), sig.bp, sig.context,
                common_flags()->fast_unwind_on_fatal);
}

void HwasanOnDeadlySignal(int signo, void *info, void *context) {
  // Probably a tag mismatch.
  if (signo == SIGTRAP)
    if (HwasanOnSIGTRAP(signo, (siginfo_t *)info, (ucontext_t *)context))
      return;

  HandleDeadlySignal(info, context, GetTid(), &OnStackUnwind, nullptr);
}

void Thread::InitStackAndTls(const InitState *) {
  uptr tls_size;
  uptr stack_size;
  GetThreadStackAndTls(IsMainThread(), &stack_bottom_, &stack_size, &tls_begin_,
                       &tls_size);
  stack_top_ = stack_bottom_ + stack_size;
  tls_end_ = tls_begin_ + tls_size;
}
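
// Sets the shadow for the range [p, p + size) to `tag` and returns the tagged
// pointer. Large zero-tag runs are released back to the OS (which hands back
// zero pages on the next access) instead of being memset.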
uptr TagMemoryAligned(uptr p, uptr size, tag_t tag) {
  CHECK(IsAligned(p, kShadowAlignment));
  CHECK(IsAligned(size, kShadowAlignment));
  uptr shadow_start = MemToShadow(p);
  uptr shadow_size = MemToShadowSize(size);

  uptr page_size = GetPageSizeCached();
  uptr page_start = RoundUpTo(shadow_start, page_size);
  uptr page_end = RoundDownTo(shadow_start + shadow_size, page_size);
  uptr threshold = common_flags()->clear_shadow_mmap_threshold;
  if (SANITIZER_LINUX &&
      UNLIKELY(page_end >= page_start + threshold && tag == 0)) {
    internal_memset((void *)shadow_start, tag, page_start - shadow_start);
    internal_memset((void *)page_end, tag,
                    shadow_start + shadow_size - page_end);
    // For an anonymous private mapping MADV_DONTNEED will return a zero page
    // on Linux.
    ReleaseMemoryPagesToOSAndZeroFill(page_start, page_end);
  } else {
    internal_memset((void *)shadow_start, tag, shadow_size);
  }
  return AddTagToPointer(p, tag);
}
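
// Keeps the allocator and the stack depot consistent across fork(): their
// locks are acquired before the fork and released in both the parent and the
// child afterwards.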
void HwasanInstallAtForkHandler() {
  auto before = []() {
    HwasanAllocatorLock();
    StackDepotLockAll();
  };
  auto after = []() {
    StackDepotUnlockAll();
    HwasanAllocatorUnlock();
  };
  pthread_atfork(before, after, after);
}

}  // namespace __hwasan

#endif  // SANITIZER_FREEBSD || SANITIZER_LINUX || SANITIZER_NETBSD