2012-05-10 21:48:04 +08:00
|
|
|
//===-- tsan_rtl.h ----------------------------------------------*- C++ -*-===//
|
|
|
|
//
|
2019-01-19 16:50:56 +08:00
|
|
|
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
|
|
|
|
// See https://llvm.org/LICENSE.txt for license information.
|
|
|
|
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
|
2012-05-10 21:48:04 +08:00
|
|
|
//
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
//
|
|
|
|
// This file is a part of ThreadSanitizer (TSan), a race detector.
|
|
|
|
//
|
|
|
|
// Main internal TSan header file.
|
|
|
|
//
|
|
|
|
// Ground rules:
|
|
|
|
// - C++ run-time should not be used (static CTORs, RTTI, exceptions, static
|
|
|
|
// function-scope locals)
|
|
|
|
// - All functions/classes/etc reside in namespace __tsan, except for those
|
|
|
|
// declared in tsan_interface.h.
|
|
|
|
// - Platform-specific files should be used instead of ifdefs (*).
|
|
|
|
// - No system headers included in header files (*).
|
|
|
|
// - Platform-specific headers included only into platform-specific files (*).
|
|
|
|
//
|
|
|
|
// (*) Except when inlining is critical for performance.
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
|
|
|
|
#ifndef TSAN_RTL_H
|
|
|
|
#define TSAN_RTL_H
|
|
|
|
|
2012-12-05 18:09:15 +08:00
|
|
|
#include "sanitizer_common/sanitizer_allocator.h"
|
2013-05-29 17:15:39 +08:00
|
|
|
#include "sanitizer_common/sanitizer_allocator_internal.h"
|
2013-12-05 15:44:35 +08:00
|
|
|
#include "sanitizer_common/sanitizer_asm.h"
|
2013-03-15 21:48:44 +08:00
|
|
|
#include "sanitizer_common/sanitizer_common.h"
|
2014-02-28 18:48:13 +08:00
|
|
|
#include "sanitizer_common/sanitizer_deadlock_detector_interface.h"
|
2013-10-03 21:37:17 +08:00
|
|
|
#include "sanitizer_common/sanitizer_libignore.h"
|
2013-06-26 23:37:14 +08:00
|
|
|
#include "sanitizer_common/sanitizer_suppressions.h"
|
2013-03-15 21:48:44 +08:00
|
|
|
#include "sanitizer_common/sanitizer_thread_registry.h"
|
2017-12-04 20:30:09 +08:00
|
|
|
#include "sanitizer_common/sanitizer_vector.h"
|
2012-05-10 21:48:04 +08:00
|
|
|
#include "tsan_clock.h"
|
|
|
|
#include "tsan_defs.h"
|
|
|
|
#include "tsan_flags.h"
|
2017-12-04 20:30:09 +08:00
|
|
|
#include "tsan_mman.h"
|
2012-05-10 21:48:04 +08:00
|
|
|
#include "tsan_sync.h"
|
|
|
|
#include "tsan_trace.h"
|
|
|
|
#include "tsan_report.h"
|
2012-11-28 18:35:31 +08:00
|
|
|
#include "tsan_platform.h"
|
2012-12-06 20:16:15 +08:00
|
|
|
#include "tsan_mutexset.h"
|
2013-11-27 19:30:28 +08:00
|
|
|
#include "tsan_ignoreset.h"
|
2014-05-29 21:50:54 +08:00
|
|
|
#include "tsan_stack_trace.h"
|
2012-05-10 21:48:04 +08:00
|
|
|
|
2012-12-04 23:13:30 +08:00
|
|
|
#if SANITIZER_WORDSIZE != 64
|
|
|
|
# error "ThreadSanitizer is supported only on 64-bit platforms"
|
|
|
|
#endif
|
|
|
|
|
2012-05-10 21:48:04 +08:00
|
|
|
namespace __tsan {
|
|
|
|
|
2016-10-29 04:14:18 +08:00
|
|
|
#if !SANITIZER_GO
|
2013-03-18 18:32:21 +08:00
|
|
|
struct MapUnmapCallback;
|
2015-12-09 05:54:39 +08:00
|
|
|
#if defined(__mips64) || defined(__aarch64__) || defined(__powerpc__)
|
2019-04-27 14:30:52 +08:00
|
|
|
|
2017-05-15 22:47:19 +08:00
|
|
|
struct AP32 {
|
|
|
|
static const uptr kSpaceBeg = 0;
|
|
|
|
static const u64 kSpaceSize = SANITIZER_MMAP_RANGE_SIZE;
|
|
|
|
static const uptr kMetadataSize = 0;
|
|
|
|
typedef __sanitizer::CompactSizeClassMap SizeClassMap;
|
2019-04-27 14:30:52 +08:00
|
|
|
static const uptr kRegionSizeLog = 20;
|
Introduce `AddressSpaceView` template parameter to `SizeClassAllocator32`, `FlatByteMap`, and `TwoLevelByteMap`.
Summary:
This is a follow up patch to r346956 for the `SizeClassAllocator32`
allocator.
This patch makes `AddressSpaceView` a template parameter both to the
`ByteMap` implementations (but makes `LocalAddressSpaceView` the
default), some `AP32` implementations and is used in `SizeClassAllocator32`.
The actual changes to `ByteMap` implementations and
`SizeClassAllocator32` are very simple. However the patch is large
because it requires changing all the `AP32` definitions, and users of
those definitions.
For ASan and LSan we make `AP32` and `ByteMap` templateds type that take
a single `AddressSpaceView` argument. This has been done because we will
instantiate the allocator with a type that isn't `LocalAddressSpaceView`
in the future patches. For the allocators used in the other sanitizers
(i.e. HWAsan, MSan, Scudo, and TSan) use of `LocalAddressSpaceView` is
hard coded because we do not intend to instantiate the allocators with
any other type.
In the cases where untemplated types have become templated on a single
`AddressSpaceView` parameter (e.g. `PrimaryAllocator`) their name has
been changed to have a `ASVT` suffix (Address Space View Type) to
indicate they are templated. The only exception to this are the `AP32`
types due to the desire to keep the type name as short as possible.
In order to check that template is instantiated in the correct a way a
`static_assert(...)` has been added that checks that the
`AddressSpaceView` type used by `Params::ByteMap::AddressSpaceView` matches
the `Params::AddressSpaceView`. This uses the new `sanitizer_type_traits.h`
header.
rdar://problem/45284065
Reviewers: kcc, dvyukov, vitalybuka, cryptoad, eugenis, kubamracek, george.karpenkov
Subscribers: mgorny, llvm-commits, #sanitizers
Differential Revision: https://reviews.llvm.org/D54904
llvm-svn: 349138
2018-12-14 17:03:18 +08:00
|
|
|
using AddressSpaceView = LocalAddressSpaceView;
|
2017-05-15 22:47:19 +08:00
|
|
|
typedef __tsan::MapUnmapCallback MapUnmapCallback;
|
2019-04-27 14:30:52 +08:00
|
|
|
static const uptr kFlags = 0;
|
2017-05-15 22:47:19 +08:00
|
|
|
};
|
|
|
|
typedef SizeClassAllocator32<AP32> PrimaryAllocator;
|
2015-02-20 14:42:41 +08:00
|
|
|
#else
|
2016-08-26 04:23:08 +08:00
|
|
|
struct AP64 { // Allocator64 parameters. Deliberately using a short name.
|
|
|
|
static const uptr kSpaceBeg = Mapping::kHeapMemBeg;
|
|
|
|
static const uptr kSpaceSize = Mapping::kHeapMemEnd - Mapping::kHeapMemBeg;
|
|
|
|
static const uptr kMetadataSize = 0;
|
|
|
|
typedef DefaultSizeClassMap SizeClassMap;
|
|
|
|
typedef __tsan::MapUnmapCallback MapUnmapCallback;
|
2019-04-27 01:04:05 +08:00
|
|
|
static const uptr kFlags = 0;
|
2018-12-22 05:09:31 +08:00
|
|
|
using AddressSpaceView = LocalAddressSpaceView;
|
2016-08-26 04:23:08 +08:00
|
|
|
};
|
|
|
|
typedef SizeClassAllocator64<AP64> PrimaryAllocator;
|
2015-02-20 14:42:41 +08:00
|
|
|
#endif
|
2019-05-02 03:41:54 +08:00
|
|
|
typedef CombinedAllocator<PrimaryAllocator> Allocator;
|
2019-05-02 03:30:49 +08:00
|
|
|
typedef Allocator::AllocatorCache AllocatorCache;
|
2012-08-30 21:02:30 +08:00
|
|
|
Allocator *allocator();
|
2012-08-15 23:35:15 +08:00
|
|
|
#endif
|
|
|
|
|
2012-09-11 17:44:48 +08:00
|
|
|
void TsanCheckFailed(const char *file, int line, const char *cond,
|
|
|
|
u64 v1, u64 v2);
|
2012-05-10 21:48:04 +08:00
|
|
|
|
2013-03-20 18:31:53 +08:00
|
|
|
const u64 kShadowRodata = (u64)-1; // .rodata shadow marker
|
|
|
|
|
2012-05-10 21:48:04 +08:00
|
|
|
// FastState (from most significant bit):
|
2012-11-28 18:49:27 +08:00
|
|
|
// ignore : 1
|
2012-05-10 21:48:04 +08:00
|
|
|
// tid : kTidBits
|
2012-05-17 22:17:51 +08:00
|
|
|
// unused : -
|
2012-11-28 20:19:50 +08:00
|
|
|
// history_size : 3
|
2014-05-30 21:36:29 +08:00
|
|
|
// epoch : kClkBits
|
2012-05-10 21:48:04 +08:00
|
|
|
class FastState {
|
|
|
|
public:
|
|
|
|
FastState(u64 tid, u64 epoch) {
|
2012-05-17 22:17:51 +08:00
|
|
|
x_ = tid << kTidShift;
|
2014-05-30 21:36:29 +08:00
|
|
|
x_ |= epoch;
|
2012-11-28 18:49:27 +08:00
|
|
|
DCHECK_EQ(tid, this->tid());
|
|
|
|
DCHECK_EQ(epoch, this->epoch());
|
|
|
|
DCHECK_EQ(GetIgnoreBit(), false);
|
2012-05-10 21:48:04 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
explicit FastState(u64 x)
|
|
|
|
: x_(x) {
|
|
|
|
}
|
|
|
|
|
2012-08-16 23:08:49 +08:00
|
|
|
u64 raw() const {
|
|
|
|
return x_;
|
|
|
|
}
|
|
|
|
|
2012-05-10 21:48:04 +08:00
|
|
|
u64 tid() const {
|
2012-12-01 04:02:11 +08:00
|
|
|
u64 res = (x_ & ~kIgnoreBit) >> kTidShift;
|
|
|
|
return res;
|
|
|
|
}
|
|
|
|
|
|
|
|
u64 TidWithIgnore() const {
|
2012-05-17 22:17:51 +08:00
|
|
|
u64 res = x_ >> kTidShift;
|
2012-05-10 21:48:04 +08:00
|
|
|
return res;
|
|
|
|
}
|
2012-05-17 22:17:51 +08:00
|
|
|
|
2012-05-10 21:48:04 +08:00
|
|
|
u64 epoch() const {
|
2014-05-30 21:36:29 +08:00
|
|
|
u64 res = x_ & ((1ull << kClkBits) - 1);
|
2012-05-10 21:48:04 +08:00
|
|
|
return res;
|
2012-05-17 22:17:51 +08:00
|
|
|
}
|
|
|
|
|
2012-05-10 21:48:04 +08:00
|
|
|
void IncrementEpoch() {
|
2012-05-17 22:17:51 +08:00
|
|
|
u64 old_epoch = epoch();
|
2014-05-30 21:36:29 +08:00
|
|
|
x_ += 1;
|
2012-05-21 18:20:53 +08:00
|
|
|
DCHECK_EQ(old_epoch + 1, epoch());
|
2012-05-17 22:17:51 +08:00
|
|
|
(void)old_epoch;
|
2012-05-10 21:48:04 +08:00
|
|
|
}
|
2012-05-17 22:17:51 +08:00
|
|
|
|
|
|
|
void SetIgnoreBit() { x_ |= kIgnoreBit; }
|
|
|
|
void ClearIgnoreBit() { x_ &= ~kIgnoreBit; }
|
2012-11-28 18:49:27 +08:00
|
|
|
bool GetIgnoreBit() const { return (s64)x_ < 0; }
|
2012-05-10 21:48:04 +08:00
|
|
|
|
2012-11-28 20:19:50 +08:00
|
|
|
void SetHistorySize(int hs) {
|
|
|
|
CHECK_GE(hs, 0);
|
|
|
|
CHECK_LE(hs, 7);
|
2014-05-30 21:36:29 +08:00
|
|
|
x_ = (x_ & ~(kHistoryMask << kHistoryShift)) | (u64(hs) << kHistoryShift);
|
2012-11-28 20:19:50 +08:00
|
|
|
}
|
|
|
|
|
2014-05-30 21:36:29 +08:00
|
|
|
ALWAYS_INLINE
|
2012-11-28 20:19:50 +08:00
|
|
|
int GetHistorySize() const {
|
2014-05-30 21:36:29 +08:00
|
|
|
return (int)((x_ >> kHistoryShift) & kHistoryMask);
|
2012-11-28 20:19:50 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
void ClearHistorySize() {
|
2014-05-30 21:36:29 +08:00
|
|
|
SetHistorySize(0);
|
2012-11-28 20:19:50 +08:00
|
|
|
}
|
|
|
|
|
2014-05-30 21:36:29 +08:00
|
|
|
ALWAYS_INLINE
|
2012-11-28 20:19:50 +08:00
|
|
|
u64 GetTracePos() const {
|
|
|
|
const int hs = GetHistorySize();
|
|
|
|
// When hs == 0, the trace consists of 2 parts.
|
|
|
|
const u64 mask = (1ull << (kTracePartSizeBits + hs + 1)) - 1;
|
|
|
|
return epoch() & mask;
|
|
|
|
}
|
|
|
|
|
2012-05-10 21:48:04 +08:00
|
|
|
private:
|
|
|
|
friend class Shadow;
|
2012-05-17 22:17:51 +08:00
|
|
|
static const int kTidShift = 64 - kTidBits - 1;
|
2012-11-28 18:49:27 +08:00
|
|
|
static const u64 kIgnoreBit = 1ull << 63;
|
2012-05-17 22:17:51 +08:00
|
|
|
static const u64 kFreedBit = 1ull << 63;
|
2014-05-30 21:36:29 +08:00
|
|
|
static const u64 kHistoryShift = kClkBits;
|
|
|
|
static const u64 kHistoryMask = 7;
|
2012-05-10 21:48:04 +08:00
|
|
|
u64 x_;
|
|
|
|
};
|
|
|
|
|
|
|
|
// Shadow (from most significant bit):
|
2012-05-17 22:17:51 +08:00
|
|
|
// freed : 1
|
2012-05-10 21:48:04 +08:00
|
|
|
// tid : kTidBits
|
2013-02-01 17:42:06 +08:00
|
|
|
// is_atomic : 1
|
2013-02-01 18:02:55 +08:00
|
|
|
// is_read : 1
|
2012-05-10 21:48:04 +08:00
|
|
|
// size_log : 2
|
|
|
|
// addr0 : 3
|
2014-05-30 21:36:29 +08:00
|
|
|
// epoch : kClkBits
|
2012-06-28 00:05:06 +08:00
|
|
|
class Shadow : public FastState {
|
2012-05-10 21:48:04 +08:00
|
|
|
public:
|
2012-11-28 20:19:50 +08:00
|
|
|
explicit Shadow(u64 x)
|
|
|
|
: FastState(x) {
|
|
|
|
}
|
2012-05-10 21:48:04 +08:00
|
|
|
|
2012-11-28 20:19:50 +08:00
|
|
|
explicit Shadow(const FastState &s)
|
|
|
|
: FastState(s.x_) {
|
|
|
|
ClearHistorySize();
|
|
|
|
}
|
2012-05-10 21:48:04 +08:00
|
|
|
|
|
|
|
void SetAddr0AndSizeLog(u64 addr0, unsigned kAccessSizeLog) {
|
2014-05-30 21:36:29 +08:00
|
|
|
DCHECK_EQ((x_ >> kClkBits) & 31, 0);
|
2012-05-10 21:48:04 +08:00
|
|
|
DCHECK_LE(addr0, 7);
|
|
|
|
DCHECK_LE(kAccessSizeLog, 3);
|
2014-05-30 21:36:29 +08:00
|
|
|
x_ |= ((kAccessSizeLog << 3) | addr0) << kClkBits;
|
2012-05-10 21:48:04 +08:00
|
|
|
DCHECK_EQ(kAccessSizeLog, size_log());
|
|
|
|
DCHECK_EQ(addr0, this->addr0());
|
|
|
|
}
|
|
|
|
|
|
|
|
void SetWrite(unsigned kAccessIsWrite) {
|
2013-02-01 18:02:55 +08:00
|
|
|
DCHECK_EQ(x_ & kReadBit, 0);
|
|
|
|
if (!kAccessIsWrite)
|
|
|
|
x_ |= kReadBit;
|
2013-02-01 17:42:06 +08:00
|
|
|
DCHECK_EQ(kAccessIsWrite, IsWrite());
|
2012-05-10 21:48:04 +08:00
|
|
|
}
|
|
|
|
|
2013-02-01 17:42:06 +08:00
|
|
|
void SetAtomic(bool kIsAtomic) {
|
|
|
|
DCHECK(!IsAtomic());
|
|
|
|
if (kIsAtomic)
|
|
|
|
x_ |= kAtomicBit;
|
|
|
|
DCHECK_EQ(IsAtomic(), kIsAtomic);
|
|
|
|
}
|
|
|
|
|
|
|
|
bool IsAtomic() const {
|
|
|
|
return x_ & kAtomicBit;
|
|
|
|
}
|
|
|
|
|
|
|
|
bool IsZero() const {
|
|
|
|
return x_ == 0;
|
|
|
|
}
|
2012-05-10 21:48:04 +08:00
|
|
|
|
2012-05-23 02:07:45 +08:00
|
|
|
static inline bool TidsAreEqual(const Shadow s1, const Shadow s2) {
|
2012-05-17 22:17:51 +08:00
|
|
|
u64 shifted_xor = (s1.x_ ^ s2.x_) >> kTidShift;
|
2012-12-01 04:02:11 +08:00
|
|
|
DCHECK_EQ(shifted_xor == 0, s1.TidWithIgnore() == s2.TidWithIgnore());
|
2012-05-10 21:48:04 +08:00
|
|
|
return shifted_xor == 0;
|
|
|
|
}
|
2012-05-23 02:07:45 +08:00
|
|
|
|
2014-05-30 21:36:29 +08:00
|
|
|
static ALWAYS_INLINE
|
|
|
|
bool Addr0AndSizeAreEqual(const Shadow s1, const Shadow s2) {
|
|
|
|
u64 masked_xor = ((s1.x_ ^ s2.x_) >> kClkBits) & 31;
|
2012-05-10 21:48:04 +08:00
|
|
|
return masked_xor == 0;
|
|
|
|
}
|
|
|
|
|
2014-05-30 21:36:29 +08:00
|
|
|
static ALWAYS_INLINE bool TwoRangesIntersect(Shadow s1, Shadow s2,
|
2012-05-10 21:48:04 +08:00
|
|
|
unsigned kS2AccessSize) {
|
|
|
|
bool res = false;
|
|
|
|
u64 diff = s1.addr0() - s2.addr0();
|
2019-09-12 07:19:48 +08:00
|
|
|
if ((s64)diff < 0) { // s1.addr0 < s2.addr0
|
2012-05-10 21:48:04 +08:00
|
|
|
// if (s1.addr0() + size1) > s2.addr0()) return true;
|
2014-05-30 21:36:29 +08:00
|
|
|
if (s1.size() > -diff)
|
|
|
|
res = true;
|
2012-05-10 21:48:04 +08:00
|
|
|
} else {
|
|
|
|
// if (s2.addr0() + kS2AccessSize > s1.addr0()) return true;
|
2014-05-30 21:36:29 +08:00
|
|
|
if (kS2AccessSize > diff)
|
|
|
|
res = true;
|
2012-05-10 21:48:04 +08:00
|
|
|
}
|
2014-05-30 21:36:29 +08:00
|
|
|
DCHECK_EQ(res, TwoRangesIntersectSlow(s1, s2));
|
|
|
|
DCHECK_EQ(res, TwoRangesIntersectSlow(s2, s1));
|
2012-05-10 21:48:04 +08:00
|
|
|
return res;
|
|
|
|
}
|
|
|
|
|
2014-05-30 21:36:29 +08:00
|
|
|
u64 ALWAYS_INLINE addr0() const { return (x_ >> kClkBits) & 7; }
|
|
|
|
u64 ALWAYS_INLINE size() const { return 1ull << size_log(); }
|
|
|
|
bool ALWAYS_INLINE IsWrite() const { return !IsRead(); }
|
|
|
|
bool ALWAYS_INLINE IsRead() const { return x_ & kReadBit; }
|
2012-05-10 21:48:04 +08:00
|
|
|
|
2012-05-17 22:17:51 +08:00
|
|
|
// The idea behind the freed bit is as follows.
|
|
|
|
// When the memory is freed (or otherwise unaccessible) we write to the shadow
|
|
|
|
// values with tid/epoch related to the free and the freed bit set.
|
|
|
|
// During memory accesses processing the freed bit is considered
|
|
|
|
// as msb of tid. So any access races with shadow with freed bit set
|
|
|
|
// (it is as if write from a thread with which we never synchronized before).
|
|
|
|
// This allows us to detect accesses to freed memory w/o additional
|
|
|
|
// overheads in memory access processing and at the same time restore
|
|
|
|
// tid/epoch of free.
|
|
|
|
void MarkAsFreed() {
|
|
|
|
x_ |= kFreedBit;
|
|
|
|
}
|
|
|
|
|
2013-02-01 22:41:58 +08:00
|
|
|
bool IsFreed() const {
|
|
|
|
return x_ & kFreedBit;
|
|
|
|
}
|
|
|
|
|
2012-05-17 22:17:51 +08:00
|
|
|
bool GetFreedAndReset() {
|
|
|
|
bool res = x_ & kFreedBit;
|
|
|
|
x_ &= ~kFreedBit;
|
|
|
|
return res;
|
|
|
|
}
|
|
|
|
|
2014-05-30 21:36:29 +08:00
|
|
|
bool ALWAYS_INLINE IsBothReadsOrAtomic(bool kIsWrite, bool kIsAtomic) const {
|
|
|
|
bool v = x_ & ((u64(kIsWrite ^ 1) << kReadShift)
|
|
|
|
| (u64(kIsAtomic) << kAtomicShift));
|
2013-02-01 17:42:06 +08:00
|
|
|
DCHECK_EQ(v, (!IsWrite() && !kIsWrite) || (IsAtomic() && kIsAtomic));
|
|
|
|
return v;
|
|
|
|
}
|
|
|
|
|
2014-05-30 21:36:29 +08:00
|
|
|
bool ALWAYS_INLINE IsRWNotWeaker(bool kIsWrite, bool kIsAtomic) const {
|
2013-02-01 18:02:55 +08:00
|
|
|
bool v = ((x_ >> kReadShift) & 3)
|
2013-02-01 17:42:06 +08:00
|
|
|
<= u64((kIsWrite ^ 1) | (kIsAtomic << 1));
|
|
|
|
DCHECK_EQ(v, (IsAtomic() < kIsAtomic) ||
|
|
|
|
(IsAtomic() == kIsAtomic && !IsWrite() <= !kIsWrite));
|
|
|
|
return v;
|
|
|
|
}
|
|
|
|
|
2014-05-30 21:36:29 +08:00
|
|
|
bool ALWAYS_INLINE IsRWWeakerOrEqual(bool kIsWrite, bool kIsAtomic) const {
|
2013-02-01 18:02:55 +08:00
|
|
|
bool v = ((x_ >> kReadShift) & 3)
|
2013-02-01 17:42:06 +08:00
|
|
|
>= u64((kIsWrite ^ 1) | (kIsAtomic << 1));
|
|
|
|
DCHECK_EQ(v, (IsAtomic() > kIsAtomic) ||
|
|
|
|
(IsAtomic() == kIsAtomic && !IsWrite() >= !kIsWrite));
|
|
|
|
return v;
|
|
|
|
}
|
|
|
|
|
2012-05-10 21:48:04 +08:00
|
|
|
private:
|
2014-05-30 21:36:29 +08:00
|
|
|
static const u64 kReadShift = 5 + kClkBits;
|
2013-02-01 18:02:55 +08:00
|
|
|
static const u64 kReadBit = 1ull << kReadShift;
|
2014-05-30 21:36:29 +08:00
|
|
|
static const u64 kAtomicShift = 6 + kClkBits;
|
2013-02-01 17:42:06 +08:00
|
|
|
static const u64 kAtomicBit = 1ull << kAtomicShift;
|
|
|
|
|
2014-05-30 21:36:29 +08:00
|
|
|
u64 size_log() const { return (x_ >> (3 + kClkBits)) & 3; }
|
2012-05-23 02:07:45 +08:00
|
|
|
|
2014-05-30 21:36:29 +08:00
|
|
|
static bool TwoRangesIntersectSlow(const Shadow s1, const Shadow s2) {
|
2012-05-23 02:07:45 +08:00
|
|
|
if (s1.addr0() == s2.addr0()) return true;
|
|
|
|
if (s1.addr0() < s2.addr0() && s1.addr0() + s1.size() > s2.addr0())
|
|
|
|
return true;
|
|
|
|
if (s2.addr0() < s1.addr0() && s2.addr0() + s2.size() > s1.addr0())
|
|
|
|
return true;
|
|
|
|
return false;
|
|
|
|
}
|
2012-05-10 21:48:04 +08:00
|
|
|
};
|
|
|
|
|
2015-03-03 01:36:02 +08:00
|
|
|
struct ThreadSignalContext;
|
2012-05-10 21:48:04 +08:00
|
|
|
|
2013-03-25 18:10:44 +08:00
|
|
|
struct JmpBuf {
|
|
|
|
uptr sp;
|
2014-09-17 05:48:22 +08:00
|
|
|
int int_signal_send;
|
|
|
|
bool in_blocking_func;
|
|
|
|
uptr in_signal_handler;
|
2013-03-25 18:10:44 +08:00
|
|
|
uptr *shadow_stack_pos;
|
|
|
|
};
|
|
|
|
|
2016-04-27 16:23:02 +08:00
|
|
|
// A Processor represents a physical thread, or a P for Go.
|
|
|
|
// It is used to store internal resources like allocate cache, and does not
|
|
|
|
// participate in race-detection logic (invisible to end user).
|
|
|
|
// In C++ it is tied to an OS thread just like ThreadState, however ideally
|
|
|
|
// it should be tied to a CPU (this way we will have fewer allocator caches).
|
|
|
|
// In Go it is tied to a P, so there are significantly fewer Processor's than
|
|
|
|
// ThreadState's (which are tied to Gs).
|
|
|
|
// A ThreadState must be wired with a Processor to handle events.
|
|
|
|
struct Processor {
|
|
|
|
ThreadState *thr; // currently wired thread, or nullptr
|
2016-10-29 04:14:18 +08:00
|
|
|
#if !SANITIZER_GO
|
2016-04-27 16:23:02 +08:00
|
|
|
AllocatorCache alloc_cache;
|
|
|
|
InternalAllocatorCache internal_alloc_cache;
|
|
|
|
#endif
|
|
|
|
DenseSlabAllocCache block_cache;
|
|
|
|
DenseSlabAllocCache sync_cache;
|
|
|
|
DenseSlabAllocCache clock_cache;
|
|
|
|
DDPhysicalThread *dd_pt;
|
|
|
|
};
|
|
|
|
|
2016-10-29 04:14:18 +08:00
|
|
|
#if !SANITIZER_GO
|
2016-05-10 19:19:50 +08:00
|
|
|
// ScopedGlobalProcessor temporarily sets up a global processor for the
// current thread, if it does not have one. Intended for interceptors that
// can run at the very thread end, when we already destroyed the thread
// processor.
struct ScopedGlobalProcessor {
  ScopedGlobalProcessor();
  ~ScopedGlobalProcessor();
};
|
|
|
|
#endif
|
|
|
|
|
2012-05-10 21:48:04 +08:00
|
|
|
// This struct is stored in TLS.
|
|
|
|
struct ThreadState {
|
|
|
|
FastState fast_state;
|
|
|
|
// Synch epoch represents the threads's epoch before the last synchronization
|
|
|
|
// action. It allows to reduce number of shadow state updates.
|
|
|
|
// For example, fast_synch_epoch=100, last write to addr X was at epoch=150,
|
|
|
|
// if we are processing write to X from the same thread at epoch=200,
|
|
|
|
// we do nothing, because both writes happen in the same 'synch epoch'.
|
|
|
|
// That is, if another memory access does not race with the former write,
|
|
|
|
// it does not race with the latter as well.
|
|
|
|
// QUESTION: can we can squeeze this into ThreadState::Fast?
|
|
|
|
// E.g. ThreadState::Fast is a 44-bit, 32 are taken by synch_epoch and 12 are
|
|
|
|
// taken by epoch between synchs.
|
|
|
|
// This way we can save one load from tls.
|
|
|
|
u64 fast_synch_epoch;
|
2019-02-13 21:21:24 +08:00
|
|
|
// Technically `current` should be a separate THREADLOCAL variable;
|
|
|
|
// but it is placed here in order to share cache line with previous fields.
|
|
|
|
ThreadState* current;
|
2012-05-10 21:48:04 +08:00
|
|
|
// This is a slow path flag. On fast path, fast_state.GetIgnoreBit() is read.
|
|
|
|
// We do not distinguish beteween ignoring reads and writes
|
|
|
|
// for better performance.
|
|
|
|
int ignore_reads_and_writes;
|
2013-10-10 23:58:12 +08:00
|
|
|
int ignore_sync;
|
2017-04-22 00:44:27 +08:00
|
|
|
int suppress_reports;
|
2013-11-27 19:30:28 +08:00
|
|
|
// Go does not support ignores.
|
2016-10-29 04:14:18 +08:00
|
|
|
#if !SANITIZER_GO
|
2013-11-27 19:30:28 +08:00
|
|
|
IgnoreSet mop_ignore_set;
|
|
|
|
IgnoreSet sync_ignore_set;
|
|
|
|
#endif
|
2013-10-16 23:35:12 +08:00
|
|
|
// C/C++ uses fixed size shadow stack embed into Trace.
|
|
|
|
// Go uses malloc-allocated shadow stack with dynamic size.
|
|
|
|
uptr *shadow_stack;
|
|
|
|
uptr *shadow_stack_end;
|
2012-05-10 21:48:04 +08:00
|
|
|
uptr *shadow_stack_pos;
|
|
|
|
u64 *racy_shadow_addr;
|
|
|
|
u64 racy_state[2];
|
2012-12-06 20:16:15 +08:00
|
|
|
MutexSet mset;
|
2012-05-10 21:48:04 +08:00
|
|
|
ThreadClock clock;
|
2016-10-29 04:14:18 +08:00
|
|
|
#if !SANITIZER_GO
|
2013-03-25 18:10:44 +08:00
|
|
|
Vector<JmpBuf> jmp_bufs;
|
2013-12-24 20:55:56 +08:00
|
|
|
int ignore_interceptors;
|
2012-08-15 23:35:15 +08:00
|
|
|
#endif
|
2015-02-18 07:23:10 +08:00
|
|
|
#if TSAN_COLLECT_STATS
|
2012-05-10 21:48:04 +08:00
|
|
|
u64 stat[StatCnt];
|
2015-02-13 23:25:47 +08:00
|
|
|
#endif
|
2012-05-10 21:48:04 +08:00
|
|
|
const int tid;
|
2012-08-30 21:02:30 +08:00
|
|
|
const int unique_id;
|
2013-01-29 21:03:07 +08:00
|
|
|
bool in_symbolizer;
|
2013-10-03 21:37:17 +08:00
|
|
|
bool in_ignored_lib;
|
2015-03-16 22:42:21 +08:00
|
|
|
bool is_inited;
|
2014-09-03 20:25:22 +08:00
|
|
|
bool is_dead;
|
2013-02-01 22:41:58 +08:00
|
|
|
bool is_freeing;
|
2013-03-21 23:37:39 +08:00
|
|
|
bool is_vptr_access;
|
2012-05-10 21:48:04 +08:00
|
|
|
const uptr stk_addr;
|
|
|
|
const uptr stk_size;
|
|
|
|
const uptr tls_addr;
|
|
|
|
const uptr tls_size;
|
2013-11-27 19:30:28 +08:00
|
|
|
ThreadContext *tctx;
|
2012-05-10 21:48:04 +08:00
|
|
|
|
2015-02-13 23:37:11 +08:00
|
|
|
#if SANITIZER_DEBUG && !SANITIZER_GO
|
2014-02-14 20:20:42 +08:00
|
|
|
InternalDeadlockDetector internal_deadlock_detector;
|
2015-02-13 23:37:11 +08:00
|
|
|
#endif
|
2014-02-28 18:48:13 +08:00
|
|
|
DDLogicalThread *dd_lt;
|
2012-05-10 21:48:04 +08:00
|
|
|
|
2016-04-27 16:23:02 +08:00
|
|
|
// Current wired Processor, or nullptr. Required to handle any events.
|
2016-04-27 20:30:48 +08:00
|
|
|
Processor *proc1;
|
2016-10-29 04:14:18 +08:00
|
|
|
#if !SANITIZER_GO
|
2016-04-27 20:30:48 +08:00
|
|
|
Processor *proc() { return proc1; }
|
|
|
|
#else
|
|
|
|
Processor *proc();
|
|
|
|
#endif
|
2016-04-27 16:23:02 +08:00
|
|
|
|
2014-09-02 20:27:45 +08:00
|
|
|
atomic_uintptr_t in_signal_handler;
|
2015-03-03 01:36:02 +08:00
|
|
|
ThreadSignalContext *signal_ctx;
|
2012-06-28 00:05:06 +08:00
|
|
|
|
2016-10-29 04:14:18 +08:00
|
|
|
#if !SANITIZER_GO
|
2012-09-01 01:27:49 +08:00
|
|
|
u32 last_sleep_stack_id;
|
|
|
|
ThreadClock last_sleep_clock;
|
|
|
|
#endif
|
|
|
|
|
2012-06-22 19:08:55 +08:00
|
|
|
// Set in regions of runtime that must be signal-safe and fork-safe.
|
|
|
|
// If set, malloc must not be called.
|
|
|
|
int nomalloc;
|
2012-05-10 21:48:04 +08:00
|
|
|
|
2016-03-11 01:00:29 +08:00
|
|
|
const ReportDesc *current_report;
|
|
|
|
|
2012-08-30 21:02:30 +08:00
|
|
|
explicit ThreadState(Context *ctx, int tid, int unique_id, u64 epoch,
|
2014-04-11 23:38:03 +08:00
|
|
|
unsigned reuse_count,
|
2012-05-10 21:48:04 +08:00
|
|
|
uptr stk_addr, uptr stk_size,
|
|
|
|
uptr tls_addr, uptr tls_size);
|
|
|
|
};
|
|
|
|
|
2016-10-29 04:14:18 +08:00
|
|
|
#if !SANITIZER_GO
|
2016-01-15 11:39:04 +08:00
|
|
|
#if SANITIZER_MAC || SANITIZER_ANDROID
|
2015-11-05 21:54:50 +08:00
|
|
|
ThreadState *cur_thread();
|
2019-04-20 08:18:44 +08:00
|
|
|
void set_cur_thread(ThreadState *thr);
|
2015-11-05 21:54:50 +08:00
|
|
|
void cur_thread_finalize();
|
2020-09-17 22:04:50 +08:00
|
|
|
inline void cur_thread_init() { }
|
2015-11-05 21:54:50 +08:00
|
|
|
#else
|
2014-05-12 18:40:33 +08:00
|
|
|
__attribute__((tls_model("initial-exec")))
|
2012-07-06 00:18:28 +08:00
|
|
|
extern THREADLOCAL char cur_thread_placeholder[];
|
2020-09-17 22:04:50 +08:00
|
|
|
inline ThreadState *cur_thread() {
|
2019-02-13 21:21:24 +08:00
|
|
|
return reinterpret_cast<ThreadState *>(cur_thread_placeholder)->current;
|
|
|
|
}
|
2020-09-17 22:04:50 +08:00
|
|
|
inline void cur_thread_init() {
|
2019-02-13 21:21:24 +08:00
|
|
|
ThreadState *thr = reinterpret_cast<ThreadState *>(cur_thread_placeholder);
|
|
|
|
if (UNLIKELY(!thr->current))
|
|
|
|
thr->current = thr;
|
|
|
|
}
|
2020-09-17 22:04:50 +08:00
|
|
|
inline void set_cur_thread(ThreadState *thr) {
|
2019-02-13 21:21:24 +08:00
|
|
|
reinterpret_cast<ThreadState *>(cur_thread_placeholder)->current = thr;
|
2012-05-10 21:48:04 +08:00
|
|
|
}
|
2020-09-17 22:04:50 +08:00
|
|
|
inline void cur_thread_finalize() { }
|
2016-01-15 11:39:04 +08:00
|
|
|
#endif // SANITIZER_MAC || SANITIZER_ANDROID
|
2015-11-05 21:54:50 +08:00
|
|
|
#endif // SANITIZER_GO
|
2012-05-10 21:48:04 +08:00
|
|
|
|
2020-11-03 08:42:20 +08:00
|
|
|
class ThreadContext final : public ThreadContextBase {
|
2013-03-15 21:48:44 +08:00
|
|
|
public:
|
|
|
|
explicit ThreadContext(int tid);
|
2013-03-18 18:10:15 +08:00
|
|
|
~ThreadContext();
|
2012-05-10 21:48:04 +08:00
|
|
|
ThreadState *thr;
|
2013-03-18 17:02:27 +08:00
|
|
|
u32 creation_stack_id;
|
2012-05-10 21:48:04 +08:00
|
|
|
SyncClock sync;
|
|
|
|
// Epoch at which the thread had started.
|
|
|
|
// If we see an event from the thread stamped by an older epoch,
|
|
|
|
// the event is from a dead thread that shared tid with this thread.
|
|
|
|
u64 epoch0;
|
|
|
|
u64 epoch1;
|
|
|
|
|
2013-03-15 21:48:44 +08:00
|
|
|
// Override superclass callbacks.
|
2015-04-11 10:44:24 +08:00
|
|
|
void OnDead() override;
|
|
|
|
void OnJoined(void *arg) override;
|
|
|
|
void OnFinished() override;
|
|
|
|
void OnStarted(void *arg) override;
|
|
|
|
void OnCreated(void *arg) override;
|
|
|
|
void OnReset() override;
|
|
|
|
void OnDetached(void *arg) override;
|
2012-05-10 21:48:04 +08:00
|
|
|
};
|
|
|
|
|
|
|
|
struct RacyStacks {
|
|
|
|
MD5Hash hash[2];
|
|
|
|
bool operator==(const RacyStacks &other) const {
|
|
|
|
if (hash[0] == other.hash[0] && hash[1] == other.hash[1])
|
|
|
|
return true;
|
|
|
|
if (hash[0] == other.hash[1] && hash[1] == other.hash[0])
|
|
|
|
return true;
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
};
|
|
|
|
|
|
|
|
struct RacyAddress {
|
|
|
|
uptr addr_min;
|
|
|
|
uptr addr_max;
|
|
|
|
};
|
|
|
|
|
2012-10-05 23:51:32 +08:00
|
|
|
struct FiredSuppression {
|
|
|
|
ReportType type;
|
2015-09-03 19:20:46 +08:00
|
|
|
uptr pc_or_addr;
|
2013-03-28 01:59:57 +08:00
|
|
|
Suppression *supp;
|
2012-10-05 23:51:32 +08:00
|
|
|
};
|
|
|
|
|
2012-05-10 21:48:04 +08:00
|
|
|
struct Context {
|
|
|
|
Context();
|
|
|
|
|
|
|
|
bool initialized;
|
2018-04-30 15:28:45 +08:00
|
|
|
#if !SANITIZER_GO
|
2014-01-24 20:33:35 +08:00
|
|
|
bool after_multithreaded_fork;
|
2018-04-30 15:28:45 +08:00
|
|
|
#endif
|
2012-05-10 21:48:04 +08:00
|
|
|
|
2014-05-29 21:50:54 +08:00
|
|
|
MetaMap metamap;
|
2012-05-10 21:48:04 +08:00
|
|
|
|
|
|
|
Mutex report_mtx;
|
|
|
|
int nreported;
|
|
|
|
int nmissed_expected;
|
2013-03-21 15:02:36 +08:00
|
|
|
atomic_uint64_t last_symbolize_time_ns;
|
2012-05-10 21:48:04 +08:00
|
|
|
|
2014-04-24 21:09:17 +08:00
|
|
|
void *background_thread;
|
|
|
|
atomic_uint32_t stop_background_thread;
|
|
|
|
|
2013-03-15 21:48:44 +08:00
|
|
|
ThreadRegistry *thread_registry;
|
2012-05-10 21:48:04 +08:00
|
|
|
|
2015-09-03 19:20:46 +08:00
|
|
|
Mutex racy_mtx;
|
2012-05-10 21:48:04 +08:00
|
|
|
Vector<RacyStacks> racy_stacks;
|
|
|
|
Vector<RacyAddress> racy_addresses;
|
2013-06-14 19:18:58 +08:00
|
|
|
// Number of fired suppressions may be large enough.
|
2015-09-03 19:20:46 +08:00
|
|
|
Mutex fired_suppressions_mtx;
|
2013-06-14 19:18:58 +08:00
|
|
|
InternalMmapVector<FiredSuppression> fired_suppressions;
|
2014-02-28 18:48:13 +08:00
|
|
|
DDetector *dd;
|
2014-02-21 23:07:18 +08:00
|
|
|
|
2014-08-06 02:45:02 +08:00
|
|
|
ClockAlloc clock_alloc;
|
|
|
|
|
2012-05-10 21:48:04 +08:00
|
|
|
Flags flags;
|
|
|
|
|
|
|
|
u64 stat[StatCnt];
|
|
|
|
u64 int_alloc_cnt[MBlockTypeCount];
|
|
|
|
u64 int_alloc_siz[MBlockTypeCount];
|
|
|
|
};
|
|
|
|
|
2014-03-20 18:36:20 +08:00
|
|
|
extern Context *ctx; // The one and the only global runtime context.
|
|
|
|
|
2017-03-26 23:27:04 +08:00
|
|
|
ALWAYS_INLINE Flags *flags() {
|
|
|
|
return &ctx->flags;
|
|
|
|
}
|
|
|
|
|
2013-12-24 20:55:56 +08:00
|
|
|
struct ScopedIgnoreInterceptors {
|
|
|
|
ScopedIgnoreInterceptors() {
|
2016-10-29 04:14:18 +08:00
|
|
|
#if !SANITIZER_GO
|
2013-12-24 20:55:56 +08:00
|
|
|
cur_thread()->ignore_interceptors++;
|
|
|
|
#endif
|
|
|
|
}
|
|
|
|
|
|
|
|
~ScopedIgnoreInterceptors() {
|
2016-10-29 04:14:18 +08:00
|
|
|
#if !SANITIZER_GO
|
2013-12-24 20:55:56 +08:00
|
|
|
cur_thread()->ignore_interceptors--;
|
|
|
|
#endif
|
|
|
|
}
|
2012-05-10 21:48:04 +08:00
|
|
|
};
|
|
|
|
|
2017-05-01 04:35:18 +08:00
|
|
|
const char *GetObjectTypeFromTag(uptr tag);
|
2017-05-04 00:51:01 +08:00
|
|
|
const char *GetReportHeaderFromTag(uptr tag);
|
2017-05-01 04:35:18 +08:00
|
|
|
uptr TagFromShadowStackFrame(uptr pc);
|
|
|
|
|
2017-11-10 10:07:11 +08:00
|
|
|
class ScopedReportBase {
|
2012-05-10 21:48:04 +08:00
|
|
|
public:
|
2017-02-02 21:17:05 +08:00
|
|
|
void AddMemoryAccess(uptr addr, uptr external_tag, Shadow s, StackTrace stack,
|
2012-12-06 20:16:15 +08:00
|
|
|
const MutexSet *mset);
|
2014-11-04 06:23:44 +08:00
|
|
|
void AddStack(StackTrace stack, bool suppressable = false);
|
2014-05-29 02:03:32 +08:00
|
|
|
void AddThread(const ThreadContext *tctx, bool suppressable = false);
|
|
|
|
void AddThread(int unique_tid, bool suppressable = false);
|
2014-03-21 21:00:18 +08:00
|
|
|
void AddUniqueTid(int unique_tid);
|
2012-05-10 21:48:04 +08:00
|
|
|
void AddMutex(const SyncVar *s);
|
2014-02-28 18:48:13 +08:00
|
|
|
u64 AddMutex(u64 id);
|
2012-05-10 21:48:04 +08:00
|
|
|
void AddLocation(uptr addr, uptr size);
|
2012-09-01 01:27:49 +08:00
|
|
|
void AddSleep(u32 stack_id);
|
2013-03-22 00:55:17 +08:00
|
|
|
void SetCount(int count);
|
2012-05-10 21:48:04 +08:00
|
|
|
|
|
|
|
const ReportDesc *GetReport() const;
|
|
|
|
|
2017-11-10 10:07:11 +08:00
|
|
|
protected:
|
|
|
|
ScopedReportBase(ReportType typ, uptr tag);
|
|
|
|
~ScopedReportBase();
|
|
|
|
|
2012-05-10 21:48:04 +08:00
|
|
|
private:
|
|
|
|
ReportDesc *rep_;
|
2013-12-24 20:55:56 +08:00
|
|
|
// Symbolizer makes lots of intercepted calls. If we try to process them,
|
|
|
|
// at best it will cause deadlocks on internal mutexes.
|
|
|
|
ScopedIgnoreInterceptors ignore_interceptors_;
|
2012-05-10 21:48:04 +08:00
|
|
|
|
2014-02-28 18:48:13 +08:00
|
|
|
void AddDeadMutex(u64 id);
|
2012-12-06 20:16:15 +08:00
|
|
|
|
2017-11-10 10:07:11 +08:00
|
|
|
ScopedReportBase(const ScopedReportBase &) = delete;
|
|
|
|
void operator=(const ScopedReportBase &) = delete;
|
|
|
|
};
|
|
|
|
|
|
|
|
// RAII report builder: constructing it acquires the global error-report lock
// (via lock_), so at most one report is produced at a time; the report itself
// is assembled through the inherited ScopedReportBase methods.
class ScopedReport : public ScopedReportBase {
 public:
  explicit ScopedReport(ReportType typ, uptr tag = kExternalTagNone);
  ~ScopedReport();

 private:
  // Held for the lifetime of the report to serialize report output.
  ScopedErrorReportLock lock_;
};
|
|
|
|
|
2021-04-30 14:32:52 +08:00
|
|
|
bool ShouldReport(ThreadState *thr, ReportType typ);
|
2016-12-20 01:52:20 +08:00
|
|
|
ThreadContext *IsThreadStackOrTls(uptr addr, bool *is_stack);
|
2014-11-04 06:23:44 +08:00
|
|
|
void RestoreStack(int tid, const u64 epoch, VarSizeStackTrace *stk,
|
2017-05-01 04:35:18 +08:00
|
|
|
MutexSet *mset, uptr *tag = nullptr);
|
|
|
|
|
|
|
|
// The stack could look like:
|
|
|
|
// <start> | <main> | <foo> | tag | <bar>
|
|
|
|
// This will extract the tag and keep:
|
|
|
|
// <start> | <main> | <foo> | <bar>
|
|
|
|
template<typename StackTraceTy>
|
|
|
|
void ExtractTagFromStack(StackTraceTy *stack, uptr *tag = nullptr) {
|
|
|
|
if (stack->size < 2) return;
|
|
|
|
uptr possible_tag_pc = stack->trace[stack->size - 2];
|
|
|
|
uptr possible_tag = TagFromShadowStackFrame(possible_tag_pc);
|
|
|
|
if (possible_tag == kExternalTagNone) return;
|
|
|
|
stack->trace_buffer[stack->size - 2] = stack->trace_buffer[stack->size - 1];
|
|
|
|
stack->size -= 1;
|
|
|
|
if (tag) *tag = possible_tag;
|
|
|
|
}
|
2014-11-04 06:23:44 +08:00
|
|
|
|
|
|
|
template<typename StackTraceTy>
|
2017-05-01 04:35:18 +08:00
|
|
|
void ObtainCurrentStack(ThreadState *thr, uptr toppc, StackTraceTy *stack,
|
|
|
|
uptr *tag = nullptr) {
|
2014-11-04 06:23:44 +08:00
|
|
|
uptr size = thr->shadow_stack_pos - thr->shadow_stack;
|
|
|
|
uptr start = 0;
|
|
|
|
if (size + !!toppc > kStackTraceMax) {
|
|
|
|
start = size + !!toppc - kStackTraceMax;
|
|
|
|
size = kStackTraceMax - !!toppc;
|
|
|
|
}
|
|
|
|
stack->Init(&thr->shadow_stack[start], size, toppc);
|
2017-05-01 04:35:18 +08:00
|
|
|
ExtractTagFromStack(stack, tag);
|
2014-11-04 06:23:44 +08:00
|
|
|
}
|
|
|
|
|
[TSan] Report proper error on allocator failures instead of CHECK(0)-ing
Summary:
Following up on and complementing D44404 and other sanitizer allocators.
Currently many allocator specific errors (OOM, for example) are reported as
a text message and CHECK(0) termination, no stack, no details, not too
helpful nor informative. To improve the situation, detailed and structured
common errors were defined and reported under the appropriate conditions.
Common tests were generalized a bit to cover a slightly different TSan
stack reporting format, extended to verify errno value and returned
pointer value check is now explicit to facilitate debugging.
Reviewers: dvyukov
Subscribers: srhines, kubamracek, delcypher, #sanitizers, llvm-commits
Differential Revision: https://reviews.llvm.org/D48087
llvm-svn: 334975
2018-06-19 04:03:31 +08:00
|
|
|
// Declares a local `stack` holding the current stack trace of `thr`,
// reversed so the outermost frame comes first (fatal-report format).
#define GET_STACK_TRACE_FATAL(thr, pc) \
  VarSizeStackTrace stack; \
  ObtainCurrentStack(thr, pc, &stack); \
  stack.ReverseOrder();
|
2012-08-16 23:08:49 +08:00
|
|
|
|
2015-02-18 07:23:10 +08:00
|
|
|
#if TSAN_COLLECT_STATS
|
2012-05-10 21:48:04 +08:00
|
|
|
void StatAggregate(u64 *dst, u64 *src);
|
|
|
|
void StatOutput(u64 *stat);
|
2015-02-13 23:25:47 +08:00
|
|
|
#endif
|
|
|
|
|
2013-03-29 02:52:40 +08:00
|
|
|
// Bumps the per-thread statistic counter `typ` by `n`.
// Compiles to nothing when stats collection is disabled.
void ALWAYS_INLINE StatInc(ThreadState *thr, StatType typ, u64 n = 1) {
#if TSAN_COLLECT_STATS
  thr->stat[typ] += n;
#endif
}
|
2013-03-29 02:52:40 +08:00
|
|
|
// Overwrites the per-thread statistic counter `typ` with `n`.
// Compiles to nothing when stats collection is disabled.
void ALWAYS_INLINE StatSet(ThreadState *thr, StatType typ, u64 n) {
#if TSAN_COLLECT_STATS
  thr->stat[typ] = n;
#endif
}
|
2012-05-10 21:48:04 +08:00
|
|
|
|
2012-11-07 00:00:16 +08:00
|
|
|
// Shadow-memory management for the application range [addr, addr+size).
void MapShadow(uptr addr, uptr size);
void MapThreadTrace(uptr addr, uptr size, const char *name);
// Releases shadow pages for a range whose contents are no longer needed.
void DontNeedShadowFor(uptr addr, uptr size);
void UnmapShadow(ThreadState *thr, uptr addr, uptr size);

// One-time runtime initialization entry points.
void InitializeShadowMemory();
void InitializeInterceptors();
void InitializeLibIgnore();
void InitializeDynamicAnnotations();
|
|
|
|
|
2014-01-24 20:33:35 +08:00
|
|
|
void ForkBefore(ThreadState *thr, uptr pc);
|
|
|
|
void ForkParentAfter(ThreadState *thr, uptr pc);
|
|
|
|
void ForkChildAfter(ThreadState *thr, uptr pc);
|
|
|
|
|
2012-05-10 21:48:04 +08:00
|
|
|
void ReportRace(ThreadState *thr);
|
2014-05-29 21:50:54 +08:00
|
|
|
bool OutputReport(ThreadState *thr, const ScopedReport &srep);
|
2015-09-03 19:20:46 +08:00
|
|
|
bool IsFiredSuppression(Context *ctx, ReportType type, StackTrace trace);
|
2012-05-10 21:48:04 +08:00
|
|
|
bool IsExpectedReport(uptr addr, uptr size);
|
2013-03-29 00:21:19 +08:00
|
|
|
void PrintMatchedBenignRaces();
|
2012-05-10 21:48:04 +08:00
|
|
|
|
|
|
|
// Debug printing, enabled by TSAN_DEBUG_OUTPUT level (no-op otherwise).
#if defined(TSAN_DEBUG_OUTPUT) && TSAN_DEBUG_OUTPUT >= 1
# define DPrintf Printf
#else
# define DPrintf(...)
#endif

// More verbose debug printing, requires TSAN_DEBUG_OUTPUT >= 2.
#if defined(TSAN_DEBUG_OUTPUT) && TSAN_DEBUG_OUTPUT >= 2
# define DPrintf2 Printf
#else
# define DPrintf2(...)
#endif
|
|
|
|
|
2012-09-01 01:27:49 +08:00
|
|
|
u32 CurrentStackId(ThreadState *thr, uptr pc);
|
2013-11-27 19:30:28 +08:00
|
|
|
ReportStack *SymbolizeStackId(u32 stack_id);
|
2012-09-01 20:13:18 +08:00
|
|
|
void PrintCurrentStack(ThreadState *thr, uptr pc);
|
2014-11-07 02:43:45 +08:00
|
|
|
void PrintCurrentStackSlow(uptr pc); // uses libunwind
|
2012-09-01 01:27:49 +08:00
|
|
|
|
2012-05-10 21:48:04 +08:00
|
|
|
void Initialize(ThreadState *thr);
|
2017-11-29 18:23:59 +08:00
|
|
|
void MaybeSpawnBackgroundThread();
|
2012-05-10 21:48:04 +08:00
|
|
|
int Finalize(ThreadState *thr);
|
|
|
|
|
2014-05-29 21:50:54 +08:00
|
|
|
void OnUserAlloc(ThreadState *thr, uptr pc, uptr p, uptr sz, bool write);
|
|
|
|
void OnUserFree(ThreadState *thr, uptr pc, uptr p, bool write);
|
2012-12-21 01:29:34 +08:00
|
|
|
|
2012-05-10 21:48:04 +08:00
|
|
|
void MemoryAccess(ThreadState *thr, uptr pc, uptr addr,
|
2013-02-01 17:42:06 +08:00
|
|
|
int kAccessSizeLog, bool kAccessIsWrite, bool kIsAtomic);
|
2012-05-10 21:48:04 +08:00
|
|
|
void MemoryAccessImpl(ThreadState *thr, uptr addr,
|
2013-02-01 17:42:06 +08:00
|
|
|
int kAccessSizeLog, bool kAccessIsWrite, bool kIsAtomic,
|
2012-05-10 21:48:04 +08:00
|
|
|
u64 *shadow_mem, Shadow cur);
|
|
|
|
void MemoryAccessRange(ThreadState *thr, uptr pc, uptr addr,
|
2013-02-01 17:42:06 +08:00
|
|
|
uptr size, bool is_write);
|
2013-02-13 21:05:36 +08:00
|
|
|
void MemoryAccessRangeStep(ThreadState *thr, uptr pc, uptr addr,
|
|
|
|
uptr size, uptr step, bool is_write);
|
2013-04-30 19:56:56 +08:00
|
|
|
void UnalignedMemoryAccess(ThreadState *thr, uptr pc, uptr addr,
|
|
|
|
int size, bool kAccessIsWrite, bool kIsAtomic);
|
2013-02-01 17:42:06 +08:00
|
|
|
|
|
|
|
// log2 of access size, passed as kAccessSizeLog to MemoryAccess.
const int kSizeLog1 = 0;
const int kSizeLog2 = 1;
const int kSizeLog4 = 2;
const int kSizeLog8 = 3;
|
|
|
|
|
2013-03-29 02:52:40 +08:00
|
|
|
// Plain (non-atomic) read of 2^kAccessSizeLog bytes at addr.
void ALWAYS_INLINE MemoryRead(ThreadState *thr, uptr pc,
                              uptr addr, int kAccessSizeLog) {
  MemoryAccess(thr, pc, addr, kAccessSizeLog, false, false);
}
|
|
|
|
|
2013-03-29 02:52:40 +08:00
|
|
|
// Plain (non-atomic) write of 2^kAccessSizeLog bytes at addr.
void ALWAYS_INLINE MemoryWrite(ThreadState *thr, uptr pc,
                               uptr addr, int kAccessSizeLog) {
  MemoryAccess(thr, pc, addr, kAccessSizeLog, true, false);
}
|
|
|
|
|
2013-03-29 02:52:40 +08:00
|
|
|
// Atomic read of 2^kAccessSizeLog bytes at addr.
void ALWAYS_INLINE MemoryReadAtomic(ThreadState *thr, uptr pc,
                                    uptr addr, int kAccessSizeLog) {
  MemoryAccess(thr, pc, addr, kAccessSizeLog, false, true);
}
|
|
|
|
|
2013-03-29 02:52:40 +08:00
|
|
|
// Atomic write of 2^kAccessSizeLog bytes at addr.
void ALWAYS_INLINE MemoryWriteAtomic(ThreadState *thr, uptr pc,
                                     uptr addr, int kAccessSizeLog) {
  MemoryAccess(thr, pc, addr, kAccessSizeLog, true, true);
}
|
|
|
|
|
2012-05-10 21:48:04 +08:00
|
|
|
void MemoryResetRange(ThreadState *thr, uptr pc, uptr addr, uptr size);
|
|
|
|
void MemoryRangeFreed(ThreadState *thr, uptr pc, uptr addr, uptr size);
|
2012-08-16 00:52:19 +08:00
|
|
|
void MemoryRangeImitateWrite(ThreadState *thr, uptr pc, uptr addr, uptr size);
|
2019-09-10 02:57:32 +08:00
|
|
|
void MemoryRangeImitateWriteOrResetRange(ThreadState *thr, uptr pc, uptr addr,
|
|
|
|
uptr size);
|
2013-10-10 23:58:12 +08:00
|
|
|
|
2017-03-26 23:27:04 +08:00
|
|
|
void ThreadIgnoreBegin(ThreadState *thr, uptr pc, bool save_stack = true);
|
2013-11-27 19:30:28 +08:00
|
|
|
void ThreadIgnoreEnd(ThreadState *thr, uptr pc);
|
2017-03-26 23:27:04 +08:00
|
|
|
void ThreadIgnoreSyncBegin(ThreadState *thr, uptr pc, bool save_stack = true);
|
2013-11-27 19:30:28 +08:00
|
|
|
void ThreadIgnoreSyncEnd(ThreadState *thr, uptr pc);
|
2012-05-10 21:48:04 +08:00
|
|
|
|
|
|
|
void FuncEntry(ThreadState *thr, uptr pc);
|
|
|
|
void FuncExit(ThreadState *thr);
|
|
|
|
|
|
|
|
int ThreadCreate(ThreadState *thr, uptr pc, uptr uid, bool detached);
|
2019-02-07 19:01:22 +08:00
|
|
|
void ThreadStart(ThreadState *thr, int tid, tid_t os_id,
|
|
|
|
ThreadType thread_type);
|
2012-05-10 21:48:04 +08:00
|
|
|
void ThreadFinish(ThreadState *thr);
|
2020-02-19 21:18:53 +08:00
|
|
|
int ThreadConsumeTid(ThreadState *thr, uptr pc, uptr uid);
|
2012-05-10 21:48:04 +08:00
|
|
|
void ThreadJoin(ThreadState *thr, uptr pc, int tid);
|
|
|
|
void ThreadDetach(ThreadState *thr, uptr pc, int tid);
|
|
|
|
void ThreadFinalize(ThreadState *thr);
|
2012-12-04 23:46:05 +08:00
|
|
|
void ThreadSetName(ThreadState *thr, const char *name);
|
2012-11-08 00:41:57 +08:00
|
|
|
int ThreadCount(ThreadState *thr);
|
2012-11-16 01:40:49 +08:00
|
|
|
void ProcessPendingSignals(ThreadState *thr);
|
2018-11-21 17:31:21 +08:00
|
|
|
void ThreadNotJoined(ThreadState *thr, uptr pc, int tid, uptr uid);
|
2012-05-10 21:48:04 +08:00
|
|
|
|
2016-04-27 16:23:02 +08:00
|
|
|
// Processor objects: per-physical-thread state that a ThreadState is
// wired to while it runs (see Processor's definition for details).
Processor *ProcCreate();
void ProcDestroy(Processor *proc);
void ProcWire(Processor *proc, ThreadState *thr);
void ProcUnwire(Processor *proc, ThreadState *thr);
|
|
|
|
|
2017-03-26 23:27:04 +08:00
|
|
|
// Mutex event processing.
// Note: the parameter is called flagz, because flags is already taken
// by the global function that returns flags.
void MutexCreate(ThreadState *thr, uptr pc, uptr addr, u32 flagz = 0);
void MutexDestroy(ThreadState *thr, uptr pc, uptr addr, u32 flagz = 0);
// Pre/Post pairs bracket the actual lock operation in the intercepted code.
void MutexPreLock(ThreadState *thr, uptr pc, uptr addr, u32 flagz = 0);
// rec: recursion count acquired in one go (for recursive mutexes).
void MutexPostLock(ThreadState *thr, uptr pc, uptr addr, u32 flagz = 0,
    int rec = 1);
// Returns the released recursion count.
int MutexUnlock(ThreadState *thr, uptr pc, uptr addr, u32 flagz = 0);
void MutexPreReadLock(ThreadState *thr, uptr pc, uptr addr, u32 flagz = 0);
void MutexPostReadLock(ThreadState *thr, uptr pc, uptr addr, u32 flagz = 0);
void MutexReadUnlock(ThreadState *thr, uptr pc, uptr addr);
// For mutexes where the unlocker does not know the lock mode.
void MutexReadOrWriteUnlock(ThreadState *thr, uptr pc, uptr addr);
void MutexRepair(ThreadState *thr, uptr pc, uptr addr);  // call on EOWNERDEAD
void MutexInvalidAccess(ThreadState *thr, uptr pc, uptr addr);
|
2012-05-10 21:48:04 +08:00
|
|
|
|
|
|
|
void Acquire(ThreadState *thr, uptr pc, uptr addr);
|
2014-11-18 14:44:43 +08:00
|
|
|
// AcquireGlobal synchronizes the current thread with all other threads.
|
|
|
|
// In terms of happens-before relation, it draws a HB edge from all threads
|
|
|
|
// (where they happen to execute right now) to the current thread. We use it to
|
|
|
|
// handle Go finalizers. Namely, finalizer goroutine executes AcquireGlobal
|
|
|
|
// right before executing finalizers. This provides a coarse, but simple
|
|
|
|
// approximation of the actual required synchronization.
|
2012-11-07 23:08:20 +08:00
|
|
|
void AcquireGlobal(ThreadState *thr, uptr pc);
|
2012-05-10 21:48:04 +08:00
|
|
|
void Release(ThreadState *thr, uptr pc, uptr addr);
|
2020-03-24 17:27:08 +08:00
|
|
|
void ReleaseStoreAcquire(ThreadState *thr, uptr pc, uptr addr);
|
2012-07-28 23:27:41 +08:00
|
|
|
void ReleaseStore(ThreadState *thr, uptr pc, uptr addr);
|
2012-09-01 01:27:49 +08:00
|
|
|
void AfterSleep(ThreadState *thr, uptr pc);
|
2013-10-10 23:58:12 +08:00
|
|
|
void AcquireImpl(ThreadState *thr, uptr pc, SyncClock *c);
|
|
|
|
void ReleaseImpl(ThreadState *thr, uptr pc, SyncClock *c);
|
2020-03-24 17:27:08 +08:00
|
|
|
void ReleaseStoreAcquireImpl(ThreadState *thr, uptr pc, SyncClock *c);
|
2013-10-10 23:58:12 +08:00
|
|
|
void ReleaseStoreImpl(ThreadState *thr, uptr pc, SyncClock *c);
|
|
|
|
void AcquireReleaseImpl(ThreadState *thr, uptr pc, SyncClock *c);
|
2012-05-10 21:48:04 +08:00
|
|
|
|
|
|
|
// The hacky call uses custom calling convention and an assembly thunk.
// It is considerably faster than a normal call for the caller
// if it is not executed (it is intended for slow paths from hot functions).
// The trick is that the call preserves all registers and the compiler
// does not treat it as a call.
// If it does not work for you, use normal call.
#if !SANITIZER_DEBUG && defined(__x86_64__) && !SANITIZER_MAC
// The caller may not create the stack frame for itself at all,
// so we create a reserve stack frame for it (1024b must be enough).
#define HACKY_CALL(f) \
  __asm__ __volatile__("sub $1024, %%rsp;" \
                       CFI_INL_ADJUST_CFA_OFFSET(1024) \
                       ".hidden " #f "_thunk;" \
                       "call " #f "_thunk;" \
                       "add $1024, %%rsp;" \
                       CFI_INL_ADJUST_CFA_OFFSET(-1024) \
                       ::: "memory", "cc");
#else
#define HACKY_CALL(f) f()
#endif
|
|
|
|
|
2012-07-06 00:18:28 +08:00
|
|
|
void TraceSwitch(ThreadState *thr);
|
2012-11-28 18:35:31 +08:00
|
|
|
uptr TraceTopPC(ThreadState *thr);
|
2012-11-28 20:19:50 +08:00
|
|
|
uptr TraceSize();
|
2012-12-04 20:19:53 +08:00
|
|
|
uptr TraceParts();
|
2013-03-20 18:31:53 +08:00
|
|
|
Trace *ThreadTrace(int tid);
|
2012-07-06 00:18:28 +08:00
|
|
|
|
2012-05-10 21:48:04 +08:00
|
|
|
// Assembly thunk invoked via HACKY_CALL when a trace part fills up.
extern "C" void __tsan_trace_switch();
// Appends one event to the current thread's trace. The event packs the
// type into the bits above kEventPCBits and the address/pc into the low
// kEventPCBits bits (typ must fit in 3 bits, addr must fit in kEventPCBits
// — enforced by the DCHECKs below).
void ALWAYS_INLINE TraceAddEvent(ThreadState *thr, FastState fs,
                                 EventType typ, u64 addr) {
  if (!kCollectHistory)
    return;
  DCHECK_GE((int)typ, 0);
  DCHECK_LE((int)typ, 7);
  DCHECK_EQ(GetLsb(addr, kEventPCBits), addr);
  StatInc(thr, StatEvents);
  u64 pos = fs.GetTracePos();
  // Crossing a part boundary: switch to the next trace part first.
  if (UNLIKELY((pos % kTracePartSize) == 0)) {
#if !SANITIZER_GO
    // Use the register-preserving thunk to keep this hot path cheap.
    HACKY_CALL(__tsan_trace_switch);
#else
    TraceSwitch(thr);
#endif
  }
  Event *trace = (Event*)GetThreadTrace(fs.tid());
  Event *evp = &trace[pos];
  Event ev = (u64)addr | ((u64)typ << kEventPCBits);
  *evp = ev;
}
|
|
|
|
|
2016-10-29 04:14:18 +08:00
|
|
|
#if !SANITIZER_GO
// End of the heap region, including the allocator's additional
// (metadata) space beyond HeapMemEnd().
uptr ALWAYS_INLINE HeapEnd() {
  return HeapMemEnd() + PrimaryAllocator::AdditionalSize();
}
#endif
|
|
|
|
|
2019-02-13 21:21:24 +08:00
|
|
|
ThreadState *FiberCreate(ThreadState *thr, uptr pc, unsigned flags);
|
|
|
|
void FiberDestroy(ThreadState *thr, uptr pc, ThreadState *fiber);
|
|
|
|
void FiberSwitch(ThreadState *thr, uptr pc, ThreadState *fiber, unsigned flags);
|
|
|
|
|
|
|
|
// These need to match __tsan_switch_to_fiber_* flags defined in
|
|
|
|
// tsan_interface.h. See documentation there as well.
|
|
|
|
enum FiberSwitchFlags {
|
|
|
|
FiberSwitchFlagNoSync = 1 << 0, // __tsan_switch_to_fiber_no_sync
|
|
|
|
};
|
|
|
|
|
2012-05-10 21:48:04 +08:00
|
|
|
} // namespace __tsan
|
|
|
|
|
|
|
|
#endif // TSAN_RTL_H
|