2012-05-10 21:48:04 +08:00
|
|
|
//===-- tsan_rtl.h ----------------------------------------------*- C++ -*-===//
|
|
|
|
//
|
2019-01-19 16:50:56 +08:00
|
|
|
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
|
|
|
|
// See https://llvm.org/LICENSE.txt for license information.
|
|
|
|
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
|
2012-05-10 21:48:04 +08:00
|
|
|
//
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
//
|
|
|
|
// This file is a part of ThreadSanitizer (TSan), a race detector.
|
|
|
|
//
|
|
|
|
// Main internal TSan header file.
|
|
|
|
//
|
|
|
|
// Ground rules:
|
|
|
|
// - C++ run-time should not be used (static CTORs, RTTI, exceptions, static
|
|
|
|
// function-scope locals)
|
|
|
|
// - All functions/classes/etc reside in namespace __tsan, except for those
|
|
|
|
// declared in tsan_interface.h.
|
|
|
|
// - Platform-specific files should be used instead of ifdefs (*).
|
|
|
|
// - No system headers included in header files (*).
|
|
|
|
// - Platform-specific headers included only into platform-specific files (*).
|
|
|
|
//
|
|
|
|
// (*) Except when inlining is critical for performance.
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
|
|
|
|
#ifndef TSAN_RTL_H
|
|
|
|
#define TSAN_RTL_H
|
|
|
|
|
2012-12-05 18:09:15 +08:00
|
|
|
#include "sanitizer_common/sanitizer_allocator.h"
|
2013-05-29 17:15:39 +08:00
|
|
|
#include "sanitizer_common/sanitizer_allocator_internal.h"
|
2013-12-05 15:44:35 +08:00
|
|
|
#include "sanitizer_common/sanitizer_asm.h"
|
2013-03-15 21:48:44 +08:00
|
|
|
#include "sanitizer_common/sanitizer_common.h"
|
2014-02-28 18:48:13 +08:00
|
|
|
#include "sanitizer_common/sanitizer_deadlock_detector_interface.h"
|
2013-10-03 21:37:17 +08:00
|
|
|
#include "sanitizer_common/sanitizer_libignore.h"
|
2013-06-26 23:37:14 +08:00
|
|
|
#include "sanitizer_common/sanitizer_suppressions.h"
|
2013-03-15 21:48:44 +08:00
|
|
|
#include "sanitizer_common/sanitizer_thread_registry.h"
|
2017-12-04 20:30:09 +08:00
|
|
|
#include "sanitizer_common/sanitizer_vector.h"
|
2012-05-10 21:48:04 +08:00
|
|
|
#include "tsan_clock.h"
|
|
|
|
#include "tsan_defs.h"
|
|
|
|
#include "tsan_flags.h"
|
2021-09-23 01:25:28 +08:00
|
|
|
#include "tsan_ignoreset.h"
|
2017-12-04 20:30:09 +08:00
|
|
|
#include "tsan_mman.h"
|
2012-12-06 20:16:15 +08:00
|
|
|
#include "tsan_mutexset.h"
|
2021-09-23 01:25:28 +08:00
|
|
|
#include "tsan_platform.h"
|
|
|
|
#include "tsan_report.h"
|
|
|
|
#include "tsan_shadow.h"
|
2014-05-29 21:50:54 +08:00
|
|
|
#include "tsan_stack_trace.h"
|
2021-09-23 01:25:28 +08:00
|
|
|
#include "tsan_sync.h"
|
|
|
|
#include "tsan_trace.h"
|
2012-05-10 21:48:04 +08:00
|
|
|
|
2012-12-04 23:13:30 +08:00
|
|
|
#if SANITIZER_WORDSIZE != 64
|
|
|
|
# error "ThreadSanitizer is supported only on 64-bit platforms"
|
|
|
|
#endif
|
|
|
|
|
2012-05-10 21:48:04 +08:00
|
|
|
namespace __tsan {
|
|
|
|
|
2016-10-29 04:14:18 +08:00
|
|
|
#if !SANITIZER_GO
|
2013-03-18 18:32:21 +08:00
|
|
|
struct MapUnmapCallback;
|
2015-12-09 05:54:39 +08:00
|
|
|
#if defined(__mips64) || defined(__aarch64__) || defined(__powerpc__)
|
2019-04-27 14:30:52 +08:00
|
|
|
|
2017-05-15 22:47:19 +08:00
|
|
|
// Parameters for SizeClassAllocator32, used on platforms with a compact
// address space (mips64/aarch64/powerpc per the surrounding #if).
struct AP32 {
  static const uptr kSpaceBeg = 0;
  static const u64 kSpaceSize = SANITIZER_MMAP_RANGE_SIZE;
  // No per-chunk metadata is stored by the primary allocator.
  static const uptr kMetadataSize = 0;
  typedef __sanitizer::CompactSizeClassMap SizeClassMap;
  static const uptr kRegionSizeLog = 20;
  // TSan only inspects its own address space, so the local view suffices.
  using AddressSpaceView = LocalAddressSpaceView;
  typedef __tsan::MapUnmapCallback MapUnmapCallback;
  static const uptr kFlags = 0;
};
typedef SizeClassAllocator32<AP32> PrimaryAllocator;
|
2015-02-20 14:42:41 +08:00
|
|
|
#else
|
2016-08-26 04:23:08 +08:00
|
|
|
struct AP64 {  // Allocator64 parameters. Deliberately using a short name.
// s390x has a different virtual address layout; all other 64-bit targets
// use the 48-bit address-space mapping.
# if defined(__s390x__)
  typedef MappingS390x Mapping;
# else
  typedef Mapping48AddressSpace Mapping;
# endif
  // The primary allocator owns the [kHeapMemBeg, kHeapMemEnd) region of the
  // platform mapping.
  static const uptr kSpaceBeg = Mapping::kHeapMemBeg;
  static const uptr kSpaceSize = Mapping::kHeapMemEnd - Mapping::kHeapMemBeg;
  // No per-chunk metadata is stored by the primary allocator.
  static const uptr kMetadataSize = 0;
  typedef DefaultSizeClassMap SizeClassMap;
  typedef __tsan::MapUnmapCallback MapUnmapCallback;
  static const uptr kFlags = 0;
  // TSan only inspects its own address space, so the local view suffices.
  using AddressSpaceView = LocalAddressSpaceView;
};
typedef SizeClassAllocator64<AP64> PrimaryAllocator;
|
2015-02-20 14:42:41 +08:00
|
|
|
#endif
|
2019-05-02 03:41:54 +08:00
|
|
|
typedef CombinedAllocator<PrimaryAllocator> Allocator;
|
2019-05-02 03:30:49 +08:00
|
|
|
typedef Allocator::AllocatorCache AllocatorCache;
|
2012-08-30 21:02:30 +08:00
|
|
|
Allocator *allocator();
|
2012-08-15 23:35:15 +08:00
|
|
|
#endif
|
|
|
|
|
2015-03-03 01:36:02 +08:00
|
|
|
struct ThreadSignalContext;
|
2012-05-10 21:48:04 +08:00
|
|
|
|
2013-03-25 18:10:44 +08:00
|
|
|
// Snapshot of per-thread state taken at setjmp time so that it can be
// restored when the matching longjmp unwinds the stack.
struct JmpBuf {
  uptr sp;                 // stack pointer at setjmp, used to match longjmp target
  int int_signal_send;
  bool in_blocking_func;
  uptr in_signal_handler;
  uptr *shadow_stack_pos;  // shadow stack position to roll back to
};
|
|
|
|
|
2016-04-27 16:23:02 +08:00
|
|
|
// A Processor represents a physical thread, or a P for Go.
// It is used to store internal resources like allocate cache, and does not
// participate in race-detection logic (invisible to end user).
// In C++ it is tied to an OS thread just like ThreadState, however ideally
// it should be tied to a CPU (this way we will have fewer allocator caches).
// In Go it is tied to a P, so there are significantly fewer Processor's than
// ThreadState's (which are tied to Gs).
// A ThreadState must be wired with a Processor to handle events.
struct Processor {
  ThreadState *thr; // currently wired thread, or nullptr
#if !SANITIZER_GO
  // User-malloc caches are only needed when TSan intercepts allocation (C++).
  AllocatorCache alloc_cache;
  InternalAllocatorCache internal_alloc_cache;
#endif
  // Caches for the dense slab allocators backing heap-block and sync-object
  // metadata, and for vector clocks.
  DenseSlabAllocCache block_cache;
  DenseSlabAllocCache sync_cache;
  DenseSlabAllocCache clock_cache;
  DDPhysicalThread *dd_pt;  // deadlock-detector per-physical-thread state
};
|
|
|
|
|
2016-10-29 04:14:18 +08:00
|
|
|
#if !SANITIZER_GO
|
2016-05-10 19:19:50 +08:00
|
|
|
// ScopedGlobalProcessor temporarily sets up a global processor for the current
// thread, if it does not have one. Intended for interceptors that can run
// at the very thread end, when we already destroyed the thread processor.
struct ScopedGlobalProcessor {
  ScopedGlobalProcessor();
  ~ScopedGlobalProcessor();
};
|
|
|
|
#endif
|
|
|
|
|
2012-05-10 21:48:04 +08:00
|
|
|
// This struct is stored in TLS.
// Per-thread race-detection state; one instance per user thread (or G in Go).
struct ThreadState {
  FastState fast_state;
  // Synch epoch represents the threads's epoch before the last synchronization
  // action. It allows to reduce number of shadow state updates.
  // For example, fast_synch_epoch=100, last write to addr X was at epoch=150,
  // if we are processing write to X from the same thread at epoch=200,
  // we do nothing, because both writes happen in the same 'synch epoch'.
  // That is, if another memory access does not race with the former write,
  // it does not race with the latter as well.
  // QUESTION: can we squeeze this into ThreadState::Fast?
  // E.g. ThreadState::Fast is a 44-bit, 32 are taken by synch_epoch and 12 are
  // taken by epoch between synchs.
  // This way we can save one load from tls.
  u64 fast_synch_epoch;
  // Technically `current` should be a separate THREADLOCAL variable;
  // but it is placed here in order to share cache line with previous fields.
  ThreadState* current;
  // This is a slow path flag. On fast path, fast_state.GetIgnoreBit() is read.
  // We do not distinguish between ignoring reads and writes
  // for better performance.
  int ignore_reads_and_writes;
  atomic_sint32_t pending_signals;
  // Depth counter for sync-ignore regions (see ThreadIgnoreSyncBegin/End).
  int ignore_sync;
  int suppress_reports;
  // Go does not support ignores.
#if !SANITIZER_GO
  IgnoreSet mop_ignore_set;
  IgnoreSet sync_ignore_set;
#endif
  // Per-thread shadow call stack: [shadow_stack, shadow_stack_end), with
  // shadow_stack_pos as the current top.
  uptr *shadow_stack;
  uptr *shadow_stack_end;
  uptr *shadow_stack_pos;
  // Scratch state used while reporting a race.
  RawShadow *racy_shadow_addr;
  RawShadow racy_state[2];
  MutexSet mset;           // set of mutexes currently held by this thread
  ThreadClock clock;       // this thread's vector clock
#if !SANITIZER_GO
  Vector<JmpBuf> jmp_bufs;
  // Depth counter; while non-zero, interceptors are no-ops for this thread.
  int ignore_interceptors;
#endif
  const Tid tid;
  const int unique_id;
  bool in_symbolizer;
  bool in_ignored_lib;
  bool is_inited;
  bool is_dead;
  // Transient flags set around free/vptr accesses (see inline MemoryAccess).
  bool is_freeing;
  bool is_vptr_access;
  // Thread stack and TLS boundaries, used to classify addresses in reports.
  const uptr stk_addr;
  const uptr stk_size;
  const uptr tls_addr;
  const uptr tls_size;
  ThreadContext *tctx;

  DDLogicalThread *dd_lt;  // deadlock-detector per-logical-thread state

  // Current wired Processor, or nullptr. Required to handle any events.
  Processor *proc1;
#if !SANITIZER_GO
  Processor *proc() { return proc1; }
#else
  Processor *proc();
#endif

  atomic_uintptr_t in_signal_handler;
  ThreadSignalContext *signal_ctx;

#if !SANITIZER_GO
  // Stack/clock captured at the last pthread_cond_*-style sleep; used for
  // "as if synchronized via sleep" reporting — TODO confirm against callers.
  StackID last_sleep_stack_id;
  ThreadClock last_sleep_clock;
#endif

  // Set in regions of runtime that must be signal-safe and fork-safe.
  // If set, malloc must not be called.
  int nomalloc;

  const ReportDesc *current_report;

  // Current position in tctx->trace.Back()->events (Event*).
  atomic_uintptr_t trace_pos;
  // PC of the last memory access, used to compute PC deltas in the trace.
  uptr trace_prev_pc;
  Sid sid;
  Epoch epoch;

  explicit ThreadState(Context *ctx, Tid tid, int unique_id, u64 epoch,
                       unsigned reuse_count, uptr stk_addr, uptr stk_size,
                       uptr tls_addr, uptr tls_size);
} ALIGNED(SANITIZER_CACHE_LINE_SIZE);
|
2012-05-10 21:48:04 +08:00
|
|
|
|
2016-10-29 04:14:18 +08:00
|
|
|
#if !SANITIZER_GO
|
2016-01-15 11:39:04 +08:00
|
|
|
#if SANITIZER_MAC || SANITIZER_ANDROID
|
2015-11-05 21:54:50 +08:00
|
|
|
ThreadState *cur_thread();
|
2019-04-20 08:18:44 +08:00
|
|
|
void set_cur_thread(ThreadState *thr);
|
2015-11-05 21:54:50 +08:00
|
|
|
void cur_thread_finalize();
|
2021-09-24 12:57:37 +08:00
|
|
|
inline ThreadState *cur_thread_init() { return cur_thread(); }
|
|
|
|
# else
|
2014-05-12 18:40:33 +08:00
|
|
|
__attribute__((tls_model("initial-exec")))
|
2012-07-06 00:18:28 +08:00
|
|
|
extern THREADLOCAL char cur_thread_placeholder[];
|
2020-09-17 22:04:50 +08:00
|
|
|
// Returns the calling thread's ThreadState. Reads through the `current`
// pointer so that the runtime can redirect it (e.g. for fibers).
inline ThreadState *cur_thread() {
  return reinterpret_cast<ThreadState *>(cur_thread_placeholder)->current;
}
|
2021-09-24 12:57:37 +08:00
|
|
|
// Like cur_thread(), but lazily points `current` at the TLS placeholder on
// first use, so it is safe to call before the thread is fully initialized.
inline ThreadState *cur_thread_init() {
  ThreadState *placeholder =
      reinterpret_cast<ThreadState *>(cur_thread_placeholder);
  if (UNLIKELY(placeholder->current == nullptr))
    placeholder->current = placeholder;
  return placeholder->current;
}
|
2020-09-17 22:04:50 +08:00
|
|
|
// Redirects cur_thread() for the calling thread to `thr` (fiber support).
inline void set_cur_thread(ThreadState *thr) {
  reinterpret_cast<ThreadState *>(cur_thread_placeholder)->current = thr;
}
// Nothing to clean up on this platform branch; TLS is released by the OS.
inline void cur_thread_finalize() { }
|
2021-09-24 12:57:37 +08:00
|
|
|
# endif // SANITIZER_MAC || SANITIZER_ANDROID
|
2015-11-05 21:54:50 +08:00
|
|
|
#endif // SANITIZER_GO
|
2012-05-10 21:48:04 +08:00
|
|
|
|
2020-11-03 08:42:20 +08:00
|
|
|
// Long-lived per-thread bookkeeping kept in the ThreadRegistry; outlives the
// TLS ThreadState so finished threads can still be described in reports.
class ThreadContext final : public ThreadContextBase {
 public:
  explicit ThreadContext(Tid tid);
  ~ThreadContext();
  ThreadState *thr;            // live TLS state, or stale once thread finished
  StackID creation_stack_id;   // stack at which this thread was created
  SyncClock sync;              // clock released by the thread at finish
  // Epoch at which the thread had started.
  // If we see an event from the thread stamped by an older epoch,
  // the event is from a dead thread that shared tid with this thread.
  u64 epoch0;
  u64 epoch1;

  v3::Trace trace;             // event trace for stack/mutex-set restoration

  // Override superclass callbacks.
  void OnDead() override;
  void OnJoined(void *arg) override;
  void OnFinished() override;
  void OnStarted(void *arg) override;
  void OnCreated(void *arg) override;
  void OnReset() override;
  void OnDetached(void *arg) override;
};
|
|
|
|
|
|
|
|
// Pair of stack-trace hashes identifying an already-reported race, used to
// suppress duplicate reports of the same race.
struct RacyStacks {
  MD5Hash hash[2];
  bool operator==(const RacyStacks &other) const;
};
|
|
|
|
|
|
|
|
// Address range of an already-reported race, used for report deduplication.
struct RacyAddress {
  uptr addr_min;
  uptr addr_max;
};
|
|
|
|
|
2012-10-05 23:51:32 +08:00
|
|
|
// Record of a suppression that has already matched a report; kept so repeated
// occurrences of the same suppressed report can be recognized cheaply.
struct FiredSuppression {
  ReportType type;
  uptr pc_or_addr;    // PC or address the suppression matched on
  Suppression *supp;  // the matching suppression entry
};
|
|
|
|
|
2012-05-10 21:48:04 +08:00
|
|
|
// Global runtime state shared by all threads; there is exactly one instance
// (see `extern Context *ctx` below).
struct Context {
  Context();

  bool initialized;
#if !SANITIZER_GO
  bool after_multithreaded_fork;
#endif

  MetaMap metamap;  // maps app memory to heap-block/sync-object metadata

  Mutex report_mtx;  // serializes report production
  int nreported;
  atomic_uint64_t last_symbolize_time_ns;

  void *background_thread;
  atomic_uint32_t stop_background_thread;

  ThreadRegistry thread_registry;

  // Deduplication state for already-reported races.
  Mutex racy_mtx;
  Vector<RacyStacks> racy_stacks;
  Vector<RacyAddress> racy_addresses;
  // Number of fired suppressions may be large enough.
  Mutex fired_suppressions_mtx;
  InternalMmapVector<FiredSuppression> fired_suppressions;
  DDetector *dd;  // deadlock detector

  ClockAlloc clock_alloc;

  Flags flags;
  fd_t memprof_fd;  // destination for memory profiling output

  Mutex slot_mtx;
};
|
|
|
|
|
2014-03-20 18:36:20 +08:00
|
|
|
extern Context *ctx; // The one and the only global runtime context.
|
|
|
|
|
2017-03-26 23:27:04 +08:00
|
|
|
// Accessor for the runtime flags stored in the global Context.
ALWAYS_INLINE Flags *flags() { return &ctx->flags; }
|
|
|
|
|
2013-12-24 20:55:56 +08:00
|
|
|
// RAII guard that disables interceptor processing for the current thread for
// its lifetime (bumps cur_thread()->ignore_interceptors). No-op for Go, which
// has no interceptors.
struct ScopedIgnoreInterceptors {
  ScopedIgnoreInterceptors() {
#if !SANITIZER_GO
    cur_thread()->ignore_interceptors++;
#endif
  }

  ~ScopedIgnoreInterceptors() {
#if !SANITIZER_GO
    cur_thread()->ignore_interceptors--;
#endif
  }
};
|
|
|
|
|
2017-05-01 04:35:18 +08:00
|
|
|
const char *GetObjectTypeFromTag(uptr tag);
|
2017-05-04 00:51:01 +08:00
|
|
|
const char *GetReportHeaderFromTag(uptr tag);
|
2017-05-01 04:35:18 +08:00
|
|
|
uptr TagFromShadowStackFrame(uptr pc);
|
|
|
|
|
2017-11-10 10:07:11 +08:00
|
|
|
// Builder for a race/mutex/etc. report. Accumulates the pieces of a
// ReportDesc; concrete lifetime management lives in ScopedReport below.
class ScopedReportBase {
 public:
  void AddMemoryAccess(uptr addr, uptr external_tag, Shadow s, StackTrace stack,
                       const MutexSet *mset);
  void AddStack(StackTrace stack, bool suppressable = false);
  void AddThread(const ThreadContext *tctx, bool suppressable = false);
  void AddThread(Tid unique_tid, bool suppressable = false);
  void AddUniqueTid(Tid unique_tid);
  void AddMutex(const SyncVar *s);
  u64 AddMutex(u64 id);
  void AddLocation(uptr addr, uptr size);
  void AddSleep(StackID stack_id);
  void SetCount(int count);

  const ReportDesc *GetReport() const;

 protected:
  // Construction/destruction restricted to subclasses (see ScopedReport).
  ScopedReportBase(ReportType typ, uptr tag);
  ~ScopedReportBase();

 private:
  ReportDesc *rep_;
  // Symbolizer makes lots of intercepted calls. If we try to process them,
  // at best it will cause deadlocks on internal mutexes.
  ScopedIgnoreInterceptors ignore_interceptors_;

  void AddDeadMutex(u64 id);

  ScopedReportBase(const ScopedReportBase &) = delete;
  void operator=(const ScopedReportBase &) = delete;
};
|
|
|
|
|
|
|
|
// Concrete report builder; additionally holds the global error-report lock
// for the duration of report construction.
class ScopedReport : public ScopedReportBase {
 public:
  explicit ScopedReport(ReportType typ, uptr tag = kExternalTagNone);
  ~ScopedReport();

 private:
  ScopedErrorReportLock lock_;
};
|
|
|
|
|
2021-04-30 14:32:52 +08:00
|
|
|
bool ShouldReport(ThreadState *thr, ReportType typ);
|
2016-12-20 01:52:20 +08:00
|
|
|
ThreadContext *IsThreadStackOrTls(uptr addr, bool *is_stack);
|
2021-11-13 00:43:26 +08:00
|
|
|
void RestoreStack(Tid tid, const u64 epoch, VarSizeStackTrace *stk,
|
|
|
|
MutexSet *mset, uptr *tag = nullptr);
|
2017-05-01 04:35:18 +08:00
|
|
|
|
|
|
|
// The stack could look like:
|
|
|
|
// <start> | <main> | <foo> | tag | <bar>
|
|
|
|
// This will extract the tag and keep:
|
|
|
|
// <start> | <main> | <foo> | <bar>
|
|
|
|
template<typename StackTraceTy>
|
|
|
|
void ExtractTagFromStack(StackTraceTy *stack, uptr *tag = nullptr) {
|
|
|
|
if (stack->size < 2) return;
|
|
|
|
uptr possible_tag_pc = stack->trace[stack->size - 2];
|
|
|
|
uptr possible_tag = TagFromShadowStackFrame(possible_tag_pc);
|
|
|
|
if (possible_tag == kExternalTagNone) return;
|
|
|
|
stack->trace_buffer[stack->size - 2] = stack->trace_buffer[stack->size - 1];
|
|
|
|
stack->size -= 1;
|
|
|
|
if (tag) *tag = possible_tag;
|
|
|
|
}
|
2014-11-04 06:23:44 +08:00
|
|
|
|
|
|
|
template<typename StackTraceTy>
|
2017-05-01 04:35:18 +08:00
|
|
|
void ObtainCurrentStack(ThreadState *thr, uptr toppc, StackTraceTy *stack,
|
|
|
|
uptr *tag = nullptr) {
|
2014-11-04 06:23:44 +08:00
|
|
|
uptr size = thr->shadow_stack_pos - thr->shadow_stack;
|
|
|
|
uptr start = 0;
|
|
|
|
if (size + !!toppc > kStackTraceMax) {
|
|
|
|
start = size + !!toppc - kStackTraceMax;
|
|
|
|
size = kStackTraceMax - !!toppc;
|
|
|
|
}
|
|
|
|
stack->Init(&thr->shadow_stack[start], size, toppc);
|
2017-05-01 04:35:18 +08:00
|
|
|
ExtractTagFromStack(stack, tag);
|
2014-11-04 06:23:44 +08:00
|
|
|
}
|
|
|
|
|
[TSan] Report proper error on allocator failures instead of CHECK(0)-ing
Summary:
Following up on and complementing D44404 and other sanitizer allocators.
Currently many allocator specific errors (OOM, for example) are reported as
a text message and CHECK(0) termination, no stack, no details, not too
helpful nor informative. To improve the situation, detailed and structured
common errors were defined and reported under the appropriate conditions.
Common tests were generalized a bit to cover a slightly different TSan
stack reporting format, extended to verify errno value and returned
pointer value check is now explicit to facilitate debugging.
Reviewers: dvyukov
Subscribers: srhines, kubamracek, delcypher, #sanitizers, llvm-commits
Differential Revision: https://reviews.llvm.org/D48087
llvm-svn: 334975
2018-06-19 04:03:31 +08:00
|
|
|
#define GET_STACK_TRACE_FATAL(thr, pc) \
|
|
|
|
VarSizeStackTrace stack; \
|
|
|
|
ObtainCurrentStack(thr, pc, &stack); \
|
|
|
|
stack.ReverseOrder();
|
2012-08-16 23:08:49 +08:00
|
|
|
|
2012-11-07 00:00:16 +08:00
|
|
|
void MapShadow(uptr addr, uptr size);
|
2015-05-30 06:31:28 +08:00
|
|
|
void MapThreadTrace(uptr addr, uptr size, const char *name);
|
2013-03-18 23:49:07 +08:00
|
|
|
void DontNeedShadowFor(uptr addr, uptr size);
|
2019-09-10 02:57:32 +08:00
|
|
|
void UnmapShadow(ThreadState *thr, uptr addr, uptr size);
|
2012-05-10 21:48:04 +08:00
|
|
|
void InitializeShadowMemory();
|
|
|
|
void InitializeInterceptors();
|
2013-10-03 21:37:17 +08:00
|
|
|
void InitializeLibIgnore();
|
2012-05-10 21:48:04 +08:00
|
|
|
void InitializeDynamicAnnotations();
|
|
|
|
|
2014-01-24 20:33:35 +08:00
|
|
|
void ForkBefore(ThreadState *thr, uptr pc);
|
|
|
|
void ForkParentAfter(ThreadState *thr, uptr pc);
|
2021-11-12 03:37:05 +08:00
|
|
|
void ForkChildAfter(ThreadState *thr, uptr pc, bool start_thread);
|
2014-01-24 20:33:35 +08:00
|
|
|
|
2021-11-13 00:43:26 +08:00
|
|
|
void ReportRace(ThreadState *thr);
|
2014-05-29 21:50:54 +08:00
|
|
|
bool OutputReport(ThreadState *thr, const ScopedReport &srep);
|
2015-09-03 19:20:46 +08:00
|
|
|
bool IsFiredSuppression(Context *ctx, ReportType type, StackTrace trace);
|
2012-05-10 21:48:04 +08:00
|
|
|
bool IsExpectedReport(uptr addr, uptr size);
|
|
|
|
|
|
|
|
#if defined(TSAN_DEBUG_OUTPUT) && TSAN_DEBUG_OUTPUT >= 1
|
2012-11-02 20:17:51 +08:00
|
|
|
# define DPrintf Printf
|
2012-05-10 21:48:04 +08:00
|
|
|
#else
|
|
|
|
# define DPrintf(...)
|
|
|
|
#endif
|
|
|
|
|
|
|
|
#if defined(TSAN_DEBUG_OUTPUT) && TSAN_DEBUG_OUTPUT >= 2
|
2012-11-02 20:17:51 +08:00
|
|
|
# define DPrintf2 Printf
|
2012-05-10 21:48:04 +08:00
|
|
|
#else
|
|
|
|
# define DPrintf2(...)
|
|
|
|
#endif
|
|
|
|
|
2021-07-30 19:50:15 +08:00
|
|
|
StackID CurrentStackId(ThreadState *thr, uptr pc);
|
|
|
|
ReportStack *SymbolizeStackId(StackID stack_id);
|
2012-09-01 20:13:18 +08:00
|
|
|
void PrintCurrentStack(ThreadState *thr, uptr pc);
|
2014-11-07 02:43:45 +08:00
|
|
|
void PrintCurrentStackSlow(uptr pc); // uses libunwind
|
2021-07-28 21:42:18 +08:00
|
|
|
MBlock *JavaHeapBlock(uptr addr, uptr *start);
|
2012-09-01 01:27:49 +08:00
|
|
|
|
2012-05-10 21:48:04 +08:00
|
|
|
void Initialize(ThreadState *thr);
|
2017-11-29 18:23:59 +08:00
|
|
|
void MaybeSpawnBackgroundThread();
|
2012-05-10 21:48:04 +08:00
|
|
|
int Finalize(ThreadState *thr);
|
|
|
|
|
2014-05-29 21:50:54 +08:00
|
|
|
void OnUserAlloc(ThreadState *thr, uptr pc, uptr p, uptr sz, bool write);
|
|
|
|
void OnUserFree(ThreadState *thr, uptr pc, uptr p, bool write);
|
2012-12-21 01:29:34 +08:00
|
|
|
|
2021-11-13 00:43:26 +08:00
|
|
|
void MemoryAccess(ThreadState *thr, uptr pc, uptr addr,
|
|
|
|
int kAccessSizeLog, bool kAccessIsWrite, bool kIsAtomic);
|
|
|
|
void MemoryAccessImpl(ThreadState *thr, uptr addr,
|
|
|
|
int kAccessSizeLog, bool kAccessIsWrite, bool kIsAtomic,
|
|
|
|
u64 *shadow_mem, Shadow cur);
|
|
|
|
void MemoryAccessRange(ThreadState *thr, uptr pc, uptr addr,
|
|
|
|
uptr size, bool is_write);
|
tsan: new MemoryAccess interface
Currently we have MemoryAccess function that accepts
"bool kAccessIsWrite, bool kIsAtomic" and 4 wrappers:
MemoryRead/MemoryWrite/MemoryReadAtomic/MemoryWriteAtomic.
Such scheme with bool flags is not particularly scalable/extendable.
Because of that we did not have Read/Write wrappers for UnalignedMemoryAccess,
and "true, false" or "false, true" at call sites is not very readable.
Moreover, the new tsan runtime will introduce more flags
(e.g. move "freed" and "vptr access" to memory acccess flags).
We can't have 16 wrappers and each flag also takes whole
64-bit register for non-inlined calls.
Introduce AccessType enum that contains bit mask of
read/write, atomic/non-atomic, and later free/non-free,
vptr/non-vptr.
Such scheme is more scalable, more readble, more efficient
(don't consume multiple registers for these flags during calls)
and allows to cover unaligned and range variations of memory
access functions as well.
Also switch from size log to just size.
The new tsan runtime won't have the limitation of supporting
only 1/2/4/8 access sizes, so we don't need the logarithms.
Also add an inline thunk that converts the new interface to the old one.
For inlined calls it should not add any overhead because
all flags/size can be computed as compile time.
Reviewed By: vitalybuka, melver
Differential Revision: https://reviews.llvm.org/D107276
2021-08-02 17:04:43 +08:00
|
|
|
void UnalignedMemoryAccess(ThreadState *thr, uptr pc, uptr addr, uptr size,
|
|
|
|
AccessType typ);
|
2021-11-13 00:43:26 +08:00
|
|
|
|
|
|
|
const int kSizeLog1 = 0;
|
|
|
|
const int kSizeLog2 = 1;
|
|
|
|
const int kSizeLog4 = 2;
|
|
|
|
const int kSizeLog8 = 3;
|
2013-02-01 17:42:06 +08:00
|
|
|
|
tsan: new MemoryAccess interface
Currently we have MemoryAccess function that accepts
"bool kAccessIsWrite, bool kIsAtomic" and 4 wrappers:
MemoryRead/MemoryWrite/MemoryReadAtomic/MemoryWriteAtomic.
Such scheme with bool flags is not particularly scalable/extendable.
Because of that we did not have Read/Write wrappers for UnalignedMemoryAccess,
and "true, false" or "false, true" at call sites is not very readable.
Moreover, the new tsan runtime will introduce more flags
(e.g. move "freed" and "vptr access" to memory acccess flags).
We can't have 16 wrappers and each flag also takes whole
64-bit register for non-inlined calls.
Introduce AccessType enum that contains bit mask of
read/write, atomic/non-atomic, and later free/non-free,
vptr/non-vptr.
Such scheme is more scalable, more readble, more efficient
(don't consume multiple registers for these flags during calls)
and allows to cover unaligned and range variations of memory
access functions as well.
Also switch from size log to just size.
The new tsan runtime won't have the limitation of supporting
only 1/2/4/8 access sizes, so we don't need the logarithms.
Also add an inline thunk that converts the new interface to the old one.
For inlined calls it should not add any overhead because
all flags/size can be computed as compile time.
Reviewed By: vitalybuka, melver
Differential Revision: https://reviews.llvm.org/D107276
2021-08-02 17:04:43 +08:00
|
|
|
// Compatibility thunk: converts the new (size, AccessType) interface to the
// old (size_log, is_write, is_atomic) one, setting the transient free/vptr
// flags on the thread around the call. For inlined calls everything here
// folds away at compile time.
ALWAYS_INLINE
void MemoryAccess(ThreadState *thr, uptr pc, uptr addr, uptr size,
                  AccessType typ) {
  int size_log;
  if (size == 1) {
    size_log = kSizeLog1;
  } else if (size == 2) {
    size_log = kSizeLog2;
  } else if (size == 4) {
    size_log = kSizeLog4;
  } else {
    // Only power-of-two sizes 1/2/4/8 are supported by the old interface.
    DCHECK_EQ(size, 8);
    size_log = kSizeLog8;
  }
  const bool is_write = !(typ & kAccessRead);
  const bool is_atomic = typ & kAccessAtomic;
  if (typ & kAccessVptr)
    thr->is_vptr_access = true;
  if (typ & kAccessFree)
    thr->is_freeing = true;
  MemoryAccess(thr, pc, addr, size_log, is_write, is_atomic);
  if (typ & kAccessVptr)
    thr->is_vptr_access = false;
  if (typ & kAccessFree)
    thr->is_freeing = false;
}
|
|
|
|
|
2021-04-27 19:55:41 +08:00
|
|
|
void MemoryResetRange(ThreadState *thr, uptr pc, uptr addr, uptr size);
|
2021-11-13 00:43:26 +08:00
|
|
|
void MemoryRangeFreed(ThreadState *thr, uptr pc, uptr addr, uptr size);
|
2012-08-16 00:52:19 +08:00
|
|
|
void MemoryRangeImitateWrite(ThreadState *thr, uptr pc, uptr addr, uptr size);
|
2019-09-10 02:57:32 +08:00
|
|
|
void MemoryRangeImitateWriteOrResetRange(ThreadState *thr, uptr pc, uptr addr,
|
|
|
|
uptr size);
|
2013-10-10 23:58:12 +08:00
|
|
|
|
2021-07-29 00:33:18 +08:00
|
|
|
void ThreadIgnoreBegin(ThreadState *thr, uptr pc);
|
|
|
|
void ThreadIgnoreEnd(ThreadState *thr);
|
|
|
|
void ThreadIgnoreSyncBegin(ThreadState *thr, uptr pc);
|
|
|
|
void ThreadIgnoreSyncEnd(ThreadState *thr);
|
2012-05-10 21:48:04 +08:00
|
|
|
|
2021-11-13 00:43:26 +08:00
|
|
|
void FuncEntry(ThreadState *thr, uptr pc);
|
|
|
|
void FuncExit(ThreadState *thr);
|
|
|
|
|
2021-07-30 19:50:15 +08:00
|
|
|
Tid ThreadCreate(ThreadState *thr, uptr pc, uptr uid, bool detached);
|
|
|
|
void ThreadStart(ThreadState *thr, Tid tid, tid_t os_id,
|
2019-02-07 19:01:22 +08:00
|
|
|
ThreadType thread_type);
|
2012-05-10 21:48:04 +08:00
|
|
|
void ThreadFinish(ThreadState *thr);
|
2021-07-30 19:50:15 +08:00
|
|
|
Tid ThreadConsumeTid(ThreadState *thr, uptr pc, uptr uid);
|
|
|
|
void ThreadJoin(ThreadState *thr, uptr pc, Tid tid);
|
|
|
|
void ThreadDetach(ThreadState *thr, uptr pc, Tid tid);
|
2012-05-10 21:48:04 +08:00
|
|
|
void ThreadFinalize(ThreadState *thr);
|
2012-12-04 23:46:05 +08:00
|
|
|
void ThreadSetName(ThreadState *thr, const char *name);
|
2012-11-08 00:41:57 +08:00
|
|
|
int ThreadCount(ThreadState *thr);
|
2021-07-31 17:43:32 +08:00
|
|
|
void ProcessPendingSignalsImpl(ThreadState *thr);
|
2021-07-30 19:50:15 +08:00
|
|
|
void ThreadNotJoined(ThreadState *thr, uptr pc, Tid tid, uptr uid);
|
2012-05-10 21:48:04 +08:00
|
|
|
|
2016-04-27 16:23:02 +08:00
|
|
|
Processor *ProcCreate();
|
|
|
|
void ProcDestroy(Processor *proc);
|
|
|
|
void ProcWire(Processor *proc, ThreadState *thr);
|
|
|
|
void ProcUnwire(Processor *proc, ThreadState *thr);
|
|
|
|
|
2017-03-26 23:27:04 +08:00
|
|
|
// Note: the parameter is called flagz, because flags is already taken
|
|
|
|
// by the global function that returns flags.
|
|
|
|
void MutexCreate(ThreadState *thr, uptr pc, uptr addr, u32 flagz = 0);
|
2017-05-01 18:01:13 +08:00
|
|
|
void MutexDestroy(ThreadState *thr, uptr pc, uptr addr, u32 flagz = 0);
|
2017-03-26 23:27:04 +08:00
|
|
|
void MutexPreLock(ThreadState *thr, uptr pc, uptr addr, u32 flagz = 0);
|
|
|
|
void MutexPostLock(ThreadState *thr, uptr pc, uptr addr, u32 flagz = 0,
|
|
|
|
int rec = 1);
|
|
|
|
int MutexUnlock(ThreadState *thr, uptr pc, uptr addr, u32 flagz = 0);
|
|
|
|
void MutexPreReadLock(ThreadState *thr, uptr pc, uptr addr, u32 flagz = 0);
|
|
|
|
void MutexPostReadLock(ThreadState *thr, uptr pc, uptr addr, u32 flagz = 0);
|
2012-05-10 21:48:04 +08:00
|
|
|
void MutexReadUnlock(ThreadState *thr, uptr pc, uptr addr);
|
|
|
|
void MutexReadOrWriteUnlock(ThreadState *thr, uptr pc, uptr addr);
|
2013-11-16 00:58:12 +08:00
|
|
|
void MutexRepair(ThreadState *thr, uptr pc, uptr addr); // call on EOWNERDEAD
|
2016-03-16 23:39:20 +08:00
|
|
|
void MutexInvalidAccess(ThreadState *thr, uptr pc, uptr addr);
|
2012-05-10 21:48:04 +08:00
|
|
|
|
|
|
|
void Acquire(ThreadState *thr, uptr pc, uptr addr);
|
2014-11-18 14:44:43 +08:00
|
|
|
// AcquireGlobal synchronizes the current thread with all other threads.
|
|
|
|
// In terms of happens-before relation, it draws a HB edge from all threads
|
|
|
|
// (where they happen to execute right now) to the current thread. We use it to
|
|
|
|
// handle Go finalizers. Namely, finalizer goroutine executes AcquireGlobal
|
|
|
|
// right before executing finalizers. This provides a coarse, but simple
|
|
|
|
// approximation of the actual required synchronization.
|
2021-07-29 00:33:18 +08:00
|
|
|
void AcquireGlobal(ThreadState *thr);
|
2012-05-10 21:48:04 +08:00
|
|
|
void Release(ThreadState *thr, uptr pc, uptr addr);
|
2020-03-24 17:27:08 +08:00
|
|
|
void ReleaseStoreAcquire(ThreadState *thr, uptr pc, uptr addr);
|
2012-07-28 23:27:41 +08:00
|
|
|
void ReleaseStore(ThreadState *thr, uptr pc, uptr addr);
|
2012-09-01 01:27:49 +08:00
|
|
|
void AfterSleep(ThreadState *thr, uptr pc);
|
2021-11-13 00:43:26 +08:00
|
|
|
void AcquireImpl(ThreadState *thr, uptr pc, SyncClock *c);
|
|
|
|
void ReleaseImpl(ThreadState *thr, uptr pc, SyncClock *c);
|
|
|
|
void ReleaseStoreAcquireImpl(ThreadState *thr, uptr pc, SyncClock *c);
|
|
|
|
void ReleaseStoreImpl(ThreadState *thr, uptr pc, SyncClock *c);
|
|
|
|
void AcquireReleaseImpl(ThreadState *thr, uptr pc, SyncClock *c);
|
2012-05-10 21:48:04 +08:00
|
|
|
|
|
|
|
// The hacky call uses custom calling convention and an assembly thunk.
|
|
|
|
// It is considerably faster that a normal call for the caller
|
|
|
|
// if it is not executed (it is intended for slow paths from hot functions).
|
|
|
|
// The trick is that the call preserves all registers and the compiler
|
|
|
|
// does not treat it as a call.
|
|
|
|
// If it does not work for you, use normal call.
|
2015-11-03 22:33:39 +08:00
|
|
|
#if !SANITIZER_DEBUG && defined(__x86_64__) && !SANITIZER_MAC
|
2012-05-10 21:48:04 +08:00
|
|
|
// The caller may not create the stack frame for itself at all,
|
|
|
|
// so we create a reserve stack frame for it (1024b must be enough).
|
|
|
|
#define HACKY_CALL(f) \
|
2012-09-02 19:24:07 +08:00
|
|
|
__asm__ __volatile__("sub $1024, %%rsp;" \
|
2013-12-05 15:44:35 +08:00
|
|
|
CFI_INL_ADJUST_CFA_OFFSET(1024) \
|
2012-11-26 22:20:26 +08:00
|
|
|
".hidden " #f "_thunk;" \
|
2012-05-10 21:48:04 +08:00
|
|
|
"call " #f "_thunk;" \
|
2012-09-02 19:24:07 +08:00
|
|
|
"add $1024, %%rsp;" \
|
2013-12-05 15:44:35 +08:00
|
|
|
CFI_INL_ADJUST_CFA_OFFSET(-1024) \
|
2012-09-02 19:24:07 +08:00
|
|
|
::: "memory", "cc");
|
2012-05-10 21:48:04 +08:00
|
|
|
#else
|
|
|
|
#define HACKY_CALL(f) f()
|
|
|
|
#endif
|
|
|
|
|
2021-11-13 00:43:26 +08:00
|
|
|
void TraceSwitch(ThreadState *thr);
|
|
|
|
uptr TraceTopPC(ThreadState *thr);
|
|
|
|
uptr TraceSize();
|
|
|
|
uptr TraceParts();
|
|
|
|
Trace *ThreadTrace(Tid tid);
|
|
|
|
|
|
|
|
extern "C" void __tsan_trace_switch();
|
|
|
|
void ALWAYS_INLINE TraceAddEvent(ThreadState *thr, FastState fs,
                                 EventType typ, u64 addr) {
  // Appends a single event to the current thread trace, switching to a
  // fresh trace part first if the current position is at a part boundary.
  if (!kCollectHistory)
    return;
  // TraceSwitch accesses shadow_stack, but it's called infrequently,
  // so we check it here proactively.
  DCHECK(thr->shadow_stack);
  DCHECK_GE((int)typ, 0);
  DCHECK_LE((int)typ, 7);
  DCHECK_EQ(GetLsb(addr, kEventPCBits), addr);
  const u64 pos = fs.GetTracePos();
  const bool at_part_boundary = (pos % kTracePartSize) == 0;
  if (UNLIKELY(at_part_boundary)) {
#if !SANITIZER_GO
    HACKY_CALL(__tsan_trace_switch);
#else
    TraceSwitch(thr);
#endif
  }
  // Pack the event: low bits carry the address/pc, high bits the type.
  Event *events = (Event *)GetThreadTrace(fs.tid());
  events[pos] = (u64)addr | ((u64)typ << kEventPCBits);
}
|
|
|
|
|
2016-10-29 04:14:18 +08:00
|
|
|
#if !SANITIZER_GO
|
2015-02-20 14:42:41 +08:00
|
|
|
uptr ALWAYS_INLINE HeapEnd() {
|
2015-11-26 21:10:47 +08:00
|
|
|
return HeapMemEnd() + PrimaryAllocator::AdditionalSize();
|
2015-02-20 14:42:41 +08:00
|
|
|
}
|
|
|
|
#endif
|
|
|
|
|
2019-02-13 21:21:24 +08:00
|
|
|
ThreadState *FiberCreate(ThreadState *thr, uptr pc, unsigned flags);
|
|
|
|
void FiberDestroy(ThreadState *thr, uptr pc, ThreadState *fiber);
|
|
|
|
void FiberSwitch(ThreadState *thr, uptr pc, ThreadState *fiber, unsigned flags);
|
|
|
|
|
|
|
|
// These need to match __tsan_switch_to_fiber_* flags defined in
|
|
|
|
// tsan_interface.h. See documentation there as well.
|
|
|
|
enum FiberSwitchFlags {
|
|
|
|
FiberSwitchFlagNoSync = 1 << 0, // __tsan_switch_to_fiber_no_sync
|
|
|
|
};
|
|
|
|
|
2021-07-31 17:43:32 +08:00
|
|
|
ALWAYS_INLINE void ProcessPendingSignals(ThreadState *thr) {
  // Fast inline check of the pending-signals flag; the actual signal
  // delivery lives out-of-line in ProcessPendingSignalsImpl.
  if (UNLIKELY(atomic_load_relaxed(&thr->pending_signals) != 0))
    ProcessPendingSignalsImpl(thr);
}
|
|
|
|
|
2021-07-29 19:50:29 +08:00
|
|
|
extern bool is_initialized;
|
|
|
|
|
|
|
|
ALWAYS_INLINE
void LazyInitialize(ThreadState *thr) {
#if !SANITIZER_CAN_USE_PREINIT_ARRAY
  // Without .preinit_array support we cannot rely on __tsan_init having
  // run before the first instrumented access, so initialize on demand.
  if (UNLIKELY(!is_initialized))
    Initialize(thr);
#else
  // With .preinit_array, __tsan_init initializes the runtime before any
  // instrumented code executes, so nothing to do here.
#endif
}
|
|
|
|
|
2021-11-13 00:43:26 +08:00
|
|
|
namespace v3 {
|
|
|
|
|
2021-08-05 23:18:17 +08:00
|
|
|
void TraceSwitchPart(ThreadState *thr);
|
2021-11-13 00:43:26 +08:00
|
|
|
bool RestoreStack(Tid tid, EventType type, Sid sid, Epoch epoch, uptr addr,
|
|
|
|
uptr size, AccessType typ, VarSizeStackTrace *pstk,
|
2021-08-05 23:18:17 +08:00
|
|
|
MutexSet *pmset, uptr *ptag);
|
|
|
|
|
|
|
|
template <typename EventT>
|
|
|
|
ALWAYS_INLINE WARN_UNUSED_RESULT bool TraceAcquire(ThreadState *thr,
|
|
|
|
EventT **ev) {
|
|
|
|
Event *pos = reinterpret_cast<Event *>(atomic_load_relaxed(&thr->trace_pos));
|
|
|
|
#if SANITIZER_DEBUG
|
|
|
|
// TraceSwitch acquires these mutexes,
|
|
|
|
// so we lock them here to detect deadlocks more reliably.
|
|
|
|
{ Lock lock(&ctx->slot_mtx); }
|
|
|
|
{ Lock lock(&thr->tctx->trace.mtx); }
|
|
|
|
TracePart *current = thr->tctx->trace.parts.Back();
|
|
|
|
if (current) {
|
|
|
|
DCHECK_GE(pos, ¤t->events[0]);
|
|
|
|
DCHECK_LE(pos, ¤t->events[TracePart::kSize]);
|
|
|
|
} else {
|
|
|
|
DCHECK_EQ(pos, nullptr);
|
|
|
|
}
|
|
|
|
#endif
|
|
|
|
// TracePart is allocated with mmap and is at least 4K aligned.
|
|
|
|
// So the following check is a faster way to check for part end.
|
|
|
|
// It may have false positives in the middle of the trace,
|
|
|
|
// they are filtered out in TraceSwitch.
|
|
|
|
if (UNLIKELY(((uptr)(pos + 1) & TracePart::kAlignment) == 0))
|
|
|
|
return false;
|
|
|
|
*ev = reinterpret_cast<EventT *>(pos);
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
|
|
|
template <typename EventT>
|
|
|
|
ALWAYS_INLINE void TraceRelease(ThreadState *thr, EventT *evp) {
|
|
|
|
DCHECK_LE(evp + 1, &thr->tctx->trace.parts.Back()->events[TracePart::kSize]);
|
|
|
|
atomic_store_relaxed(&thr->trace_pos, (uptr)(evp + 1));
|
|
|
|
}
|
|
|
|
|
|
|
|
template <typename EventT>
|
|
|
|
void TraceEvent(ThreadState *thr, EventT ev) {
|
|
|
|
EventT *evp;
|
|
|
|
if (!TraceAcquire(thr, &evp)) {
|
|
|
|
TraceSwitchPart(thr);
|
|
|
|
UNUSED bool res = TraceAcquire(thr, &evp);
|
|
|
|
DCHECK(res);
|
|
|
|
}
|
|
|
|
*evp = ev;
|
|
|
|
TraceRelease(thr, evp);
|
|
|
|
}
|
|
|
|
|
|
|
|
ALWAYS_INLINE WARN_UNUSED_RESULT bool TryTraceFunc(ThreadState *thr,
                                                   uptr pc = 0) {
  // Traces a function event without switching trace parts.
  // Returns false if the current part is full (caller must handle that);
  // always succeeds trivially when history collection is disabled.
  if (!kCollectHistory)
    return true;
  EventFunc *evp;
  if (UNLIKELY(!TraceAcquire(thr, &evp)))
    return false;
  evp->pc = pc;
  evp->is_func = 1;
  evp->is_access = 0;
  TraceRelease(thr, evp);
  return true;
}
|
|
|
|
|
|
|
|
WARN_UNUSED_RESULT
|
|
|
|
bool TryTraceMemoryAccess(ThreadState *thr, uptr pc, uptr addr, uptr size,
|
|
|
|
AccessType typ);
|
|
|
|
WARN_UNUSED_RESULT
|
|
|
|
bool TryTraceMemoryAccessRange(ThreadState *thr, uptr pc, uptr addr, uptr size,
|
|
|
|
AccessType typ);
|
|
|
|
void TraceMemoryAccessRange(ThreadState *thr, uptr pc, uptr addr, uptr size,
|
|
|
|
AccessType typ);
|
|
|
|
void TraceFunc(ThreadState *thr, uptr pc = 0);
|
|
|
|
void TraceMutexLock(ThreadState *thr, EventType type, uptr pc, uptr addr,
|
|
|
|
StackID stk);
|
|
|
|
void TraceMutexUnlock(ThreadState *thr, uptr addr);
|
|
|
|
void TraceTime(ThreadState *thr);
|
|
|
|
|
2021-11-13 00:43:26 +08:00
|
|
|
} // namespace v3
|
2021-08-05 23:18:17 +08:00
|
|
|
|
2021-10-27 22:00:23 +08:00
|
|
|
void GrowShadowStack(ThreadState *thr);
|
|
|
|
|
|
|
|
ALWAYS_INLINE
void FuncEntry(ThreadState *thr, uptr pc) {
  // Records a function-entry event and pushes pc onto the shadow stack.
  DPrintf2("#%d: FuncEntry %p\n", (int)thr->fast_state.tid(), (void *)pc);
  if (kCollectHistory) {
    thr->fast_state.IncrementEpoch();
    TraceAddEvent(thr, thr->fast_state, EventTypeFuncEnter, pc);
  }

  // Shadow stack maintenance can be replaced with
  // stack unwinding during trace switch (which presumably must be faster).
  DCHECK_GE(thr->shadow_stack_pos, thr->shadow_stack);
#if !SANITIZER_GO
  // Native: the shadow stack is preallocated and must not overflow.
  DCHECK_LT(thr->shadow_stack_pos, thr->shadow_stack_end);
#else
  // Go: the shadow stack grows on demand.
  if (thr->shadow_stack_pos == thr->shadow_stack_end)
    GrowShadowStack(thr);
#endif
  *thr->shadow_stack_pos++ = pc;
}
|
|
|
|
|
|
|
|
ALWAYS_INLINE
void FuncExit(ThreadState *thr) {
  // Records a function-exit event and pops the shadow stack.
  DPrintf2("#%d: FuncExit\n", (int)thr->fast_state.tid());
  if (kCollectHistory) {
    thr->fast_state.IncrementEpoch();
    TraceAddEvent(thr, thr->fast_state, EventTypeFuncExit, 0);
  }

  DCHECK_GT(thr->shadow_stack_pos, thr->shadow_stack);
#if !SANITIZER_GO
  DCHECK_LT(thr->shadow_stack_pos, thr->shadow_stack_end);
#endif
  --thr->shadow_stack_pos;
}
|
|
|
|
|
2021-09-21 21:22:35 +08:00
|
|
|
#if !SANITIZER_GO
|
|
|
|
extern void (*on_initialize)(void);
|
|
|
|
extern int (*on_finalize)(int);
|
|
|
|
#endif
|
2021-11-13 00:43:26 +08:00
|
|
|
|
2012-05-10 21:48:04 +08:00
|
|
|
} // namespace __tsan
|
|
|
|
|
|
|
|
#endif // TSAN_RTL_H
|