//===-- xray_buffer_queue.cpp ----------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of XRay, a dynamic runtime instrumentation system.
//
// Defines the interface for a buffer queue implementation.
//
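// The queue hands out fixed-size buffers carved from a single reference-
// counted backing allocation, and stamps each buffer with a generation number
// so that buffers handed out before a re-initialization can be recognized and
// released safely.
//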
//===----------------------------------------------------------------------===//
#include "xray_buffer_queue.h"

#include "sanitizer_common/sanitizer_atomic.h"
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_libc.h"
#if !SANITIZER_FUCHSIA
#include "sanitizer_common/sanitizer_posix.h"
#endif
#include "xray_allocator.h"
#include "xray_defs.h"

#include <memory>
#include <sys/mman.h>

using namespace __xray;

namespace {
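
// Layout note: allocControlBlock reserves one contiguous region holding the
// ControlBlock header immediately followed by `Size * Count` bytes of payload;
// the `- 1` accounts for the one-byte `Data` member at the tail of
// ControlBlock overlapping the first payload byte.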
BufferQueue::ControlBlock *allocControlBlock(size_t Size, size_t Count) {
  auto B =
      allocateBuffer((sizeof(BufferQueue::ControlBlock) - 1) + (Size * Count));
  return B == nullptr ? nullptr
                      : reinterpret_cast<BufferQueue::ControlBlock *>(B);
}

void deallocControlBlock(BufferQueue::ControlBlock *C, size_t Size,
                         size_t Count) {
  deallocateBuffer(reinterpret_cast<unsigned char *>(C),
                   (sizeof(BufferQueue::ControlBlock) - 1) + (Size * Count));
}

void decRefCount(BufferQueue::ControlBlock *C, size_t Size, size_t Count) {
  if (C == nullptr)
    return;
  if (atomic_fetch_sub(&C->RefCount, 1, memory_order_acq_rel) == 1)
    deallocControlBlock(C, Size, Count);
}

void incRefCount(BufferQueue::ControlBlock *C) {
  if (C == nullptr)
    return;
  atomic_fetch_add(&C->RefCount, 1, memory_order_acq_rel);
}
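
// The acq_rel ordering on RefCount updates ensures that all writes to a
// control block happen-before its deallocation by whichever thread drops the
// last reference (the fetch_sub that returns 1).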

// We use a struct to ensure that we are allocating one atomic_uint64_t per
// cache line. This allows us to not worry about false-sharing among atomic
// objects being updated (constantly) by different threads.
struct ExtentsPadded {
  union {
    atomic_uint64_t Extents;
    unsigned char Storage[kCacheLineSize];
  };
};

constexpr size_t kExtentsSize = sizeof(ExtentsPadded);
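
// Sanity check (added for illustration): the union pads each extents counter
// out to exactly one cache line, which is what prevents false sharing.
static_assert(sizeof(ExtentsPadded) == kCacheLineSize,
              "ExtentsPadded should occupy exactly one cache line");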

} // namespace

BufferQueue::ErrorCode BufferQueue::init(size_t BS, size_t BC) {
  SpinMutexLock Guard(&Mutex);

  if (!finalizing())
    return BufferQueue::ErrorCode::AlreadyInitialized;

  cleanupBuffers();

  bool Success = false;
  BufferSize = BS;
  BufferCount = BC;

  BackingStore = allocControlBlock(BufferSize, BufferCount);
  if (BackingStore == nullptr)
    return BufferQueue::ErrorCode::NotEnoughMemory;

  auto CleanupBackingStore = at_scope_exit([&, this] {
    if (Success)
      return;
    deallocControlBlock(BackingStore, BufferSize, BufferCount);
    BackingStore = nullptr;
  });

  // Initialize enough atomic_uint64_t instances, each padded out to a cache
  // line, to hold the extents of every buffer.
  ExtentsBackingStore = allocControlBlock(kExtentsSize, BufferCount);
  if (ExtentsBackingStore == nullptr)
    return BufferQueue::ErrorCode::NotEnoughMemory;

  auto CleanupExtentsBackingStore = at_scope_exit([&, this] {
    if (Success)
      return;
    deallocControlBlock(ExtentsBackingStore, kExtentsSize, BufferCount);
    ExtentsBackingStore = nullptr;
  });

  Buffers = initArray<BufferRep>(BufferCount);
  if (Buffers == nullptr)
    return BufferQueue::ErrorCode::NotEnoughMemory;

  // At this point we increment the generation number to associate the buffers
  // with the new generation.
  atomic_fetch_add(&Generation, 1, memory_order_acq_rel);

  // First, we initialize the refcount in the ControlBlock, which we treat as
  // being at the start of the BackingStore pointer.
  atomic_store(&BackingStore->RefCount, 1, memory_order_release);
  atomic_store(&ExtentsBackingStore->RefCount, 1, memory_order_release);

  // Then we initialize the individual buffers that sub-divide the whole
  // backing store. Each buffer starts at a fixed offset from the `Data`
  // member of its ControlBlock.
  for (size_t i = 0; i < BufferCount; ++i) {
    auto &T = Buffers[i];
    auto &Buf = T.Buff;
    auto *E = reinterpret_cast<ExtentsPadded *>(&ExtentsBackingStore->Data +
                                                (kExtentsSize * i));
    Buf.Extents = &E->Extents;
    atomic_store(Buf.Extents, 0, memory_order_release);
    Buf.Generation = generation();
    Buf.Data = &BackingStore->Data + (BufferSize * i);
    Buf.Size = BufferSize;
    Buf.BackingStore = BackingStore;
    Buf.ExtentsBackingStore = ExtentsBackingStore;
    Buf.Count = BufferCount;
    T.Used = false;
  }

  Next = Buffers;
  First = Buffers;
  LiveBuffers = 0;
  atomic_store(&Finalizing, 0, memory_order_release);
  Success = true;
  return BufferQueue::ErrorCode::Ok;
}
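
// Note that init() succeeds only while the queue is finalizing; Finalizing
// starts at 1 in the constructor, which is what allows both first-time
// initialization and re-initialization after finalize().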

BufferQueue::BufferQueue(size_t B, size_t N,
                         bool &Success) XRAY_NEVER_INSTRUMENT
    : BufferSize(B),
      BufferCount(N),
      Mutex(),
      Finalizing{1},
      BackingStore(nullptr),
      ExtentsBackingStore(nullptr),
      Buffers(nullptr),
      Next(Buffers),
      First(Buffers),
      LiveBuffers(0),
      Generation{0} {
  Success = init(B, N) == BufferQueue::ErrorCode::Ok;
}

BufferQueue::ErrorCode BufferQueue::getBuffer(Buffer &Buf) {
  if (atomic_load(&Finalizing, memory_order_acquire))
    return ErrorCode::QueueFinalizing;

  BufferRep *B = nullptr;
  {
    SpinMutexLock Guard(&Mutex);
    if (LiveBuffers == BufferCount)
      return ErrorCode::NotEnoughMemory;
    B = Next++;
    if (Next == (Buffers + BufferCount))
      Next = Buffers;
    ++LiveBuffers;
  }

  incRefCount(BackingStore);
  incRefCount(ExtentsBackingStore);
  Buf = B->Buff;
  Buf.Generation = generation();
  B->Used = true;
  return ErrorCode::Ok;
}
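
// Illustrative use from a writer thread (BQ is assumed to be a live
// BufferQueue; the record-writing logic is elided):
//
//   BufferQueue::Buffer Buf;
//   if (BQ.getBuffer(Buf) == BufferQueue::ErrorCode::Ok) {
//     // ... write records into Buf.Data, publishing the byte count
//     // through *Buf.Extents ...
//     BQ.releaseBuffer(Buf);
//   }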

BufferQueue::ErrorCode BufferQueue::releaseBuffer(Buffer &Buf) {
  // Check whether the buffer being referred to is within the bounds of the
  // backing store's range.
  BufferRep *B = nullptr;
  {
    SpinMutexLock Guard(&Mutex);
    if (Buf.Generation != generation() || LiveBuffers == 0) {
      // Drop the references held by a stale-generation buffer before
      // resetting it; clearing `Buf` first would null out the control block
      // pointers and leak the reference counts.
      decRefCount(Buf.BackingStore, Buf.Size, Buf.Count);
      decRefCount(Buf.ExtentsBackingStore, kExtentsSize, Buf.Count);
      Buf = {};
      return BufferQueue::ErrorCode::Ok;
    }

    if (Buf.Data < &BackingStore->Data ||
        Buf.Data > &BackingStore->Data + (BufferCount * BufferSize))
      return BufferQueue::ErrorCode::UnrecognizedBuffer;

    --LiveBuffers;
    B = First++;
    if (First == (Buffers + BufferCount))
      First = Buffers;
  }

  // Now that the buffer has been released, we mark it as "used".
  B->Buff = Buf;
  B->Used = true;
  decRefCount(Buf.BackingStore, Buf.Size, Buf.Count);
  decRefCount(Buf.ExtentsBackingStore, kExtentsSize, Buf.Count);
  atomic_store(B->Buff.Extents, atomic_load(Buf.Extents, memory_order_acquire),
               memory_order_release);
  Buf = {};
  return ErrorCode::Ok;
}
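
// The writer's extents are copied back into the queue's BufferRep before the
// caller's handle is reset, so later consumers iterating over the buffers can
// still see how many bytes were written.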

BufferQueue::ErrorCode BufferQueue::finalize() {
  if (atomic_exchange(&Finalizing, 1, memory_order_acq_rel))
    return ErrorCode::QueueFinalizing;
  return ErrorCode::Ok;
}
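
// Finalization only flips the flag: subsequent getBuffer() calls fail with
// QueueFinalizing, while already-live buffers remain valid until released.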

void BufferQueue::cleanupBuffers() {
  for (auto B = Buffers, E = Buffers + BufferCount; B != E; ++B)
    B->~BufferRep();
  deallocateBuffer(Buffers, BufferCount);
  decRefCount(BackingStore, BufferSize, BufferCount);
  decRefCount(ExtentsBackingStore, kExtentsSize, BufferCount);
  BackingStore = nullptr;
  ExtentsBackingStore = nullptr;
  Buffers = nullptr;
  BufferCount = 0;
  BufferSize = 0;
}

BufferQueue::~BufferQueue() { cleanupBuffers(); }