Don't disallow allocation tracking whenever a trace event is open, because we now have state trace events. Instead, block allocation tracking only while we are already in the middle of allocation tracking, to prevent recursion.

This commit is contained in:
A.J. Beamon 2020-03-12 11:17:49 -07:00
parent 53d4798c75
commit 2466749648
4 changed files with 8 additions and 9 deletions

View File

@@ -184,9 +184,11 @@ ArenaBlock* ArenaBlock::create(int dataSize, Reference<ArenaBlock>& next) {
 	b->bigSize = reqSize;
 	b->bigUsed = sizeof(ArenaBlock);
-	if (FLOW_KNOBS && g_trace_depth == 0 &&
+	if (FLOW_KNOBS && !g_tracing_allocation &&
 	    nondeterministicRandom()->random01() < (reqSize / FLOW_KNOBS->HUGE_ARENA_LOGGING_BYTES)) {
+		g_tracing_allocation = true;
 		hugeArenaSample(reqSize);
+		g_tracing_allocation = false;
 	}
 	g_hugeArenaMemory.fetch_add(reqSize);

View File

@@ -445,8 +445,10 @@ void FastAllocator<Size>::getMagazine() {
 		// FIXME: We should be able to allocate larger magazine sizes here if we
 		// detect that the underlying system supports hugepages. Using hugepages
 		// with smaller-than-2MiB magazine sizes strands memory. See issue #909.
-		if(FLOW_KNOBS && g_trace_depth == 0 && nondeterministicRandom()->random01() < (magazine_size * Size)/FLOW_KNOBS->FAST_ALLOC_LOGGING_BYTES) {
+		if(FLOW_KNOBS && !g_tracing_allocation && nondeterministicRandom()->random01() < (magazine_size * Size)/FLOW_KNOBS->FAST_ALLOC_LOGGING_BYTES) {
+			g_tracing_allocation = true;
 			TraceEvent("GetMagazineSample").detail("Size", Size).backtrace();
+			g_tracing_allocation = false;
 		}
 		block = (void **)::allocate(magazine_size * Size, false);
 #endif

View File

@@ -43,7 +43,7 @@
 #undef min
 #endif
-thread_local int g_trace_depth = 0;
+thread_local bool g_tracing_allocation = false;
 class DummyThreadPool : public IThreadPool, ReferenceCounted<DummyThreadPool> {
 public:
@@ -698,14 +698,12 @@ TraceEvent& TraceEvent::operator=(TraceEvent &&ev) {
 }
 TraceEvent::TraceEvent( const char* type, UID id ) : id(id), type(type), severity(SevInfo), initialized(false), enabled(true), logged(false) {
-	g_trace_depth++;
 	setMaxFieldLength(0);
 	setMaxEventLength(0);
 }
 TraceEvent::TraceEvent( Severity severity, const char* type, UID id )
   : id(id), type(type), severity(severity), initialized(false), logged(false),
 	enabled(g_network == nullptr || FLOW_KNOBS->MIN_TRACE_SEVERITY <= severity) {
-	g_trace_depth++;
 	setMaxFieldLength(0);
 	setMaxEventLength(0);
 }
@@ -715,7 +713,6 @@ TraceEvent::TraceEvent( TraceInterval& interval, UID id )
 	initialized(false), logged(false),
 	enabled(g_network == nullptr || FLOW_KNOBS->MIN_TRACE_SEVERITY <= interval.severity) {
-	g_trace_depth++;
 	setMaxFieldLength(0);
 	setMaxEventLength(0);
@@ -727,7 +724,6 @@ TraceEvent::TraceEvent( Severity severity, TraceInterval& interval, UID id )
 	initialized(false), logged(false),
 	enabled(g_network == nullptr || FLOW_KNOBS->MIN_TRACE_SEVERITY <= severity) {
-	g_trace_depth++;
 	setMaxFieldLength(0);
 	setMaxEventLength(0);
@@ -1014,7 +1010,6 @@ void TraceEvent::log() {
 			TraceEvent(SevError, "TraceEventLoggingError").error(e,true);
 		}
 		delete tmpEventMetric;
-		g_trace_depth--;
 		logged = true;
 	}
}

View File

@@ -43,7 +43,7 @@ inline int fastrand() {
 //inline static bool TRACE_SAMPLE() { return fastrand()<16; }
 inline static bool TRACE_SAMPLE() { return false; }
-extern thread_local int g_trace_depth;
+extern thread_local bool g_tracing_allocation;
 enum Severity {
 	SevSample=1,