/*===- InstrProfilingValue.c - Support library for PGO instrumentation ----===*\
|*
|* The LLVM Compiler Infrastructure
|*
|* This file is distributed under the University of Illinois Open Source
|* License. See LICENSE.TXT for details.
|*
\*===----------------------------------------------------------------------===*/
#include <limits.h>
|
|
|
|
#include <stdio.h>
|
|
|
|
#include <stdlib.h>
|
|
|
|
#include <string.h>
|
2017-12-15 03:01:04 +08:00
|
|
|
|
|
|
|
#include "InstrProfiling.h"
|
|
|
|
#include "InstrProfilingInternal.h"
|
|
|
|
#include "InstrProfilingUtil.h"
|
|
|
|
|
2015-12-11 04:24:09 +08:00
|
|
|
/* Pull in the value-profile data definitions and the common C API
 * implementation shared with the host-side profiling tools. */
#define INSTR_PROF_VALUE_PROF_DATA
#define INSTR_PROF_COMMON_API_IMPL
#include "InstrProfData.inc"

/* Nonzero while value-profile nodes come from the statically allocated
 * pool; cleared on the first dynamic allocation (see
 * allocateValueProfileCounters). */
static int hasStaticCounters = 1;
/* Number of "out of static vnodes" warnings emitted so far; capped by
 * INSTR_PROF_MAX_VP_WARNS to avoid log spam. */
static int OutOfNodesWarnings = 0;
/* Set when the user overrides the per-site value cap, either via the
 * environment variable or lprofSetMaxValsPerSite. */
static int hasNonDefaultValsPerSite = 0;
#define INSTR_PROF_MAX_VP_WARNS 10
#define INSTR_PROF_DEFAULT_NUM_VAL_PER_SITE 16
#define INSTR_PROF_VNODE_POOL_SIZE 1024

#ifndef _MSC_VER
/* A shared static pool in addition to the vnodes statically
 * allocated by the compiler. */
COMPILER_RT_VISIBILITY ValueProfNode
    lprofValueProfNodes[INSTR_PROF_VNODE_POOL_SIZE] COMPILER_RT_SECTION(
        COMPILER_RT_SEG INSTR_PROF_VNODES_SECT_NAME_STR);
#endif

/* Upper bound on the number of distinct target values tracked per value
 * site. */
COMPILER_RT_VISIBILITY uint32_t VPMaxNumValsPerSite =
    INSTR_PROF_DEFAULT_NUM_VAL_PER_SITE;
|
2016-05-19 06:34:05 +08:00
|
|
|
|
|
|
|
COMPILER_RT_VISIBILITY void lprofSetupValueProfiler() {
|
|
|
|
const char *Str = 0;
|
|
|
|
Str = getenv("LLVM_VP_MAX_NUM_VALS_PER_SITE");
|
2016-05-27 00:06:36 +08:00
|
|
|
if (Str && Str[0]) {
|
2016-05-19 06:34:05 +08:00
|
|
|
VPMaxNumValsPerSite = atoi(Str);
|
2016-05-27 00:06:36 +08:00
|
|
|
hasNonDefaultValsPerSite = 1;
|
|
|
|
}
|
2016-05-19 06:34:05 +08:00
|
|
|
if (VPMaxNumValsPerSite > INSTR_PROF_MAX_NUM_VAL_PER_SITE)
|
|
|
|
VPMaxNumValsPerSite = INSTR_PROF_MAX_NUM_VAL_PER_SITE;
|
|
|
|
}
|
|
|
|
|
2016-05-20 05:35:34 +08:00
|
|
|
COMPILER_RT_VISIBILITY void lprofSetMaxValsPerSite(uint32_t MaxVals) {
|
|
|
|
VPMaxNumValsPerSite = MaxVals;
|
2016-05-27 00:06:36 +08:00
|
|
|
hasNonDefaultValsPerSite = 1;
|
2016-05-20 05:35:34 +08:00
|
|
|
}
|
|
|
|
|
2015-12-11 04:24:09 +08:00
|
|
|
/* This method is only used in value profiler mock testing. */
/* Overwrite the NumValueSites entry for the given value kind. The cast
 * suggests the field is declared read-only in __llvm_profile_data —
 * NOTE(review): confirm against the struct definition in
 * InstrProfData.inc. */
COMPILER_RT_VISIBILITY void
__llvm_profile_set_num_value_sites(__llvm_profile_data *Data,
                                   uint32_t ValueKind, uint16_t NumValueSites) {
  *((uint16_t *)&Data->NumValueSites[ValueKind]) = NumValueSites;
}
|
|
|
|
|
|
|
|
/* This method is only used in value profiler mock testing. */
|
2015-12-16 11:29:15 +08:00
|
|
|
COMPILER_RT_VISIBILITY const __llvm_profile_data *
|
2015-12-11 04:24:09 +08:00
|
|
|
__llvm_profile_iterate_data(const __llvm_profile_data *Data) {
|
|
|
|
return Data + 1;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* This method is only used in value profiler mock testing. */
/* Return the address of the instrumented function recorded in Data. */
COMPILER_RT_VISIBILITY void *
__llvm_get_function_addr(const __llvm_profile_data *Data) {
  return Data->FunctionPointer;
}
|
|
|
|
|
|
|
|
/* Allocate an array that holds the pointers to the linked lists of
 * value profile counter nodes. The number of elements of the array
 * is the total number of value profile sites instrumented. Returns
 * 0 if allocation fails.
 */

static int allocateValueProfileCounters(__llvm_profile_data *Data) {
  uint64_t NumVSites = 0;
  uint32_t VKI;

  /* This function will never be called when value site array is allocated
     statically at compile time. */
  hasStaticCounters = 0;
  /* When dynamic allocation is enabled, allow tracking the max number of
   * values allowed. */
  if (!hasNonDefaultValsPerSite)
    VPMaxNumValsPerSite = INSTR_PROF_MAX_NUM_VAL_PER_SITE;

  /* Sum site counts across all value kinds to size one flat array. */
  for (VKI = IPVK_First; VKI <= IPVK_Last; ++VKI)
    NumVSites += Data->NumValueSites[VKI];

  /* calloc zero-fills, so every site starts with an empty (NULL) list. */
  ValueProfNode **Mem =
      (ValueProfNode **)calloc(NumVSites, sizeof(ValueProfNode *));
  if (!Mem)
    return 0;
  /* Publish the array with a CAS so racing threads agree on a single
   * allocation; the loser frees its copy and reports failure. */
  if (!COMPILER_RT_BOOL_CMPXCHG(&Data->Values, 0, Mem)) {
    free(Mem);
    return 0;
  }
  return 1;
}
|
|
|
|
|
2016-05-22 06:55:45 +08:00
|
|
|
/* Return a fresh ValueProfNode: heap-allocated once counters are dynamic,
 * otherwise carved out of the static vnode pool with an atomic bump
 * pointer. Returns 0 (with a rate-limited warning) when the static pool
 * is exhausted. Data/Index/Value are currently unused here. */
static ValueProfNode *allocateOneNode(__llvm_profile_data *Data, uint32_t Index,
                                      uint64_t Value) {
  ValueProfNode *Node;

  if (!hasStaticCounters)
    return (ValueProfNode *)calloc(1, sizeof(ValueProfNode));

  /* Early check to avoid value wrapping around. */
  if (CurrentVNode + 1 > EndVNode) {
    if (OutOfNodesWarnings++ < INSTR_PROF_MAX_VP_WARNS) {
      PROF_WARN("Unable to track new values: %s. "
                " Consider using option -mllvm -vp-counters-per-site=<n> to "
                "allocate more"
                " value profile counters at compile time. \n",
                "Running out of static counters");
    }
    return 0;
  }
  /* Atomically claim the next node in the pool. */
  Node = COMPILER_RT_PTR_FETCH_ADD(ValueProfNode, CurrentVNode, 1);
  /* Due to section padding, EndVNode may point one past an incomplete
   * VNode, so we need to skip the last incomplete node. */
  if (Node + 1 > EndVNode)
    return 0;

  return Node;
}
|
|
|
|
|
2018-04-03 00:57:00 +08:00
|
|
|
/* Core value-profiling update: record CountValue occurrences of
 * TargetValue at value site CounterIndex of the profile data record Data.
 * Each site keeps a singly linked list of (Value, Count) nodes; when the
 * site already tracks VPMaxNumValsPerSite values, the min-count node is
 * decayed and possibly evicted (see the comment on the policy below).
 * List insertions are published with CAS so concurrent updaters never
 * corrupt the list; a losing dynamic node is simply freed. */
static COMPILER_RT_ALWAYS_INLINE void
instrumentTargetValueImpl(uint64_t TargetValue, void *Data,
                          uint32_t CounterIndex, uint64_t CountValue) {
  __llvm_profile_data *PData = (__llvm_profile_data *)Data;
  if (!PData)
    return;
  if (!CountValue)
    return;
  /* Lazily allocate the per-site list-head array on first use. */
  if (!PData->Values) {
    if (!allocateValueProfileCounters(PData))
      return;
  }

  ValueProfNode **ValueCounters = (ValueProfNode **)PData->Values;
  ValueProfNode *PrevVNode = NULL;
  ValueProfNode *MinCountVNode = NULL;
  ValueProfNode *CurVNode = ValueCounters[CounterIndex];
  uint64_t MinCount = UINT64_MAX;

  uint8_t VDataCount = 0;
  /* Walk the site's list: bump the matching node if the value is already
   * tracked, and remember the minimum-count node for possible eviction. */
  while (CurVNode) {
    if (TargetValue == CurVNode->Value) {
      CurVNode->Count += CountValue;
      return;
    }
    if (CurVNode->Count < MinCount) {
      MinCount = CurVNode->Count;
      MinCountVNode = CurVNode;
    }
    PrevVNode = CurVNode;
    CurVNode = CurVNode->Next;
    ++VDataCount;
  }

  if (VDataCount >= VPMaxNumValsPerSite) {
    /* Bump down the min count node's count. If it reaches 0,
     * evict it. This eviction/replacement policy makes hot
     * targets more sticky while cold targets less so. In other
     * words, it makes it less likely for the hot targets to be
     * prematurely evicted during warmup/establishment period,
     * when their counts are still low. In a special case when
     * the number of values tracked is reduced to only one, this
     * policy will guarantee that the dominating target with >50%
     * total count will survive in the end. Note that this scheme
     * allows the runtime to track the min count node in an adaptive
     * manner. It can correct previous mistakes and eventually
     * lock on a cold target that is already in stable state.
     *
     * In very rare cases, this replacement scheme may still lead
     * to target loss. For instance, out of \c N value slots, \c N-1
     * slots are occupied by luke warm targets during the warmup
     * period and the remaining one slot is competed by two or more
     * very hot targets. If those hot targets occur in an interleaved
     * way, none of them will survive (gain enough weight to throw out
     * other established entries) due to the ping-pong effect.
     * To handle this situation, user can choose to increase the max
     * number of tracked values per value site. Alternatively, a more
     * expensive eviction mechanism can be implemented. It requires
     * the runtime to track the total number of evictions per-site.
     * When the total number of evictions reaches certain threshold,
     * the runtime can wipe out more than one lowest count entries
     * to give space for hot targets.
     */
    if (MinCountVNode->Count <= CountValue) {
      /* Evict in place: reuse the min-count node for the new target. */
      CurVNode = MinCountVNode;
      CurVNode->Value = TargetValue;
      CurVNode->Count = CountValue;
    } else
      MinCountVNode->Count -= CountValue;

    return;
  }

  /* Room remains at this site: append a new node for the target. */
  CurVNode = allocateOneNode(PData, CounterIndex, TargetValue);
  if (!CurVNode)
    return;
  CurVNode->Value = TargetValue;
  CurVNode->Count += CountValue;

  uint32_t Success = 0;
  /* Link the node in with CAS: either as the list head (empty site) or
   * after the last node we saw during the walk. */
  if (!ValueCounters[CounterIndex])
    Success =
        COMPILER_RT_BOOL_CMPXCHG(&ValueCounters[CounterIndex], 0, CurVNode);
  else if (PrevVNode && !PrevVNode->Next)
    Success = COMPILER_RT_BOOL_CMPXCHG(&(PrevVNode->Next), 0, CurVNode);

  /* A racing thread won the CAS: release our dynamically allocated node.
   * Static-pool nodes are intentionally leaked (pool memory is never
   * freed). */
  if (!Success && !hasStaticCounters) {
    free(CurVNode);
    return;
  }
}
|
|
|
|
|
2018-04-03 00:57:00 +08:00
|
|
|
/* Compiler-inserted entry point: record one occurrence of TargetValue at
 * value site CounterIndex of the profile data record Data. */
COMPILER_RT_VISIBILITY void
__llvm_profile_instrument_target(uint64_t TargetValue, void *Data,
                                 uint32_t CounterIndex) {
  instrumentTargetValueImpl(TargetValue, Data, CounterIndex, 1);
}
|
|
|
|
/* Like __llvm_profile_instrument_target, but records CountValue
 * occurrences in a single call. */
COMPILER_RT_VISIBILITY void
__llvm_profile_instrument_target_value(uint64_t TargetValue, void *Data,
                                       uint32_t CounterIndex,
                                       uint64_t CountValue) {
  instrumentTargetValueImpl(TargetValue, Data, CounterIndex, CountValue);
}
|
|
|
|
|
2017-03-16 05:46:31 +08:00
|
|
|
/*
|
|
|
|
* The target values are partitioned into multiple regions/ranges. There is one
|
|
|
|
* contiguous region which is precise -- every value in the range is tracked
|
|
|
|
* individually. A value outside the precise region will be collapsed into one
|
|
|
|
* value depending on the region it falls in.
|
|
|
|
*
|
|
|
|
* There are three regions:
|
|
|
|
* 1. (-inf, PreciseRangeStart) and (PreciseRangeLast, LargeRangeValue) belong
|
|
|
|
* to one region -- all values here should be mapped to one value of
|
|
|
|
* "PreciseRangeLast + 1".
|
|
|
|
* 2. [PreciseRangeStart, PreciseRangeLast]
|
|
|
|
* 3. Large values: [LargeValue, +inf) maps to one value of LargeValue.
|
|
|
|
*
|
|
|
|
* The range for large values is optional. The default value of INT64_MIN
|
|
|
|
* indicates it is not specified.
|
|
|
|
*/
|
|
|
|
COMPILER_RT_VISIBILITY void __llvm_profile_instrument_range(
|
|
|
|
uint64_t TargetValue, void *Data, uint32_t CounterIndex,
|
|
|
|
int64_t PreciseRangeStart, int64_t PreciseRangeLast, int64_t LargeValue) {
|
|
|
|
|
|
|
|
if (LargeValue != INT64_MIN && (int64_t)TargetValue >= LargeValue)
|
|
|
|
TargetValue = LargeValue;
|
|
|
|
else if ((int64_t)TargetValue < PreciseRangeStart ||
|
|
|
|
(int64_t)TargetValue > PreciseRangeLast)
|
|
|
|
TargetValue = PreciseRangeLast + 1;
|
|
|
|
|
|
|
|
__llvm_profile_instrument_target(TargetValue, Data, CounterIndex);
|
|
|
|
}
|
|
|
|
|
2016-05-13 05:43:49 +08:00
|
|
|
/*
 * A wrapper struct that represents value profile runtime data.
 * Like InstrProfRecord class which is used by profiling host tools,
 * ValueProfRuntimeRecord also implements the abstract interfaces defined in
 * ValueProfRecordClosure so that the runtime data can be serialized using
 * shared C implementation.
 */
typedef struct ValueProfRuntimeRecord {
  /* Profile data record whose value sites are being serialized. */
  const __llvm_profile_data *Data;
  /* Per-kind pointer into the flat array of per-site list heads. */
  ValueProfNode **NodesKind[IPVK_Last + 1];
  /* Per-kind arrays of per-site value counts (each capped at UCHAR_MAX). */
  uint8_t **SiteCountArray;
} ValueProfRuntimeRecord;
|
2016-05-13 05:43:49 +08:00
|
|
|
|
2016-05-15 04:12:42 +08:00
|
|
|
/* ValueProfRecordClosure Interface implementation. */
|
2016-05-13 05:43:49 +08:00
|
|
|
|
2016-05-15 04:12:42 +08:00
|
|
|
static uint32_t getNumValueSitesRT(const void *R, uint32_t VK) {
|
|
|
|
return ((const ValueProfRuntimeRecord *)R)->Data->NumValueSites[VK];
|
2016-05-13 05:43:49 +08:00
|
|
|
}
|
|
|
|
|
2016-05-15 04:12:42 +08:00
|
|
|
static uint32_t getNumValueDataRT(const void *R, uint32_t VK) {
|
|
|
|
uint32_t S = 0, I;
|
|
|
|
const ValueProfRuntimeRecord *Record = (const ValueProfRuntimeRecord *)R;
|
|
|
|
if (Record->SiteCountArray[VK] == INSTR_PROF_NULLPTR)
|
|
|
|
return 0;
|
|
|
|
for (I = 0; I < Record->Data->NumValueSites[VK]; I++)
|
|
|
|
S += Record->SiteCountArray[VK][I];
|
|
|
|
return S;
|
2016-05-13 05:43:49 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
static uint32_t getNumValueDataForSiteRT(const void *R, uint32_t VK,
|
|
|
|
uint32_t S) {
|
|
|
|
const ValueProfRuntimeRecord *Record = (const ValueProfRuntimeRecord *)R;
|
2016-05-15 04:12:42 +08:00
|
|
|
return Record->SiteCountArray[VK][S];
|
2016-05-13 05:43:49 +08:00
|
|
|
}
|
|
|
|
|
2016-05-15 04:12:42 +08:00
|
|
|
/* Single shared runtime record; (re)initialized per profile-data record by
 * initializeValueProfRuntimeRecord before serialization. */
static ValueProfRuntimeRecord RTRecord;
/* Closure binding RTRecord to the shared serialization callbacks; entries
 * not needed by the runtime writer are left null. */
static ValueProfRecordClosure RTRecordClosure = {
    &RTRecord, INSTR_PROF_NULLPTR, /* GetNumValueKinds */
    getNumValueSitesRT, getNumValueDataRT, getNumValueDataForSiteRT,
    INSTR_PROF_NULLPTR, /* RemapValueData */
    INSTR_PROF_NULLPTR, /* GetValueForSite, */
    INSTR_PROF_NULLPTR  /* AllocValueProfData */
};
|
|
|
|
|
|
|
|
/* Bind the shared RTRecord to one profile-data record: set up per-kind
 * views into the flat list-head array and fill SiteCountArray with the
 * per-site list lengths (clamped to UCHAR_MAX, since the counts are
 * stored as uint8_t). Returns the number of value kinds that have at
 * least one instrumented site. */
static uint32_t
initializeValueProfRuntimeRecord(const __llvm_profile_data *Data,
                                 uint8_t *SiteCountArray[]) {
  unsigned I, J, S = 0, NumValueKinds = 0;
  ValueProfNode **Nodes = (ValueProfNode **)Data->Values;
  RTRecord.Data = Data;
  RTRecord.SiteCountArray = SiteCountArray;
  for (I = 0; I <= IPVK_Last; I++) {
    uint16_t N = Data->NumValueSites[I];
    if (!N)
      continue;

    NumValueKinds++;

    /* Kind I's sites start at flat offset S; Nodes is null when the site
     * array was never allocated (no values recorded). */
    RTRecord.NodesKind[I] = Nodes ? &Nodes[S] : INSTR_PROF_NULLPTR;
    for (J = 0; J < N; J++) {
      /* Compute value count for each site. */
      uint32_t C = 0;
      ValueProfNode *Site =
          Nodes ? RTRecord.NodesKind[I][J] : INSTR_PROF_NULLPTR;
      while (Site) {
        C++;
        Site = Site->Next;
      }
      if (C > UCHAR_MAX)
        C = UCHAR_MAX;
      RTRecord.SiteCountArray[I][J] = C;
    }
    S += N;
  }
  return NumValueKinds;
}
|
|
|
|
|
2016-05-15 04:12:42 +08:00
|
|
|
static ValueProfNode *getNextNValueData(uint32_t VK, uint32_t Site,
|
|
|
|
InstrProfValueData *Dst,
|
|
|
|
ValueProfNode *StartNode, uint32_t N) {
|
|
|
|
unsigned I;
|
|
|
|
ValueProfNode *VNode = StartNode ? StartNode : RTRecord.NodesKind[VK][Site];
|
2016-05-13 05:43:49 +08:00
|
|
|
for (I = 0; I < N; I++) {
|
2016-05-17 04:33:30 +08:00
|
|
|
Dst[I].Value = VNode->Value;
|
|
|
|
Dst[I].Count = VNode->Count;
|
2016-05-13 05:43:49 +08:00
|
|
|
VNode = VNode->Next;
|
|
|
|
}
|
2016-05-15 04:12:42 +08:00
|
|
|
return VNode;
|
2016-05-13 05:43:49 +08:00
|
|
|
}
|
|
|
|
|
2016-06-18 02:12:50 +08:00
|
|
|
/* Serialized size (in bytes) of the value profile data for the record
 * currently bound to RTRecord, via the shared closure implementation. */
static uint32_t getValueProfDataSizeWrapper(void) {
  return getValueProfDataSize(&RTRecordClosure);
}
|
|
|
|
|
2016-05-15 04:12:42 +08:00
|
|
|
/* Convenience wrapper binding getNumValueDataForSiteRT to the shared
 * RTRecord, for use as a VPDataReaderType callback. */
static uint32_t getNumValueDataForSiteWrapper(uint32_t VK, uint32_t S) {
  return getNumValueDataForSiteRT(&RTRecord, VK, S);
}
|
|
|
|
|
2016-05-15 04:12:42 +08:00
|
|
|
/* Callback table handed to the profile writer for reading value-profile
 * data out of the runtime's linked-list representation. */
static VPDataReaderType TheVPDataReader = {
    initializeValueProfRuntimeRecord, getValueProfRecordHeaderSize,
    getFirstValueProfRecord,          getNumValueDataForSiteWrapper,
    getValueProfDataSizeWrapper,      getNextNValueData};
|
2015-12-11 04:24:09 +08:00
|
|
|
|
2016-05-15 04:12:42 +08:00
|
|
|
/* Accessor for the runtime's value-profile data reader callback table. */
COMPILER_RT_VISIBILITY VPDataReaderType *lprofGetVPDataReader() {
  return &TheVPDataReader;
}
|