[Sanitizer] Rename InternalVector to InternalMmapVector
llvm-svn: 183972
commit b0d92b3312 (parent b289319fb8)
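What the rename conveys: this container's storage comes straight from MmapOrDie/UnmapOrDie rather than from an allocator, so the new name InternalMmapVector says how it is backed. A minimal usage sketch (assumes the compiler-rt headers and namespace __sanitizer; illustration only, not part of this commit):

void ExampleUse() {
  // Capacity must be positive; the backing store is mmap'ed up front.
  InternalMmapVector<uptr> v(GetPageSizeCached());
  for (uptr i = 0; i < 100; i++)
    v.push_back(i);      // remaps to a larger region when capacity runs out
  uptr last = v.back();  // 99
  v.pop_back();
  (void)last;
}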
compiler-rt/lib/asan/asan_globals.cc
@@ -41,7 +41,7 @@ struct DynInitGlobal {
   Global g;
   bool initialized;
 };
-typedef InternalVector<DynInitGlobal> VectorOfGlobals;
+typedef InternalMmapVector<DynInitGlobal> VectorOfGlobals;
 // Lazy-initialized and never deleted.
 static VectorOfGlobals *dynamic_init_globals;
 
compiler-rt/lib/lsan/lsan_common.cc
@@ -87,7 +87,8 @@ static inline bool CanBeAHeapPointer(uptr p) {
 // chunks (tag = kReachable or kIgnored) and finding indirectly leaked chunks
 // (tag = kIndirectlyLeaked). In the second case, there's no flood fill,
 // so frontier = 0.
-void ScanRangeForPointers(uptr begin, uptr end, InternalVector<uptr> *frontier,
+void ScanRangeForPointers(uptr begin, uptr end,
+                          InternalMmapVector<uptr> *frontier,
                           const char *region_type, ChunkTag tag) {
   const uptr alignment = flags()->pointer_alignment();
   if (flags()->log_pointers)
@@ -116,7 +117,7 @@ void ScanRangeForPointers(uptr begin, uptr end, InternalVector<uptr> *frontier,
 
 // Scan thread data (stacks and TLS) for heap pointers.
 static void ProcessThreads(SuspendedThreadsList const &suspended_threads,
-                           InternalVector<uptr> *frontier) {
+                           InternalMmapVector<uptr> *frontier) {
   InternalScopedBuffer<uptr> registers(SuspendedThreadsList::RegisterCount());
   uptr registers_begin = reinterpret_cast<uptr>(registers.data());
   uptr registers_end = registers_begin + registers.size();
@@ -183,7 +184,7 @@ static void ProcessThreads(SuspendedThreadsList const &suspended_threads,
   }
 }
 
-static void FloodFillTag(InternalVector<uptr> *frontier, ChunkTag tag) {
+static void FloodFillTag(InternalMmapVector<uptr> *frontier, ChunkTag tag) {
   while (frontier->size()) {
     uptr next_chunk = frontier->back();
     frontier->pop_back();
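FloodFillTag above is the classic worklist loop: pop an address off the frontier, tag that chunk, and let the scan push newly reachable chunks back on. A self-contained sketch of the same pattern (plain C++ with hypothetical mark/scan callbacks, not sanitizer code):

#include <cstdint>
#include <vector>

// mark() returns true on first visit; scan() pushes newly discovered pointers.
void FloodFill(std::vector<uintptr_t> *frontier,
               bool (*mark)(uintptr_t),
               void (*scan)(uintptr_t, std::vector<uintptr_t> *)) {
  while (!frontier->empty()) {
    uintptr_t next = frontier->back();
    frontier->pop_back();
    if (mark(next))          // tag the chunk the first time we see it...
      scan(next, frontier);  // ...then scan it for more reachable chunks
  }
}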
@@ -214,7 +215,7 @@ void CollectSuppressedCb::operator()(void *p) const {
 // Set the appropriate tag on each chunk.
 static void ClassifyAllChunks(SuspendedThreadsList const &suspended_threads) {
   // Holds the flood fill frontier.
-  InternalVector<uptr> frontier(GetPageSizeCached());
+  InternalMmapVector<uptr> frontier(GetPageSizeCached());
 
   if (flags()->use_globals)
     ProcessGlobalRegions(&frontier);
compiler-rt/lib/lsan/lsan_common.h
@@ -77,7 +77,7 @@ inline Flags *flags() { return &lsan_flags; }
 
 void InitCommonLsan();
 // Testing interface. Find leaked chunks and dump their addresses to vector.
-void ReportLeaked(InternalVector<void *> *leaked, uptr sources);
+void ReportLeaked(InternalMmapVector<void *> *leaked, uptr sources);
 // Normal leak check. Find leaks and print a report according to flags.
 void DoLeakCheck();
 
@@ -97,15 +97,16 @@ class LeakReport {
   void PrintSummary();
   bool IsEmpty() { return leaks_.size() == 0; }
  private:
-  InternalVector<Leak> leaks_;
+  InternalMmapVector<Leak> leaks_;
 };
 
 // Platform-specific functions.
 void InitializePlatformSpecificModules();
-void ProcessGlobalRegions(InternalVector<uptr> *frontier);
-void ProcessPlatformSpecificAllocations(InternalVector<uptr> *frontier);
+void ProcessGlobalRegions(InternalMmapVector<uptr> *frontier);
+void ProcessPlatformSpecificAllocations(InternalMmapVector<uptr> *frontier);
 
-void ScanRangeForPointers(uptr begin, uptr end, InternalVector<uptr> *frontier,
+void ScanRangeForPointers(uptr begin, uptr end,
+                          InternalMmapVector<uptr> *frontier,
                           const char *region_type, ChunkTag tag);
 
 // Callables for iterating over chunks. Those classes are used as template
@@ -116,11 +117,12 @@ void ScanRangeForPointers(uptr begin, uptr end, InternalVector<uptr> *frontier,
 // as reachable and adds them to the frontier.
 class ProcessPlatformSpecificAllocationsCb {
  public:
-  explicit ProcessPlatformSpecificAllocationsCb(InternalVector<uptr> *frontier)
+  explicit ProcessPlatformSpecificAllocationsCb(
+      InternalMmapVector<uptr> *frontier)
       : frontier_(frontier) {}
   void operator()(void *p) const;
  private:
-  InternalVector<uptr> *frontier_;
+  InternalMmapVector<uptr> *frontier_;
 };
 
 // Prints addresses of unreachable chunks.
@@ -149,11 +151,11 @@ class MarkIndirectlyLeakedCb {
 // Finds all chunk marked as kIgnored and adds their addresses to frontier.
 class CollectSuppressedCb {
  public:
-  explicit CollectSuppressedCb(InternalVector<uptr> *frontier)
+  explicit CollectSuppressedCb(InternalMmapVector<uptr> *frontier)
       : frontier_(frontier) {}
   void operator()(void *p) const;
  private:
-  InternalVector<uptr> *frontier_;
+  InternalMmapVector<uptr> *frontier_;
 };
 
 enum IgnoreObjectResult {
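All of these Cb classes follow one pattern: a small functor that captures shared state (the frontier) and is handed to ForEachChunk as a template parameter. A self-contained sketch of the idea (hypothetical names, std::vector standing in for the sanitizer container):

#include <vector>

// The callback: holds a pointer to shared state, does its work in operator().
class CollectCb {
 public:
  explicit CollectCb(std::vector<void *> *out) : out_(out) {}
  void operator()(void *p) const { out_->push_back(p); }
 private:
  std::vector<void *> *out_;
};

// A ForEachChunk-style driver: applies any such callable to every chunk.
template <class Callable>
void ForEachChunkSketch(void **chunks, int n, const Callable &cb) {
  for (int i = 0; i < n; i++)
    cb(chunks[i]);
}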
compiler-rt/lib/lsan/lsan_common_linux.cc
@@ -53,8 +53,8 @@ void InitializePlatformSpecificModules() {
 
 static int ProcessGlobalRegionsCallback(struct dl_phdr_info *info, size_t size,
                                         void *data) {
-  InternalVector<uptr> *frontier =
-      reinterpret_cast<InternalVector<uptr> *>(data);
+  InternalMmapVector<uptr> *frontier =
+      reinterpret_cast<InternalMmapVector<uptr> *>(data);
   for (uptr j = 0; j < info->dlpi_phnum; j++) {
     const ElfW(Phdr) *phdr = &(info->dlpi_phdr[j]);
     // We're looking for .data and .bss sections, which reside in writeable,
@@ -83,7 +83,7 @@ static int ProcessGlobalRegionsCallback(struct dl_phdr_info *info, size_t size,
 }
 
 // Scan global variables for heap pointers.
-void ProcessGlobalRegions(InternalVector<uptr> *frontier) {
+void ProcessGlobalRegions(InternalMmapVector<uptr> *frontier) {
   // FIXME: dl_iterate_phdr acquires a linker lock, so we run a risk of
   // deadlocking by running this under StopTheWorld. However, the lock is
   // reentrant, so we should be able to fix this by acquiring the lock before
@@ -114,7 +114,7 @@ void ProcessPlatformSpecificAllocationsCb::operator()(void *p) const {
 
 // Handle dynamically allocated TLS blocks by treating all chunks allocated from
 // ld-linux.so as reachable.
-void ProcessPlatformSpecificAllocations(InternalVector<uptr> *frontier) {
+void ProcessPlatformSpecificAllocations(InternalMmapVector<uptr> *frontier) {
   if (!flags()->use_tls) return;
   if (!linker) return;
   ForEachChunk(ProcessPlatformSpecificAllocationsCb(frontier));
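In ProcessGlobalRegionsCallback above, the void *data parameter is how dl_iterate_phdr threads a typed pointer through its C callback, which is why the reinterpret_cast has to follow the rename. A self-contained Linux sketch of the same pattern (it collects module names instead of scanning sections):

#include <link.h>
#include <cstdio>
#include <vector>

static int CollectModuleNames(struct dl_phdr_info *info, size_t /*size*/,
                              void *data) {
  // Recover the typed pointer that was smuggled through void *.
  auto *names = reinterpret_cast<std::vector<const char *> *>(data);
  names->push_back(info->dlpi_name);
  return 0;  // returning non-zero would stop the iteration
}

int main() {
  std::vector<const char *> names;
  dl_iterate_phdr(CollectModuleNames, &names);
  for (const char *name : names)
    std::printf("%s\n", name);
}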
compiler-rt/lib/sanitizer_common/sanitizer_common.h
@@ -274,15 +274,15 @@ INLINE int ToLower(int c) {
 // small vectors.
 // WARNING: The current implementation supports only POD types.
 template<typename T>
-class InternalVector {
+class InternalMmapVector {
  public:
-  explicit InternalVector(uptr initial_capacity) {
+  explicit InternalMmapVector(uptr initial_capacity) {
     CHECK_GT(initial_capacity, 0);
     capacity_ = initial_capacity;
     size_ = 0;
-    data_ = (T *)MmapOrDie(capacity_ * sizeof(T), "InternalVector");
+    data_ = (T *)MmapOrDie(capacity_ * sizeof(T), "InternalMmapVector");
   }
-  ~InternalVector() {
+  ~InternalMmapVector() {
     UnmapOrDie(data_, capacity_ * sizeof(T));
   }
   T &operator[](uptr i) {
@@ -324,7 +324,7 @@ class InternalVector {
     CHECK_GT(new_capacity, 0);
     CHECK_LE(size_, new_capacity);
     T *new_data = (T *)MmapOrDie(new_capacity * sizeof(T),
-                                 "InternalVector");
+                                 "InternalMmapVector");
     internal_memcpy(new_data, data_, size_ * sizeof(T));
     T *old_data = data_;
     data_ = new_data;
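This Resize shows the growth strategy: map a larger region, raw-copy the elements, then drop the old mapping; the raw copy is exactly why the class is documented as POD-only. A self-contained sketch of the strategy, with malloc/free standing in for MmapOrDie/UnmapOrDie:

#include <cassert>
#include <cstdlib>
#include <cstring>

template <typename T>  // T must be trivially copyable: we memcpy it below.
void GrowBuffer(T *&data, size_t &capacity, size_t size, size_t new_capacity) {
  assert(new_capacity > 0 && size <= new_capacity);
  T *new_data = static_cast<T *>(std::malloc(new_capacity * sizeof(T)));
  assert(new_data);  // the real code aborts inside MmapOrDie on failure
  std::memcpy(new_data, data, size * sizeof(T));  // raw copy, no constructors
  T *old_data = data;
  data = new_data;
  std::free(old_data);  // the real code unmaps the old region
  capacity = new_capacity;
}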
@@ -332,15 +332,15 @@ class InternalVector {
     capacity_ = new_capacity;
   }
   // Disallow evil constructors.
-  InternalVector(const InternalVector&);
-  void operator=(const InternalVector&);
+  InternalMmapVector(const InternalMmapVector&);
+  void operator=(const InternalMmapVector&);
 
   T *data_;
   uptr capacity_;
   uptr size_;
 };
 
-// HeapSort for arrays and InternalVector.
+// HeapSort for arrays and InternalMmapVector.
 template<class Container, class Compare>
 void InternalSort(Container *v, uptr size, Compare comp) {
   if (size < 2)
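InternalSort only needs operator[] and a less-than comparator, so one in-place heapsort serves both plain arrays and InternalMmapVector. A self-contained sketch of such a heapsort (illustration, not the sanitizer implementation):

#include <cstddef>
#include <utility>

template <class Container, class Compare>
void HeapSortSketch(Container *v, size_t size, Compare comp) {
  if (size < 2)
    return;
  // Restore the max-heap property below `root` within [0, end).
  auto sift_down = [&](size_t root, size_t end) {
    for (;;) {
      size_t child = 2 * root + 1;
      if (child >= end) break;
      if (child + 1 < end && comp((*v)[child], (*v)[child + 1])) child++;
      if (!comp((*v)[root], (*v)[child])) break;
      std::swap((*v)[root], (*v)[child]);
      root = child;
    }
  };
  for (size_t i = size / 2; i-- > 0;)  // heapify bottom-up
    sift_down(i, size);
  for (size_t i = size - 1; i > 0; i--) {  // repeatedly extract the max
    std::swap((*v)[0], (*v)[i]);
    sift_down(0, i);
  }
}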
compiler-rt/lib/sanitizer_common/sanitizer_stoptheworld.h
@@ -46,7 +46,7 @@ class SuspendedThreadsList {
   }
 
  private:
-  InternalVector<SuspendedThreadID> thread_ids_;
+  InternalMmapVector<SuspendedThreadID> thread_ids_;
 
   // Prohibit copy and assign.
   SuspendedThreadsList(const SuspendedThreadsList&);
compiler-rt/lib/sanitizer_common/tests/sanitizer_common_test.cc
@@ -97,8 +97,8 @@ TEST(SanitizerCommon, SanitizerSetThreadName) {
 }
 #endif
 
-TEST(SanitizerCommon, InternalVector) {
-  InternalVector<uptr> vector(1);
+TEST(SanitizerCommon, InternalMmapVector) {
+  InternalMmapVector<uptr> vector(1);
   for (uptr i = 0; i < 100; i++) {
     EXPECT_EQ(i, vector.size());
     vector.push_back(i);