[hwasan] Fix Thread reuse.

HwasanThreadList::DontNeedThread clobbers Thread::next_, breaking the
freelist. As a result, only the top of the freelist ever gets reused,
and the rest of it is lost.

Since the Thread object and its associated ring buffer together occupy only
8Kb, this is typically only noticeable in long-running processes, such as fuzzers.

Fix the problem by switching from an intrusive linked list to a vector.

Differential Revision: https://reviews.llvm.org/D91208
This commit is contained in:
Evgenii Stepanov 2020-11-10 14:15:47 -08:00
parent 07ba0662da
commit e1eeb026e6
6 changed files with 105 additions and 47 deletions

View File

@ -74,8 +74,6 @@ class Thread {
HeapAllocationsRingBuffer *heap_allocations_;
StackAllocationsRingBuffer *stack_allocations_;
Thread *next_; // All live threads form a linked list.
u64 unique_id_; // counting from zero.
u32 tagging_disabled_; // if non-zero, malloc uses zero tag in this thread.

View File

@ -66,40 +66,6 @@ static uptr RingBufferSize() {
return 0;
}
struct ThreadListHead {
Thread *list_;
ThreadListHead() : list_(nullptr) {}
void Push(Thread *t) {
t->next_ = list_;
list_ = t;
}
Thread *Pop() {
Thread *t = list_;
if (t)
list_ = t->next_;
return t;
}
void Remove(Thread *t) {
Thread **cur = &list_;
while (*cur != t) cur = &(*cur)->next_;
CHECK(*cur && "thread not found");
*cur = (*cur)->next_;
}
template <class CB>
void ForEach(CB cb) {
Thread *t = list_;
while (t) {
cb(t);
t = t->next_;
}
}
};
struct ThreadStats {
uptr n_live_threads;
uptr total_stack_size;
@ -123,14 +89,15 @@ class HwasanThreadList {
Thread *t;
{
SpinMutexLock l(&list_mutex_);
t = free_list_.Pop();
if (t) {
if (!free_list_.empty()) {
t = free_list_.back();
free_list_.pop_back();
uptr start = (uptr)t - ring_buffer_size_;
internal_memset((void *)start, 0, ring_buffer_size_ + sizeof(Thread));
} else {
t = AllocThread();
}
live_list_.Push(t);
live_list_.push_back(t);
}
t->Init((uptr)t - ring_buffer_size_, ring_buffer_size_);
AddThreadStats(t);
@ -142,12 +109,21 @@ class HwasanThreadList {
ReleaseMemoryPagesToOS(start, start + thread_alloc_size_);
}
// Unlinks t from live_list_. CHECK-fails if t is not on the list.
// Caller must hold list_mutex_.
void RemoveThreadFromLiveList(Thread *t) {
  for (uptr i = 0; i < live_list_.size(); ++i) {
    if (live_list_[i] != t)
      continue;
    live_list_.erase(&live_list_[i]);
    return;
  }
  CHECK(0 && "thread not found in live list");
}
void ReleaseThread(Thread *t) {
RemoveThreadStats(t);
t->Destroy();
SpinMutexLock l(&list_mutex_);
live_list_.Remove(t);
free_list_.Push(t);
RemoveThreadFromLiveList(t);
free_list_.push_back(t);
DontNeedThread(t);
}
@ -166,7 +142,7 @@ class HwasanThreadList {
template <class CB>
void VisitAllLiveThreads(CB cb) {
SpinMutexLock l(&list_mutex_);
live_list_.ForEach(cb);
for (Thread *t : live_list_) cb(t);
}
void AddThreadStats(Thread *t) {
@ -201,8 +177,8 @@ class HwasanThreadList {
uptr ring_buffer_size_;
uptr thread_alloc_size_;
ThreadListHead free_list_;
ThreadListHead live_list_;
InternalMmapVector<Thread *> free_list_;
InternalMmapVector<Thread *> live_list_;
SpinMutex list_mutex_;
ThreadStats stats_;

View File

@ -543,6 +543,12 @@ class InternalMmapVectorNoCtor {
Swap(size_, other.size_);
}
// Removes the element at position t by shifting the tail left one slot.
// Elements are treated as raw memory (no destructors run); t must point
// into [data(), end()).
void erase(T *t) {
  const uptr tail_count = end() - t - 1;
  if (tail_count > 0)
    internal_memmove(t, t + 1, tail_count * sizeof(T));
  --size_;
}
private:
void Realloc(uptr new_capacity) {
CHECK_GT(new_capacity, 0);

View File

@ -93,7 +93,7 @@ TEST(SanitizerCommon, InternalMmapVectorRoundUpCapacity) {
CHECK_EQ(v.capacity(), GetPageSizeCached() / sizeof(uptr));
}
TEST(SanitizerCommon, InternalMmapVectorReize) {
TEST(SanitizerCommon, InternalMmapVectorResize) {
InternalMmapVector<uptr> v;
CHECK_EQ(0U, v.size());
CHECK_GE(v.capacity(), v.size());
@ -176,6 +176,30 @@ TEST(SanitizerCommon, InternalMmapVectorSwap) {
EXPECT_EQ(vector1, vector4);
}
// Verifies InternalMmapVector::erase removes exactly one element and keeps
// the remaining elements in order, using std::vector as the reference.
TEST(SanitizerCommon, InternalMmapVectorErase) {
  InternalMmapVector<uptr> v;
  std::vector<uptr> r;
  for (uptr i = 0; i < 10; i++) {
    v.push_back(i);
    r.push_back(i);
  }

  // Compares v element-by-element against the reference vector.
  auto expect_same = [&]() {
    EXPECT_EQ(r.size(), v.size());
    for (uptr i = 0; i < r.size(); i++) EXPECT_EQ(r[i], v[i]);
  };

  // Erase the last element, then a middle one, then the first.
  v.erase(&v[9]);
  r.erase(r.begin() + 9);
  expect_same();

  v.erase(&v[3]);
  r.erase(r.begin() + 3);
  expect_same();

  v.erase(&v[0]);
  r.erase(r.begin());
  expect_same();
}
void TestThreadInfo(bool main) {
uptr stk_addr = 0;
uptr stk_size = 0;

View File

@ -0,0 +1,54 @@
// Test that Thread objects are reused.
// RUN: %clangxx_hwasan -mllvm -hwasan-instrument-stack=0 %s -o %t && %env_hwasan_opts=verbose_threads=1 %run %t 2>&1 | FileCheck %s
#include <assert.h>
#include <fcntl.h>
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <sanitizer/hwasan_interface.h>
#include "../utils.h"
pthread_mutex_t mu = PTHREAD_MUTEX_INITIALIZER;
// Worker thread body: blocks on mu (held by the spawner during creation, see
// start_stop_threads) and exits as soon as it is released. This keeps all
// workers of a batch alive simultaneously so each gets a distinct Thread
// object.
void *threadfn(void *) {
pthread_mutex_lock(UNTAG(&mu));
pthread_mutex_unlock(UNTAG(&mu));
return nullptr;
}
// Spawns N worker threads that all stay alive at the same time (they block
// on mu, which is held for the duration of the pthread_create loop), then
// releases and joins all of them.
void start_stop_threads() {
  constexpr int N = 4;
  pthread_t threads[N];

  pthread_mutex_lock(UNTAG(&mu));
  for (int i = 0; i < N; i++)
    pthread_create(&threads[i], nullptr, threadfn, nullptr);
  pthread_mutex_unlock(UNTAG(&mu));

  for (int i = 0; i < N; i++)
    pthread_join(threads[i], nullptr);
}
// Runs three batches of threads and inspects the verbose_threads log to
// verify that the Thread addresses [[A]] and [[B]] captured from the first
// batch are reused by the later batches, i.e. the free list actually
// recycles Thread objects. (The "// CHECK" comments below are FileCheck
// directives and must not be edited.)
int main() {
// Cut off initial threads.
// CHECK: === test start ===
untag_fprintf(stderr, "=== test start ===\n");
// The first batch binds two Thread addresses to FileCheck variables A and B.
// CHECK: Creating : T{{[0-9]+}} [[A:0x[0-9a-f]+]] stack:
// CHECK: Creating : T{{[0-9]+}} [[B:0x[0-9a-f]+]] stack:
start_stop_threads();
// Later batches must report the same addresses, in any order (DAG matching).
// CHECK-DAG: Creating : T{{[0-9]+}} [[A]] stack:
// CHECK-DAG: Creating : T{{[0-9]+}} [[B]] stack:
start_stop_threads();
// CHECK-DAG: Creating : T{{[0-9]+}} [[A]] stack:
// CHECK-DAG: Creating : T{{[0-9]+}} [[B]] stack:
start_stop_threads();
return 0;
}

View File

@ -34,8 +34,8 @@ void *Use(void *arg) {
// CHECK: in Deallocate
// CHECK: previously allocated here:
// CHECK: in Allocate
// CHECK: Thread: T2 0x
// CHECK: Thread: T3 0x
// CHECK-DAG: Thread: T2 0x
// CHECK-DAG: Thread: T3 0x
// CHECK-DAG: Thread: T0 0x
// CHECK-DAG: Thread: T1 0x
__sync_fetch_and_add(&state, 1);