Add a mutex to guard access to the ThreadPlanStack class

We've seen reports of crashes (none we've been able to reproduce
locally) that look like they are caused by concurrent access to a
thread plan stack.  The error paths taken when an interrupt request
to debugserver times out appear to be what triggers the concurrent
access.

Thread plan stack accesses are never in a hot loop, and there
aren't enough stacks for the extra data member to matter, so
there's really no good reason not to protect the access.
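
The pattern applied throughout is the usual one: a mutable
std::recursive_mutex data member, with a std::lock_guard taken at
the top of every method that touches the stacks.  A minimal
standalone sketch of that pattern (the class and member names here
are illustrative, not the real LLDB types):

  #include <cassert>
  #include <mutex>
  #include <vector>

  class PlanStackSketch {
  public:
    void Push(int plan) {
      std::lock_guard<std::recursive_mutex> guard(m_stack_mutex);
      m_plans.push_back(plan);
    }

    int Pop() {
      std::lock_guard<std::recursive_mutex> guard(m_stack_mutex);
      assert(m_plans.size() > 1 && "Can't pop the base plan");
      int plan = m_plans.back();
      m_plans.pop_back();
      return plan;
    }

    // Calls Pop() while already holding the lock; re-locking on the
    // same thread is why this is a recursive mutex, not std::mutex.
    void DiscardAllPlans() {
      std::lock_guard<std::recursive_mutex> guard(m_stack_mutex);
      while (m_plans.size() > 1)
        Pop();
    }

    // The mutex is mutable so const accessors can lock it too.
    bool AnyPlans() const {
      std::lock_guard<std::recursive_mutex> guard(m_stack_mutex);
      return m_plans.size() > 1;
    }

  private:
    mutable std::recursive_mutex m_stack_mutex;
    std::vector<int> m_plans{0}; // slot 0 stands in for the base plan
  };

The recursive mutex mirrors the real code, where for example
DiscardAllPlans() discards via DiscardPlan() and both take the lock;
a plain std::mutex would deadlock on that re-entry.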

Adding the mutex revealed a couple of places where we were
using "auto" in an iteration when we should have been using
"auto &" - we didn't intend to copy the stack - and I fixed
those as well.
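
Adding the mutex is most likely what surfaced these at compile
time: std::recursive_mutex is neither copyable nor movable, so a
stack copied by a by-value "auto" loop no longer compiles.  For
illustration, a minimal sketch of the pitfall (the map and element
types are stand-ins for the real tid-to-ThreadPlanStack map):

  #include <unordered_map>
  #include <vector>

  using PlanStack = std::vector<int>; // stand-in for the real stack

  void ClearAll(std::unordered_map<int, PlanStack> &plans_list) {
    // "auto" deduces a value type: each pair, and the stack inside
    // it, is copied, so clear() only empties the temporary copy.
    for (auto elem : plans_list)
      elem.second.clear();

    // "auto &" binds to the element stored in the map, so clear()
    // empties the stack that the map actually owns.
    for (auto &elem : plans_list)
      elem.second.clear();
  }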

Except for preventing crashes, this should be NFC.

Differential Revision: https://reviews.llvm.org/D106122
Jim Ingham 2021-07-15 20:39:15 -07:00
parent e37bbfe59c
commit 6eb576dcff
2 changed files with 30 additions and 3 deletions

lldb/include/lldb/Target/ThreadPlanStack.h

@@ -110,6 +110,7 @@ private:
   size_t m_completed_plan_checkpoint = 0; // Monotonically increasing token for
                                           // completed plan checkpoints.
   std::unordered_map<size_t, PlanStack> m_completed_plan_store;
+  mutable std::recursive_mutex m_stack_mutex;
 };
 
 class ThreadPlanStackMap {
@@ -153,7 +154,7 @@ public:
   }
 
   void Clear() {
-    for (auto plan : m_plans_list)
+    for (auto &plan : m_plans_list)
       plan.second.ThreadDestroyed(nullptr);
     m_plans_list.clear();
   }

lldb/source/Target/ThreadPlanStack.cpp

@@ -39,6 +39,7 @@ ThreadPlanStack::ThreadPlanStack(const Thread &thread, bool make_null) {
 void ThreadPlanStack::DumpThreadPlans(Stream &s,
                                       lldb::DescriptionLevel desc_level,
                                       bool include_internal) const {
+  std::lock_guard<std::recursive_mutex> guard(m_stack_mutex);
   s.IndentMore();
   PrintOneStack(s, "Active plan stack", m_plans, desc_level, include_internal);
   PrintOneStack(s, "Completed plan stack", m_completed_plans, desc_level,
@@ -52,6 +53,7 @@ void ThreadPlanStack::PrintOneStack(Stream &s, llvm::StringRef stack_name,
                                     const PlanStack &stack,
                                     lldb::DescriptionLevel desc_level,
                                     bool include_internal) const {
+  std::lock_guard<std::recursive_mutex> guard(m_stack_mutex);
   // If the stack is empty, just exit:
   if (stack.empty())
     return;
@@ -80,6 +82,7 @@ void ThreadPlanStack::PrintOneStack(Stream &s, llvm::StringRef stack_name,
 }
 
 size_t ThreadPlanStack::CheckpointCompletedPlans() {
+  std::lock_guard<std::recursive_mutex> guard(m_stack_mutex);
   m_completed_plan_checkpoint++;
   m_completed_plan_store.insert(
       std::make_pair(m_completed_plan_checkpoint, m_completed_plans));
@@ -87,6 +90,7 @@ size_t ThreadPlanStack::CheckpointCompletedPlans() {
 }
 
 void ThreadPlanStack::RestoreCompletedPlanCheckpoint(size_t checkpoint) {
+  std::lock_guard<std::recursive_mutex> guard(m_stack_mutex);
   auto result = m_completed_plan_store.find(checkpoint);
   assert(result != m_completed_plan_store.end() &&
          "Asked for a checkpoint that didn't exist");
@@ -95,11 +99,13 @@ void ThreadPlanStack::RestoreCompletedPlanCheckpoint(size_t checkpoint) {
 }
 
 void ThreadPlanStack::DiscardCompletedPlanCheckpoint(size_t checkpoint) {
+  std::lock_guard<std::recursive_mutex> guard(m_stack_mutex);
   m_completed_plan_store.erase(checkpoint);
 }
 
 void ThreadPlanStack::ThreadDestroyed(Thread *thread) {
   // Tell the plan stacks that this thread is going away:
+  std::lock_guard<std::recursive_mutex> guard(m_stack_mutex);
   for (ThreadPlanSP plan : m_plans)
     plan->ThreadDestroyed();
@@ -128,6 +134,7 @@ void ThreadPlanStack::PushPlan(lldb::ThreadPlanSP new_plan_sp) {
   // If the thread plan doesn't already have a tracer, give it its parent's
   // tracer:
   // The first plan has to be a base plan:
+  std::lock_guard<std::recursive_mutex> guard(m_stack_mutex);
   assert((m_plans.size() > 0 || new_plan_sp->IsBasePlan()) &&
          "Zeroth plan must be a base plan");
@@ -140,6 +147,7 @@ void ThreadPlanStack::PushPlan(lldb::ThreadPlanSP new_plan_sp) {
 }
 
 lldb::ThreadPlanSP ThreadPlanStack::PopPlan() {
+  std::lock_guard<std::recursive_mutex> guard(m_stack_mutex);
   assert(m_plans.size() > 1 && "Can't pop the base thread plan");
 
   lldb::ThreadPlanSP plan_sp = std::move(m_plans.back());
@@ -150,6 +158,7 @@ lldb::ThreadPlanSP ThreadPlanStack::PopPlan() {
 }
 
 lldb::ThreadPlanSP ThreadPlanStack::DiscardPlan() {
+  std::lock_guard<std::recursive_mutex> guard(m_stack_mutex);
   assert(m_plans.size() > 1 && "Can't discard the base thread plan");
 
   lldb::ThreadPlanSP plan_sp = std::move(m_plans.back());
@@ -162,6 +171,7 @@ lldb::ThreadPlanSP ThreadPlanStack::DiscardPlan() {
 // If the input plan is nullptr, discard all plans. Otherwise make sure this
 // plan is in the stack, and if so discard up to and including it.
 void ThreadPlanStack::DiscardPlansUpToPlan(ThreadPlan *up_to_plan_ptr) {
+  std::lock_guard<std::recursive_mutex> guard(m_stack_mutex);
   int stack_size = m_plans.size();
 
   if (up_to_plan_ptr == nullptr) {
@@ -189,6 +199,7 @@ void ThreadPlanStack::DiscardPlansUpToPlan(ThreadPlan *up_to_plan_ptr) {
 }
 
 void ThreadPlanStack::DiscardAllPlans() {
+  std::lock_guard<std::recursive_mutex> guard(m_stack_mutex);
   int stack_size = m_plans.size();
   for (int i = stack_size - 1; i > 0; i--) {
     DiscardPlan();
@@ -197,6 +208,7 @@ void ThreadPlanStack::DiscardAllPlans() {
 }
 
 void ThreadPlanStack::DiscardConsultingMasterPlans() {
+  std::lock_guard<std::recursive_mutex> guard(m_stack_mutex);
   while (true) {
     int master_plan_idx;
     bool discard = true;
@@ -230,11 +242,13 @@ void ThreadPlanStack::DiscardConsultingMasterPlans() {
 }
 
 lldb::ThreadPlanSP ThreadPlanStack::GetCurrentPlan() const {
+  std::lock_guard<std::recursive_mutex> guard(m_stack_mutex);
   assert(m_plans.size() != 0 && "There will always be a base plan.");
   return m_plans.back();
 }
 
 lldb::ThreadPlanSP ThreadPlanStack::GetCompletedPlan(bool skip_private) const {
+  std::lock_guard<std::recursive_mutex> guard(m_stack_mutex);
   if (m_completed_plans.empty())
     return {};
@@ -252,6 +266,7 @@ lldb::ThreadPlanSP ThreadPlanStack::GetCompletedPlan(bool skip_private) const {
 lldb::ThreadPlanSP ThreadPlanStack::GetPlanByIndex(uint32_t plan_idx,
                                                    bool skip_private) const {
+  std::lock_guard<std::recursive_mutex> guard(m_stack_mutex);
   uint32_t idx = 0;
 
   for (lldb::ThreadPlanSP plan_sp : m_plans) {
@@ -265,6 +280,7 @@ lldb::ThreadPlanSP ThreadPlanStack::GetPlanByIndex(uint32_t plan_idx,
 }
 
 lldb::ValueObjectSP ThreadPlanStack::GetReturnValueObject() const {
+  std::lock_guard<std::recursive_mutex> guard(m_stack_mutex);
   if (m_completed_plans.empty())
     return {};
@@ -278,6 +294,7 @@ lldb::ValueObjectSP ThreadPlanStack::GetReturnValueObject() const {
 }
 
 lldb::ExpressionVariableSP ThreadPlanStack::GetExpressionVariable() const {
+  std::lock_guard<std::recursive_mutex> guard(m_stack_mutex);
   if (m_completed_plans.empty())
     return {};
@@ -290,19 +307,23 @@ lldb::ExpressionVariableSP ThreadPlanStack::GetExpressionVariable() const {
   return {};
 }
 
 bool ThreadPlanStack::AnyPlans() const {
+  std::lock_guard<std::recursive_mutex> guard(m_stack_mutex);
   // There is always a base plan...
   return m_plans.size() > 1;
 }
 
 bool ThreadPlanStack::AnyCompletedPlans() const {
+  std::lock_guard<std::recursive_mutex> guard(m_stack_mutex);
   return !m_completed_plans.empty();
 }
 
 bool ThreadPlanStack::AnyDiscardedPlans() const {
+  std::lock_guard<std::recursive_mutex> guard(m_stack_mutex);
   return !m_discarded_plans.empty();
 }
 
 bool ThreadPlanStack::IsPlanDone(ThreadPlan *in_plan) const {
+  std::lock_guard<std::recursive_mutex> guard(m_stack_mutex);
   for (auto plan : m_completed_plans) {
     if (plan.get() == in_plan)
       return true;
@@ -311,6 +332,7 @@ bool ThreadPlanStack::IsPlanDone(ThreadPlan *in_plan) const {
 }
 
 bool ThreadPlanStack::WasPlanDiscarded(ThreadPlan *in_plan) const {
+  std::lock_guard<std::recursive_mutex> guard(m_stack_mutex);
   for (auto plan : m_discarded_plans) {
     if (plan.get() == in_plan)
       return true;
@@ -319,6 +341,7 @@ bool ThreadPlanStack::WasPlanDiscarded(ThreadPlan *in_plan) const {
 }
 
 ThreadPlan *ThreadPlanStack::GetPreviousPlan(ThreadPlan *current_plan) const {
+  std::lock_guard<std::recursive_mutex> guard(m_stack_mutex);
   if (current_plan == nullptr)
     return nullptr;
@@ -346,6 +369,7 @@ ThreadPlan *ThreadPlanStack::GetPreviousPlan(ThreadPlan *current_plan) const {
 }
 
 ThreadPlan *ThreadPlanStack::GetInnermostExpression() const {
+  std::lock_guard<std::recursive_mutex> guard(m_stack_mutex);
   int stack_size = m_plans.size();
 
   for (int i = stack_size - 1; i > 0; i--) {
@@ -356,11 +380,13 @@ ThreadPlan *ThreadPlanStack::GetInnermostExpression() const {
 }
 
 void ThreadPlanStack::ClearThreadCache() {
+  std::lock_guard<std::recursive_mutex> guard(m_stack_mutex);
   for (lldb::ThreadPlanSP thread_plan_sp : m_plans)
     thread_plan_sp->ClearThreadCache();
 }
 
 void ThreadPlanStack::WillResume() {
+  std::lock_guard<std::recursive_mutex> guard(m_stack_mutex);
   m_completed_plans.clear();
   m_discarded_plans.clear();
 }
@@ -388,7 +414,7 @@ void ThreadPlanStackMap::Update(ThreadList &current_threads,
   std::vector<lldb::tid_t> missing_threads;
   // If we are going to delete plans from the plan stack,
   // then scan for absent TID's:
-  for (auto thread_plans : m_plans_list) {
+  for (auto &thread_plans : m_plans_list) {
     lldb::tid_t cur_tid = thread_plans.first;
     ThreadSP thread_sp = current_threads.FindThreadByID(cur_tid);
     if (!thread_sp)
@@ -403,7 +429,7 @@ void ThreadPlanStackMap::DumpPlans(Stream &strm,
                                    lldb::DescriptionLevel desc_level,
                                    bool internal, bool condense_if_trivial,
                                    bool skip_unreported) {
-  for (auto elem : m_plans_list) {
+  for (auto &elem : m_plans_list) {
     lldb::tid_t tid = elem.first;
     uint32_t index_id = 0;
     ThreadSP thread_sp = m_process.GetThreadList().FindThreadByID(tid);