Make watchpoint hit counts reliable for architectures that stop before evaluation.
Since we want to present the "new & old" values for watchpoint hits, on architectures (including the ARM family) that stop before the triggering instruction has run, we need to single-step over that instruction before stopping for real.

Previously this was done, incorrectly, directly in StopInfoWatchpoint::ShouldStop. That causes problems if more than one thread stops "for a reason" at the same time as the watchpoint, since the other actions don't expect the process to make progress in this part of the execution-control machinery.

The correct way to do this is to schedule the step-over using ThreadPlans, and then restore the stop info after that plan stops, so that the rest of the stop-info actions can happen once all the other threads have handled their immediate actions as well.

Differential Revision: https://reviews.llvm.org/D129814
commit 5778ada8e5 (parent 8d0383eb69)
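In brief: on targets where the watchpoint exception arrives after the access (x86-style), the stop info can stop immediately; on targets where it arrives before the access (ARM-style), the stop info now queues a ThreadPlanStepOverWatchpoint and only reports a real stop once that plan has run the instruction and handed the stop info back. Below is a self-contained toy model of that ordering; StopInfoWP and its flags are illustrative stand-ins, not LLDB's actual API.

    #include <cassert>
    #include <iostream>
    #include <memory>

    // Stand-in for StopInfoWatchpoint; like the patched StopInfo it derives from
    // enable_shared_from_this so it could hand a shared_ptr to itself to the plan.
    struct StopInfoWP : std::enable_shared_from_this<StopInfoWP> {
      bool using_step_over_plan = false;
      bool step_over_plan_complete = false;

      // Synchronous "should we stop?" query, modeled on ShouldStopSynchronous.
      bool ShouldStopSynchronous(bool wp_triggers_after) {
        if (using_step_over_plan)     // plan already queued: stop only when done
          return step_over_plan_complete;
        if (wp_triggers_after)        // x86-style: value already written, stop now
          return true;
        using_step_over_plan = true;  // ARM-style: defer to a step-over plan
        return false;                 // keep running until the plan completes
      }

      // Called by the (toy) step-over plan when it finishes, as in the patch.
      void SetStepOverPlanComplete() { step_over_plan_complete = true; }
    };

    int main() {
      auto si = std::make_shared<StopInfoWP>();
      // First report: watchpoint fired before the access ran, so don't stop yet.
      assert(!si->ShouldStopSynchronous(/*wp_triggers_after=*/false));
      // The step-over plan single-steps the thread and then reports completion.
      si->SetStepOverPlanComplete();
      // Second report: now we really stop, with the new value already in memory.
      assert(si->ShouldStopSynchronous(/*wp_triggers_after=*/false));
      std::cout << "stop ordering as expected\n";
    }

The real change below also disables the watchpoint around the single step, preserves its hardware index, and (in PerformAction) undoes the hit count when a condition veto means the hit should not count.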
@@ -157,12 +157,15 @@ public:
private:
  friend class Target;
  friend class WatchpointList;
  friend class StopInfoWatchpoint; // This needs to call UndoHitCount()

  void ResetHistoricValues() {
    m_old_value_sp.reset();
    m_new_value_sp.reset();
  }

  void UndoHitCount() { m_hit_counter.Decrement(); }

  Target &m_target;
  bool m_enabled;     // Is this watchpoint enabled
  bool m_is_hardware; // Is this a hardware watchpoint

@@ -17,7 +17,7 @@

namespace lldb_private {

class StopInfo {
class StopInfo : public std::enable_shared_from_this<StopInfo> {
  friend class Process::ProcessEventData;
  friend class ThreadPlanBase;

@@ -20,6 +20,7 @@
#include "lldb/Target/Target.h"
#include "lldb/Target/Thread.h"
#include "lldb/Target/ThreadPlan.h"
#include "lldb/Target/ThreadPlanStepInstruction.h"
#include "lldb/Target/UnixSignals.h"
#include "lldb/Utility/LLDBLog.h"
#include "lldb/Utility/Log.h"

@@ -690,40 +691,128 @@ public:
  }

protected:
  using StopInfoWatchpointSP = std::shared_ptr<StopInfoWatchpoint>;
  // This plan is used to orchestrate stepping over the watchpoint for
  // architectures (e.g. ARM) that report the watch before running the watched
  // access. This is the sort of job you have to defer to the thread plans;
  // if you try to do it directly in the stop info and there are other threads
  // that needed to process this stop, you will have yanked control away from
  // them and they won't behave correctly.
  class ThreadPlanStepOverWatchpoint : public ThreadPlanStepInstruction {
  public:
    ThreadPlanStepOverWatchpoint(Thread &thread,
                                 StopInfoWatchpointSP stop_info_sp,
                                 WatchpointSP watch_sp)
        : ThreadPlanStepInstruction(thread, false, true, eVoteNoOpinion,
                                    eVoteNoOpinion),
          m_stop_info_sp(stop_info_sp), m_watch_sp(watch_sp) {
      assert(watch_sp);
      m_watch_index = watch_sp->GetHardwareIndex();
    }

    bool DoWillResume(lldb::StateType resume_state,
                      bool current_plan) override {
      if (m_did_reset_watchpoint) {
        GetThread().GetProcess()->DisableWatchpoint(m_watch_sp.get(), false);
        m_did_reset_watchpoint = false;
      }
      return true;
    }

    void DidPop() override {
      // Don't artificially keep the watchpoint alive.
      m_watch_sp.reset();
    }

    bool ShouldStop(Event *event_ptr) override {
      bool should_stop = ThreadPlanStepInstruction::ShouldStop(event_ptr);
      bool plan_done = MischiefManaged();
      if (plan_done) {
        m_stop_info_sp->SetStepOverPlanComplete();
        GetThread().SetStopInfo(m_stop_info_sp);
        ResetWatchpoint();
      }
      return should_stop;
    }

  protected:
    void ResetWatchpoint() {
      if (m_did_reset_watchpoint)
        return;
      m_did_reset_watchpoint = true;
      GetThread().GetProcess()->EnableWatchpoint(m_watch_sp.get(), false);
      m_watch_sp->SetHardwareIndex(m_watch_index);
    }

  private:
    StopInfoWatchpointSP m_stop_info_sp;
    WatchpointSP m_watch_sp;
    uint32_t m_watch_index = LLDB_INVALID_INDEX32;
    bool m_did_reset_watchpoint = false;
  };

  bool ShouldStopSynchronous(Event *event_ptr) override {
    // ShouldStop() method is idempotent and should not affect hit count. See
    // Process::RunPrivateStateThread()->Process()->HandlePrivateEvent()
    // -->Process()::ShouldBroadcastEvent()->ThreadList::ShouldStop()->
    // Thread::ShouldStop()->ThreadPlanBase::ShouldStop()->
    // StopInfoWatchpoint::ShouldStop() and
    // Event::DoOnRemoval()->Process::ProcessEventData::DoOnRemoval()->
    // StopInfoWatchpoint::PerformAction().
    if (m_should_stop_is_valid)
      return m_should_stop;

    // If we are running our step over the watchpoint plan, stop if it's done
    // and continue if it's not:
    if (m_using_step_over_plan)
      return m_step_over_plan_complete;

    ThreadSP thread_sp(m_thread_wp.lock());
    if (thread_sp) {
      WatchpointSP wp_sp(
          thread_sp->CalculateTarget()->GetWatchpointList().FindByID(
              GetValue()));
      if (wp_sp) {
        // Check if we should stop at a watchpoint.
        ExecutionContext exe_ctx(thread_sp->GetStackFrameAtIndex(0));
        StoppointCallbackContext context(event_ptr, exe_ctx, true);
        m_should_stop = wp_sp->ShouldStop(&context);
      } else {
        Log *log = GetLog(LLDBLog::Process);

        LLDB_LOGF(log,
                  "Process::%s could not find watchpoint location id: %" PRId64
                  "...",
                  __FUNCTION__, GetValue());

        m_should_stop = true;
      }
    }
    m_should_stop_is_valid = true;
    return m_should_stop;

    assert(thread_sp);
    WatchpointSP wp_sp(
        thread_sp->CalculateTarget()->GetWatchpointList().FindByID(GetValue()));
    if (!wp_sp) {
      Log *log = GetLog(LLDBLog::Process);

      LLDB_LOGF(log,
                "Process::%s could not find watchpoint location id: %" PRId64
                "...",
                __FUNCTION__, GetValue());

      m_should_stop = true;
      m_should_stop_is_valid = true;
      return true;
    }

    ExecutionContext exe_ctx(thread_sp->GetStackFrameAtIndex(0));
    StoppointCallbackContext context(event_ptr, exe_ctx, true);
    m_should_stop = wp_sp->ShouldStop(&context);
    if (!m_should_stop) {
      // This won't happen at present because we only allow one watchpoint per
      // watched range. So we won't stop at a watched address with a disabled
      // watchpoint. If we start allowing overlapping watchpoints, then we
      // will have to make watchpoints be real "WatchpointSite" and delegate to
      // all the watchpoints sharing the site. In that case, the code below
      // would be the right thing to do.
      m_should_stop_is_valid = true;
      return m_should_stop;
    }
    // If this is a system where we need to execute the watchpoint by hand
    // after the hit, queue a thread plan to do that, and then say not to stop.
    // Otherwise, let the async action figure out whether the watchpoint should
    // stop.

    ProcessSP process_sp = exe_ctx.GetProcessSP();
    uint32_t num;
    bool wp_triggers_after;

    if (process_sp->GetWatchpointSupportInfo(num, wp_triggers_after)
            .Success()) {
      if (wp_triggers_after)
        return true;

      StopInfoWatchpointSP me_as_siwp_sp
          = std::static_pointer_cast<StopInfoWatchpoint>(shared_from_this());
      ThreadPlanSP step_over_wp_sp(new ThreadPlanStepOverWatchpoint(
          *(thread_sp.get()), me_as_siwp_sp, wp_sp));
      Status error;
      error = thread_sp->QueueThreadPlan(step_over_wp_sp, false);
      m_using_step_over_plan = true;
      return !error.Success();
    }
    // If we don't have to step over the watchpoint, just let the PerformAction
    // determine what we should do.
    return true;
  }

  bool ShouldStop(Event *event_ptr) override {

@@ -749,57 +838,12 @@ protected:
          thread_sp->CalculateTarget()->GetWatchpointList().FindByID(
              GetValue()));
      if (wp_sp) {
        ExecutionContext exe_ctx(thread_sp->GetStackFrameAtIndex(0));
        ProcessSP process_sp = exe_ctx.GetProcessSP();

        {
          // check if this process is running on an architecture where
          // watchpoints trigger before the associated instruction runs. if so,
          // disable the WP, single-step and then re-enable the watchpoint
          if (process_sp) {
            uint32_t num;
            bool wp_triggers_after;

            if (process_sp->GetWatchpointSupportInfo(num, wp_triggers_after)
                    .Success()) {
              if (!wp_triggers_after) {
                // We need to preserve the watch_index before watchpoint is
                // disable. Since Watchpoint::SetEnabled will clear the watch
                // index. This will fix TestWatchpointIter failure
                Watchpoint *wp = wp_sp.get();
                uint32_t watch_index = wp->GetHardwareIndex();
                process_sp->DisableWatchpoint(wp, false);
                StopInfoSP stored_stop_info_sp = thread_sp->GetStopInfo();
                assert(stored_stop_info_sp.get() == this);

                Status new_plan_status;
                ThreadPlanSP new_plan_sp(
                    thread_sp->QueueThreadPlanForStepSingleInstruction(
                        false, // step-over
                        false, // abort_other_plans
                        true,  // stop_other_threads
                        new_plan_status));
                if (new_plan_sp && new_plan_status.Success()) {
                  new_plan_sp->SetIsControllingPlan(true);
                  new_plan_sp->SetOkayToDiscard(false);
                  new_plan_sp->SetPrivate(true);
                }
                process_sp->GetThreadList().SetSelectedThreadByID(
                    thread_sp->GetID());
                process_sp->ResumeSynchronous(nullptr);
                process_sp->GetThreadList().SetSelectedThreadByID(
                    thread_sp->GetID());
                thread_sp->SetStopInfo(stored_stop_info_sp);
                process_sp->EnableWatchpoint(wp, false);
                wp->SetHardwareIndex(watch_index);
              }
            }
          }
        }

        // This sentry object makes sure the current watchpoint is disabled
        // while performing watchpoint actions, and it is then enabled after we
        // are finished.
        ExecutionContext exe_ctx(thread_sp->GetStackFrameAtIndex(0));
        ProcessSP process_sp = exe_ctx.GetProcessSP();

        WatchpointSentry sentry(process_sp, wp_sp);

        /*

@@ -825,18 +869,10 @@ protected:
        }
      }

      // TODO: This condition should be checked in the synchronous part of the
      // watchpoint code
      // (Watchpoint::ShouldStop), so that we avoid pulling an event even if
      // the watchpoint fails the ignore count condition. It is moved here
      // temporarily, because for archs with
      // watchpoint_exceptions_received=before, the code in the previous
      // lines takes care of moving the inferior to next PC. We have to check
      // the ignore count condition after this is done, otherwise we will hit
      // same watchpoint multiple times until we pass ignore condition, but
      // we won't actually be ignoring them.
      if (wp_sp->GetHitCount() <= wp_sp->GetIgnoreCount())
      if (wp_sp->GetHitCount() <= wp_sp->GetIgnoreCount()) {
        m_should_stop = false;
        m_should_stop_is_valid = true;
      }

      Debugger &debugger = exe_ctx.GetTargetRef().GetDebugger();

@@ -859,10 +895,9 @@ protected:
              Scalar scalar_value;
              if (result_value_sp->ResolveValue(scalar_value)) {
                if (scalar_value.ULongLong(1) == 0) {
                  // We have been vetoed. This takes precedence over querying
                  // the watchpoint whether it should stop (aka ignore count
                  // and friends). See also StopInfoWatchpoint::ShouldStop()
                  // as well as Process::ProcessEventData::DoOnRemoval().
                  // The condition failed, which we consider "not having hit
                  // the watchpoint" so undo the hit count here.
                  wp_sp->UndoHitCount();
                  m_should_stop = false;
                } else
                  m_should_stop = true;

@@ -946,9 +981,16 @@ protected:
  }

private:
  void SetStepOverPlanComplete() {
    assert(m_using_step_over_plan);
    m_step_over_plan_complete = true;
  }

  bool m_should_stop = false;
  bool m_should_stop_is_valid = false;
  lldb::addr_t m_watch_hit_addr;
  bool m_step_over_plan_complete = false;
  bool m_using_step_over_plan = false;
};

// StopInfoUnixSignal

@@ -82,4 +82,4 @@ class WatchpointConditionCmdTestCase(TestBase):
        # Use the '-v' option to do verbose listing of the watchpoint.
        # The hit count should now be 2.
        self.expect("watchpoint list -v",
                    substrs=['hit_count = 5'])
                    substrs=['hit_count = 1'])

@@ -11,10 +11,6 @@ class ConcurrentDelayWatchBreak(ConcurrentEventsBase):

    # Atomic sequences are not supported yet for MIPS in LLDB.
    @skipIf(triple='^mips')
    @skipIf(
        oslist=["ios", "watchos", "tvos", "bridgeos", "macosx"],
        archs=['arm64', 'arm64e', 'arm64_32', 'arm'],
        bugnumber="rdar://81811539")
    @add_test_categories(["watchpoint"])
    def test(self):
        """Test (1-second delay) watchpoint and a breakpoint in multiple threads."""

@@ -10,10 +10,6 @@ class ConcurrentManyWatchpoints(ConcurrentEventsBase):

    # Atomic sequences are not supported yet for MIPS in LLDB.
    @skipIf(triple='^mips')
    @skipIf(
        oslist=["ios", "watchos", "tvos", "bridgeos", "macosx"],
        archs=['arm64', 'arm64e', 'arm64_32', 'arm'],
        bugnumber="rdar://93863107")
    @add_test_categories(["watchpoint"])
    @skipIfOutOfTreeDebugserver
    def test(self):

@@ -13,10 +13,6 @@ class ConcurrentNWatchNBreak(ConcurrentEventsBase):
    @skipIf(triple='^mips')
    @expectedFailureAll(archs=["aarch64"], oslist=["freebsd"],
                        bugnumber="llvm.org/pr49433")
    @skipIf(
        oslist=["ios", "watchos", "tvos", "bridgeos", "macosx"],
        archs=['arm64', 'arm64e', 'arm64_32', 'arm'],
        bugnumber="rdar://93863107")
    @add_test_categories(["watchpoint"])
    def test(self):
        """Test with 5 watchpoint and breakpoint threads."""

@@ -14,10 +14,6 @@ class ConcurrentSignalNWatchNBreak(ConcurrentEventsBase):
    @expectedFailureNetBSD
    @expectedFailureAll(archs=["aarch64"], oslist=["freebsd"],
                        bugnumber="llvm.org/pr49433")
    @skipIf(
        oslist=["ios", "watchos", "tvos", "bridgeos", "macosx"],
        archs=['arm64', 'arm64e', 'arm64_32', 'arm'],
        bugnumber="rdar://93863107")
    @add_test_categories(["watchpoint"])
    def test(self):
        """Test one signal thread with 5 watchpoint and breakpoint threads."""

@@ -11,10 +11,6 @@ class ConcurrentSignalWatch(ConcurrentEventsBase):

    # Atomic sequences are not supported yet for MIPS in LLDB.
    @skipIf(triple='^mips')
    @skipIf(
        oslist=["ios", "watchos", "tvos", "bridgeos", "macosx"],
        archs=['arm64', 'arm64e', 'arm64_32', 'arm'],
        bugnumber="rdar://93863107")
    @add_test_categories(["watchpoint"])
    def test(self):
        """Test a watchpoint and a signal in multiple threads."""

@@ -12,10 +12,6 @@ class ConcurrentSignalWatchBreak(ConcurrentEventsBase):
    # Atomic sequences are not supported yet for MIPS in LLDB.
    @skipIf(triple='^mips')
    @expectedFailureNetBSD
    @skipIf(
        oslist=["ios", "watchos", "tvos", "bridgeos", "macosx"],
        archs=['arm64', 'arm64e', 'arm64_32', 'arm'],
        bugnumber="rdar://93863107")
    @add_test_categories(["watchpoint"])
    def test(self):
        """Test a signal/watchpoint/breakpoint in multiple threads."""

@@ -11,10 +11,6 @@ class ConcurrentTwoWatchpointThreads(ConcurrentEventsBase):

    # Atomic sequences are not supported yet for MIPS in LLDB.
    @skipIf(triple='^mips')
    @skipIf(
        oslist=["ios", "watchos", "tvos", "bridgeos", "macosx"],
        archs=['arm64', 'arm64e', 'arm64_32', 'arm'],
        bugnumber="rdar://93863107")
    @add_test_categories(["watchpoint"])
    def test(self):
        """Test two threads that trigger a watchpoint. """

@@ -11,10 +11,6 @@ class ConcurrentTwoWatchpointsOneBreakpoint(ConcurrentEventsBase):

    # Atomic sequences are not supported yet for MIPS in LLDB.
    @skipIf(triple='^mips')
    @skipIf(
        oslist=["ios", "watchos", "tvos", "bridgeos", "macosx"],
        archs=['arm64', 'arm64e', 'arm64_32', 'arm'],
        bugnumber="rdar://93863107")
    @add_test_categories(["watchpoint"])
    def test(self):
        """Test two threads that trigger a watchpoint and one breakpoint thread. """

@@ -11,10 +11,6 @@ class ConcurrentTwoWatchpointsOneDelayBreakpoint(ConcurrentEventsBase):

    # Atomic sequences are not supported yet for MIPS in LLDB.
    @skipIf(triple='^mips')
    @skipIf(
        oslist=["ios", "watchos", "tvos", "bridgeos", "macosx"],
        archs=['arm64', 'arm64e', 'arm64_32', 'arm'],
        bugnumber="rdar://93863107")
    @add_test_categories(["watchpoint"])
    def test(self):
        """Test two threads that trigger a watchpoint and one (1 second delay) breakpoint thread. """

@@ -12,10 +12,6 @@ class ConcurrentTwoWatchpointsOneSignal(ConcurrentEventsBase):
    # Atomic sequences are not supported yet for MIPS in LLDB.
    @skipIf(triple='^mips')
    @expectedFailureNetBSD
    @skipIf(
        oslist=["ios", "watchos", "tvos", "bridgeos", "macosx"],
        archs=['arm64', 'arm64e', 'arm64_32', 'arm'],
        bugnumber="rdar://93863107")
    @add_test_categories(["watchpoint"])
    def test(self):
        """Test two threads that trigger a watchpoint and one signal thread. """

@@ -12,10 +12,6 @@ class ConcurrentWatchBreak(ConcurrentEventsBase):
    # Atomic sequences are not supported yet for MIPS in LLDB.
    @skipIf(triple='^mips')
    @add_test_categories(["watchpoint"])
    @skipIf(
        oslist=["ios", "watchos", "tvos", "bridgeos", "macosx"],
        archs=['arm64', 'arm64e', 'arm64_32', 'arm'],
        bugnumber="rdar://93863107")

    def test(self):
        """Test watchpoint and a breakpoint in multiple threads."""