2010-09-10 15:49:16 +08:00
|
|
|
//===-- FuncUnwinders.cpp ----------------------------------*- C++ -*-===//
|
|
|
|
//
|
|
|
|
// The LLVM Compiler Infrastructure
|
|
|
|
//
|
|
|
|
// This file is distributed under the University of Illinois Open Source
|
|
|
|
// License. See LICENSE.TXT for details.
|
|
|
|
//
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
|
2011-04-26 02:36:36 +08:00
|
|
|
#include "lldb/Core/AddressRange.h"
|
|
|
|
#include "lldb/Core/Address.h"
|
2010-09-10 15:49:16 +08:00
|
|
|
#include "lldb/Symbol/FuncUnwinders.h"
|
2011-04-26 02:36:36 +08:00
|
|
|
#include "lldb/Symbol/DWARFCallFrameInfo.h"
|
2010-09-10 15:49:16 +08:00
|
|
|
#include "lldb/Symbol/ObjectFile.h"
|
|
|
|
#include "lldb/Symbol/UnwindPlan.h"
|
|
|
|
#include "lldb/Symbol/UnwindTable.h"
|
2011-05-12 02:39:18 +08:00
|
|
|
#include "lldb/Target/ABI.h"
|
2012-02-21 08:09:25 +08:00
|
|
|
#include "lldb/Target/ExecutionContext.h"
|
2011-05-12 02:39:18 +08:00
|
|
|
#include "lldb/Target/Process.h"
|
2010-09-10 15:49:16 +08:00
|
|
|
#include "lldb/Target/Thread.h"
|
|
|
|
#include "lldb/Target/Target.h"
|
2011-04-26 05:14:26 +08:00
|
|
|
#include "lldb/Target/UnwindAssembly.h"
|
2010-09-10 15:49:16 +08:00
|
|
|
|
|
|
|
using namespace lldb;
|
|
|
|
using namespace lldb_private;
|
|
|
|
|
|
|
|
|
2011-01-08 08:05:12 +08:00
|
|
|
//------------------------------------------------------------------
// FuncUnwinders constructor
//
// Caches nothing up front; each unwind plan (call-site, non-call-site,
// fast, architecture default) is computed lazily on first request and
// the m_tried_* flags record that an attempt was made so we do not
// retry a failed computation.
//------------------------------------------------------------------
FuncUnwinders::FuncUnwinders (UnwindTable &unwind_table,
                              UnwindAssembly *assembly_profiler,
                              AddressRange range) :
    m_unwind_table (unwind_table),                      // table that owns the eh_frame reader
    m_assembly_profiler (assembly_profiler),            // may be NULL; checked before every use
    m_range (range),                                    // address range of the function being described
    m_mutex (Mutex::eMutexTypeNormal),
    m_unwind_plan_call_site_sp (),
    m_unwind_plan_non_call_site_sp (),
    m_unwind_plan_fast_sp (),
    m_unwind_plan_arch_default_sp (),
    m_tried_unwind_at_call_site (false),
    m_tried_unwind_at_non_call_site (false),
    m_tried_unwind_fast (false),
    m_tried_unwind_arch_default (false),
    m_tried_unwind_arch_default_at_func_entry (false),
    m_first_non_prologue_insn ()
{
}
|
2010-09-10 15:49:16 +08:00
|
|
|
|
|
|
|
// Destructor: all members are values or shared pointers, so nothing
// needs to be released explicitly.
FuncUnwinders::~FuncUnwinders ()
{
}
|
|
|
|
|
2011-02-15 08:19:15 +08:00
|
|
|
UnwindPlanSP
FuncUnwinders::GetUnwindPlanAtCallSite (int current_offset)
{
    // Hold the mutex for the whole computation.  This guarantees that no
    // caller can ever observe a partially-constructed
    // m_unwind_plan_call_site_sp, and that a fallback sequence such as
    //     UnwindPlan *best_unwind_plan = ...GetUnwindPlanAtCallSite (...)
    //     if (best_unwind_plan == NULL)
    //         best_unwind_plan = GetUnwindPlanAtNonCallSite (...)
    // sees the plans created in a consistent order.
    Mutex::Locker locker (m_mutex);

    if (!m_tried_unwind_at_call_site && m_unwind_plan_call_site_sp.get() == NULL)
    {
        m_tried_unwind_at_call_site = true;

        // Some hand-written eh_frame entries (e.g. _sigtramp on Mac OS X)
        // only cover a subset of the function's address range, so an FDE
        // lookup keyed on the function's start address can miss.  Fold the
        // caller's byte offset into the lookup address instead.
        if (m_range.GetBaseAddress().IsValid())
        {
            Address lookup_pc (m_range.GetBaseAddress ());
            if (current_offset != -1)
                lookup_pc.SetOffset (lookup_pc.GetOffset() + current_offset);

            DWARFCallFrameInfo *eh_frame = m_unwind_table.GetEHFrameInfo();
            if (eh_frame != NULL)
            {
                m_unwind_plan_call_site_sp.reset (new UnwindPlan (lldb::eRegisterKindGeneric));
                // On lookup failure, drop the empty plan so callers see a
                // NULL shared pointer rather than a bogus plan.
                if (!eh_frame->GetUnwindPlan (lookup_pc, *m_unwind_plan_call_site_sp))
                    m_unwind_plan_call_site_sp.reset();
            }
        }
    }
    return m_unwind_plan_call_site_sp;
}
|
|
|
|
|
2011-02-15 08:19:15 +08:00
|
|
|
UnwindPlanSP
FuncUnwinders::GetUnwindPlanAtNonCallSite (Thread& thread)
{
    // Serialize plan creation: the mutex prevents another thread from
    // returning a half-built plan, and keeps the other Get* plan
    // accessors blocked until this plan has finished constructing, e.g.
    //     UnwindPlan *best_unwind_plan = ...GetUnwindPlanAtCallSite (...)
    //     if (best_unwind_plan == NULL)
    //         best_unwind_plan = GetUnwindPlanAtNonCallSite (...)
    Mutex::Locker locker (m_mutex);

    if (!m_tried_unwind_at_non_call_site && m_unwind_plan_non_call_site_sp.get() == NULL)
    {
        m_tried_unwind_at_non_call_site = true;
        if (m_assembly_profiler)
        {
            // Build into a local and only publish the plan if the assembly
            // inspection succeeds; the member stays NULL on failure.
            UnwindPlanSP plan_sp (new UnwindPlan (lldb::eRegisterKindGeneric));
            if (m_assembly_profiler->GetNonCallSiteUnwindPlanFromAssembly (m_range, thread, *plan_sp))
                m_unwind_plan_non_call_site_sp = plan_sp;
        }
    }
    return m_unwind_plan_non_call_site_sp;
}
|
|
|
|
|
2011-02-15 08:19:15 +08:00
|
|
|
UnwindPlanSP
FuncUnwinders::GetUnwindPlanFastUnwind (Thread& thread)
{
    // Serialize plan creation: the mutex prevents another thread from
    // returning a half-built plan, and keeps the other Get* plan
    // accessors blocked until this plan has finished constructing, e.g.
    //     UnwindPlan *best_unwind_plan = ...GetUnwindPlanAtCallSite (...)
    //     if (best_unwind_plan == NULL)
    //         best_unwind_plan = GetUnwindPlanAtNonCallSite (...)
    Mutex::Locker locker (m_mutex);

    if (!m_tried_unwind_fast && m_unwind_plan_fast_sp.get() == NULL)
    {
        m_tried_unwind_fast = true;
        if (m_assembly_profiler)
        {
            // Build into a local and only publish the plan if the profiler
            // succeeds; the member stays NULL on failure.
            UnwindPlanSP plan_sp (new UnwindPlan (lldb::eRegisterKindGeneric));
            if (m_assembly_profiler->GetFastUnwindPlan (m_range, thread, *plan_sp))
                m_unwind_plan_fast_sp = plan_sp;
        }
    }
    return m_unwind_plan_fast_sp;
}
|
|
|
|
|
2011-02-15 08:19:15 +08:00
|
|
|
UnwindPlanSP
FuncUnwinders::GetUnwindPlanArchitectureDefault (Thread& thread)
{
    // Lock the mutex to ensure we can always give out the most appropriate
    // information.  We want to make sure if someone requests an unwind plan
    // that they get one and don't run into a race condition where one thread
    // has started to create the unwind plan and has put it into the member
    // shared pointer, and have another thread enter this function and return
    // the partially filled-in pointer.  The lock also keeps the other Get*
    // plan accessors blocked until this one finishes, e.g.:
    //     UnwindPlan *best_unwind_plan = ...GetUnwindPlanAtCallSite (...)
    //     if (best_unwind_plan == NULL)
    //         best_unwind_plan = GetUnwindPlanAtNonCallSite (...)
    Mutex::Locker locker (m_mutex);

    if (!m_tried_unwind_arch_default && m_unwind_plan_arch_default_sp.get() == NULL)
    {
        m_tried_unwind_arch_default = true;
        // The ABI plug-in (looked up through the process) supplies the
        // architecture's default unwind rule.
        // NOTE: removed an unused local `Address current_pc;` and a dead
        // null-check on the shared pointer right after reset(new ...) --
        // operator new throws on failure, so the pointer was always valid.
        ProcessSP process_sp (thread.CalculateProcess());
        if (process_sp)
        {
            ABI *abi = process_sp->GetABI().get();
            if (abi)
            {
                m_unwind_plan_arch_default_sp.reset (new UnwindPlan (lldb::eRegisterKindGeneric));
                abi->CreateDefaultUnwindPlan (*m_unwind_plan_arch_default_sp);
            }
        }
    }

    return m_unwind_plan_arch_default_sp;
}
|
|
|
|
|
2011-09-15 08:44:34 +08:00
|
|
|
UnwindPlanSP
FuncUnwinders::GetUnwindPlanArchitectureDefaultAtFunctionEntry (Thread& thread)
{
    // Lock the mutex to ensure we can always give out the most appropriate
    // information.  We want to make sure if someone requests an unwind plan
    // that they get one and don't run into a race condition where one thread
    // has started to create the unwind plan and has put it into the member
    // shared pointer, and have another thread enter this function and return
    // the partially filled-in pointer.  The lock also keeps the other Get*
    // plan accessors blocked until this one finishes, e.g.:
    //     UnwindPlan *best_unwind_plan = ...GetUnwindPlanAtCallSite (...)
    //     if (best_unwind_plan == NULL)
    //         best_unwind_plan = GetUnwindPlanAtNonCallSite (...)
    Mutex::Locker locker (m_mutex);

    if (!m_tried_unwind_arch_default_at_func_entry && m_unwind_plan_arch_default_at_func_entry_sp.get() == NULL)
    {
        m_tried_unwind_arch_default_at_func_entry = true;
        // The ABI plug-in (looked up through the process) supplies the unwind
        // rule that holds at the very first instruction of any function.
        // NOTE: removed an unused local `Address current_pc;` and a dead
        // null-check on the shared pointer right after reset(new ...) --
        // operator new throws on failure, so the pointer was always valid.
        ProcessSP process_sp (thread.CalculateProcess());
        if (process_sp)
        {
            ABI *abi = process_sp->GetABI().get();
            if (abi)
            {
                m_unwind_plan_arch_default_at_func_entry_sp.reset (new UnwindPlan (lldb::eRegisterKindGeneric));
                abi->CreateFunctionEntryUnwindPlan (*m_unwind_plan_arch_default_at_func_entry_sp);
            }
        }
    }

    return m_unwind_plan_arch_default_at_func_entry_sp;
}
|
|
|
|
|
|
|
|
|
2010-09-10 15:49:16 +08:00
|
|
|
Address&
FuncUnwinders::GetFirstNonPrologueInsn (Target& target)
{
    // Compute only on first use; once m_first_non_prologue_insn is valid we
    // hand back the cached address.
    if (!m_first_non_prologue_insn.IsValid())
    {
        ExecutionContext exe_ctx (target.shared_from_this(), false);
        if (m_assembly_profiler)
            m_assembly_profiler->FirstNonPrologueInsn (m_range, exe_ctx, m_first_non_prologue_insn);
    }
    return m_first_non_prologue_insn;
}
|
|
|
|
|
|
|
|
// Accessor for the start address of the function this object describes.
const Address&
FuncUnwinders::GetFunctionStartAddress () const
{
    return m_range.GetBaseAddress ();
}
|
|
|
|
|
2012-10-26 14:08:58 +08:00
|
|
|
void
|
|
|
|
FuncUnwinders::InvalidateNonCallSiteUnwindPlan (lldb_private::Thread& thread)
|
|
|
|
{
|
|
|
|
UnwindPlanSP arch_default = GetUnwindPlanArchitectureDefault (thread);
|
|
|
|
if (arch_default && m_tried_unwind_at_call_site)
|
|
|
|
{
|
|
|
|
m_unwind_plan_call_site_sp = arch_default;
|
|
|
|
}
|
|
|
|
}
|