forked from OSchip/llvm-project
<rdar://problem/15263540>
Added a new key that we understand for the "qHostInfo" packet: "default_packet_timeout:T;" where T is a default packet timeout in seconds. This allows GDB servers with known slow packet response times to advertise a larger default timeout, which the client then adopts so the timeout makes sense for the connection. llvm-svn: 193425
This commit is contained in:
parent
d334b1a326
commit
9ac6d2db73
|
@ -90,7 +90,11 @@ GDBRemoteCommunicationClient::GDBRemoteCommunicationClient(bool is_platform) :
|
|||
m_process_arch(),
|
||||
m_os_version_major (UINT32_MAX),
|
||||
m_os_version_minor (UINT32_MAX),
|
||||
m_os_version_update (UINT32_MAX)
|
||||
m_os_version_update (UINT32_MAX),
|
||||
m_os_build (),
|
||||
m_os_kernel (),
|
||||
m_hostname (),
|
||||
m_default_packet_timeout (0)
|
||||
{
|
||||
}
|
||||
|
||||
|
@ -1299,6 +1303,15 @@ GDBRemoteCommunicationClient::GetHostInfo (bool force)
|
|||
else
|
||||
--num_keys_decoded;
|
||||
}
|
||||
else if (name.compare("default_packet_timeout") == 0)
|
||||
{
|
||||
m_default_packet_timeout = Args::StringToUInt32(value.c_str(), 0);
|
||||
if (m_default_packet_timeout > 0)
|
||||
{
|
||||
SetPacketTimeout(m_default_packet_timeout);
|
||||
++num_keys_decoded;
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
|
@ -1433,6 +1446,14 @@ GDBRemoteCommunicationClient::GetHostArchitecture ()
|
|||
return m_host_arch;
|
||||
}
|
||||
|
||||
uint32_t
|
||||
GDBRemoteCommunicationClient::GetHostDefaultPacketTimeout ()
|
||||
{
|
||||
if (m_qHostInfo_is_valid == eLazyBoolCalculate)
|
||||
GetHostInfo ();
|
||||
return m_default_packet_timeout;
|
||||
}
|
||||
|
||||
addr_t
|
||||
GDBRemoteCommunicationClient::AllocateMemory (size_t size, uint32_t permissions)
|
||||
{
|
||||
|
|
|
@ -221,6 +221,9 @@ public:
|
|||
const lldb_private::ArchSpec &
|
||||
GetHostArchitecture ();
|
||||
|
||||
uint32_t
|
||||
GetHostDefaultPacketTimeout();
|
||||
|
||||
const lldb_private::ArchSpec &
|
||||
GetProcessArchitecture ();
|
||||
|
||||
|
@ -476,6 +479,7 @@ protected:
|
|||
std::string m_os_build;
|
||||
std::string m_os_kernel;
|
||||
std::string m_hostname;
|
||||
uint32_t m_default_packet_timeout;
|
||||
|
||||
bool
|
||||
DecodeProcessInfoResponse (StringExtractorGDBRemote &response,
|
||||
|
|
|
@ -138,6 +138,14 @@ namespace {
|
|||
const uint32_t idx = ePropertyPacketTimeout;
|
||||
return m_collection_sp->GetPropertyAtIndexAsUInt64(NULL, idx, g_properties[idx].default_uint_value);
|
||||
}
|
||||
|
||||
bool
|
||||
SetPacketTimeout(uint64_t timeout)
|
||||
{
|
||||
const uint32_t idx = ePropertyPacketTimeout;
|
||||
return m_collection_sp->SetPropertyAtIndexAsUInt64(NULL, idx, timeout);
|
||||
}
|
||||
|
||||
FileSpec
|
||||
GetTargetDefinitionFile () const
|
||||
{
|
||||
|
@ -536,6 +544,16 @@ ProcessGDBRemote::BuildDynamicRegisterInfo (bool force)
|
|||
}
|
||||
}
|
||||
|
||||
// Check if qHostInfo specified a specific packet timeout for this connection.
|
||||
// If so then lets update our setting so the user knows what the timeout is
|
||||
// and can see it.
|
||||
const uint32_t host_packet_timeout = m_gdb_comm.GetHostDefaultPacketTimeout();
|
||||
if (host_packet_timeout)
|
||||
{
|
||||
GetGlobalPluginProperties()->SetPacketTimeout(host_packet_timeout);
|
||||
}
|
||||
|
||||
|
||||
if (reg_num == 0)
|
||||
{
|
||||
FileSpec target_definition_fspec = GetGlobalPluginProperties()->GetTargetDefinitionFile ();
|
||||
|
|
Loading…
Reference in New Issue