[lldb][NFC] Fix all formatting errors in .cpp file headers
Summary:
A *.cpp file header in LLDB (and in LLVM) should look like this:
```
//===-- TestUtilities.cpp -------------------------------------------------===//
```
However, in LLDB most of our source files deviate from this format in arbitrary ways, and the
deviations keep spreading because people usually use existing source files as templates for
their new files. Most notably, the unnecessary editor language indicator `-*- C++ -*-` keeps
propagating: in every review someone points out that it is wrong, and the author points out
that other files do it the same way.
This patch removes most of these inconsistencies, including the editor language indicators,
the various missing or extra '-' characters, headers that center the file name, and missing
trailing `===//` terminators (mostly caused by clang-format breaking the line).
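For illustration, these are the kinds of variants being cleaned up (the file name is made up and the dash counts are only approximate, not quotes from actual files):
```
// Unnecessary editor language indicator:
//===-- SomeFile.cpp ------------------------------------------*- C++ -*-===//
// Centered file name:
//===--------------------------- SomeFile.cpp ----------------------------===//
// Trailing ===// broken onto the next line (usually by clang-format):
//===-- SomeFile.cpp ------------------------------------------------------
//===//
// Canonical form:
//===-- SomeFile.cpp ------------------------------------------------------===//
```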
Reviewers: aprantl, espindola, jfb, shafik, JDevlieghere
Reviewed By: JDevlieghere
Subscribers: dexonsmith, wuzish, emaste, sdardis, nemanjai, kbarton, MaskRay, atanasyan, arphaman, jfb, abidh, jsji, JDevlieghere, usaxena95, lldb-commits
Tags: #lldb
Differential Revision: https://reviews.llvm.org/D73258

//===-- Communication.cpp -------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "lldb/Core/Communication.h"

#include "lldb/Host/HostThread.h"
#include "lldb/Host/ThreadLauncher.h"
#include "lldb/Utility/Connection.h"
#include "lldb/Utility/ConstString.h"
#include "lldb/Utility/Event.h"
#include "lldb/Utility/Listener.h"
#include "lldb/Utility/Log.h"
#include "lldb/Utility/Logging.h"
#include "lldb/Utility/Status.h"

#include "llvm/ADT/None.h"
#include "llvm/ADT/Optional.h"
#include "llvm/Support/Compiler.h"

#include <algorithm>
#include <chrono>
#include <cstring>
#include <memory>

#include <errno.h>
#include <inttypes.h>
#include <stdio.h>

using namespace lldb;
using namespace lldb_private;

ConstString &Communication::GetStaticBroadcasterClass() {
  static ConstString class_name("lldb.communication");
  return class_name;
}

Communication::Communication(const char *name)
    : Broadcaster(nullptr, name), m_connection_sp(),
      m_read_thread_enabled(false), m_read_thread_did_exit(false), m_bytes(),
      m_bytes_mutex(), m_write_mutex(), m_synchronize_mutex(),
      m_callback(nullptr), m_callback_baton(nullptr), m_close_on_eof(true)
{
  LLDB_LOG(lldb_private::GetLogIfAnyCategoriesSet(LIBLLDB_LOG_OBJECT |
                                                  LIBLLDB_LOG_COMMUNICATION),
           "{0} Communication::Communication (name = {1})", this, name);

  SetEventName(eBroadcastBitDisconnected, "disconnected");
  SetEventName(eBroadcastBitReadThreadGotBytes, "got bytes");
  SetEventName(eBroadcastBitReadThreadDidExit, "read thread did exit");
  SetEventName(eBroadcastBitReadThreadShouldExit, "read thread should exit");
  SetEventName(eBroadcastBitPacketAvailable, "packet available");
  SetEventName(eBroadcastBitNoMorePendingInput, "no more pending input");

  CheckInWithManager();
}

Communication::~Communication() {
  LLDB_LOG(lldb_private::GetLogIfAnyCategoriesSet(LIBLLDB_LOG_OBJECT |
                                                  LIBLLDB_LOG_COMMUNICATION),
           "{0} Communication::~Communication (name = {1})", this,
           GetBroadcasterName().AsCString());
  Clear();
}

void Communication::Clear() {
  SetReadThreadBytesReceivedCallback(nullptr, nullptr);
  Disconnect(nullptr);
  StopReadThread(nullptr);
}

ConnectionStatus Communication::Connect(const char *url, Status *error_ptr) {
  Clear();

  LLDB_LOG(lldb_private::GetLogIfAllCategoriesSet(LIBLLDB_LOG_COMMUNICATION),
           "{0} Communication::Connect (url = {1})", this, url);

  lldb::ConnectionSP connection_sp(m_connection_sp);
  if (connection_sp)
    return connection_sp->Connect(url, error_ptr);
  if (error_ptr)
    error_ptr->SetErrorString("Invalid connection.");
  return eConnectionStatusNoConnection;
}

ConnectionStatus Communication::Disconnect(Status *error_ptr) {
  LLDB_LOG(lldb_private::GetLogIfAllCategoriesSet(LIBLLDB_LOG_COMMUNICATION),
           "{0} Communication::Disconnect ()", this);

  lldb::ConnectionSP connection_sp(m_connection_sp);
  if (connection_sp) {
    ConnectionStatus status = connection_sp->Disconnect(error_ptr);
    // We currently don't protect connection_sp with any mutex for multi-
    // threaded environments. So lets not nuke our connection class without
    // putting some multi-threaded protections in. We also probably don't want
    // to pay for the overhead it might cause if every time we access the
    // connection we have to take a lock.
    //
    // This unique pointer will cleanup after itself when this object goes
    // away, so there is no need to currently have it destroy itself
    // immediately upon disconnect.
    // connection_sp.reset();
    return status;
  }
  return eConnectionStatusNoConnection;
}

bool Communication::IsConnected() const {
  lldb::ConnectionSP connection_sp(m_connection_sp);
  return (connection_sp ? connection_sp->IsConnected() : false);
}

bool Communication::HasConnection() const {
  return m_connection_sp.get() != nullptr;
}

size_t Communication::Read(void *dst, size_t dst_len,
                           const Timeout<std::micro> &timeout,
                           ConnectionStatus &status, Status *error_ptr) {
  Log *log = GetLogIfAllCategoriesSet(LIBLLDB_LOG_COMMUNICATION);
  LLDB_LOG(
      log,
      "this = {0}, dst = {1}, dst_len = {2}, timeout = {3}, connection = {4}",
      this, dst, dst_len, timeout, m_connection_sp.get());

  if (m_read_thread_enabled) {
    // We have a dedicated read thread that is getting data for us
    size_t cached_bytes = GetCachedBytes(dst, dst_len);
    if (cached_bytes > 0 || (timeout && timeout->count() == 0)) {
      status = eConnectionStatusSuccess;
      return cached_bytes;
    }

    if (!m_connection_sp) {
      if (error_ptr)
        error_ptr->SetErrorString("Invalid connection.");
      status = eConnectionStatusNoConnection;
      return 0;
    }

    ListenerSP listener_sp(Listener::MakeListener("Communication::Read"));
    listener_sp->StartListeningForEvents(
        this, eBroadcastBitReadThreadGotBytes | eBroadcastBitReadThreadDidExit);
    EventSP event_sp;
    while (listener_sp->GetEvent(event_sp, timeout)) {
      const uint32_t event_type = event_sp->GetType();
      if (event_type & eBroadcastBitReadThreadGotBytes) {
        return GetCachedBytes(dst, dst_len);
      }

      if (event_type & eBroadcastBitReadThreadDidExit) {
        if (GetCloseOnEOF())
          Disconnect(nullptr);
        break;
      }
    }
    return 0;
  }

  // We aren't using a read thread, just read the data synchronously in this
  // thread.
  return ReadFromConnection(dst, dst_len, timeout, status, error_ptr);
}

size_t Communication::Write(const void *src, size_t src_len,
                            ConnectionStatus &status, Status *error_ptr) {
  lldb::ConnectionSP connection_sp(m_connection_sp);

  std::lock_guard<std::mutex> guard(m_write_mutex);
  LLDB_LOG(lldb_private::GetLogIfAllCategoriesSet(LIBLLDB_LOG_COMMUNICATION),
           "{0} Communication::Write (src = {1}, src_len = {2}) "
           "connection = {3}",
           this, src, (uint64_t)src_len, connection_sp.get());

  if (connection_sp)
    return connection_sp->Write(src, src_len, status, error_ptr);

  if (error_ptr)
    error_ptr->SetErrorString("Invalid connection.");
  status = eConnectionStatusNoConnection;
  return 0;
}

bool Communication::StartReadThread(Status *error_ptr) {
  if (error_ptr)
    error_ptr->Clear();

  if (m_read_thread.IsJoinable())
    return true;

  LLDB_LOG(lldb_private::GetLogIfAllCategoriesSet(LIBLLDB_LOG_COMMUNICATION),
           "{0} Communication::StartReadThread ()", this);

  char thread_name[1024];
  snprintf(thread_name, sizeof(thread_name), "<lldb.comm.%s>",
           GetBroadcasterName().AsCString());

  m_read_thread_enabled = true;
  m_read_thread_did_exit = false;
  auto maybe_thread = ThreadLauncher::LaunchThread(
      thread_name, Communication::ReadThread, this);
  if (maybe_thread) {
    m_read_thread = *maybe_thread;
  } else {
    if (error_ptr)
      *error_ptr = Status(maybe_thread.takeError());
    else {
      LLDB_LOG(lldb_private::GetLogIfAllCategoriesSet(LIBLLDB_LOG_HOST),
               "failed to launch host thread: {}",
               llvm::toString(maybe_thread.takeError()));
    }
  }

  if (!m_read_thread.IsJoinable())
    m_read_thread_enabled = false;

  return m_read_thread_enabled;
}

bool Communication::StopReadThread(Status *error_ptr) {
  if (!m_read_thread.IsJoinable())
    return true;

  LLDB_LOG(lldb_private::GetLogIfAllCategoriesSet(LIBLLDB_LOG_COMMUNICATION),
           "{0} Communication::StopReadThread ()", this);

  m_read_thread_enabled = false;

  BroadcastEvent(eBroadcastBitReadThreadShouldExit, nullptr);

  // error = m_read_thread.Cancel();

  Status error = m_read_thread.Join(nullptr);
  return error.Success();
}

bool Communication::JoinReadThread(Status *error_ptr) {
  if (!m_read_thread.IsJoinable())
    return true;

  Status error = m_read_thread.Join(nullptr);
  return error.Success();
}

size_t Communication::GetCachedBytes(void *dst, size_t dst_len) {
  std::lock_guard<std::recursive_mutex> guard(m_bytes_mutex);
  if (!m_bytes.empty()) {
    // If DST is nullptr and we have a thread, then return the number of bytes
    // that are available so the caller can call again
    if (dst == nullptr)
      return m_bytes.size();

    const size_t len = std::min<size_t>(dst_len, m_bytes.size());

    ::memcpy(dst, m_bytes.c_str(), len);
    m_bytes.erase(m_bytes.begin(), m_bytes.begin() + len);

    return len;
  }
  return 0;
}

void Communication::AppendBytesToCache(const uint8_t *bytes, size_t len,
                                       bool broadcast,
                                       ConnectionStatus status) {
  LLDB_LOG(lldb_private::GetLogIfAllCategoriesSet(LIBLLDB_LOG_COMMUNICATION),
           "{0} Communication::AppendBytesToCache (src = {1}, src_len = {2}, "
           "broadcast = {3})",
           this, bytes, (uint64_t)len, broadcast);
  if ((bytes == nullptr || len == 0) &&
      (status != lldb::eConnectionStatusEndOfFile))
    return;
  if (m_callback) {
    // If the user registered a callback, then call it and do not broadcast
    m_callback(m_callback_baton, bytes, len);
  } else if (bytes != nullptr && len > 0) {
    std::lock_guard<std::recursive_mutex> guard(m_bytes_mutex);
    m_bytes.append((const char *)bytes, len);
    if (broadcast)
      BroadcastEventIfUnique(eBroadcastBitReadThreadGotBytes);
  }
}

size_t Communication::ReadFromConnection(void *dst, size_t dst_len,
                                         const Timeout<std::micro> &timeout,
                                         ConnectionStatus &status,
                                         Status *error_ptr) {
  lldb::ConnectionSP connection_sp(m_connection_sp);
  if (connection_sp)
    return connection_sp->Read(dst, dst_len, timeout, status, error_ptr);

  if (error_ptr)
    error_ptr->SetErrorString("Invalid connection.");
  status = eConnectionStatusNoConnection;
  return 0;
}

bool Communication::ReadThreadIsRunning() { return m_read_thread_enabled; }

lldb::thread_result_t Communication::ReadThread(lldb::thread_arg_t p) {
  Communication *comm = (Communication *)p;

  Log *log(lldb_private::GetLogIfAllCategoriesSet(LIBLLDB_LOG_COMMUNICATION));

  LLDB_LOGF(log, "%p Communication::ReadThread () thread starting...", p);

  uint8_t buf[1024];

  Status error;
  ConnectionStatus status = eConnectionStatusSuccess;
  bool done = false;
  bool disconnect = false;
  while (!done && comm->m_read_thread_enabled) {
    size_t bytes_read = comm->ReadFromConnection(
        buf, sizeof(buf), std::chrono::seconds(5), status, &error);
    if (bytes_read > 0 || status == eConnectionStatusEndOfFile)
      comm->AppendBytesToCache(buf, bytes_read, true, status);

    switch (status) {
    case eConnectionStatusSuccess:
      break;

    case eConnectionStatusEndOfFile:
      done = true;
      disconnect = comm->GetCloseOnEOF();
      break;
    case eConnectionStatusError: // Check GetError() for details
      if (error.GetType() == eErrorTypePOSIX && error.GetError() == EIO) {
        // EIO on a pipe is usually caused by remote shutdown
        disconnect = comm->GetCloseOnEOF();
        done = true;
      }
      if (error.Fail())
        LLDB_LOG(log, "error: {0}, status = {1}", error,
                 Communication::ConnectionStatusAsCString(status));
      break;
    case eConnectionStatusInterrupted: // Synchronization signal from
                                       // SynchronizeWithReadThread()
      // The connection returns eConnectionStatusInterrupted only when there is
      // no input pending to be read, so we can signal that.
      comm->BroadcastEvent(eBroadcastBitNoMorePendingInput);
      break;
    case eConnectionStatusNoConnection:   // No connection
    case eConnectionStatusLostConnection: // Lost connection while connected to
                                          // a valid connection
      done = true;
      LLVM_FALLTHROUGH;
    case eConnectionStatusTimedOut: // Request timed out
      if (error.Fail())
        LLDB_LOG(log, "error: {0}, status = {1}", error,
                 Communication::ConnectionStatusAsCString(status));
      break;
    }
  }
  log = lldb_private::GetLogIfAllCategoriesSet(LIBLLDB_LOG_COMMUNICATION);
  if (log)
    LLDB_LOGF(log, "%p Communication::ReadThread () thread exiting...", p);

  // Handle threads wishing to synchronize with us.
  {
    // Prevent new ones from showing up.
    comm->m_read_thread_did_exit = true;

    // Unblock any existing thread waiting for the synchronization signal.
    comm->BroadcastEvent(eBroadcastBitNoMorePendingInput);

    // Wait for the thread to finish...
    std::lock_guard<std::mutex> guard(comm->m_synchronize_mutex);
    // ... and disconnect.
    if (disconnect)
      comm->Disconnect();
  }

  // Let clients know that this thread is exiting
  comm->BroadcastEvent(eBroadcastBitReadThreadDidExit);
  return {};
}

void Communication::SetReadThreadBytesReceivedCallback(
    ReadThreadBytesReceived callback, void *callback_baton) {
  m_callback = callback;
  m_callback_baton = callback_baton;
}

void Communication::SynchronizeWithReadThread() {
  // Only one thread can do the synchronization dance at a time.
  std::lock_guard<std::mutex> guard(m_synchronize_mutex);

  // First start listening for the synchronization event.
  ListenerSP listener_sp(
      Listener::MakeListener("Communication::SyncronizeWithReadThread"));
  listener_sp->StartListeningForEvents(this, eBroadcastBitNoMorePendingInput);

  // If the thread is not running, there is no point in synchronizing.
  if (!m_read_thread_enabled || m_read_thread_did_exit)
    return;

  // Notify the read thread.
  m_connection_sp->InterruptRead();

  // Wait for the synchronization event.
  EventSP event_sp;
  listener_sp->GetEvent(event_sp, llvm::None);
}

void Communication::SetConnection(std::unique_ptr<Connection> connection) {
  Disconnect(nullptr);
  StopReadThread(nullptr);
  m_connection_sp = std::move(connection);
}

const char *
Communication::ConnectionStatusAsCString(lldb::ConnectionStatus status) {
  switch (status) {
  case eConnectionStatusSuccess:
    return "success";
  case eConnectionStatusError:
    return "error";
  case eConnectionStatusTimedOut:
    return "timed out";
  case eConnectionStatusNoConnection:
    return "no connection";
  case eConnectionStatusLostConnection:
    return "lost connection";
  case eConnectionStatusEndOfFile:
    return "end of file";
  case eConnectionStatusInterrupted:
    return "interrupted";
  }

  static char unknown_state_string[64];
  snprintf(unknown_state_string, sizeof(unknown_state_string),
           "ConnectionStatus = %i", status);
  return unknown_state_string;
}