[lldb][NFC] Fix all formatting errors in .cpp file headers
Summary:
A *.cpp file header in LLDB (and in LLVM) should look like this:
```
//===-- TestUtilities.cpp -------------------------------------------------===//
```
However in LLDB most of our source files have arbitrary changes to this format and
these changes are spreading through LLDB as folks usually just use the existing
source files as templates for their new files (most notably the unnecessary
editor language indicator `-*- C++ -*-` is spreading and in every review
someone is pointing out that this is wrong, resulting in people pointing out that this
is done in the same way in other files).
This patch removes most of these inconsistencies including the editor language indicators,
all the different missing/additional '-' characters, files that center the file name, missing
trailing `===//` (mostly caused by clang-format breaking the line).
Reviewers: aprantl, espindola, jfb, shafik, JDevlieghere
Reviewed By: JDevlieghere
Subscribers: dexonsmith, wuzish, emaste, sdardis, nemanjai, kbarton, MaskRay, atanasyan, arphaman, jfb, abidh, jsji, JDevlieghere, usaxena95, lldb-commits
Tags: #lldb
Differential Revision: https://reviews.llvm.org/D73258
2020-01-24 15:23:27 +08:00
|
|
|
//===-- GDBRemoteCommunication.cpp ----------------------------------------===//
|
2010-06-09 00:52:24 +08:00
|
|
|
//
|
2019-01-19 16:50:56 +08:00
|
|
|
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
|
|
|
|
// See https://llvm.org/LICENSE.txt for license information.
|
|
|
|
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
|
2010-06-09 00:52:24 +08:00
|
|
|
//
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
|
|
|
|
#include "GDBRemoteCommunication.h"
|
|
|
|
|
2018-11-14 03:18:16 +08:00
|
|
|
#include <future>
|
2011-05-14 04:07:25 +08:00
|
|
|
#include <limits.h>
|
2011-03-26 02:16:28 +08:00
|
|
|
#include <string.h>
|
2013-12-05 03:19:12 +08:00
|
|
|
#include <sys/stat.h>
|
2011-03-26 02:16:28 +08:00
|
|
|
|
2012-04-10 06:46:21 +08:00
|
|
|
#include "lldb/Core/StreamFile.h"
|
2019-08-09 05:42:33 +08:00
|
|
|
#include "lldb/Host/Config.h"
|
2014-10-07 05:22:36 +08:00
|
|
|
#include "lldb/Host/ConnectionFileDescriptor.h"
|
2018-11-02 01:09:25 +08:00
|
|
|
#include "lldb/Host/FileSystem.h"
|
2011-04-12 13:54:46 +08:00
|
|
|
#include "lldb/Host/Host.h"
|
2014-08-22 01:29:12 +08:00
|
|
|
#include "lldb/Host/HostInfo.h"
|
2015-01-14 07:19:40 +08:00
|
|
|
#include "lldb/Host/Pipe.h"
|
2019-05-03 02:15:03 +08:00
|
|
|
#include "lldb/Host/ProcessLaunchInfo.h"
|
2014-08-07 02:16:26 +08:00
|
|
|
#include "lldb/Host/Socket.h"
|
2015-01-16 04:08:35 +08:00
|
|
|
#include "lldb/Host/StringConvert.h"
|
2014-09-10 04:54:56 +08:00
|
|
|
#include "lldb/Host/ThreadLauncher.h"
|
2018-11-14 03:18:16 +08:00
|
|
|
#include "lldb/Host/common/TCPSocket.h"
|
|
|
|
#include "lldb/Host/posix/ConnectionFileDescriptorPosix.h"
|
2015-10-20 04:44:01 +08:00
|
|
|
#include "lldb/Target/Platform.h"
|
2019-05-03 02:15:03 +08:00
|
|
|
#include "lldb/Utility/Event.h"
|
2017-03-23 02:40:07 +08:00
|
|
|
#include "lldb/Utility/FileSpec.h"
|
2017-03-04 04:56:28 +08:00
|
|
|
#include "lldb/Utility/Log.h"
|
2017-02-03 05:39:50 +08:00
|
|
|
#include "lldb/Utility/RegularExpression.h"
|
2019-12-11 07:04:02 +08:00
|
|
|
#include "lldb/Utility/Reproducer.h"
|
2017-02-03 05:39:50 +08:00
|
|
|
#include "lldb/Utility/StreamString.h"
|
2015-02-06 00:29:12 +08:00
|
|
|
#include "llvm/ADT/SmallString.h"
|
2016-08-08 20:54:36 +08:00
|
|
|
#include "llvm/Support/ScopedPrinter.h"
|
2010-06-09 00:52:24 +08:00
|
|
|
|
|
|
|
#include "ProcessGDBRemoteLog.h"
|
|
|
|
|
2014-07-23 07:41:36 +08:00
|
|
|
#if defined(__APPLE__)
|
|
|
|
#define DEBUGSERVER_BASENAME "debugserver"
|
2019-08-14 07:50:54 +08:00
|
|
|
#elif defined(_WIN32)
|
|
|
|
#define DEBUGSERVER_BASENAME "lldb-server.exe"
|
2014-07-23 07:41:36 +08:00
|
|
|
#else
|
2015-02-18 23:39:41 +08:00
|
|
|
#define DEBUGSERVER_BASENAME "lldb-server"
|
2014-07-23 07:41:36 +08:00
|
|
|
#endif
|
2011-04-12 13:54:46 +08:00
|
|
|
|
2019-01-25 16:21:47 +08:00
|
|
|
#if defined(HAVE_LIBCOMPRESSION)
|
Add a new wart, I mean feature, on to gdb-remote protocol: compression.
For some communication channels, sending large packets can be very
slow. In those cases, it may be faster to compress the contents of
the packet on the target device and decompress it on the debug host
system. For instance, communicating with a device using something
like Bluetooth may be an environment where this tradeoff is a good one.
This patch adds a new field to the response to the "qSupported" packet
(which returns a "qXfer:features:" response) -- SupportedCompressions
and DefaultCompressionMinSize. These tell you what the remote
stub can support.
lldb, if it wants to enable compression and can handle one of those
algorithms, it can send a QEnableCompression packet specifying the
algorithm and optionally the minimum packet size to use compression
on. lldb may have better knowledge about the best tradeoff for
a given communication channel.
I added support to debugserver and lldb to use the zlib APIs
(if -DHAVE_LIBZ=1 is in CFLAGS and -lz is in LDFLAGS) and the
libcompression APIs on Mac OS X 10.11 and later
(if -DHAVE_LIBCOMPRESSION=1). libz "zlib-deflate" compression.
libcompression can support deflate, lz4, lzma, and a proprietary
lzfse algorithm. libcompression has been hand-tuned for Apple
hardware so it should be preferred if available.
debugserver currently only adds the SupportedCompressions when
it is being run on an Apple watch (TARGET_OS_WATCH). Comment
that #if out from RNBRemote.cpp if you want to enable it to
see how it works. I haven't tested this on a native system
configuration but surely it will be slower to compress & decompress
the packets in a same-system debug session.
I haven't had a chance to add support for this to
GDBRemoteCommunciationServer.cpp yet.
<rdar://problem/21090180>
llvm-svn: 240066
2015-06-19 05:46:06 +08:00
|
|
|
#include <compression.h>
|
|
|
|
#endif
|
|
|
|
|
2020-03-03 16:45:14 +08:00
|
|
|
#if defined(HAVE_LIBZ)
|
Add a new wart, I mean feature, on to gdb-remote protocol: compression.
For some communication channels, sending large packets can be very
slow. In those cases, it may be faster to compress the contents of
the packet on the target device and decompress it on the debug host
system. For instance, communicating with a device using something
like Bluetooth may be an environment where this tradeoff is a good one.
This patch adds a new field to the response to the "qSupported" packet
(which returns a "qXfer:features:" response) -- SupportedCompressions
and DefaultCompressionMinSize. These tell you what the remote
stub can support.
lldb, if it wants to enable compression and can handle one of those
algorithms, it can send a QEnableCompression packet specifying the
algorithm and optionally the minimum packet size to use compression
on. lldb may have better knowledge about the best tradeoff for
a given communication channel.
I added support to debugserver and lldb to use the zlib APIs
(if -DHAVE_LIBZ=1 is in CFLAGS and -lz is in LDFLAGS) and the
libcompression APIs on Mac OS X 10.11 and later
(if -DHAVE_LIBCOMPRESSION=1). libz "zlib-deflate" compression.
libcompression can support deflate, lz4, lzma, and a proprietary
lzfse algorithm. libcompression has been hand-tuned for Apple
hardware so it should be preferred if available.
debugserver currently only adds the SupportedCompressions when
it is being run on an Apple watch (TARGET_OS_WATCH). Comment
that #if out from RNBRemote.cpp if you want to enable it to
see how it works. I haven't tested this on a native system
configuration but surely it will be slower to compress & decompress
the packets in a same-system debug session.
I haven't had a chance to add support for this to
GDBRemoteCommunciationServer.cpp yet.
<rdar://problem/21090180>
llvm-svn: 240066
2015-06-19 05:46:06 +08:00
|
|
|
#include <zlib.h>
|
|
|
|
#endif
|
|
|
|
|
2010-06-09 00:52:24 +08:00
|
|
|
using namespace lldb;
|
|
|
|
using namespace lldb_private;
|
2015-03-31 17:52:22 +08:00
|
|
|
using namespace lldb_private::process_gdb_remote;
|
2010-06-09 00:52:24 +08:00
|
|
|
|
|
|
|
// GDBRemoteCommunication constructor
|
2016-07-29 01:32:20 +08:00
|
|
|
// Construct a GDB remote communication channel. `comm_name` names the
// underlying Communication object (used for logging/broadcasting);
// `listener_name` is accepted but not used in this initializer list —
// presumably consumed elsewhere in the class; TODO confirm.
GDBRemoteCommunication::GDBRemoteCommunication(const char *comm_name,
                                               const char *listener_name)
    : Communication(comm_name),
#ifdef LLDB_CONFIGURATION_DEBUG
      // Debug builds use a much larger packet timeout so that stepping
      // through LLDB itself under a debugger does not cause remote packets to
      // time out. (Units are whatever GetPacketTimeout() interprets this as —
      // TODO confirm against that accessor.)
      m_packet_timeout(1000),
#else
      m_packet_timeout(1),
#endif
      // Acks are on by default (protocol starts in ack mode) and no
      // compression is negotiated until QEnableCompression succeeds.
      // m_history(512): keep the last 512 packets for diagnostic dumps.
      m_echo_number(0), m_supports_qEcho(eLazyBoolCalculate), m_history(512),
      m_send_acks(true), m_compression_type(CompressionType::None),
      m_listen_url() {
}
|
|
|
|
|
|
|
|
// Destructor
|
|
|
|
GDBRemoteCommunication::~GDBRemoteCommunication() {
  // Tear down the connection first so no further data arrives while we clean
  // up the remaining state.
  if (IsConnected())
    Disconnect();

#if defined(HAVE_LIBCOMPRESSION)
  // The decompression scratch buffer is lazily allocated, so it may be null.
  if (m_decompression_scratch)
    free(m_decompression_scratch);
#endif

  // Stop the communications read thread which is used to parse all incoming
  // packets. This function will block until the read thread returns.
  if (m_read_thread_enabled)
    StopReadThread();
}
|
|
|
|
|
2016-08-27 23:52:29 +08:00
|
|
|
// Compute the GDB remote serial protocol checksum for a packet payload: the
// sum of all payload bytes, truncated to the low 8 bits. (The misspelled name
// is historical and is kept because callers use it.)
char GDBRemoteCommunication::CalculcateChecksum(llvm::StringRef payload) {
  int sum = 0;
  for (size_t i = 0; i < payload.size(); ++i)
    sum += payload[i];
  return sum & 0xff;
}
|
|
|
|
|
2011-01-22 15:12:45 +08:00
|
|
|
size_t GDBRemoteCommunication::SendAck() {
|
2013-03-28 07:08:40 +08:00
|
|
|
Log *log(ProcessGDBRemoteLog::GetLogIfAllCategoriesSet(GDBR_LOG_PACKETS));
|
2010-06-09 00:52:24 +08:00
|
|
|
ConnectionStatus status = eConnectionStatusSuccess;
|
2012-04-10 06:46:21 +08:00
|
|
|
char ch = '+';
|
[lldb] NFC modernize codebase with modernize-use-nullptr
Summary:
NFC = [[ https://llvm.org/docs/Lexicon.html#nfc | Non functional change ]]
This commit is the result of modernizing the LLDB codebase by using
`nullptr` instread of `0` or `NULL`. See
https://clang.llvm.org/extra/clang-tidy/checks/modernize-use-nullptr.html
for more information.
This is the command I ran and I to fix and format the code base:
```
run-clang-tidy.py \
-header-filter='.*' \
-checks='-*,modernize-use-nullptr' \
-fix ~/dev/llvm-project/lldb/.* \
-format \
-style LLVM \
-p ~/llvm-builds/debug-ninja-gcc
```
NOTE: There were also changes to `llvm/utils/unittest` but I did not
include them because I felt that maybe this library shall be updated in
isolation somehow.
NOTE: I know this is a rather large commit but it is a nobrainer in most
parts.
Reviewers: martong, espindola, shafik, #lldb, JDevlieghere
Reviewed By: JDevlieghere
Subscribers: arsenm, jvesely, nhaehnle, hiraditya, JDevlieghere, teemperor, rnkovacs, emaste, kubamracek, nemanjai, ki.stfu, javed.absar, arichardson, kbarton, jrtc27, MaskRay, atanasyan, dexonsmith, arphaman, jfb, jsji, jdoerfert, lldb-commits, llvm-commits
Tags: #lldb, #llvm
Differential Revision: https://reviews.llvm.org/D61847
llvm-svn: 361484
2019-05-23 19:14:47 +08:00
|
|
|
const size_t bytes_written = Write(&ch, 1, status, nullptr);
|
2019-07-25 01:56:10 +08:00
|
|
|
LLDB_LOGF(log, "<%4" PRIu64 "> send packet: %c", (uint64_t)bytes_written, ch);
|
2019-09-14 07:14:10 +08:00
|
|
|
m_history.AddPacket(ch, GDBRemotePacket::ePacketTypeSend, bytes_written);
|
2012-04-10 06:46:21 +08:00
|
|
|
return bytes_written;
|
2010-06-09 00:52:24 +08:00
|
|
|
}
|
|
|
|
|
2011-01-22 15:12:45 +08:00
|
|
|
size_t GDBRemoteCommunication::SendNack() {
|
2013-03-28 07:08:40 +08:00
|
|
|
Log *log(ProcessGDBRemoteLog::GetLogIfAllCategoriesSet(GDBR_LOG_PACKETS));
|
2011-01-22 15:12:45 +08:00
|
|
|
ConnectionStatus status = eConnectionStatusSuccess;
|
2012-04-10 06:46:21 +08:00
|
|
|
char ch = '-';
|
[lldb] NFC modernize codebase with modernize-use-nullptr
Summary:
NFC = [[ https://llvm.org/docs/Lexicon.html#nfc | Non functional change ]]
This commit is the result of modernizing the LLDB codebase by using
`nullptr` instread of `0` or `NULL`. See
https://clang.llvm.org/extra/clang-tidy/checks/modernize-use-nullptr.html
for more information.
This is the command I ran and I to fix and format the code base:
```
run-clang-tidy.py \
-header-filter='.*' \
-checks='-*,modernize-use-nullptr' \
-fix ~/dev/llvm-project/lldb/.* \
-format \
-style LLVM \
-p ~/llvm-builds/debug-ninja-gcc
```
NOTE: There were also changes to `llvm/utils/unittest` but I did not
include them because I felt that maybe this library shall be updated in
isolation somehow.
NOTE: I know this is a rather large commit but it is a nobrainer in most
parts.
Reviewers: martong, espindola, shafik, #lldb, JDevlieghere
Reviewed By: JDevlieghere
Subscribers: arsenm, jvesely, nhaehnle, hiraditya, JDevlieghere, teemperor, rnkovacs, emaste, kubamracek, nemanjai, ki.stfu, javed.absar, arichardson, kbarton, jrtc27, MaskRay, atanasyan, dexonsmith, arphaman, jfb, jsji, jdoerfert, lldb-commits, llvm-commits
Tags: #lldb, #llvm
Differential Revision: https://reviews.llvm.org/D61847
llvm-svn: 361484
2019-05-23 19:14:47 +08:00
|
|
|
const size_t bytes_written = Write(&ch, 1, status, nullptr);
|
2019-07-25 01:56:10 +08:00
|
|
|
LLDB_LOGF(log, "<%4" PRIu64 "> send packet: %c", (uint64_t)bytes_written, ch);
|
2019-09-14 07:14:10 +08:00
|
|
|
m_history.AddPacket(ch, GDBRemotePacket::ePacketTypeSend, bytes_written);
|
2012-04-10 06:46:21 +08:00
|
|
|
return bytes_written;
|
Many improvements to the Platform base class and subclasses. The base Platform
class now implements the Host functionality for a lot of things that make
sense by default so that subclasses can check:
int
PlatformSubclass::Foo ()
{
if (IsHost())
return Platform::Foo (); // Let the platform base class do the host specific stuff
// Platform subclass specific code...
int result = ...
return result;
}
Added new functions to the platform:
virtual const char *Platform::GetUserName (uint32_t uid);
virtual const char *Platform::GetGroupName (uint32_t gid);
The user and group names are cached locally so that remote platforms can avoid
sending packets multiple times to resolve this information.
Added the parent process ID to the ProcessInfo class.
Added a new ProcessInfoMatch class which helps us to match processes up
and changed the Host layer over to using this new class. The new class allows
us to search for processs:
1 - by name (equal to, starts with, ends with, contains, and regex)
2 - by pid
3 - And further check for parent pid == value, uid == value, gid == value,
euid == value, egid == value, arch == value, parent == value.
This is all hookup up to the "platform process list" command which required
adding dumping routines to dump process information. If the Host class
implements the process lookup routines, you can now lists processes on
your local machine:
machine1.foo.com % lldb
(lldb) platform process list
PID PARENT USER GROUP EFF USER EFF GROUP TRIPLE NAME
====== ====== ========== ========== ========== ========== ======================== ============================
99538 1 username usergroup username usergroup x86_64-apple-darwin FileMerge
94943 1 username usergroup username usergroup x86_64-apple-darwin mdworker
94852 244 username usergroup username usergroup x86_64-apple-darwin Safari
94727 244 username usergroup username usergroup x86_64-apple-darwin Xcode
92742 92710 username usergroup username usergroup i386-apple-darwin debugserver
This of course also works remotely with the lldb-platform:
machine1.foo.com % lldb-platform --listen 1234
machine2.foo.com % lldb
(lldb) platform create remote-macosx
Platform: remote-macosx
Connected: no
(lldb) platform connect connect://localhost:1444
Platform: remote-macosx
Triple: x86_64-apple-darwin
OS Version: 10.6.7 (10J869)
Kernel: Darwin Kernel Version 10.7.0: Sat Jan 29 15:17:16 PST 2011; root:xnu-1504.9.37~1/RELEASE_I386
Hostname: machine1.foo.com
Connected: yes
(lldb) platform process list
PID PARENT USER GROUP EFF USER EFF GROUP TRIPLE NAME
====== ====== ========== ========== ========== ========== ======================== ============================
99556 244 username usergroup username usergroup x86_64-apple-darwin trustevaluation
99548 65539 username usergroup username usergroup x86_64-apple-darwin lldb
99538 1 username usergroup username usergroup x86_64-apple-darwin FileMerge
94943 1 username usergroup username usergroup x86_64-apple-darwin mdworker
94852 244 username usergroup username usergroup x86_64-apple-darwin Safari
The lldb-platform implements everything with the Host:: layer, so this should
"just work" for linux. I will probably be adding more stuff to the Host layer
for launching processes and attaching to processes so that this support should
eventually just work as well.
Modified the target to be able to be created with an architecture that differs
from the main executable. This is needed for iOS debugging since we can have
an "armv6" binary which can run on an "armv7" machine, so we want to be able
to do:
% lldb
(lldb) platform create remote-ios
(lldb) file --arch armv7 a.out
Where "a.out" is an armv6 executable. The platform then can correctly decide
to open all "armv7" images for all dependent shared libraries.
Modified the disassembly to show the current PC value. Example output:
(lldb) disassemble --frame
a.out`main:
0x1eb7: pushl %ebp
0x1eb8: movl %esp, %ebp
0x1eba: pushl %ebx
0x1ebb: subl $20, %esp
0x1ebe: calll 0x1ec3 ; main + 12 at test.c:18
0x1ec3: popl %ebx
-> 0x1ec4: calll 0x1f12 ; getpid
0x1ec9: movl %eax, 4(%esp)
0x1ecd: leal 199(%ebx), %eax
0x1ed3: movl %eax, (%esp)
0x1ed6: calll 0x1f18 ; printf
0x1edb: leal 213(%ebx), %eax
0x1ee1: movl %eax, (%esp)
0x1ee4: calll 0x1f1e ; puts
0x1ee9: calll 0x1f0c ; getchar
0x1eee: movl $20, (%esp)
0x1ef5: calll 0x1e6a ; sleep_loop at test.c:6
0x1efa: movl $12, %eax
0x1eff: addl $20, %esp
0x1f02: popl %ebx
0x1f03: leave
0x1f04: ret
This can be handy when dealing with the new --line options that was recently
added:
(lldb) disassemble --line
a.out`main + 13 at test.c:19
18 {
-> 19 printf("Process: %i\n\n", getpid());
20 puts("Press any key to continue..."); getchar();
-> 0x1ec4: calll 0x1f12 ; getpid
0x1ec9: movl %eax, 4(%esp)
0x1ecd: leal 199(%ebx), %eax
0x1ed3: movl %eax, (%esp)
0x1ed6: calll 0x1f18 ; printf
Modified the ModuleList to have a lookup based solely on a UUID. Since the
UUID is typically the MD5 checksum of a binary image, there is no need
to give the path and architecture when searching for a pre-existing
image in an image list.
Now that we support remote debugging a bit better, our lldb_private::Module
needs to be able to track what the original path for file was as the platform
knows it, as well as where the file is locally. The module has the two
following functions to retrieve both paths:
const FileSpec &Module::GetFileSpec () const;
const FileSpec &Module::GetPlatformFileSpec () const;
llvm-svn: 128563
2011-03-31 02:16:51 +08:00
|
|
|
}
|
|
|
|
|
2013-12-07 05:45:27 +08:00
|
|
|
// Frame `payload` in the GDB remote serial protocol envelope
//   $<payload>#<two-hex-digit checksum>
// and hand the framed bytes to SendRawPacketNoLock. Returns that call's
// PacketResult. Caller is responsible for any locking (NoLock suffix).
GDBRemoteCommunication::PacketResult
GDBRemoteCommunication::SendPacketNoLock(llvm::StringRef payload) {
  StreamString packet(0, 4, eByteOrderBig);
  packet.PutChar('$');
  packet.Write(payload.data(), payload.size());
  packet.PutChar('#');
  packet.PutHex8(CalculcateChecksum(payload));

  // Pass the framed bytes down directly. `packet` outlives the call, so the
  // StringRef returned by GetString() stays valid and the previous
  // intermediate std::string copy was an unnecessary per-packet allocation.
  return SendRawPacketNoLock(packet.GetString());
}
|
|
|
|
|
|
|
|
// Write an already-framed packet ("$...#xx") to the connection, record it in
// the packet history, and — unless acks are disabled or `skip_ack` is set —
// wait for the remote side's acknowledgement via GetAck(). Returns Success,
// the result of GetAck(), or ErrorSendFailed when not connected or the write
// was short. Caller is responsible for any locking (NoLock suffix).
GDBRemoteCommunication::PacketResult
GDBRemoteCommunication::SendRawPacketNoLock(llvm::StringRef packet,
                                            bool skip_ack) {
  if (IsConnected()) {
    Log *log(ProcessGDBRemoteLog::GetLogIfAllCategoriesSet(GDBR_LOG_PACKETS));
    ConnectionStatus status = eConnectionStatusSuccess;
    const char *packet_data = packet.data();
    const size_t packet_length = packet.size();
    size_t bytes_written = Write(packet_data, packet_length, status, nullptr);
    if (log) {
      // vFile:pwrite packets carry raw binary after the second comma; find
      // that offset so the binary portion can be logged as hex escapes
      // instead of being dumped verbatim into the log.
      size_t binary_start_offset = 0;
      if (strncmp(packet_data, "$vFile:pwrite:", strlen("$vFile:pwrite:")) ==
          0) {
        const char *first_comma = strchr(packet_data, ',');
        if (first_comma) {
          const char *second_comma = strchr(first_comma + 1, ',');
          if (second_comma)
            binary_start_offset = second_comma - packet_data + 1;
        }
      }

      // If logging was just enabled and we have history, then dump out what we
      // have to the log so we get the historical context. The Dump() call that
      // logs all of the packet will set a boolean so that we don't dump this
      // more than once
      if (!m_history.DidDumpToLog())
        m_history.Dump(log);

      if (binary_start_offset) {
        StreamString strm;
        // Print non binary data header
        strm.Printf("<%4" PRIu64 "> send packet: %.*s", (uint64_t)bytes_written,
                    (int)binary_start_offset, packet_data);
        const uint8_t *p;
        // Print binary data exactly as sent
        for (p = (const uint8_t *)packet_data + binary_start_offset; *p != '#';
             ++p)
          strm.Printf("\\x%2.2x", *p);
        // Print the checksum
        strm.Printf("%*s", (int)3, p);
        log->PutString(strm.GetString());
      } else
        LLDB_LOGF(log, "<%4" PRIu64 "> send packet: %.*s",
                  (uint64_t)bytes_written, (int)packet_length, packet_data);
    }

    // Record the send in the history regardless of whether logging is on.
    m_history.AddPacket(packet.str(), packet_length,
                        GDBRemotePacket::ePacketTypeSend, bytes_written);

    if (bytes_written == packet_length) {
      // Only wait for an ack when acks are negotiated on and the caller did
      // not explicitly opt out.
      if (!skip_ack && GetSendAcks())
        return GetAck();
      else
        return PacketResult::Success;
    } else {
      LLDB_LOGF(log, "error: failed to send packet: %.*s", (int)packet_length,
                packet_data);
    }
  }
  return PacketResult::ErrorSendFailed;
}
|
|
|
|
|
2013-12-07 05:45:27 +08:00
|
|
|
// Wait for the remote side's acknowledgement of the packet we just sent.
// Returns Success on a '+' ack, ErrorSendAck on any other response, or the
// read failure itself.
GDBRemoteCommunication::PacketResult GDBRemoteCommunication::GetAck() {
  StringExtractorGDBRemote response;
  const PacketResult read_result =
      ReadPacket(response, GetPacketTimeout(), false);
  if (read_result != PacketResult::Success)
    return read_result;
  const bool got_ack = response.GetResponseType() ==
                       StringExtractorGDBRemote::ResponseType::eAck;
  return got_ack ? PacketResult::Success : PacketResult::ErrorSendAck;
}
|
|
|
|
|
2018-01-10 22:39:08 +08:00
|
|
|
// Read packets until one arrives that is not an output ('O') packet. Each 'O'
// packet carries hex-encoded inferior output, which is decoded and handed to
// `output_callback`; the first non-'O' packet (or any read failure) is left
// in `response` and its PacketResult returned.
GDBRemoteCommunication::PacketResult
GDBRemoteCommunication::ReadPacketWithOutputSupport(
    StringExtractorGDBRemote &response, Timeout<std::micro> timeout,
    bool sync_on_timeout,
    llvm::function_ref<void(llvm::StringRef)> output_callback) {
  PacketResult status = ReadPacket(response, timeout, sync_on_timeout);
  for (;;) {
    const bool is_output_packet = status == PacketResult::Success &&
                                  response.IsNormalResponse() &&
                                  response.PeekChar() == 'O';
    if (!is_output_packet)
      break;
    response.GetChar(); // Consume the leading 'O'.
    std::string decoded;
    if (response.GetHexByteString(decoded))
      output_callback(decoded);
    status = ReadPacket(response, timeout, sync_on_timeout);
  }
  return status;
}
|
|
|
|
|
2015-06-16 23:50:18 +08:00
|
|
|
// Read one packet into `response`. When the dedicated read thread is running,
// packets are parsed by that thread and consumed from its queue; otherwise we
// read and parse directly on the calling thread.
GDBRemoteCommunication::PacketResult
GDBRemoteCommunication::ReadPacket(StringExtractorGDBRemote &response,
                                   Timeout<std::micro> timeout,
                                   bool sync_on_timeout) {
  if (m_read_thread_enabled)
    return PopPacketFromQueue(response, timeout);
  return WaitForPacketNoLock(response, timeout, sync_on_timeout);
}
|
|
|
|
|
|
|
|
// This function is called when a packet is requested.
|
|
|
|
// A whole packet is popped from the packet queue and returned to the caller.
|
2018-05-01 00:49:04 +08:00
|
|
|
// Packets are placed into this queue from the communication read thread. See
|
|
|
|
// GDBRemoteCommunication::AppendBytesToCache.
|
2015-06-16 23:50:18 +08:00
|
|
|
GDBRemoteCommunication::PacketResult
|
|
|
|
GDBRemoteCommunication::PopPacketFromQueue(StringExtractorGDBRemote &response,
|
Introduce chrono to more gdb-remote functions
Summary:
This replaces the usage of raw integers with duration classes in the gdb-remote
packet management functions. The values are still converted back to integers once
they go into the generic Communication class -- that I am leaving to a separate
change.
The changes are mostly straight-forward (*), the only tricky part was
representation of infinite timeouts.
Currently, we use UINT32_MAX to denote infinite timeout. This is not well suited
for duration classes, as they tend to do arithmetic on the values, and the
identity of the MAX value can easily get lost (e.g.
microseconds(seconds(UINT32_MAX)).count() != UINT32_MAX). We cannot use zero to
represent infinity (as Listener classes do) because we already use it to do
non-blocking polling reads. For this reason, I chose to have an explicit value
for infinity.
The way I achieved that is via llvm::Optional, and I think it reads quite
natural. Passing llvm::None as "timeout" means "no timeout", while passing zero
means "poll". The only tricky part is this breaks implicit conversions (seconds
are implicitly convertible to microseconds, but Optional<seconds> cannot be
easily converted into Optional<microseconds>). For this reason I added a special
class Timeout, inheriting from Optional, and enabling the necessary conversions
one would normally expect.
(*) The other tricky part was GDBRemoteCommunication::PopPacketFromQueue, which
was needlessly complicated. I've simplified it, but that one is only used in
non-stop mode, and so is untested.
Reviewers: clayborg, zturner, jingham
Subscribers: lldb-commits
Differential Revision: https://reviews.llvm.org/D26971
llvm-svn: 287864
2016-11-24 18:54:49 +08:00
|
|
|
Timeout<std::micro> timeout) {
|
|
|
|
auto pred = [&] { return !m_packet_queue.empty() && IsConnected(); };
|
|
|
|
// lock down the packet queue
|
|
|
|
std::unique_lock<std::mutex> lock(m_packet_queue_mutex);
|
|
|
|
|
|
|
|
if (!timeout)
|
|
|
|
m_condition_queue_not_empty.wait(lock, pred);
|
|
|
|
else {
|
|
|
|
if (!m_condition_queue_not_empty.wait_for(lock, *timeout, pred))
|
|
|
|
return PacketResult::ErrorReplyTimeout;
|
2015-06-16 23:50:18 +08:00
|
|
|
if (!IsConnected())
|
|
|
|
return PacketResult::ErrorDisconnected;
|
2016-09-07 04:57:50 +08:00
|
|
|
}
|
2015-06-16 23:50:18 +08:00
|
|
|
|
Introduce chrono to more gdb-remote functions
Summary:
This replaces the usage of raw integers with duration classes in the gdb-remote
packet management functions. The values are still converted back to integers once
they go into the generic Communication class -- that I am leaving to a separate
change.
The changes are mostly straight-forward (*), the only tricky part was
representation of infinite timeouts.
Currently, we use UINT32_MAX to denote infinite timeout. This is not well suited
for duration classes, as they tend to do arithmetic on the values, and the
identity of the MAX value can easily get lost (e.g.
microseconds(seconds(UINT32_MAX)).count() != UINT32_MAX). We cannot use zero to
represent infinity (as Listener classes do) because we already use it to do
non-blocking polling reads. For this reason, I chose to have an explicit value
for infinity.
The way I achieved that is via llvm::Optional, and I think it reads quite
natural. Passing llvm::None as "timeout" means "no timeout", while passing zero
means "poll". The only tricky part is this breaks implicit conversions (seconds
are implicitly convertible to microseconds, but Optional<seconds> cannot be
easily converted into Optional<microseconds>). For this reason I added a special
class Timeout, inheriting from Optional, and enabling the necessary conversions
one would normally expect.
(*) The other tricky part was GDBRemoteCommunication::PopPacketFromQueue, which
was needlessly complicated. I've simplified it, but that one is only used in
non-stop mode, and so is untested.
Reviewers: clayborg, zturner, jingham
Subscribers: lldb-commits
Differential Revision: https://reviews.llvm.org/D26971
llvm-svn: 287864
2016-11-24 18:54:49 +08:00
|
|
|
// get the front element of the queue
|
|
|
|
response = m_packet_queue.front();
|
|
|
|
|
|
|
|
// remove the front element
|
|
|
|
m_packet_queue.pop();
|
|
|
|
|
|
|
|
// we got a packet
|
|
|
|
return PacketResult::Success;
|
2016-09-07 04:57:50 +08:00
|
|
|
}
|
2010-06-09 00:52:24 +08:00
|
|
|
|
2013-03-28 07:08:40 +08:00
|
|
|
// Read from the connection until a complete packet is available (or the
// timeout expires / the connection drops). If `sync_on_timeout` is set, a
// timeout triggers a resynchronization handshake with the remote stub before
// giving up. Returns Success with the packet in `packet`, or an error result
// with `packet` cleared.
GDBRemoteCommunication::PacketResult
GDBRemoteCommunication::WaitForPacketNoLock(StringExtractorGDBRemote &packet,
                                            Timeout<std::micro> timeout,
                                            bool sync_on_timeout) {
  uint8_t buffer[8192];
  Status error;

  Log *log(ProcessGDBRemoteLog::GetLogIfAllCategoriesSet(GDBR_LOG_PACKETS));

  // Check for a packet from our cache first without trying any reading...
  if (CheckForPacket(nullptr, 0, packet) != PacketType::Invalid)
    return PacketResult::Success;

  bool timed_out = false;
  bool disconnected = false;
  while (IsConnected() && !timed_out) {
    lldb::ConnectionStatus status = eConnectionStatusNoConnection;
    size_t bytes_read = Read(buffer, sizeof(buffer), timeout, status, &error);

    LLDB_LOGV(log,
              "Read(buffer, sizeof(buffer), timeout = {0}, "
              "status = {1}, error = {2}) => bytes_read = {3}",
              timeout, Communication::ConnectionStatusAsCString(status), error,
              bytes_read);

    if (bytes_read > 0) {
      // Feed the fresh bytes into the packet cache; return as soon as a full
      // packet can be extracted.
      if (CheckForPacket(buffer, bytes_read, packet) != PacketType::Invalid)
        return PacketResult::Success;
    } else {
      switch (status) {
      case eConnectionStatusTimedOut:
      case eConnectionStatusInterrupted:
        if (sync_on_timeout) {
          /// Sync the remote GDB server and make sure we get a response that
          /// corresponds to what we send.
          ///
          /// Sends a "qEcho" packet and makes sure it gets the exact packet
          /// echoed back. If the qEcho packet isn't supported, we send a qC
          /// packet and make sure we get a valid thread ID back. We use the
          /// "qC" packet since its response is very unique: it responds with
          /// "QC%x" where %x is the thread ID of the current thread. This
          /// makes the response unique enough from other packet responses to
          /// ensure we are back on track.
          ///
          /// This packet is needed after we time out sending a packet so we
          /// can ensure that we are getting the response for the packet we
          /// are sending. There are no sequence IDs in the GDB remote
          /// protocol (there used to be, but they are not supported anymore)
          /// so if you timeout sending packet "abc", you might then send
          /// packet "cde" and get the response for the previous "abc" packet.
          /// Many responses are "OK" or "" (unsupported) or "EXX" (error) so
          /// many responses for packets can look like responses for other
          /// packets. So if we timeout, we need to ensure that we can get
          /// back on track. If we can't get back on track, we must
          /// disconnect.
          bool sync_success = false;
          bool got_actual_response = false;
          // We timed out, we need to sync back up with the remote stub.
          char echo_packet[32];
          int echo_packet_len = 0;
          RegularExpression response_regex;

          if (m_supports_qEcho == eLazyBoolYes) {
            // qEcho is supported: send a uniquely numbered echo request and
            // build a regex that matches only its exact echo.
            echo_packet_len = ::snprintf(echo_packet, sizeof(echo_packet),
                                         "qEcho:%u", ++m_echo_number);
            std::string regex_str = "^";
            regex_str += echo_packet;
            regex_str += "$";
            response_regex = RegularExpression(regex_str);
          } else {
            // Fall back to "qC", whose "QC<hex-tid>" reply shape is distinct
            // enough to recognize.
            echo_packet_len =
                ::snprintf(echo_packet, sizeof(echo_packet), "qC");
            response_regex =
                RegularExpression(llvm::StringRef("^QC[0-9A-Fa-f]+$"));
          }

          PacketResult echo_packet_result =
              SendPacketNoLock(llvm::StringRef(echo_packet, echo_packet_len));
          if (echo_packet_result == PacketResult::Success) {
            const uint32_t max_retries = 3;
            uint32_t successful_responses = 0;
            for (uint32_t i = 0; i < max_retries; ++i) {
              StringExtractorGDBRemote echo_response;
              // Recursive wait with sync_on_timeout=false so a nested timeout
              // can't trigger another resync.
              echo_packet_result =
                  WaitForPacketNoLock(echo_response, timeout, false);
              if (echo_packet_result == PacketResult::Success) {
                ++successful_responses;
                if (response_regex.Execute(echo_response.GetStringRef())) {
                  sync_success = true;
                  break;
                } else if (successful_responses == 1) {
                  // We got something else back as the first successful
                  // response, it probably is the response to the packet we
                  // actually wanted, so copy it over if this is the first
                  // success and continue to try to get the qEcho response
                  packet = echo_response;
                  got_actual_response = true;
                }
              } else if (echo_packet_result == PacketResult::ErrorReplyTimeout)
                continue; // Packet timed out, continue waiting for a response
              else
                break; // Something else went wrong getting the packet back, we
                       // failed and are done trying
            }
          }

          // We weren't able to sync back up with the server, we must abort
          // otherwise all responses might not be from the right packets...
          if (sync_success) {
            // We timed out, but were able to recover
            if (got_actual_response) {
              // We initially timed out, but we did get a response that came in
              // before the successful reply to our qEcho packet, so lets say
              // everything is fine...
              return PacketResult::Success;
            }
          } else {
            disconnected = true;
            Disconnect();
          }
        }
        timed_out = true;
        break;
      case eConnectionStatusSuccess:
        // printf ("status = success but error = %s\n",
        // error.AsCString("<invalid>"));
        break;

      case eConnectionStatusEndOfFile:
      case eConnectionStatusNoConnection:
      case eConnectionStatusLostConnection:
      case eConnectionStatusError:
        // Any terminal connection state: tear down and report disconnect.
        disconnected = true;
        Disconnect();
        break;
      }
    }
  }
  // No packet was obtained; make sure the caller doesn't see stale contents.
  packet.Clear();
  if (disconnected)
    return PacketResult::ErrorDisconnected;
  if (timed_out)
    return PacketResult::ErrorReplyTimeout;
  else
    return PacketResult::ErrorReplyFailed;
}
|
|
|
|
|
Add a new wart, I mean feature, on to gdb-remote protocol: compression.
For some communication channels, sending large packets can be very
slow. In those cases, it may be faster to compress the contents of
the packet on the target device and decompress it on the debug host
system. For instance, communicating with a device using something
like Bluetooth may be an environment where this tradeoff is a good one.
This patch adds a new field to the response to the "qSupported" packet
(which returns a "qXfer:features:" response) -- SupportedCompressions
and DefaultCompressionMinSize. These tell you what the remote
stub can support.
lldb, if it wants to enable compression and can handle one of those
algorithms, it can send a QEnableCompression packet specifying the
algorithm and optionally the minimum packet size to use compression
on. lldb may have better knowledge about the best tradeoff for
a given communication channel.
I added support to debugserver an lldb to use the zlib APIs
(if -DHAVE_LIBZ=1 is in CFLAGS and -lz is in LDFLAGS) and the
libcompression APIs on Mac OS X 10.11 and later
(if -DHAVE_LIBCOMPRESSION=1). libz "zlib-deflate" compression.
libcompression can support deflate, lz4, lzma, and a proprietary
lzfse algorithm. libcompression has been hand-tuned for Apple
hardware so it should be preferred if available.
debugserver currently only adds the SupportedCompressions when
it is being run on an Apple watch (TARGET_OS_WATCH). Comment
that #if out from RNBRemote.cpp if you want to enable it to
see how it works. I haven't tested this on a native system
configuration but surely it will be slower to compress & decompress
the packets in a same-system debug session.
I haven't had a chance to add support for this to
GDBRemoteCommunciationServer.cpp yet.
<rdar://problem/21090180>
llvm-svn: 240066
2015-06-19 05:46:06 +08:00
|
|
|
bool GDBRemoteCommunication::DecompressPacket() {
|
|
|
|
Log *log(ProcessGDBRemoteLog::GetLogIfAllCategoriesSet(GDBR_LOG_PACKETS));
|
|
|
|
|
2015-07-15 03:19:07 +08:00
|
|
|
if (!CompressionIsEnabled())
|
Add a new wart, I mean feature, on to gdb-remote protocol: compression.
For some communication channels, sending large packets can be very
slow. In those cases, it may be faster to compress the contents of
the packet on the target device and decompress it on the debug host
system. For instance, communicating with a device using something
like Bluetooth may be an environment where this tradeoff is a good one.
This patch adds a new field to the response to the "qSupported" packet
(which returns a "qXfer:features:" response) -- SupportedCompressions
and DefaultCompressionMinSize. These tell you what the remote
stub can support.
lldb, if it wants to enable compression and can handle one of those
algorithms, it can send a QEnableCompression packet specifying the
algorithm and optionally the minimum packet size to use compression
on. lldb may have better knowledge about the best tradeoff for
a given communication channel.
I added support to debugserver an lldb to use the zlib APIs
(if -DHAVE_LIBZ=1 is in CFLAGS and -lz is in LDFLAGS) and the
libcompression APIs on Mac OS X 10.11 and later
(if -DHAVE_LIBCOMPRESSION=1). libz "zlib-deflate" compression.
libcompression can support deflate, lz4, lzma, and a proprietary
lzfse algorithm. libcompression has been hand-tuned for Apple
hardware so it should be preferred if available.
debugserver currently only adds the SupportedCompressions when
it is being run on an Apple watch (TARGET_OS_WATCH). Comment
that #if out from RNBRemote.cpp if you want to enable it to
see how it works. I haven't tested this on a native system
configuration but surely it will be slower to compress & decompress
the packets in a same-system debug session.
I haven't had a chance to add support for this to
GDBRemoteCommunciationServer.cpp yet.
<rdar://problem/21090180>
llvm-svn: 240066
2015-06-19 05:46:06 +08:00
|
|
|
return true;
|
2015-08-02 09:36:09 +08:00
|
|
|
|
|
|
|
size_t pkt_size = m_bytes.size();
|
|
|
|
|
2018-05-01 00:49:04 +08:00
|
|
|
// Smallest possible compressed packet is $N#00 - an uncompressed empty
|
|
|
|
// reply, most commonly indicating an unsupported packet. Anything less than
|
|
|
|
// 5 characters, it's definitely not a compressed packet.
|
2015-08-02 09:36:09 +08:00
|
|
|
if (pkt_size < 5)
|
Add a new wart, I mean feature, on to gdb-remote protocol: compression.
For some communication channels, sending large packets can be very
slow. In those cases, it may be faster to compress the contents of
the packet on the target device and decompress it on the debug host
system. For instance, communicating with a device using something
like Bluetooth may be an environment where this tradeoff is a good one.
This patch adds a new field to the response to the "qSupported" packet
(which returns a "qXfer:features:" response) -- SupportedCompressions
and DefaultCompressionMinSize. These tell you what the remote
stub can support.
lldb, if it wants to enable compression and can handle one of those
algorithms, it can send a QEnableCompression packet specifying the
algorithm and optionally the minimum packet size to use compression
on. lldb may have better knowledge about the best tradeoff for
a given communication channel.
I added support to debugserver an lldb to use the zlib APIs
(if -DHAVE_LIBZ=1 is in CFLAGS and -lz is in LDFLAGS) and the
libcompression APIs on Mac OS X 10.11 and later
(if -DHAVE_LIBCOMPRESSION=1). libz "zlib-deflate" compression.
libcompression can support deflate, lz4, lzma, and a proprietary
lzfse algorithm. libcompression has been hand-tuned for Apple
hardware so it should be preferred if available.
debugserver currently only adds the SupportedCompressions when
it is being run on an Apple watch (TARGET_OS_WATCH). Comment
that #if out from RNBRemote.cpp if you want to enable it to
see how it works. I haven't tested this on a native system
configuration but surely it will be slower to compress & decompress
the packets in a same-system debug session.
I haven't had a chance to add support for this to
GDBRemoteCommunciationServer.cpp yet.
<rdar://problem/21090180>
llvm-svn: 240066
2015-06-19 05:46:06 +08:00
|
|
|
return true;
|
|
|
|
|
2015-08-02 09:36:09 +08:00
|
|
|
if (m_bytes[0] != '$' && m_bytes[0] != '%')
|
|
|
|
return true;
|
|
|
|
if (m_bytes[1] != 'C' && m_bytes[1] != 'N')
|
|
|
|
return true;
|
|
|
|
|
|
|
|
size_t hash_mark_idx = m_bytes.find('#');
|
|
|
|
if (hash_mark_idx == std::string::npos)
|
|
|
|
return true;
|
|
|
|
if (hash_mark_idx + 2 >= m_bytes.size())
|
Add a new wart, I mean feature, on to gdb-remote protocol: compression.
For some communication channels, sending large packets can be very
slow. In those cases, it may be faster to compress the contents of
the packet on the target device and decompress it on the debug host
system. For instance, communicating with a device using something
like Bluetooth may be an environment where this tradeoff is a good one.
This patch adds a new field to the response to the "qSupported" packet
(which returns a "qXfer:features:" response) -- SupportedCompressions
and DefaultCompressionMinSize. These tell you what the remote
stub can support.
lldb, if it wants to enable compression and can handle one of those
algorithms, it can send a QEnableCompression packet specifying the
algorithm and optionally the minimum packet size to use compression
on. lldb may have better knowledge about the best tradeoff for
a given communication channel.
I added support to debugserver an lldb to use the zlib APIs
(if -DHAVE_LIBZ=1 is in CFLAGS and -lz is in LDFLAGS) and the
libcompression APIs on Mac OS X 10.11 and later
(if -DHAVE_LIBCOMPRESSION=1). libz "zlib-deflate" compression.
libcompression can support deflate, lz4, lzma, and a proprietary
lzfse algorithm. libcompression has been hand-tuned for Apple
hardware so it should be preferred if available.
debugserver currently only adds the SupportedCompressions when
it is being run on an Apple watch (TARGET_OS_WATCH). Comment
that #if out from RNBRemote.cpp if you want to enable it to
see how it works. I haven't tested this on a native system
configuration but surely it will be slower to compress & decompress
the packets in a same-system debug session.
I haven't had a chance to add support for this to
GDBRemoteCommunciationServer.cpp yet.
<rdar://problem/21090180>
llvm-svn: 240066
2015-06-19 05:46:06 +08:00
|
|
|
return true;
|
|
|
|
|
|
|
|
if (!::isxdigit(m_bytes[hash_mark_idx + 1]) ||
|
|
|
|
!::isxdigit(m_bytes[hash_mark_idx + 2]))
|
|
|
|
return true;
|
|
|
|
|
|
|
|
size_t content_length =
|
2015-08-02 09:36:09 +08:00
|
|
|
pkt_size -
|
Add a new wart, I mean feature, on to gdb-remote protocol: compression.
For some communication channels, sending large packets can be very
slow. In those cases, it may be faster to compress the contents of
the packet on the target device and decompress it on the debug host
system. For instance, communicating with a device using something
like Bluetooth may be an environment where this tradeoff is a good one.
This patch adds a new field to the response to the "qSupported" packet
(which returns a "qXfer:features:" response) -- SupportedCompressions
and DefaultCompressionMinSize. These tell you what the remote
stub can support.
lldb, if it wants to enable compression and can handle one of those
algorithms, it can send a QEnableCompression packet specifying the
algorithm and optionally the minimum packet size to use compression
on. lldb may have better knowledge about the best tradeoff for
a given communication channel.
I added support to debugserver an lldb to use the zlib APIs
(if -DHAVE_LIBZ=1 is in CFLAGS and -lz is in LDFLAGS) and the
libcompression APIs on Mac OS X 10.11 and later
(if -DHAVE_LIBCOMPRESSION=1). libz "zlib-deflate" compression.
libcompression can support deflate, lz4, lzma, and a proprietary
lzfse algorithm. libcompression has been hand-tuned for Apple
hardware so it should be preferred if available.
debugserver currently only adds the SupportedCompressions when
it is being run on an Apple watch (TARGET_OS_WATCH). Comment
that #if out from RNBRemote.cpp if you want to enable it to
see how it works. I haven't tested this on a native system
configuration but surely it will be slower to compress & decompress
the packets in a same-system debug session.
I haven't had a chance to add support for this to
GDBRemoteCommunciationServer.cpp yet.
<rdar://problem/21090180>
llvm-svn: 240066
2015-06-19 05:46:06 +08:00
|
|
|
5; // not counting '$', 'C' | 'N', '#', & the two hex checksum chars
|
2015-08-02 09:36:09 +08:00
|
|
|
size_t content_start = 2; // The first character of the
|
|
|
|
// compressed/not-compressed text of the packet
|
|
|
|
size_t checksum_idx =
|
Add a new wart, I mean feature, on to gdb-remote protocol: compression.
For some communication channels, sending large packets can be very
slow. In those cases, it may be faster to compress the contents of
the packet on the target device and decompress it on the debug host
system. For instance, communicating with a device using something
like Bluetooth may be an environment where this tradeoff is a good one.
This patch adds a new field to the response to the "qSupported" packet
(which returns a "qXfer:features:" response) -- SupportedCompressions
and DefaultCompressionMinSize. These tell you what the remote
stub can support.
lldb, if it wants to enable compression and can handle one of those
algorithms, it can send a QEnableCompression packet specifying the
algorithm and optionally the minimum packet size to use compression
on. lldb may have better knowledge about the best tradeoff for
a given communication channel.
I added support to debugserver an lldb to use the zlib APIs
(if -DHAVE_LIBZ=1 is in CFLAGS and -lz is in LDFLAGS) and the
libcompression APIs on Mac OS X 10.11 and later
(if -DHAVE_LIBCOMPRESSION=1). libz "zlib-deflate" compression.
libcompression can support deflate, lz4, lzma, and a proprietary
lzfse algorithm. libcompression has been hand-tuned for Apple
hardware so it should be preferred if available.
debugserver currently only adds the SupportedCompressions when
it is being run on an Apple watch (TARGET_OS_WATCH). Comment
that #if out from RNBRemote.cpp if you want to enable it to
see how it works. I haven't tested this on a native system
configuration but surely it will be slower to compress & decompress
the packets in a same-system debug session.
I haven't had a chance to add support for this to
GDBRemoteCommunciationServer.cpp yet.
<rdar://problem/21090180>
llvm-svn: 240066
2015-06-19 05:46:06 +08:00
|
|
|
hash_mark_idx +
|
2015-08-02 09:36:09 +08:00
|
|
|
1; // The first character of the two hex checksum characters
|
2016-09-07 04:57:50 +08:00
|
|
|
|
Add a new wart, I mean feature, on to gdb-remote protocol: compression.
For some communication channels, sending large packets can be very
slow. In those cases, it may be faster to compress the contents of
the packet on the target device and decompress it on the debug host
system. For instance, communicating with a device using something
like Bluetooth may be an environment where this tradeoff is a good one.
This patch adds a new field to the response to the "qSupported" packet
(which returns a "qXfer:features:" response) -- SupportedCompressions
and DefaultCompressionMinSize. These tell you what the remote
stub can support.
lldb, if it wants to enable compression and can handle one of those
algorithms, it can send a QEnableCompression packet specifying the
algorithm and optionally the minimum packet size to use compression
on. lldb may have better knowledge about the best tradeoff for
a given communication channel.
I added support to debugserver an lldb to use the zlib APIs
(if -DHAVE_LIBZ=1 is in CFLAGS and -lz is in LDFLAGS) and the
libcompression APIs on Mac OS X 10.11 and later
(if -DHAVE_LIBCOMPRESSION=1). libz "zlib-deflate" compression.
libcompression can support deflate, lz4, lzma, and a proprietary
lzfse algorithm. libcompression has been hand-tuned for Apple
hardware so it should be preferred if available.
debugserver currently only adds the SupportedCompressions when
it is being run on an Apple watch (TARGET_OS_WATCH). Comment
that #if out from RNBRemote.cpp if you want to enable it to
see how it works. I haven't tested this on a native system
configuration but surely it will be slower to compress & decompress
the packets in a same-system debug session.
I haven't had a chance to add support for this to
GDBRemoteCommunciationServer.cpp yet.
<rdar://problem/21090180>
llvm-svn: 240066
2015-06-19 05:46:06 +08:00
|
|
|
// Normally size_of_first_packet == m_bytes.size() but m_bytes may contain
|
2018-05-01 00:49:04 +08:00
|
|
|
// multiple packets. size_of_first_packet is the size of the initial packet
|
|
|
|
// which we'll replace with the decompressed version of, leaving the rest of
|
|
|
|
// m_bytes unmodified.
|
Add a new wart, I mean feature, on to gdb-remote protocol: compression.
For some communication channels, sending large packets can be very
slow. In those cases, it may be faster to compress the contents of
the packet on the target device and decompress it on the debug host
system. For instance, communicating with a device using something
like Bluetooth may be an environment where this tradeoff is a good one.
This patch adds a new field to the response to the "qSupported" packet
(which returns a "qXfer:features:" response) -- SupportedCompressions
and DefaultCompressionMinSize. These tell you what the remote
stub can support.
lldb, if it wants to enable compression and can handle one of those
algorithms, it can send a QEnableCompression packet specifying the
algorithm and optionally the minimum packet size to use compression
on. lldb may have better knowledge about the best tradeoff for
a given communication channel.
I added support to debugserver and lldb to use the zlib APIs
(if -DHAVE_LIBZ=1 is in CFLAGS and -lz is in LDFLAGS) and the
libcompression APIs on Mac OS X 10.11 and later
(if -DHAVE_LIBCOMPRESSION=1). libz "zlib-deflate" compression.
libcompression can support deflate, lz4, lzma, and a proprietary
lzfse algorithm. libcompression has been hand-tuned for Apple
hardware so it should be preferred if available.
debugserver currently only adds the SupportedCompressions when
it is being run on an Apple watch (TARGET_OS_WATCH). Comment
that #if out from RNBRemote.cpp if you want to enable it to
see how it works. I haven't tested this on a native system
configuration but surely it will be slower to compress & decompress
the packets in a same-system debug session.
I haven't had a chance to add support for this to
GDBRemoteCommunicationServer.cpp yet.
<rdar://problem/21090180>
llvm-svn: 240066
2015-06-19 05:46:06 +08:00
|
|
|
size_t size_of_first_packet = hash_mark_idx + 3;
|
2016-09-07 04:57:50 +08:00
|
|
|
|
Add a new wart, I mean feature, on to gdb-remote protocol: compression.
For some communication channels, sending large packets can be very
slow. In those cases, it may be faster to compress the contents of
the packet on the target device and decompress it on the debug host
system. For instance, communicating with a device using something
like Bluetooth may be an environment where this tradeoff is a good one.
This patch adds a new field to the response to the "qSupported" packet
(which returns a "qXfer:features:" response) -- SupportedCompressions
and DefaultCompressionMinSize. These tell you what the remote
stub can support.
lldb, if it wants to enable compression and can handle one of those
algorithms, it can send a QEnableCompression packet specifying the
algorithm and optionally the minimum packet size to use compression
on. lldb may have better knowledge about the best tradeoff for
a given communication channel.
I added support to debugserver and lldb to use the zlib APIs
(if -DHAVE_LIBZ=1 is in CFLAGS and -lz is in LDFLAGS) and the
libcompression APIs on Mac OS X 10.11 and later
(if -DHAVE_LIBCOMPRESSION=1). libz "zlib-deflate" compression.
libcompression can support deflate, lz4, lzma, and a proprietary
lzfse algorithm. libcompression has been hand-tuned for Apple
hardware so it should be preferred if available.
debugserver currently only adds the SupportedCompressions when
it is being run on an Apple watch (TARGET_OS_WATCH). Comment
that #if out from RNBRemote.cpp if you want to enable it to
see how it works. I haven't tested this on a native system
configuration but surely it will be slower to compress & decompress
the packets in a same-system debug session.
I haven't had a chance to add support for this to
GDBRemoteCommunicationServer.cpp yet.
<rdar://problem/21090180>
llvm-svn: 240066
2015-06-19 05:46:06 +08:00
|
|
|
// Compressed packets ("$C") start with a base10 number which is the size of
|
2018-05-01 00:49:04 +08:00
|
|
|
// the uncompressed payload, then a : and then the compressed data. e.g.
|
|
|
|
// $C1024:<binary>#00 Update content_start and content_length to only include
|
|
|
|
// the <binary> part of the packet.
|
2016-09-07 04:57:50 +08:00
|
|
|
|
Add a new wart, I mean feature, on to gdb-remote protocol: compression.
For some communication channels, sending large packets can be very
slow. In those cases, it may be faster to compress the contents of
the packet on the target device and decompress it on the debug host
system. For instance, communicating with a device using something
like Bluetooth may be an environment where this tradeoff is a good one.
This patch adds a new field to the response to the "qSupported" packet
(which returns a "qXfer:features:" response) -- SupportedCompressions
and DefaultCompressionMinSize. These tell you what the remote
stub can support.
lldb, if it wants to enable compression and can handle one of those
algorithms, it can send a QEnableCompression packet specifying the
algorithm and optionally the minimum packet size to use compression
on. lldb may have better knowledge about the best tradeoff for
a given communication channel.
I added support to debugserver and lldb to use the zlib APIs
(if -DHAVE_LIBZ=1 is in CFLAGS and -lz is in LDFLAGS) and the
libcompression APIs on Mac OS X 10.11 and later
(if -DHAVE_LIBCOMPRESSION=1). libz "zlib-deflate" compression.
libcompression can support deflate, lz4, lzma, and a proprietary
lzfse algorithm. libcompression has been hand-tuned for Apple
hardware so it should be preferred if available.
debugserver currently only adds the SupportedCompressions when
it is being run on an Apple watch (TARGET_OS_WATCH). Comment
that #if out from RNBRemote.cpp if you want to enable it to
see how it works. I haven't tested this on a native system
configuration but surely it will be slower to compress & decompress
the packets in a same-system debug session.
I haven't had a chance to add support for this to
GDBRemoteCommunicationServer.cpp yet.
<rdar://problem/21090180>
llvm-svn: 240066
2015-06-19 05:46:06 +08:00
|
|
|
uint64_t decompressed_bufsize = ULONG_MAX;
|
|
|
|
if (m_bytes[1] == 'C') {
|
|
|
|
size_t i = content_start;
|
|
|
|
while (i < hash_mark_idx && isdigit(m_bytes[i]))
|
|
|
|
i++;
|
|
|
|
if (i < hash_mark_idx && m_bytes[i] == ':') {
|
|
|
|
i++;
|
|
|
|
content_start = i;
|
|
|
|
content_length = hash_mark_idx - content_start;
|
|
|
|
std::string bufsize_str(m_bytes.data() + 2, i - 2 - 1);
|
|
|
|
errno = 0;
|
[lldb] NFC modernize codebase with modernize-use-nullptr
Summary:
NFC = [[ https://llvm.org/docs/Lexicon.html#nfc | Non functional change ]]
This commit is the result of modernizing the LLDB codebase by using
`nullptr` instead of `0` or `NULL`. See
https://clang.llvm.org/extra/clang-tidy/checks/modernize-use-nullptr.html
for more information.
This is the command I ran to fix and format the code base:
```
run-clang-tidy.py \
-header-filter='.*' \
-checks='-*,modernize-use-nullptr' \
-fix ~/dev/llvm-project/lldb/.* \
-format \
-style LLVM \
-p ~/llvm-builds/debug-ninja-gcc
```
NOTE: There were also changes to `llvm/utils/unittest` but I did not
include them because I felt that maybe this library shall be updated in
isolation somehow.
NOTE: I know this is a rather large commit but it is a nobrainer in most
parts.
Reviewers: martong, espindola, shafik, #lldb, JDevlieghere
Reviewed By: JDevlieghere
Subscribers: arsenm, jvesely, nhaehnle, hiraditya, JDevlieghere, teemperor, rnkovacs, emaste, kubamracek, nemanjai, ki.stfu, javed.absar, arichardson, kbarton, jrtc27, MaskRay, atanasyan, dexonsmith, arphaman, jfb, jsji, jdoerfert, lldb-commits, llvm-commits
Tags: #lldb, #llvm
Differential Revision: https://reviews.llvm.org/D61847
llvm-svn: 361484
2019-05-23 19:14:47 +08:00
|
|
|
decompressed_bufsize = ::strtoul(bufsize_str.c_str(), nullptr, 10);
|
Add a new wart, I mean feature, on to gdb-remote protocol: compression.
For some communication channels, sending large packets can be very
slow. In those cases, it may be faster to compress the contents of
the packet on the target device and decompress it on the debug host
system. For instance, communicating with a device using something
like Bluetooth may be an environment where this tradeoff is a good one.
This patch adds a new field to the response to the "qSupported" packet
(which returns a "qXfer:features:" response) -- SupportedCompressions
and DefaultCompressionMinSize. These tell you what the remote
stub can support.
lldb, if it wants to enable compression and can handle one of those
algorithms, it can send a QEnableCompression packet specifying the
algorithm and optionally the minimum packet size to use compression
on. lldb may have better knowledge about the best tradeoff for
a given communication channel.
I added support to debugserver and lldb to use the zlib APIs
(if -DHAVE_LIBZ=1 is in CFLAGS and -lz is in LDFLAGS) and the
libcompression APIs on Mac OS X 10.11 and later
(if -DHAVE_LIBCOMPRESSION=1). libz "zlib-deflate" compression.
libcompression can support deflate, lz4, lzma, and a proprietary
lzfse algorithm. libcompression has been hand-tuned for Apple
hardware so it should be preferred if available.
debugserver currently only adds the SupportedCompressions when
it is being run on an Apple watch (TARGET_OS_WATCH). Comment
that #if out from RNBRemote.cpp if you want to enable it to
see how it works. I haven't tested this on a native system
configuration but surely it will be slower to compress & decompress
the packets in a same-system debug session.
I haven't had a chance to add support for this to
GDBRemoteCommunicationServer.cpp yet.
<rdar://problem/21090180>
llvm-svn: 240066
2015-06-19 05:46:06 +08:00
|
|
|
if (errno != 0 || decompressed_bufsize == ULONG_MAX) {
|
2015-08-02 09:36:09 +08:00
|
|
|
m_bytes.erase(0, size_of_first_packet);
|
Add a new wart, I mean feature, on to gdb-remote protocol: compression.
For some communication channels, sending large packets can be very
slow. In those cases, it may be faster to compress the contents of
the packet on the target device and decompress it on the debug host
system. For instance, communicating with a device using something
like Bluetooth may be an environment where this tradeoff is a good one.
This patch adds a new field to the response to the "qSupported" packet
(which returns a "qXfer:features:" response) -- SupportedCompressions
and DefaultCompressionMinSize. These tell you what the remote
stub can support.
lldb, if it wants to enable compression and can handle one of those
algorithms, it can send a QEnableCompression packet specifying the
algorithm and optionally the minimum packet size to use compression
on. lldb may have better knowledge about the best tradeoff for
a given communication channel.
I added support to debugserver and lldb to use the zlib APIs
(if -DHAVE_LIBZ=1 is in CFLAGS and -lz is in LDFLAGS) and the
libcompression APIs on Mac OS X 10.11 and later
(if -DHAVE_LIBCOMPRESSION=1). libz "zlib-deflate" compression.
libcompression can support deflate, lz4, lzma, and a proprietary
lzfse algorithm. libcompression has been hand-tuned for Apple
hardware so it should be preferred if available.
debugserver currently only adds the SupportedCompressions when
it is being run on an Apple watch (TARGET_OS_WATCH). Comment
that #if out from RNBRemote.cpp if you want to enable it to
see how it works. I haven't tested this on a native system
configuration but surely it will be slower to compress & decompress
the packets in a same-system debug session.
I haven't had a chance to add support for this to
GDBRemoteCommunicationServer.cpp yet.
<rdar://problem/21090180>
llvm-svn: 240066
2015-06-19 05:46:06 +08:00
|
|
|
return false;
|
|
|
|
}
|
|
|
|
}
|
2016-09-07 04:57:50 +08:00
|
|
|
}
|
|
|
|
|
Add a new wart, I mean feature, on to gdb-remote protocol: compression.
For some communication channels, sending large packets can be very
slow. In those cases, it may be faster to compress the contents of
the packet on the target device and decompress it on the debug host
system. For instance, communicating with a device using something
like Bluetooth may be an environment where this tradeoff is a good one.
This patch adds a new field to the response to the "qSupported" packet
(which returns a "qXfer:features:" response) -- SupportedCompressions
and DefaultCompressionMinSize. These tell you what the remote
stub can support.
lldb, if it wants to enable compression and can handle one of those
algorithms, it can send a QEnableCompression packet specifying the
algorithm and optionally the minimum packet size to use compression
on. lldb may have better knowledge about the best tradeoff for
a given communication channel.
I added support to debugserver and lldb to use the zlib APIs
(if -DHAVE_LIBZ=1 is in CFLAGS and -lz is in LDFLAGS) and the
libcompression APIs on Mac OS X 10.11 and later
(if -DHAVE_LIBCOMPRESSION=1). libz "zlib-deflate" compression.
libcompression can support deflate, lz4, lzma, and a proprietary
lzfse algorithm. libcompression has been hand-tuned for Apple
hardware so it should be preferred if available.
debugserver currently only adds the SupportedCompressions when
it is being run on an Apple watch (TARGET_OS_WATCH). Comment
that #if out from RNBRemote.cpp if you want to enable it to
see how it works. I haven't tested this on a native system
configuration but surely it will be slower to compress & decompress
the packets in a same-system debug session.
I haven't had a chance to add support for this to
GDBRemoteCommunicationServer.cpp yet.
<rdar://problem/21090180>
llvm-svn: 240066
2015-06-19 05:46:06 +08:00
|
|
|
if (GetSendAcks()) {
|
|
|
|
char packet_checksum_cstr[3];
|
|
|
|
packet_checksum_cstr[0] = m_bytes[checksum_idx];
|
|
|
|
packet_checksum_cstr[1] = m_bytes[checksum_idx + 1];
|
|
|
|
packet_checksum_cstr[2] = '\0';
|
[lldb] NFC modernize codebase with modernize-use-nullptr
Summary:
NFC = [[ https://llvm.org/docs/Lexicon.html#nfc | Non functional change ]]
This commit is the result of modernizing the LLDB codebase by using
`nullptr` instead of `0` or `NULL`. See
https://clang.llvm.org/extra/clang-tidy/checks/modernize-use-nullptr.html
for more information.
This is the command I ran to fix and format the code base:
```
run-clang-tidy.py \
-header-filter='.*' \
-checks='-*,modernize-use-nullptr' \
-fix ~/dev/llvm-project/lldb/.* \
-format \
-style LLVM \
-p ~/llvm-builds/debug-ninja-gcc
```
NOTE: There were also changes to `llvm/utils/unittest` but I did not
include them because I felt that maybe this library shall be updated in
isolation somehow.
NOTE: I know this is a rather large commit but it is a nobrainer in most
parts.
Reviewers: martong, espindola, shafik, #lldb, JDevlieghere
Reviewed By: JDevlieghere
Subscribers: arsenm, jvesely, nhaehnle, hiraditya, JDevlieghere, teemperor, rnkovacs, emaste, kubamracek, nemanjai, ki.stfu, javed.absar, arichardson, kbarton, jrtc27, MaskRay, atanasyan, dexonsmith, arphaman, jfb, jsji, jdoerfert, lldb-commits, llvm-commits
Tags: #lldb, #llvm
Differential Revision: https://reviews.llvm.org/D61847
llvm-svn: 361484
2019-05-23 19:14:47 +08:00
|
|
|
long packet_checksum = strtol(packet_checksum_cstr, nullptr, 16);
|
2016-09-07 04:57:50 +08:00
|
|
|
|
2016-08-27 23:52:29 +08:00
|
|
|
long actual_checksum = CalculcateChecksum(
|
|
|
|
llvm::StringRef(m_bytes).substr(1, hash_mark_idx - 1));
|
Add a new wart, I mean feature, on to gdb-remote protocol: compression.
For some communication channels, sending large packets can be very
slow. In those cases, it may be faster to compress the contents of
the packet on the target device and decompress it on the debug host
system. For instance, communicating with a device using something
like Bluetooth may be an environment where this tradeoff is a good one.
This patch adds a new field to the response to the "qSupported" packet
(which returns a "qXfer:features:" response) -- SupportedCompressions
and DefaultCompressionMinSize. These tell you what the remote
stub can support.
lldb, if it wants to enable compression and can handle one of those
algorithms, it can send a QEnableCompression packet specifying the
algorithm and optionally the minimum packet size to use compression
on. lldb may have better knowledge about the best tradeoff for
a given communication channel.
I added support to debugserver and lldb to use the zlib APIs
(if -DHAVE_LIBZ=1 is in CFLAGS and -lz is in LDFLAGS) and the
libcompression APIs on Mac OS X 10.11 and later
(if -DHAVE_LIBCOMPRESSION=1). libz "zlib-deflate" compression.
libcompression can support deflate, lz4, lzma, and a proprietary
lzfse algorithm. libcompression has been hand-tuned for Apple
hardware so it should be preferred if available.
debugserver currently only adds the SupportedCompressions when
it is being run on an Apple watch (TARGET_OS_WATCH). Comment
that #if out from RNBRemote.cpp if you want to enable it to
see how it works. I haven't tested this on a native system
configuration but surely it will be slower to compress & decompress
the packets in a same-system debug session.
I haven't had a chance to add support for this to
GDBRemoteCommunicationServer.cpp yet.
<rdar://problem/21090180>
llvm-svn: 240066
2015-06-19 05:46:06 +08:00
|
|
|
bool success = packet_checksum == actual_checksum;
|
|
|
|
if (!success) {
|
2019-07-25 01:56:10 +08:00
|
|
|
LLDB_LOGF(log,
|
|
|
|
"error: checksum mismatch: %.*s expected 0x%2.2x, got 0x%2.2x",
|
|
|
|
(int)(pkt_size), m_bytes.c_str(), (uint8_t)packet_checksum,
|
|
|
|
(uint8_t)actual_checksum);
|
Add a new wart, I mean feature, on to gdb-remote protocol: compression.
For some communication channels, sending large packets can be very
slow. In those cases, it may be faster to compress the contents of
the packet on the target device and decompress it on the debug host
system. For instance, communicating with a device using something
like Bluetooth may be an environment where this tradeoff is a good one.
This patch adds a new field to the response to the "qSupported" packet
(which returns a "qXfer:features:" response) -- SupportedCompressions
and DefaultCompressionMinSize. These tell you what the remote
stub can support.
lldb, if it wants to enable compression and can handle one of those
algorithms, it can send a QEnableCompression packet specifying the
algorithm and optionally the minimum packet size to use compression
on. lldb may have better knowledge about the best tradeoff for
a given communication channel.
I added support to debugserver and lldb to use the zlib APIs
(if -DHAVE_LIBZ=1 is in CFLAGS and -lz is in LDFLAGS) and the
libcompression APIs on Mac OS X 10.11 and later
(if -DHAVE_LIBCOMPRESSION=1). libz "zlib-deflate" compression.
libcompression can support deflate, lz4, lzma, and a proprietary
lzfse algorithm. libcompression has been hand-tuned for Apple
hardware so it should be preferred if available.
debugserver currently only adds the SupportedCompressions when
it is being run on an Apple watch (TARGET_OS_WATCH). Comment
that #if out from RNBRemote.cpp if you want to enable it to
see how it works. I haven't tested this on a native system
configuration but surely it will be slower to compress & decompress
the packets in a same-system debug session.
I haven't had a chance to add support for this to
GDBRemoteCommunicationServer.cpp yet.
<rdar://problem/21090180>
llvm-svn: 240066
2015-06-19 05:46:06 +08:00
|
|
|
}
|
|
|
|
// Send the ack or nack if needed
|
|
|
|
if (!success) {
|
|
|
|
SendNack();
|
|
|
|
m_bytes.erase(0, size_of_first_packet);
|
|
|
|
return false;
|
2016-09-07 04:57:50 +08:00
|
|
|
} else {
|
2011-06-17 09:22:15 +08:00
|
|
|
SendAck();
|
Add a new wart, I mean feature, on to gdb-remote protocol: compression.
For some communication channels, sending large packets can be very
slow. In those cases, it may be faster to compress the contents of
the packet on the target device and decompress it on the debug host
system. For instance, communicating with a device using something
like Bluetooth may be an environment where this tradeoff is a good one.
This patch adds a new field to the response to the "qSupported" packet
(which returns a "qXfer:features:" response) -- SupportedCompressions
and DefaultCompressionMinSize. These tell you what the remote
stub can support.
lldb, if it wants to enable compression and can handle one of those
algorithms, it can send a QEnableCompression packet specifying the
algorithm and optionally the minimum packet size to use compression
on. lldb may have better knowledge about the best tradeoff for
a given communication channel.
I added support to debugserver and lldb to use the zlib APIs
(if -DHAVE_LIBZ=1 is in CFLAGS and -lz is in LDFLAGS) and the
libcompression APIs on Mac OS X 10.11 and later
(if -DHAVE_LIBCOMPRESSION=1). libz "zlib-deflate" compression.
libcompression can support deflate, lz4, lzma, and a proprietary
lzfse algorithm. libcompression has been hand-tuned for Apple
hardware so it should be preferred if available.
debugserver currently only adds the SupportedCompressions when
it is being run on an Apple watch (TARGET_OS_WATCH). Comment
that #if out from RNBRemote.cpp if you want to enable it to
see how it works. I haven't tested this on a native system
configuration but surely it will be slower to compress & decompress
the packets in a same-system debug session.
I haven't had a chance to add support for this to
GDBRemoteCommunicationServer.cpp yet.
<rdar://problem/21090180>
llvm-svn: 240066
2015-06-19 05:46:06 +08:00
|
|
|
}
|
2016-09-07 04:57:50 +08:00
|
|
|
}
|
Add a new wart, I mean feature, on to gdb-remote protocol: compression.
For some communication channels, sending large packets can be very
slow. In those cases, it may be faster to compress the contents of
the packet on the target device and decompress it on the debug host
system. For instance, communicating with a device using something
like Bluetooth may be an environment where this tradeoff is a good one.
This patch adds a new field to the response to the "qSupported" packet
(which returns a "qXfer:features:" response) -- SupportedCompressions
and DefaultCompressionMinSize. These tell you what the remote
stub can support.
lldb, if it wants to enable compression and can handle one of those
algorithms, it can send a QEnableCompression packet specifying the
algorithm and optionally the minimum packet size to use compression
on. lldb may have better knowledge about the best tradeoff for
a given communication channel.
I added support to debugserver and lldb to use the zlib APIs
(if -DHAVE_LIBZ=1 is in CFLAGS and -lz is in LDFLAGS) and the
libcompression APIs on Mac OS X 10.11 and later
(if -DHAVE_LIBCOMPRESSION=1). libz "zlib-deflate" compression.
libcompression can support deflate, lz4, lzma, and a proprietary
lzfse algorithm. libcompression has been hand-tuned for Apple
hardware so it should be preferred if available.
debugserver currently only adds the SupportedCompressions when
it is being run on an Apple watch (TARGET_OS_WATCH). Comment
that #if out from RNBRemote.cpp if you want to enable it to
see how it works. I haven't tested this on a native system
configuration but surely it will be slower to compress & decompress
the packets in a same-system debug session.
I haven't had a chance to add support for this to
GDBRemoteCommunicationServer.cpp yet.
<rdar://problem/21090180>
llvm-svn: 240066
2015-06-19 05:46:06 +08:00
|
|
|
|
|
|
|
if (m_bytes[1] == 'N') {
|
2018-05-01 00:49:04 +08:00
|
|
|
// This packet was not compressed -- delete the 'N' character at the start
|
|
|
|
// and the packet may be processed as-is.
|
2014-11-04 05:02:54 +08:00
|
|
|
m_bytes.erase(1, 1);
|
Add a new wart, I mean feature, on to gdb-remote protocol: compression.
For some communication channels, sending large packets can be very
slow. In those cases, it may be faster to compress the contents of
the packet on the target device and decompress it on the debug host
system. For instance, communicating with a device using something
like Bluetooth may be an environment where this tradeoff is a good one.
This patch adds a new field to the response to the "qSupported" packet
(which returns a "qXfer:features:" response) -- SupportedCompressions
and DefaultCompressionMinSize. These tell you what the remote
stub can support.
lldb, if it wants to enable compression and can handle one of those
algorithms, it can send a QEnableCompression packet specifying the
algorithm and optionally the minimum packet size to use compression
on. lldb may have better knowledge about the best tradeoff for
a given communication channel.
I added support to debugserver and lldb to use the zlib APIs
(if -DHAVE_LIBZ=1 is in CFLAGS and -lz is in LDFLAGS) and the
libcompression APIs on Mac OS X 10.11 and later
(if -DHAVE_LIBCOMPRESSION=1). libz "zlib-deflate" compression.
libcompression can support deflate, lz4, lzma, and a proprietary
lzfse algorithm. libcompression has been hand-tuned for Apple
hardware so it should be preferred if available.
debugserver currently only adds the SupportedCompressions when
it is being run on an Apple watch (TARGET_OS_WATCH). Comment
that #if out from RNBRemote.cpp if you want to enable it to
see how it works. I haven't tested this on a native system
configuration but surely it will be slower to compress & decompress
the packets in a same-system debug session.
I haven't had a chance to add support for this to
GDBRemoteCommunicationServer.cpp yet.
<rdar://problem/21090180>
llvm-svn: 240066
2015-06-19 05:46:06 +08:00
|
|
|
return true;
|
2016-09-07 04:57:50 +08:00
|
|
|
}
|
|
|
|
|
2018-05-01 00:49:04 +08:00
|
|
|
// Reverse the gdb-remote binary escaping that was done to the compressed
|
|
|
|
// text to guard characters like '$', '#', '}', etc.
|
Add a new wart, I mean feature, on to gdb-remote protocol: compression.
For some communication channels, sending large packets can be very
slow. In those cases, it may be faster to compress the contents of
the packet on the target device and decompress it on the debug host
system. For instance, communicating with a device using something
like Bluetooth may be an environment where this tradeoff is a good one.
This patch adds a new field to the response to the "qSupported" packet
(which returns a "qXfer:features:" response) -- SupportedCompressions
and DefaultCompressionMinSize. These tell you what the remote
stub can support.
lldb, if it wants to enable compression and can handle one of those
algorithms, it can send a QEnableCompression packet specifying the
algorithm and optionally the minimum packet size to use compression
on. lldb may have better knowledge about the best tradeoff for
a given communication channel.
I added support to debugserver and lldb to use the zlib APIs
(if -DHAVE_LIBZ=1 is in CFLAGS and -lz is in LDFLAGS) and the
libcompression APIs on Mac OS X 10.11 and later
(if -DHAVE_LIBCOMPRESSION=1). libz "zlib-deflate" compression.
libcompression can support deflate, lz4, lzma, and a proprietary
lzfse algorithm. libcompression has been hand-tuned for Apple
hardware so it should be preferred if available.
debugserver currently only adds the SupportedCompressions when
it is being run on an Apple watch (TARGET_OS_WATCH). Comment
that #if out from RNBRemote.cpp if you want to enable it to
see how it works. I haven't tested this on a native system
configuration but surely it will be slower to compress & decompress
the packets in a same-system debug session.
I haven't had a chance to add support for this to
GDBRemoteCommunicationServer.cpp yet.
<rdar://problem/21090180>
llvm-svn: 240066
2015-06-19 05:46:06 +08:00
|
|
|
std::vector<uint8_t> unescaped_content;
|
|
|
|
unescaped_content.reserve(content_length);
|
|
|
|
size_t i = content_start;
|
|
|
|
while (i < hash_mark_idx) {
|
|
|
|
if (m_bytes[i] == '}') {
|
|
|
|
i++;
|
|
|
|
unescaped_content.push_back(m_bytes[i] ^ 0x20);
|
|
|
|
} else {
|
|
|
|
unescaped_content.push_back(m_bytes[i]);
|
|
|
|
}
|
2016-09-07 04:57:50 +08:00
|
|
|
i++;
|
|
|
|
}
|
Add a new wart, I mean feature, on to gdb-remote protocol: compression.
For some communication channels, sending large packets can be very
slow. In those cases, it may be faster to compress the contents of
the packet on the target device and decompress it on the debug host
system. For instance, communicating with a device using something
like Bluetooth may be an environment where this tradeoff is a good one.
This patch adds a new field to the response to the "qSupported" packet
(which returns a "qXfer:features:" response) -- SupportedCompressions
and DefaultCompressionMinSize. These tell you what the remote
stub can support.
lldb, if it wants to enable compression and can handle one of those
algorithms, it can send a QEnableCompression packet specifying the
algorithm and optionally the minimum packet size to use compression
on. lldb may have better knowledge about the best tradeoff for
a given communication channel.
I added support to debugserver and lldb to use the zlib APIs
(if -DHAVE_LIBZ=1 is in CFLAGS and -lz is in LDFLAGS) and the
libcompression APIs on Mac OS X 10.11 and later
(if -DHAVE_LIBCOMPRESSION=1). libz "zlib-deflate" compression.
libcompression can support deflate, lz4, lzma, and a proprietary
lzfse algorithm. libcompression has been hand-tuned for Apple
hardware so it should be preferred if available.
debugserver currently only adds the SupportedCompressions when
it is being run on an Apple watch (TARGET_OS_WATCH). Comment
that #if out from RNBRemote.cpp if you want to enable it to
see how it works. I haven't tested this on a native system
configuration but surely it will be slower to compress & decompress
the packets in a same-system debug session.
I haven't had a chance to add support for this to
GDBRemoteCommunicationServer.cpp yet.
<rdar://problem/21090180>
llvm-svn: 240066
2015-06-19 05:46:06 +08:00
|
|
|
|
|
|
|
uint8_t *decompressed_buffer = nullptr;
|
|
|
|
size_t decompressed_bytes = 0;
|
|
|
|
|
|
|
|
if (decompressed_bufsize != ULONG_MAX) {
|
2018-12-19 07:02:50 +08:00
|
|
|
decompressed_buffer = (uint8_t *)malloc(decompressed_bufsize);
|
Add a new wart, I mean feature, on to gdb-remote protocol: compression.
For some communication channels, sending large packets can be very
slow. In those cases, it may be faster to compress the contents of
the packet on the target device and decompress it on the debug host
system. For instance, communicating with a device using something
like Bluetooth may be an environment where this tradeoff is a good one.
This patch adds a new field to the response to the "qSupported" packet
(which returns a "qXfer:features:" response) -- SupportedCompressions
and DefaultCompressionMinSize. These tell you what the remote
stub can support.
lldb, if it wants to enable compression and can handle one of those
algorithms, it can send a QEnableCompression packet specifying the
algorithm and optionally the minimum packet size to use compression
on. lldb may have better knowledge about the best tradeoff for
a given communication channel.
I added support to debugserver and lldb to use the zlib APIs
(if -DHAVE_LIBZ=1 is in CFLAGS and -lz is in LDFLAGS) and the
libcompression APIs on Mac OS X 10.11 and later
(if -DHAVE_LIBCOMPRESSION=1). libz "zlib-deflate" compression.
libcompression can support deflate, lz4, lzma, and a proprietary
lzfse algorithm. libcompression has been hand-tuned for Apple
hardware so it should be preferred if available.
debugserver currently only adds the SupportedCompressions when
it is being run on an Apple watch (TARGET_OS_WATCH). Comment
that #if out from RNBRemote.cpp if you want to enable it to
see how it works. I haven't tested this on a native system
configuration but surely it will be slower to compress & decompress
the packets in a same-system debug session.
I haven't had a chance to add support for this to
GDBRemoteCommunciationServer.cpp yet.
<rdar://problem/21090180>
llvm-svn: 240066
2015-06-19 05:46:06 +08:00
|
|
|
if (decompressed_buffer == nullptr) {
|
2015-08-02 09:36:09 +08:00
|
|
|
m_bytes.erase(0, size_of_first_packet);
|
Add a new wart, I mean feature, on to gdb-remote protocol: compression.
For some communication channels, sending large packets can be very
slow. In those cases, it may be faster to compress the contents of
the packet on the target device and decompress it on the debug host
system. For instance, communicating with a device using something
like Bluetooth may be an environment where this tradeoff is a good one.
This patch adds a new field to the response to the "qSupported" packet
(which returns a "qXfer:features:" response) -- SupportedCompressions
and DefaultCompressionMinSize. These tell you what the remote
stub can support.
lldb, if it wants to enable compression and can handle one of those
algorithms, it can send a QEnableCompression packet specifying the
algorithm and optionally the minimum packet size to use compression
on. lldb may have better knowledge about the best tradeoff for
a given communication channel.
I added support to debugserver an lldb to use the zlib APIs
(if -DHAVE_LIBZ=1 is in CFLAGS and -lz is in LDFLAGS) and the
libcompression APIs on Mac OS X 10.11 and later
(if -DHAVE_LIBCOMPRESSION=1). libz "zlib-deflate" compression.
libcompression can support deflate, lz4, lzma, and a proprietary
lzfse algorithm. libcompression has been hand-tuned for Apple
hardware so it should be preferred if available.
debugserver currently only adds the SupportedCompressions when
it is being run on an Apple watch (TARGET_OS_WATCH). Comment
that #if out from RNBRemote.cpp if you want to enable it to
see how it works. I haven't tested this on a native system
configuration but surely it will be slower to compress & decompress
the packets in a same-system debug session.
I haven't had a chance to add support for this to
GDBRemoteCommunciationServer.cpp yet.
<rdar://problem/21090180>
llvm-svn: 240066
2015-06-19 05:46:06 +08:00
|
|
|
return false;
|
|
|
|
}
|
2016-09-07 04:57:50 +08:00
|
|
|
}
|
|
|
|
|
Add a new wart, I mean feature, on to gdb-remote protocol: compression.
For some communication channels, sending large packets can be very
slow. In those cases, it may be faster to compress the contents of
the packet on the target device and decompress it on the debug host
system. For instance, communicating with a device using something
like Bluetooth may be an environment where this tradeoff is a good one.
This patch adds a new field to the response to the "qSupported" packet
(which returns a "qXfer:features:" response) -- SupportedCompressions
and DefaultCompressionMinSize. These tell you what the remote
stub can support.
lldb, if it wants to enable compression and can handle one of those
algorithms, it can send a QEnableCompression packet specifying the
algorithm and optionally the minimum packet size to use compression
on. lldb may have better knowledge about the best tradeoff for
a given communication channel.
I added support to debugserver an lldb to use the zlib APIs
(if -DHAVE_LIBZ=1 is in CFLAGS and -lz is in LDFLAGS) and the
libcompression APIs on Mac OS X 10.11 and later
(if -DHAVE_LIBCOMPRESSION=1). libz "zlib-deflate" compression.
libcompression can support deflate, lz4, lzma, and a proprietary
lzfse algorithm. libcompression has been hand-tuned for Apple
hardware so it should be preferred if available.
debugserver currently only adds the SupportedCompressions when
it is being run on an Apple watch (TARGET_OS_WATCH). Comment
that #if out from RNBRemote.cpp if you want to enable it to
see how it works. I haven't tested this on a native system
configuration but surely it will be slower to compress & decompress
the packets in a same-system debug session.
I haven't had a chance to add support for this to
GDBRemoteCommunciationServer.cpp yet.
<rdar://problem/21090180>
llvm-svn: 240066
2015-06-19 05:46:06 +08:00
|
|
|
#if defined(HAVE_LIBCOMPRESSION)
|
2017-12-07 03:21:10 +08:00
|
|
|
if (m_compression_type == CompressionType::ZlibDeflate ||
|
|
|
|
m_compression_type == CompressionType::LZFSE ||
|
2019-05-24 03:32:46 +08:00
|
|
|
m_compression_type == CompressionType::LZ4 ||
|
2018-12-19 07:02:50 +08:00
|
|
|
m_compression_type == CompressionType::LZMA) {
|
Add a new wart, I mean feature, on to gdb-remote protocol: compression.
For some communication channels, sending large packets can be very
slow. In those cases, it may be faster to compress the contents of
the packet on the target device and decompress it on the debug host
system. For instance, communicating with a device using something
like Bluetooth may be an environment where this tradeoff is a good one.
This patch adds a new field to the response to the "qSupported" packet
(which returns a "qXfer:features:" response) -- SupportedCompressions
and DefaultCompressionMinSize. These tell you what the remote
stub can support.
lldb, if it wants to enable compression and can handle one of those
algorithms, it can send a QEnableCompression packet specifying the
algorithm and optionally the minimum packet size to use compression
on. lldb may have better knowledge about the best tradeoff for
a given communication channel.
I added support to debugserver an lldb to use the zlib APIs
(if -DHAVE_LIBZ=1 is in CFLAGS and -lz is in LDFLAGS) and the
libcompression APIs on Mac OS X 10.11 and later
(if -DHAVE_LIBCOMPRESSION=1). libz "zlib-deflate" compression.
libcompression can support deflate, lz4, lzma, and a proprietary
lzfse algorithm. libcompression has been hand-tuned for Apple
hardware so it should be preferred if available.
debugserver currently only adds the SupportedCompressions when
it is being run on an Apple watch (TARGET_OS_WATCH). Comment
that #if out from RNBRemote.cpp if you want to enable it to
see how it works. I haven't tested this on a native system
configuration but surely it will be slower to compress & decompress
the packets in a same-system debug session.
I haven't had a chance to add support for this to
GDBRemoteCommunciationServer.cpp yet.
<rdar://problem/21090180>
llvm-svn: 240066
2015-06-19 05:46:06 +08:00
|
|
|
compression_algorithm compression_type;
|
2017-01-24 13:06:14 +08:00
|
|
|
if (m_compression_type == CompressionType::LZFSE)
|
Add a new wart, I mean feature, on to gdb-remote protocol: compression.
For some communication channels, sending large packets can be very
slow. In those cases, it may be faster to compress the contents of
the packet on the target device and decompress it on the debug host
system. For instance, communicating with a device using something
like Bluetooth may be an environment where this tradeoff is a good one.
This patch adds a new field to the response to the "qSupported" packet
(which returns a "qXfer:features:" response) -- SupportedCompressions
and DefaultCompressionMinSize. These tell you what the remote
stub can support.
lldb, if it wants to enable compression and can handle one of those
algorithms, it can send a QEnableCompression packet specifying the
algorithm and optionally the minimum packet size to use compression
on. lldb may have better knowledge about the best tradeoff for
a given communication channel.
I added support to debugserver an lldb to use the zlib APIs
(if -DHAVE_LIBZ=1 is in CFLAGS and -lz is in LDFLAGS) and the
libcompression APIs on Mac OS X 10.11 and later
(if -DHAVE_LIBCOMPRESSION=1). libz "zlib-deflate" compression.
libcompression can support deflate, lz4, lzma, and a proprietary
lzfse algorithm. libcompression has been hand-tuned for Apple
hardware so it should be preferred if available.
debugserver currently only adds the SupportedCompressions when
it is being run on an Apple watch (TARGET_OS_WATCH). Comment
that #if out from RNBRemote.cpp if you want to enable it to
see how it works. I haven't tested this on a native system
configuration but surely it will be slower to compress & decompress
the packets in a same-system debug session.
I haven't had a chance to add support for this to
GDBRemoteCommunciationServer.cpp yet.
<rdar://problem/21090180>
llvm-svn: 240066
2015-06-19 05:46:06 +08:00
|
|
|
compression_type = COMPRESSION_LZFSE;
|
2017-01-24 13:06:14 +08:00
|
|
|
else if (m_compression_type == CompressionType::ZlibDeflate)
|
|
|
|
compression_type = COMPRESSION_ZLIB;
|
Add a new wart, I mean feature, on to gdb-remote protocol: compression.
For some communication channels, sending large packets can be very
slow. In those cases, it may be faster to compress the contents of
the packet on the target device and decompress it on the debug host
system. For instance, communicating with a device using something
like Bluetooth may be an environment where this tradeoff is a good one.
This patch adds a new field to the response to the "qSupported" packet
(which returns a "qXfer:features:" response) -- SupportedCompressions
and DefaultCompressionMinSize. These tell you what the remote
stub can support.
lldb, if it wants to enable compression and can handle one of those
algorithms, it can send a QEnableCompression packet specifying the
algorithm and optionally the minimum packet size to use compression
on. lldb may have better knowledge about the best tradeoff for
a given communication channel.
I added support to debugserver an lldb to use the zlib APIs
(if -DHAVE_LIBZ=1 is in CFLAGS and -lz is in LDFLAGS) and the
libcompression APIs on Mac OS X 10.11 and later
(if -DHAVE_LIBCOMPRESSION=1). libz "zlib-deflate" compression.
libcompression can support deflate, lz4, lzma, and a proprietary
lzfse algorithm. libcompression has been hand-tuned for Apple
hardware so it should be preferred if available.
debugserver currently only adds the SupportedCompressions when
it is being run on an Apple watch (TARGET_OS_WATCH). Comment
that #if out from RNBRemote.cpp if you want to enable it to
see how it works. I haven't tested this on a native system
configuration but surely it will be slower to compress & decompress
the packets in a same-system debug session.
I haven't had a chance to add support for this to
GDBRemoteCommunciationServer.cpp yet.
<rdar://problem/21090180>
llvm-svn: 240066
2015-06-19 05:46:06 +08:00
|
|
|
else if (m_compression_type == CompressionType::LZ4)
|
|
|
|
compression_type = COMPRESSION_LZ4_RAW;
|
|
|
|
else if (m_compression_type == CompressionType::LZMA)
|
|
|
|
compression_type = COMPRESSION_LZMA;
|
2016-09-07 04:57:50 +08:00
|
|
|
|
2018-12-19 07:02:50 +08:00
|
|
|
if (m_decompression_scratch_type != m_compression_type) {
|
|
|
|
if (m_decompression_scratch) {
|
|
|
|
free (m_decompression_scratch);
|
|
|
|
m_decompression_scratch = nullptr;
|
|
|
|
}
|
|
|
|
size_t scratchbuf_size = 0;
|
|
|
|
if (m_compression_type == CompressionType::LZFSE)
|
|
|
|
scratchbuf_size = compression_decode_scratch_buffer_size (COMPRESSION_LZFSE);
|
|
|
|
else if (m_compression_type == CompressionType::LZ4)
|
|
|
|
scratchbuf_size = compression_decode_scratch_buffer_size (COMPRESSION_LZ4_RAW);
|
|
|
|
else if (m_compression_type == CompressionType::ZlibDeflate)
|
|
|
|
scratchbuf_size = compression_decode_scratch_buffer_size (COMPRESSION_ZLIB);
|
|
|
|
else if (m_compression_type == CompressionType::LZMA)
|
|
|
|
scratchbuf_size = compression_decode_scratch_buffer_size (COMPRESSION_LZMA);
|
|
|
|
else if (m_compression_type == CompressionType::LZFSE)
|
|
|
|
scratchbuf_size = compression_decode_scratch_buffer_size (COMPRESSION_LZFSE);
|
|
|
|
if (scratchbuf_size > 0) {
|
|
|
|
m_decompression_scratch = (void*) malloc (scratchbuf_size);
|
|
|
|
m_decompression_scratch_type = m_compression_type;
|
|
|
|
}
|
|
|
|
}
|
2016-09-07 04:57:50 +08:00
|
|
|
|
Add a new wart, I mean feature, on to gdb-remote protocol: compression.
For some communication channels, sending large packets can be very
slow. In those cases, it may be faster to compress the contents of
the packet on the target device and decompress it on the debug host
system. For instance, communicating with a device using something
like Bluetooth may be an environment where this tradeoff is a good one.
This patch adds a new field to the response to the "qSupported" packet
(which returns a "qXfer:features:" response) -- SupportedCompressions
and DefaultCompressionMinSize. These tell you what the remote
stub can support.
lldb, if it wants to enable compression and can handle one of those
algorithms, it can send a QEnableCompression packet specifying the
algorithm and optionally the minimum packet size to use compression
on. lldb may have better knowledge about the best tradeoff for
a given communication channel.
I added support to debugserver an lldb to use the zlib APIs
(if -DHAVE_LIBZ=1 is in CFLAGS and -lz is in LDFLAGS) and the
libcompression APIs on Mac OS X 10.11 and later
(if -DHAVE_LIBCOMPRESSION=1). libz "zlib-deflate" compression.
libcompression can support deflate, lz4, lzma, and a proprietary
lzfse algorithm. libcompression has been hand-tuned for Apple
hardware so it should be preferred if available.
debugserver currently only adds the SupportedCompressions when
it is being run on an Apple watch (TARGET_OS_WATCH). Comment
that #if out from RNBRemote.cpp if you want to enable it to
see how it works. I haven't tested this on a native system
configuration but surely it will be slower to compress & decompress
the packets in a same-system debug session.
I haven't had a chance to add support for this to
GDBRemoteCommunciationServer.cpp yet.
<rdar://problem/21090180>
llvm-svn: 240066
2015-06-19 05:46:06 +08:00
|
|
|
if (decompressed_bufsize != ULONG_MAX && decompressed_buffer != nullptr) {
|
|
|
|
decompressed_bytes = compression_decode_buffer(
|
2018-12-19 07:02:50 +08:00
|
|
|
decompressed_buffer, decompressed_bufsize,
|
2019-05-24 03:32:46 +08:00
|
|
|
(uint8_t *)unescaped_content.data(), unescaped_content.size(),
|
2018-12-19 07:02:50 +08:00
|
|
|
m_decompression_scratch, compression_type);
|
Add a new wart, I mean feature, on to gdb-remote protocol: compression.
For some communication channels, sending large packets can be very
slow. In those cases, it may be faster to compress the contents of
the packet on the target device and decompress it on the debug host
system. For instance, communicating with a device using something
like Bluetooth may be an environment where this tradeoff is a good one.
This patch adds a new field to the response to the "qSupported" packet
(which returns a "qXfer:features:" response) -- SupportedCompressions
and DefaultCompressionMinSize. These tell you what the remote
stub can support.
lldb, if it wants to enable compression and can handle one of those
algorithms, it can send a QEnableCompression packet specifying the
algorithm and optionally the minimum packet size to use compression
on. lldb may have better knowledge about the best tradeoff for
a given communication channel.
I added support to debugserver an lldb to use the zlib APIs
(if -DHAVE_LIBZ=1 is in CFLAGS and -lz is in LDFLAGS) and the
libcompression APIs on Mac OS X 10.11 and later
(if -DHAVE_LIBCOMPRESSION=1). libz "zlib-deflate" compression.
libcompression can support deflate, lz4, lzma, and a proprietary
lzfse algorithm. libcompression has been hand-tuned for Apple
hardware so it should be preferred if available.
debugserver currently only adds the SupportedCompressions when
it is being run on an Apple watch (TARGET_OS_WATCH). Comment
that #if out from RNBRemote.cpp if you want to enable it to
see how it works. I haven't tested this on a native system
configuration but surely it will be slower to compress & decompress
the packets in a same-system debug session.
I haven't had a chance to add support for this to
GDBRemoteCommunciationServer.cpp yet.
<rdar://problem/21090180>
llvm-svn: 240066
2015-06-19 05:46:06 +08:00
|
|
|
}
|
2016-09-07 04:57:50 +08:00
|
|
|
}
|
Add a new wart, I mean feature, on to gdb-remote protocol: compression.
For some communication channels, sending large packets can be very
slow. In those cases, it may be faster to compress the contents of
the packet on the target device and decompress it on the debug host
system. For instance, communicating with a device using something
like Bluetooth may be an environment where this tradeoff is a good one.
This patch adds a new field to the response to the "qSupported" packet
(which returns a "qXfer:features:" response) -- SupportedCompressions
and DefaultCompressionMinSize. These tell you what the remote
stub can support.
lldb, if it wants to enable compression and can handle one of those
algorithms, it can send a QEnableCompression packet specifying the
algorithm and optionally the minimum packet size to use compression
on. lldb may have better knowledge about the best tradeoff for
a given communication channel.
I added support to debugserver an lldb to use the zlib APIs
(if -DHAVE_LIBZ=1 is in CFLAGS and -lz is in LDFLAGS) and the
libcompression APIs on Mac OS X 10.11 and later
(if -DHAVE_LIBCOMPRESSION=1). libz "zlib-deflate" compression.
libcompression can support deflate, lz4, lzma, and a proprietary
lzfse algorithm. libcompression has been hand-tuned for Apple
hardware so it should be preferred if available.
debugserver currently only adds the SupportedCompressions when
it is being run on an Apple watch (TARGET_OS_WATCH). Comment
that #if out from RNBRemote.cpp if you want to enable it to
see how it works. I haven't tested this on a native system
configuration but surely it will be slower to compress & decompress
the packets in a same-system debug session.
I haven't had a chance to add support for this to
GDBRemoteCommunciationServer.cpp yet.
<rdar://problem/21090180>
llvm-svn: 240066
2015-06-19 05:46:06 +08:00
|
|
|
#endif
|
|
|
|
|
2020-03-03 16:45:14 +08:00
|
|
|
#if defined(HAVE_LIBZ)
|
Add a new wart, I mean feature, on to gdb-remote protocol: compression.
For some communication channels, sending large packets can be very
slow. In those cases, it may be faster to compress the contents of
the packet on the target device and decompress it on the debug host
system. For instance, communicating with a device using something
like Bluetooth may be an environment where this tradeoff is a good one.
This patch adds a new field to the response to the "qSupported" packet
(which returns a "qXfer:features:" response) -- SupportedCompressions
and DefaultCompressionMinSize. These tell you what the remote
stub can support.
lldb, if it wants to enable compression and can handle one of those
algorithms, it can send a QEnableCompression packet specifying the
algorithm and optionally the minimum packet size to use compression
on. lldb may have better knowledge about the best tradeoff for
a given communication channel.
I added support to debugserver an lldb to use the zlib APIs
(if -DHAVE_LIBZ=1 is in CFLAGS and -lz is in LDFLAGS) and the
libcompression APIs on Mac OS X 10.11 and later
(if -DHAVE_LIBCOMPRESSION=1). libz "zlib-deflate" compression.
libcompression can support deflate, lz4, lzma, and a proprietary
lzfse algorithm. libcompression has been hand-tuned for Apple
hardware so it should be preferred if available.
debugserver currently only adds the SupportedCompressions when
it is being run on an Apple watch (TARGET_OS_WATCH). Comment
that #if out from RNBRemote.cpp if you want to enable it to
see how it works. I haven't tested this on a native system
configuration but surely it will be slower to compress & decompress
the packets in a same-system debug session.
I haven't had a chance to add support for this to
GDBRemoteCommunciationServer.cpp yet.
<rdar://problem/21090180>
llvm-svn: 240066
2015-06-19 05:46:06 +08:00
|
|
|
if (decompressed_bytes == 0 && decompressed_bufsize != ULONG_MAX &&
|
|
|
|
decompressed_buffer != nullptr &&
|
|
|
|
m_compression_type == CompressionType::ZlibDeflate) {
|
|
|
|
z_stream stream;
|
|
|
|
memset(&stream, 0, sizeof(z_stream));
|
|
|
|
stream.next_in = (Bytef *)unescaped_content.data();
|
|
|
|
stream.avail_in = (uInt)unescaped_content.size();
|
|
|
|
stream.total_in = 0;
|
|
|
|
stream.next_out = (Bytef *)decompressed_buffer;
|
|
|
|
stream.avail_out = decompressed_bufsize;
|
|
|
|
stream.total_out = 0;
|
|
|
|
stream.zalloc = Z_NULL;
|
|
|
|
stream.zfree = Z_NULL;
|
|
|
|
stream.opaque = Z_NULL;
|
2016-09-07 04:57:50 +08:00
|
|
|
|
Add a new wart, I mean feature, on to gdb-remote protocol: compression.
For some communication channels, sending large packets can be very
slow. In those cases, it may be faster to compress the contents of
the packet on the target device and decompress it on the debug host
system. For instance, communicating with a device using something
like Bluetooth may be an environment where this tradeoff is a good one.
This patch adds a new field to the response to the "qSupported" packet
(which returns a "qXfer:features:" response) -- SupportedCompressions
and DefaultCompressionMinSize. These tell you what the remote
stub can support.
lldb, if it wants to enable compression and can handle one of those
algorithms, it can send a QEnableCompression packet specifying the
algorithm and optionally the minimum packet size to use compression
on. lldb may have better knowledge about the best tradeoff for
a given communication channel.
I added support to debugserver an lldb to use the zlib APIs
(if -DHAVE_LIBZ=1 is in CFLAGS and -lz is in LDFLAGS) and the
libcompression APIs on Mac OS X 10.11 and later
(if -DHAVE_LIBCOMPRESSION=1). libz "zlib-deflate" compression.
libcompression can support deflate, lz4, lzma, and a proprietary
lzfse algorithm. libcompression has been hand-tuned for Apple
hardware so it should be preferred if available.
debugserver currently only adds the SupportedCompressions when
it is being run on an Apple watch (TARGET_OS_WATCH). Comment
that #if out from RNBRemote.cpp if you want to enable it to
see how it works. I haven't tested this on a native system
configuration but surely it will be slower to compress & decompress
the packets in a same-system debug session.
I haven't had a chance to add support for this to
GDBRemoteCommunciationServer.cpp yet.
<rdar://problem/21090180>
llvm-svn: 240066
2015-06-19 05:46:06 +08:00
|
|
|
if (inflateInit2(&stream, -15) == Z_OK) {
|
|
|
|
int status = inflate(&stream, Z_NO_FLUSH);
|
|
|
|
inflateEnd(&stream);
|
|
|
|
if (status == Z_STREAM_END) {
|
|
|
|
decompressed_bytes = stream.total_out;
|
|
|
|
}
|
|
|
|
}
|
2016-09-07 04:57:50 +08:00
|
|
|
}
|
Add a new wart, I mean feature, on to gdb-remote protocol: compression.
For some communication channels, sending large packets can be very
slow. In those cases, it may be faster to compress the contents of
the packet on the target device and decompress it on the debug host
system. For instance, communicating with a device using something
like Bluetooth may be an environment where this tradeoff is a good one.
This patch adds a new field to the response to the "qSupported" packet
(which returns a "qXfer:features:" response) -- SupportedCompressions
and DefaultCompressionMinSize. These tell you what the remote
stub can support.
lldb, if it wants to enable compression and can handle one of those
algorithms, it can send a QEnableCompression packet specifying the
algorithm and optionally the minimum packet size to use compression
on. lldb may have better knowledge about the best tradeoff for
a given communication channel.
I added support to debugserver an lldb to use the zlib APIs
(if -DHAVE_LIBZ=1 is in CFLAGS and -lz is in LDFLAGS) and the
libcompression APIs on Mac OS X 10.11 and later
(if -DHAVE_LIBCOMPRESSION=1). libz "zlib-deflate" compression.
libcompression can support deflate, lz4, lzma, and a proprietary
lzfse algorithm. libcompression has been hand-tuned for Apple
hardware so it should be preferred if available.
debugserver currently only adds the SupportedCompressions when
it is being run on an Apple watch (TARGET_OS_WATCH). Comment
that #if out from RNBRemote.cpp if you want to enable it to
see how it works. I haven't tested this on a native system
configuration but surely it will be slower to compress & decompress
the packets in a same-system debug session.
I haven't had a chance to add support for this to
GDBRemoteCommunciationServer.cpp yet.
<rdar://problem/21090180>
llvm-svn: 240066
2015-06-19 05:46:06 +08:00
|
|
|
#endif
|
|
|
|
|
|
|
|
if (decompressed_bytes == 0 || decompressed_buffer == nullptr) {
|
|
|
|
if (decompressed_buffer)
|
|
|
|
free(decompressed_buffer);
|
2015-08-02 09:36:09 +08:00
|
|
|
m_bytes.erase(0, size_of_first_packet);
|
Add a new wart, I mean feature, on to gdb-remote protocol: compression.
For some communication channels, sending large packets can be very
slow. In those cases, it may be faster to compress the contents of
the packet on the target device and decompress it on the debug host
system. For instance, communicating with a device using something
like Bluetooth may be an environment where this tradeoff is a good one.
This patch adds a new field to the response to the "qSupported" packet
(which returns a "qXfer:features:" response) -- SupportedCompressions
and DefaultCompressionMinSize. These tell you what the remote
stub can support.
lldb, if it wants to enable compression and can handle one of those
algorithms, it can send a QEnableCompression packet specifying the
algorithm and optionally the minimum packet size to use compression
on. lldb may have better knowledge about the best tradeoff for
a given communication channel.
I added support to debugserver an lldb to use the zlib APIs
(if -DHAVE_LIBZ=1 is in CFLAGS and -lz is in LDFLAGS) and the
libcompression APIs on Mac OS X 10.11 and later
(if -DHAVE_LIBCOMPRESSION=1). libz "zlib-deflate" compression.
libcompression can support deflate, lz4, lzma, and a proprietary
lzfse algorithm. libcompression has been hand-tuned for Apple
hardware so it should be preferred if available.
debugserver currently only adds the SupportedCompressions when
it is being run on an Apple watch (TARGET_OS_WATCH). Comment
that #if out from RNBRemote.cpp if you want to enable it to
see how it works. I haven't tested this on a native system
configuration but surely it will be slower to compress & decompress
the packets in a same-system debug session.
I haven't had a chance to add support for this to
GDBRemoteCommunciationServer.cpp yet.
<rdar://problem/21090180>
llvm-svn: 240066
2015-06-19 05:46:06 +08:00
|
|
|
return false;
|
|
|
|
}
|
2016-09-07 04:57:50 +08:00
|
|
|
|
Add a new wart, I mean feature, on to gdb-remote protocol: compression.
For some communication channels, sending large packets can be very
slow. In those cases, it may be faster to compress the contents of
the packet on the target device and decompress it on the debug host
system. For instance, communicating with a device using something
like Bluetooth may be an environment where this tradeoff is a good one.
This patch adds a new field to the response to the "qSupported" packet
(which returns a "qXfer:features:" response) -- SupportedCompressions
and DefaultCompressionMinSize. These tell you what the remote
stub can support.
lldb, if it wants to enable compression and can handle one of those
algorithms, it can send a QEnableCompression packet specifying the
algorithm and optionally the minimum packet size to use compression
on. lldb may have better knowledge about the best tradeoff for
a given communication channel.
I added support to debugserver an lldb to use the zlib APIs
(if -DHAVE_LIBZ=1 is in CFLAGS and -lz is in LDFLAGS) and the
libcompression APIs on Mac OS X 10.11 and later
(if -DHAVE_LIBCOMPRESSION=1). libz "zlib-deflate" compression.
libcompression can support deflate, lz4, lzma, and a proprietary
lzfse algorithm. libcompression has been hand-tuned for Apple
hardware so it should be preferred if available.
debugserver currently only adds the SupportedCompressions when
it is being run on an Apple watch (TARGET_OS_WATCH). Comment
that #if out from RNBRemote.cpp if you want to enable it to
see how it works. I haven't tested this on a native system
configuration but surely it will be slower to compress & decompress
the packets in a same-system debug session.
I haven't had a chance to add support for this to
GDBRemoteCommunciationServer.cpp yet.
<rdar://problem/21090180>
llvm-svn: 240066
2015-06-19 05:46:06 +08:00
|
|
|
std::string new_packet;
|
|
|
|
new_packet.reserve(decompressed_bytes + 6);
|
|
|
|
new_packet.push_back(m_bytes[0]);
|
|
|
|
new_packet.append((const char *)decompressed_buffer, decompressed_bytes);
|
|
|
|
new_packet.push_back('#');
|
|
|
|
if (GetSendAcks()) {
|
2016-08-27 23:52:29 +08:00
|
|
|
uint8_t decompressed_checksum = CalculcateChecksum(
|
|
|
|
llvm::StringRef((const char *)decompressed_buffer, decompressed_bytes));
|
Add a new wart, I mean feature, on to gdb-remote protocol: compression.
For some communication channels, sending large packets can be very
slow. In those cases, it may be faster to compress the contents of
the packet on the target device and decompress it on the debug host
system. For instance, communicating with a device using something
like Bluetooth may be an environment where this tradeoff is a good one.
This patch adds a new field to the response to the "qSupported" packet
(which returns a "qXfer:features:" response) -- SupportedCompressions
and DefaultCompressionMinSize. These tell you what the remote
stub can support.
lldb, if it wants to enable compression and can handle one of those
algorithms, it can send a QEnableCompression packet specifying the
algorithm and optionally the minimum packet size to use compression
on. lldb may have better knowledge about the best tradeoff for
a given communication channel.
I added support to debugserver an lldb to use the zlib APIs
(if -DHAVE_LIBZ=1 is in CFLAGS and -lz is in LDFLAGS) and the
libcompression APIs on Mac OS X 10.11 and later
(if -DHAVE_LIBCOMPRESSION=1). libz "zlib-deflate" compression.
libcompression can support deflate, lz4, lzma, and a proprietary
lzfse algorithm. libcompression has been hand-tuned for Apple
hardware so it should be preferred if available.
debugserver currently only adds the SupportedCompressions when
it is being run on an Apple watch (TARGET_OS_WATCH). Comment
that #if out from RNBRemote.cpp if you want to enable it to
see how it works. I haven't tested this on a native system
configuration but surely it will be slower to compress & decompress
the packets in a same-system debug session.
I haven't had a chance to add support for this to
GDBRemoteCommunciationServer.cpp yet.
<rdar://problem/21090180>
llvm-svn: 240066
2015-06-19 05:46:06 +08:00
|
|
|
char decompressed_checksum_str[3];
|
|
|
|
snprintf(decompressed_checksum_str, 3, "%02x", decompressed_checksum);
|
|
|
|
new_packet.append(decompressed_checksum_str);
|
|
|
|
} else {
|
|
|
|
new_packet.push_back('0');
|
|
|
|
new_packet.push_back('0');
|
|
|
|
}
|
2016-09-07 04:57:50 +08:00
|
|
|
|
2015-08-02 09:36:09 +08:00
|
|
|
m_bytes.replace(0, size_of_first_packet, new_packet.data(),
|
|
|
|
new_packet.size());
|
2016-09-07 04:57:50 +08:00
|
|
|
|
Add a new wart, I mean feature, on to gdb-remote protocol: compression.
For some communication channels, sending large packets can be very
slow. In those cases, it may be faster to compress the contents of
the packet on the target device and decompress it on the debug host
system. For instance, communicating with a device using something
like Bluetooth may be an environment where this tradeoff is a good one.
This patch adds a new field to the response to the "qSupported" packet
(which returns a "qXfer:features:" response) -- SupportedCompressions
and DefaultCompressionMinSize. These tell you what the remote
stub can support.
lldb, if it wants to enable compression and can handle one of those
algorithms, it can send a QEnableCompression packet specifying the
algorithm and optionally the minimum packet size to use compression
on. lldb may have better knowledge about the best tradeoff for
a given communication channel.
I added support to debugserver an lldb to use the zlib APIs
(if -DHAVE_LIBZ=1 is in CFLAGS and -lz is in LDFLAGS) and the
libcompression APIs on Mac OS X 10.11 and later
(if -DHAVE_LIBCOMPRESSION=1). libz "zlib-deflate" compression.
libcompression can support deflate, lz4, lzma, and a proprietary
lzfse algorithm. libcompression has been hand-tuned for Apple
hardware so it should be preferred if available.
debugserver currently only adds the SupportedCompressions when
it is being run on an Apple watch (TARGET_OS_WATCH). Comment
that #if out from RNBRemote.cpp if you want to enable it to
see how it works. I haven't tested this on a native system
configuration but surely it will be slower to compress & decompress
the packets in a same-system debug session.
I haven't had a chance to add support for this to
GDBRemoteCommunciationServer.cpp yet.
<rdar://problem/21090180>
llvm-svn: 240066
2015-06-19 05:46:06 +08:00
|
|
|
free(decompressed_buffer);
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
2015-05-27 22:12:34 +08:00
|
|
|
GDBRemoteCommunication::PacketType
|
2011-06-17 09:22:15 +08:00
|
|
|
GDBRemoteCommunication::CheckForPacket(const uint8_t *src, size_t src_len,
|
|
|
|
StringExtractorGDBRemote &packet) {
|
2010-06-09 00:52:24 +08:00
|
|
|
// Put the packet data into the buffer in a thread safe fashion
|
2016-05-18 09:59:10 +08:00
|
|
|
std::lock_guard<std::recursive_mutex> guard(m_bytes_mutex);
|
2016-09-07 04:57:50 +08:00
|
|
|
|
2013-03-28 07:08:40 +08:00
|
|
|
Log *log(ProcessGDBRemoteLog::GetLogIfAllCategoriesSet(GDBR_LOG_PACKETS));
|
2016-09-07 04:57:50 +08:00
|
|
|
|
2011-06-17 09:22:15 +08:00
|
|
|
if (src && src_len > 0) {
|
2011-07-03 07:21:06 +08:00
|
|
|
if (log && log->GetVerbose()) {
|
2011-07-03 05:07:54 +08:00
|
|
|
StreamString s;
|
2019-07-25 01:56:10 +08:00
|
|
|
LLDB_LOGF(log, "GDBRemoteCommunication::%s adding %u bytes: %.*s",
|
|
|
|
__FUNCTION__, (uint32_t)src_len, (uint32_t)src_len, src);
|
2016-09-07 04:57:50 +08:00
|
|
|
}
|
2011-06-17 09:22:15 +08:00
|
|
|
m_bytes.append((const char *)src, src_len);
|
2016-09-07 04:57:50 +08:00
|
|
|
}
|
|
|
|
|
2015-05-27 22:12:34 +08:00
|
|
|
bool isNotifyPacket = false;
|
2016-09-07 04:57:50 +08:00
|
|
|
|
2010-06-09 00:52:24 +08:00
|
|
|
// Parse up the packets into gdb remote packets
|
2011-07-03 05:07:54 +08:00
|
|
|
if (!m_bytes.empty()) {
|
2018-05-01 00:49:04 +08:00
|
|
|
// end_idx must be one past the last valid packet byte. Start it off with
|
|
|
|
// an invalid value that is the same as the current index.
|
2011-06-17 09:22:15 +08:00
|
|
|
size_t content_start = 0;
|
2014-11-04 05:02:54 +08:00
|
|
|
size_t content_length = 0;
|
2011-06-17 09:22:15 +08:00
|
|
|
size_t total_length = 0;
|
|
|
|
size_t checksum_idx = std::string::npos;
|
2016-09-07 04:57:50 +08:00
|
|
|
|
2013-03-28 07:08:40 +08:00
|
|
|
// Size of packet before it is decompressed, for logging purposes
|
Add a new wart, I mean feature, on to gdb-remote protocol: compression.
For some communication channels, sending large packets can be very
slow. In those cases, it may be faster to compress the contents of
the packet on the target device and decompress it on the debug host
system. For instance, communicating with a device using something
like Bluetooth may be an environment where this tradeoff is a good one.
This patch adds a new field to the response to the "qSupported" packet
(which returns a "qXfer:features:" response) -- SupportedCompressions
and DefaultCompressionMinSize. These tell you what the remote
stub can support.
lldb, if it wants to enable compression and can handle one of those
algorithms, it can send a QEnableCompression packet specifying the
algorithm and optionally the minimum packet size to use compression
on. lldb may have better knowledge about the best tradeoff for
a given communication channel.
I added support to debugserver an lldb to use the zlib APIs
(if -DHAVE_LIBZ=1 is in CFLAGS and -lz is in LDFLAGS) and the
libcompression APIs on Mac OS X 10.11 and later
(if -DHAVE_LIBCOMPRESSION=1). libz "zlib-deflate" compression.
libcompression can support deflate, lz4, lzma, and a proprietary
lzfse algorithm. libcompression has been hand-tuned for Apple
hardware so it should be preferred if available.
debugserver currently only adds the SupportedCompressions when
it is being run on an Apple watch (TARGET_OS_WATCH). Comment
that #if out from RNBRemote.cpp if you want to enable it to
see how it works. I haven't tested this on a native system
configuration but surely it will be slower to compress & decompress
the packets in a same-system debug session.
I haven't had a chance to add support for this to
GDBRemoteCommunciationServer.cpp yet.
<rdar://problem/21090180>
llvm-svn: 240066
2015-06-19 05:46:06 +08:00
|
|
|
size_t original_packet_size = m_bytes.size();
|
|
|
|
if (CompressionIsEnabled()) {
|
2018-12-15 08:15:33 +08:00
|
|
|
if (!DecompressPacket()) {
|
Add a new wart, I mean feature, on to gdb-remote protocol: compression.
For some communication channels, sending large packets can be very
slow. In those cases, it may be faster to compress the contents of
the packet on the target device and decompress it on the debug host
system. For instance, communicating with a device using something
like Bluetooth may be an environment where this tradeoff is a good one.
This patch adds a new field to the response to the "qSupported" packet
(which returns a "qXfer:features:" response) -- SupportedCompressions
and DefaultCompressionMinSize. These tell you what the remote
stub can support.
lldb, if it wants to enable compression and can handle one of those
algorithms, it can send a QEnableCompression packet specifying the
algorithm and optionally the minimum packet size to use compression
on. lldb may have better knowledge about the best tradeoff for
a given communication channel.
I added support to debugserver an lldb to use the zlib APIs
(if -DHAVE_LIBZ=1 is in CFLAGS and -lz is in LDFLAGS) and the
libcompression APIs on Mac OS X 10.11 and later
(if -DHAVE_LIBCOMPRESSION=1). libz "zlib-deflate" compression.
libcompression can support deflate, lz4, lzma, and a proprietary
lzfse algorithm. libcompression has been hand-tuned for Apple
hardware so it should be preferred if available.
debugserver currently only adds the SupportedCompressions when
it is being run on an Apple watch (TARGET_OS_WATCH). Comment
that #if out from RNBRemote.cpp if you want to enable it to
see how it works. I haven't tested this on a native system
configuration but surely it will be slower to compress & decompress
the packets in a same-system debug session.
I haven't had a chance to add support for this to
GDBRemoteCommunciationServer.cpp yet.
<rdar://problem/21090180>
llvm-svn: 240066
2015-06-19 05:46:06 +08:00
|
|
|
packet.Clear();
|
|
|
|
return GDBRemoteCommunication::PacketType::Standard;
|
2016-09-07 04:57:50 +08:00
|
|
|
}
|
|
|
|
}
|
2011-07-03 05:07:54 +08:00
|
|
|
|
2011-06-17 09:22:15 +08:00
|
|
|
switch (m_bytes[0]) {
|
2011-07-03 07:21:06 +08:00
|
|
|
case '+': // Look for ack
|
|
|
|
case '-': // Look for cancel
|
|
|
|
case '\x03': // ^C to halt target
|
|
|
|
content_length = total_length = 1; // The command is one byte long...
|
|
|
|
break;
|
2016-09-07 04:57:50 +08:00
|
|
|
|
2011-07-03 07:21:06 +08:00
|
|
|
case '%': // Async notify packet
|
2015-05-27 22:12:34 +08:00
|
|
|
isNotifyPacket = true;
|
2016-02-16 12:14:33 +08:00
|
|
|
LLVM_FALLTHROUGH;
|
2016-09-07 04:57:50 +08:00
|
|
|
|
2014-11-04 05:02:54 +08:00
|
|
|
case '$':
|
2011-07-03 07:21:06 +08:00
|
|
|
// Look for a standard gdb packet?
|
2016-09-07 04:57:50 +08:00
|
|
|
{
|
2014-11-04 05:02:54 +08:00
|
|
|
size_t hash_pos = m_bytes.find('#');
|
2011-07-03 07:21:06 +08:00
|
|
|
if (hash_pos != std::string::npos) {
|
2014-11-04 05:02:54 +08:00
|
|
|
if (hash_pos + 2 < m_bytes.size()) {
|
|
|
|
checksum_idx = hash_pos + 1;
|
|
|
|
// Skip the dollar sign
|
|
|
|
content_start = 1;
|
2018-05-01 00:49:04 +08:00
|
|
|
// Don't include the # in the content or the $ in the content
|
|
|
|
// length
|
2014-11-04 05:02:54 +08:00
|
|
|
content_length = hash_pos - 1;
|
2016-09-07 04:57:50 +08:00
|
|
|
|
2011-06-17 09:22:15 +08:00
|
|
|
total_length =
|
2014-11-04 05:02:54 +08:00
|
|
|
hash_pos + 3; // Skip the # and the two hex checksum bytes
|
2016-09-07 04:57:50 +08:00
|
|
|
} else {
|
2014-11-04 05:02:54 +08:00
|
|
|
// Checksum bytes aren't all here yet
|
|
|
|
content_length = std::string::npos;
|
2016-09-07 04:57:50 +08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
break;
|
|
|
|
|
|
|
|
default: {
|
2018-05-01 00:49:04 +08:00
|
|
|
// We have an unexpected byte and we need to flush all bad data that is
|
|
|
|
// in m_bytes, so we need to find the first byte that is a '+' (ACK), '-'
|
|
|
|
// (NACK), \x03 (CTRL+C interrupt), or '$' character (start of packet
|
|
|
|
// header) or of course, the end of the data in m_bytes...
|
2011-07-03 07:21:06 +08:00
|
|
|
const size_t bytes_len = m_bytes.size();
|
2014-11-04 05:02:54 +08:00
|
|
|
bool done = false;
|
2011-07-03 07:21:06 +08:00
|
|
|
uint32_t idx;
|
|
|
|
for (idx = 1; !done && idx < bytes_len; ++idx) {
|
|
|
|
switch (m_bytes[idx]) {
|
2014-11-04 05:02:54 +08:00
|
|
|
case '+':
|
|
|
|
case '-':
|
|
|
|
case '\x03':
|
2015-05-13 17:18:18 +08:00
|
|
|
case '%':
|
2014-11-04 05:02:54 +08:00
|
|
|
case '$':
|
2011-07-03 07:21:06 +08:00
|
|
|
done = true;
|
2016-09-07 04:57:50 +08:00
|
|
|
break;
|
|
|
|
|
|
|
|
default:
|
|
|
|
break;
|
2011-07-03 05:07:54 +08:00
|
|
|
}
|
2016-09-07 04:57:50 +08:00
|
|
|
}
|
2019-07-25 01:56:10 +08:00
|
|
|
LLDB_LOGF(log, "GDBRemoteCommunication::%s tossing %u junk bytes: '%.*s'",
|
|
|
|
__FUNCTION__, idx - 1, idx - 1, m_bytes.c_str());
|
2014-11-04 05:02:54 +08:00
|
|
|
m_bytes.erase(0, idx - 1);
|
2016-09-07 04:57:50 +08:00
|
|
|
} break;
|
2011-07-03 05:07:54 +08:00
|
|
|
}
|
2010-06-09 00:52:24 +08:00
|
|
|
|
2015-05-27 22:12:34 +08:00
|
|
|
if (content_length == std::string::npos) {
|
|
|
|
packet.Clear();
|
2010-06-09 00:52:24 +08:00
|
|
|
return GDBRemoteCommunication::PacketType::Invalid;
|
|
|
|
} else if (total_length > 0) {
|
2016-09-07 04:57:50 +08:00
|
|
|
|
2010-06-09 00:52:24 +08:00
|
|
|
// We have a valid packet...
|
2011-06-17 09:22:15 +08:00
|
|
|
assert(content_length <= m_bytes.size());
|
2010-06-09 00:52:24 +08:00
|
|
|
assert(total_length <= m_bytes.size());
|
2011-06-17 09:22:15 +08:00
|
|
|
assert(content_length <= total_length);
|
2010-06-09 00:52:24 +08:00
|
|
|
size_t content_end = content_start + content_length;
|
2016-09-07 04:57:50 +08:00
|
|
|
|
2010-06-09 00:52:24 +08:00
|
|
|
bool success = true;
|
2016-09-07 04:57:50 +08:00
|
|
|
if (log) {
|
2010-06-09 00:52:24 +08:00
|
|
|
// If logging was just enabled and we have history, then dump out what
|
|
|
|
// we have to the log so we get the historical context. The Dump() call
|
2018-05-01 00:49:04 +08:00
|
|
|
// that logs all of the packet will set a boolean so that we don't dump
|
|
|
|
// this more than once
|
Add a new wart, I mean feature, on to gdb-remote protocol: compression.
For some communication channels, sending large packets can be very
slow. In those cases, it may be faster to compress the contents of
the packet on the target device and decompress it on the debug host
system. For instance, communicating with a device using something
like Bluetooth may be an environment where this tradeoff is a good one.
This patch adds a new field to the response to the "qSupported" packet
(which returns a "qXfer:features:" response) -- SupportedCompressions
and DefaultCompressionMinSize. These tell you what the remote
stub can support.
lldb, if it wants to enable compression and can handle one of those
algorithms, it can send a QEnableCompression packet specifying the
algorithm and optionally the minimum packet size to use compression
on. lldb may have better knowledge about the best tradeoff for
a given communication channel.
I added support to debugserver an lldb to use the zlib APIs
(if -DHAVE_LIBZ=1 is in CFLAGS and -lz is in LDFLAGS) and the
libcompression APIs on Mac OS X 10.11 and later
(if -DHAVE_LIBCOMPRESSION=1). libz "zlib-deflate" compression.
libcompression can support deflate, lz4, lzma, and a proprietary
lzfse algorithm. libcompression has been hand-tuned for Apple
hardware so it should be preferred if available.
debugserver currently only adds the SupportedCompressions when
it is being run on an Apple watch (TARGET_OS_WATCH). Comment
that #if out from RNBRemote.cpp if you want to enable it to
see how it works. I haven't tested this on a native system
configuration but surely it will be slower to compress & decompress
the packets in a same-system debug session.
I haven't had a chance to add support for this to
GDBRemoteCommunciationServer.cpp yet.
<rdar://problem/21090180>
llvm-svn: 240066
2015-06-19 05:46:06 +08:00
|
|
|
if (!m_history.DidDumpToLog())
|
2013-03-28 07:08:40 +08:00
|
|
|
m_history.Dump(log);
|
2016-09-07 04:57:50 +08:00
|
|
|
|
Add a new wart, I mean feature, on to gdb-remote protocol: compression.
For some communication channels, sending large packets can be very
slow. In those cases, it may be faster to compress the contents of
the packet on the target device and decompress it on the debug host
system. For instance, communicating with a device using something
like Bluetooth may be an environment where this tradeoff is a good one.
This patch adds a new field to the response to the "qSupported" packet
(which returns a "qXfer:features:" response) -- SupportedCompressions
and DefaultCompressionMinSize. These tell you what the remote
stub can support.
lldb, if it wants to enable compression and can handle one of those
algorithms, it can send a QEnableCompression packet specifying the
algorithm and optionally the minimum packet size to use compression
on. lldb may have better knowledge about the best tradeoff for
a given communication channel.
I added support to debugserver an lldb to use the zlib APIs
(if -DHAVE_LIBZ=1 is in CFLAGS and -lz is in LDFLAGS) and the
libcompression APIs on Mac OS X 10.11 and later
(if -DHAVE_LIBCOMPRESSION=1). libz "zlib-deflate" compression.
libcompression can support deflate, lz4, lzma, and a proprietary
lzfse algorithm. libcompression has been hand-tuned for Apple
hardware so it should be preferred if available.
debugserver currently only adds the SupportedCompressions when
it is being run on an Apple watch (TARGET_OS_WATCH). Comment
that #if out from RNBRemote.cpp if you want to enable it to
see how it works. I haven't tested this on a native system
configuration but surely it will be slower to compress & decompress
the packets in a same-system debug session.
I haven't had a chance to add support for this to
GDBRemoteCommunciationServer.cpp yet.
<rdar://problem/21090180>
llvm-svn: 240066
2015-06-19 05:46:06 +08:00
|
|
|
bool binary = false;
|
2018-05-01 00:49:04 +08:00
|
|
|
// Only detect binary for packets that start with a '$' and have a
|
|
|
|
// '#CC' checksum
|
Add a new wart, I mean feature, on to gdb-remote protocol: compression.
For some communication channels, sending large packets can be very
slow. In those cases, it may be faster to compress the contents of
the packet on the target device and decompress it on the debug host
system. For instance, communicating with a device using something
like Bluetooth may be an environment where this tradeoff is a good one.
This patch adds a new field to the response to the "qSupported" packet
(which returns a "qXfer:features:" response) -- SupportedCompressions
and DefaultCompressionMinSize. These tell you what the remote
stub can support.
lldb, if it wants to enable compression and can handle one of those
algorithms, it can send a QEnableCompression packet specifying the
algorithm and optionally the minimum packet size to use compression
on. lldb may have better knowledge about the best tradeoff for
a given communication channel.
I added support to debugserver an lldb to use the zlib APIs
(if -DHAVE_LIBZ=1 is in CFLAGS and -lz is in LDFLAGS) and the
libcompression APIs on Mac OS X 10.11 and later
(if -DHAVE_LIBCOMPRESSION=1). libz "zlib-deflate" compression.
libcompression can support deflate, lz4, lzma, and a proprietary
lzfse algorithm. libcompression has been hand-tuned for Apple
hardware so it should be preferred if available.
debugserver currently only adds the SupportedCompressions when
it is being run on an Apple watch (TARGET_OS_WATCH). Comment
that #if out from RNBRemote.cpp if you want to enable it to
see how it works. I haven't tested this on a native system
configuration but surely it will be slower to compress & decompress
the packets in a same-system debug session.
I haven't had a chance to add support for this to
GDBRemoteCommunciationServer.cpp yet.
<rdar://problem/21090180>
llvm-svn: 240066
2015-06-19 05:46:06 +08:00
|
|
|
if (m_bytes[0] == '$' && total_length > 4) {
|
|
|
|
for (size_t i = 0; !binary && i < total_length; ++i) {
|
2017-08-19 06:57:59 +08:00
|
|
|
unsigned char c = m_bytes[i];
|
|
|
|
if (isprint(c) == 0 && isspace(c) == 0) {
|
Add a new wart, I mean feature, on to gdb-remote protocol: compression.
For some communication channels, sending large packets can be very
slow. In those cases, it may be faster to compress the contents of
the packet on the target device and decompress it on the debug host
system. For instance, communicating with a device using something
like Bluetooth may be an environment where this tradeoff is a good one.
This patch adds a new field to the response to the "qSupported" packet
(which returns a "qXfer:features:" response) -- SupportedCompressions
and DefaultCompressionMinSize. These tell you what the remote
stub can support.
lldb, if it wants to enable compression and can handle one of those
algorithms, it can send a QEnableCompression packet specifying the
algorithm and optionally the minimum packet size to use compression
on. lldb may have better knowledge about the best tradeoff for
a given communication channel.
I added support to debugserver an lldb to use the zlib APIs
(if -DHAVE_LIBZ=1 is in CFLAGS and -lz is in LDFLAGS) and the
libcompression APIs on Mac OS X 10.11 and later
(if -DHAVE_LIBCOMPRESSION=1). libz "zlib-deflate" compression.
libcompression can support deflate, lz4, lzma, and a proprietary
lzfse algorithm. libcompression has been hand-tuned for Apple
hardware so it should be preferred if available.
debugserver currently only adds the SupportedCompressions when
it is being run on an Apple watch (TARGET_OS_WATCH). Comment
that #if out from RNBRemote.cpp if you want to enable it to
see how it works. I haven't tested this on a native system
configuration but surely it will be slower to compress & decompress
the packets in a same-system debug session.
I haven't had a chance to add support for this to
GDBRemoteCommunciationServer.cpp yet.
<rdar://problem/21090180>
llvm-svn: 240066
2015-06-19 05:46:06 +08:00
|
|
|
binary = true;
|
|
|
|
}
|
2016-09-07 04:57:50 +08:00
|
|
|
}
|
Add a new wart, I mean feature, on to gdb-remote protocol: compression.
For some communication channels, sending large packets can be very
slow. In those cases, it may be faster to compress the contents of
the packet on the target device and decompress it on the debug host
system. For instance, communicating with a device using something
like Bluetooth may be an environment where this tradeoff is a good one.
This patch adds a new field to the response to the "qSupported" packet
(which returns a "qXfer:features:" response) -- SupportedCompressions
and DefaultCompressionMinSize. These tell you what the remote
stub can support.
lldb, if it wants to enable compression and can handle one of those
algorithms, it can send a QEnableCompression packet specifying the
algorithm and optionally the minimum packet size to use compression
on. lldb may have better knowledge about the best tradeoff for
a given communication channel.
I added support to debugserver an lldb to use the zlib APIs
(if -DHAVE_LIBZ=1 is in CFLAGS and -lz is in LDFLAGS) and the
libcompression APIs on Mac OS X 10.11 and later
(if -DHAVE_LIBCOMPRESSION=1). libz "zlib-deflate" compression.
libcompression can support deflate, lz4, lzma, and a proprietary
lzfse algorithm. libcompression has been hand-tuned for Apple
hardware so it should be preferred if available.
debugserver currently only adds the SupportedCompressions when
it is being run on an Apple watch (TARGET_OS_WATCH). Comment
that #if out from RNBRemote.cpp if you want to enable it to
see how it works. I haven't tested this on a native system
configuration but surely it will be slower to compress & decompress
the packets in a same-system debug session.
I haven't had a chance to add support for this to
GDBRemoteCommunciationServer.cpp yet.
<rdar://problem/21090180>
llvm-svn: 240066
2015-06-19 05:46:06 +08:00
|
|
|
}
|
2014-11-04 05:02:54 +08:00
|
|
|
if (binary) {
|
|
|
|
StreamString strm;
|
|
|
|
// Packet header...
|
|
|
|
if (CompressionIsEnabled())
|
|
|
|
strm.Printf("<%4" PRIu64 ":%" PRIu64 "> read packet: %c",
|
|
|
|
(uint64_t)original_packet_size, (uint64_t)total_length,
|
|
|
|
m_bytes[0]);
|
|
|
|
else
|
|
|
|
strm.Printf("<%4" PRIu64 "> read packet: %c",
|
|
|
|
(uint64_t)total_length, m_bytes[0]);
|
|
|
|
for (size_t i = content_start; i < content_end; ++i) {
|
|
|
|
// Remove binary escaped bytes when displaying the packet...
|
|
|
|
const char ch = m_bytes[i];
|
|
|
|
if (ch == 0x7d) {
|
2018-05-01 00:49:04 +08:00
|
|
|
// 0x7d is the escape character. The next character is to be
|
|
|
|
// XOR'd with 0x20.
|
2014-11-04 05:02:54 +08:00
|
|
|
const char escapee = m_bytes[++i] ^ 0x20;
|
|
|
|
strm.Printf("%2.2x", escapee);
|
2016-09-07 04:57:50 +08:00
|
|
|
} else {
|
2014-11-04 05:02:54 +08:00
|
|
|
strm.Printf("%2.2x", (uint8_t)ch);
|
|
|
|
}
|
2016-09-07 04:57:50 +08:00
|
|
|
}
|
2014-11-04 05:02:54 +08:00
|
|
|
// Packet footer...
|
|
|
|
strm.Printf("%c%c%c", m_bytes[total_length - 3],
|
|
|
|
m_bytes[total_length - 2], m_bytes[total_length - 1]);
|
2016-11-17 05:15:24 +08:00
|
|
|
log->PutString(strm.GetString());
|
2014-11-04 05:02:54 +08:00
|
|
|
} else {
|
|
|
|
if (CompressionIsEnabled())
|
2019-07-25 01:56:10 +08:00
|
|
|
LLDB_LOGF(log, "<%4" PRIu64 ":%" PRIu64 "> read packet: %.*s",
|
|
|
|
(uint64_t)original_packet_size, (uint64_t)total_length,
|
|
|
|
(int)(total_length), m_bytes.c_str());
|
2016-09-07 04:57:50 +08:00
|
|
|
else
|
2019-07-25 01:56:10 +08:00
|
|
|
LLDB_LOGF(log, "<%4" PRIu64 "> read packet: %.*s",
|
|
|
|
(uint64_t)total_length, (int)(total_length),
|
|
|
|
m_bytes.c_str());
|
2010-06-09 00:52:24 +08:00
|
|
|
}
|
2016-09-07 04:57:50 +08:00
|
|
|
}
|
|
|
|
|
2018-11-14 03:18:16 +08:00
|
|
|
m_history.AddPacket(m_bytes, total_length,
|
2019-09-14 07:14:10 +08:00
|
|
|
GDBRemotePacket::ePacketTypeRecv, total_length);
|
2016-09-07 04:57:50 +08:00
|
|
|
|
2018-05-01 00:49:04 +08:00
|
|
|
// Copy the packet from m_bytes to packet_str expanding the run-length
|
|
|
|
// encoding in the process. Reserve enough byte for the most common case
|
|
|
|
// (no RLE used)
|
2019-08-21 12:55:56 +08:00
|
|
|
std ::string packet_str;
|
2013-08-28 18:31:52 +08:00
|
|
|
packet_str.reserve(m_bytes.length());
|
2011-06-17 09:22:15 +08:00
|
|
|
for (std::string::const_iterator c = m_bytes.begin() + content_start;
|
2015-05-27 22:12:34 +08:00
|
|
|
c != m_bytes.begin() + content_end; ++c) {
|
2013-08-28 18:31:52 +08:00
|
|
|
if (*c == '*') {
|
2018-05-01 00:49:04 +08:00
|
|
|
// '*' indicates RLE. Next character will give us the repeat count
|
|
|
|
// and previous character is what is to be repeated.
|
2015-05-27 22:12:34 +08:00
|
|
|
char char_to_repeat = packet_str.back();
|
|
|
|
// Number of time the previous character is repeated
|
2013-08-28 18:31:52 +08:00
|
|
|
int repeat_count = *++c + 3 - ' ';
|
2018-05-01 00:49:04 +08:00
|
|
|
// We have the char_to_repeat and repeat_count. Now push it in the
|
|
|
|
// packet.
|
2013-08-28 18:31:52 +08:00
|
|
|
for (int i = 0; i < repeat_count; ++i)
|
|
|
|
packet_str.push_back(char_to_repeat);
|
2014-02-25 03:07:29 +08:00
|
|
|
} else if (*c == 0x7d) {
|
2018-05-01 00:49:04 +08:00
|
|
|
// 0x7d is the escape character. The next character is to be XOR'd
|
|
|
|
// with 0x20.
|
2014-02-25 03:07:29 +08:00
|
|
|
char escapee = *++c ^ 0x20;
|
|
|
|
packet_str.push_back(escapee);
|
2016-09-07 04:57:50 +08:00
|
|
|
} else {
|
2013-08-28 18:31:52 +08:00
|
|
|
packet_str.push_back(*c);
|
2010-06-09 00:52:24 +08:00
|
|
|
}
|
2016-09-07 04:57:50 +08:00
|
|
|
}
|
2019-08-21 12:55:56 +08:00
|
|
|
packet = StringExtractorGDBRemote(packet_str);
|
2016-09-07 04:57:50 +08:00
|
|
|
|
2015-09-09 11:24:52 +08:00
|
|
|
if (m_bytes[0] == '$' || m_bytes[0] == '%') {
|
Add a new wart, I mean feature, on to gdb-remote protocol: compression.
For some communication channels, sending large packets can be very
slow. In those cases, it may be faster to compress the contents of
the packet on the target device and decompress it on the debug host
system. For instance, communicating with a device using something
like Bluetooth may be an environment where this tradeoff is a good one.
This patch adds a new field to the response to the "qSupported" packet
(which returns a "qXfer:features:" response) -- SupportedCompressions
and DefaultCompressionMinSize. These tell you what the remote
stub can support.
lldb, if it wants to enable compression and can handle one of those
algorithms, it can send a QEnableCompression packet specifying the
algorithm and optionally the minimum packet size to use compression
on. lldb may have better knowledge about the best tradeoff for
a given communication channel.
I added support to debugserver an lldb to use the zlib APIs
(if -DHAVE_LIBZ=1 is in CFLAGS and -lz is in LDFLAGS) and the
libcompression APIs on Mac OS X 10.11 and later
(if -DHAVE_LIBCOMPRESSION=1). libz "zlib-deflate" compression.
libcompression can support deflate, lz4, lzma, and a proprietary
lzfse algorithm. libcompression has been hand-tuned for Apple
hardware so it should be preferred if available.
debugserver currently only adds the SupportedCompressions when
it is being run on an Apple watch (TARGET_OS_WATCH). Comment
that #if out from RNBRemote.cpp if you want to enable it to
see how it works. I haven't tested this on a native system
configuration but surely it will be slower to compress & decompress
the packets in a same-system debug session.
I haven't had a chance to add support for this to
GDBRemoteCommunciationServer.cpp yet.
<rdar://problem/21090180>
llvm-svn: 240066
2015-06-19 05:46:06 +08:00
|
|
|
assert(checksum_idx < m_bytes.size());
|
|
|
|
if (::isxdigit(m_bytes[checksum_idx + 0]) ||
|
2014-06-21 04:41:07 +08:00
|
|
|
::isxdigit(m_bytes[checksum_idx + 1])) {
|
Add a new wart, I mean feature, on to gdb-remote protocol: compression.
For some communication channels, sending large packets can be very
slow. In those cases, it may be faster to compress the contents of
the packet on the target device and decompress it on the debug host
system. For instance, communicating with a device using something
like Bluetooth may be an environment where this tradeoff is a good one.
This patch adds a new field to the response to the "qSupported" packet
(which returns a "qXfer:features:" response) -- SupportedCompressions
and DefaultCompressionMinSize. These tell you what the remote
stub can support.
lldb, if it wants to enable compression and can handle one of those
algorithms, it can send a QEnableCompression packet specifying the
algorithm and optionally the minimum packet size to use compression
on. lldb may have better knowledge about the best tradeoff for
a given communication channel.
I added support to debugserver an lldb to use the zlib APIs
(if -DHAVE_LIBZ=1 is in CFLAGS and -lz is in LDFLAGS) and the
libcompression APIs on Mac OS X 10.11 and later
(if -DHAVE_LIBCOMPRESSION=1). libz "zlib-deflate" compression.
libcompression can support deflate, lz4, lzma, and a proprietary
lzfse algorithm. libcompression has been hand-tuned for Apple
hardware so it should be preferred if available.
debugserver currently only adds the SupportedCompressions when
it is being run on an Apple watch (TARGET_OS_WATCH). Comment
that #if out from RNBRemote.cpp if you want to enable it to
see how it works. I haven't tested this on a native system
configuration but surely it will be slower to compress & decompress
the packets in a same-system debug session.
I haven't had a chance to add support for this to
GDBRemoteCommunciationServer.cpp yet.
<rdar://problem/21090180>
llvm-svn: 240066
2015-06-19 05:46:06 +08:00
|
|
|
if (GetSendAcks()) {
|
2012-04-10 06:46:21 +08:00
|
|
|
const char *packet_checksum_cstr = &m_bytes[checksum_idx];
|
[lldb] NFC modernize codebase with modernize-use-nullptr
Summary:
NFC = [[ https://llvm.org/docs/Lexicon.html#nfc | Non functional change ]]
This commit is the result of modernizing the LLDB codebase by using
`nullptr` instread of `0` or `NULL`. See
https://clang.llvm.org/extra/clang-tidy/checks/modernize-use-nullptr.html
for more information.
This is the command I ran and I to fix and format the code base:
```
run-clang-tidy.py \
-header-filter='.*' \
-checks='-*,modernize-use-nullptr' \
-fix ~/dev/llvm-project/lldb/.* \
-format \
-style LLVM \
-p ~/llvm-builds/debug-ninja-gcc
```
NOTE: There were also changes to `llvm/utils/unittest` but I did not
include them because I felt that maybe this library shall be updated in
isolation somehow.
NOTE: I know this is a rather large commit but it is a nobrainer in most
parts.
Reviewers: martong, espindola, shafik, #lldb, JDevlieghere
Reviewed By: JDevlieghere
Subscribers: arsenm, jvesely, nhaehnle, hiraditya, JDevlieghere, teemperor, rnkovacs, emaste, kubamracek, nemanjai, ki.stfu, javed.absar, arichardson, kbarton, jrtc27, MaskRay, atanasyan, dexonsmith, arphaman, jfb, jsji, jdoerfert, lldb-commits, llvm-commits
Tags: #lldb, #llvm
Differential Revision: https://reviews.llvm.org/D61847
llvm-svn: 361484
2019-05-23 19:14:47 +08:00
|
|
|
char packet_checksum = strtol(packet_checksum_cstr, nullptr, 16);
|
2018-03-28 18:19:10 +08:00
|
|
|
char actual_checksum = CalculcateChecksum(
|
|
|
|
llvm::StringRef(m_bytes).slice(content_start, content_end));
|
2013-08-28 18:31:52 +08:00
|
|
|
success = packet_checksum == actual_checksum;
|
2014-02-25 03:07:29 +08:00
|
|
|
if (!success) {
|
2019-07-25 01:56:10 +08:00
|
|
|
LLDB_LOGF(log,
|
|
|
|
"error: checksum mismatch: %.*s expected 0x%2.2x, "
|
|
|
|
"got 0x%2.2x",
|
|
|
|
(int)(total_length), m_bytes.c_str(),
|
|
|
|
(uint8_t)packet_checksum, (uint8_t)actual_checksum);
|
2011-06-17 09:22:15 +08:00
|
|
|
}
|
|
|
|
// Send the ack or nack if needed
|
2015-05-27 22:12:34 +08:00
|
|
|
if (!success)
|
|
|
|
SendNack();
|
|
|
|
else
|
|
|
|
SendAck();
|
2016-09-07 04:57:50 +08:00
|
|
|
}
|
|
|
|
} else {
|
2011-06-17 09:22:15 +08:00
|
|
|
success = false;
|
2019-07-25 01:56:10 +08:00
|
|
|
LLDB_LOGF(log, "error: invalid checksum in packet: '%s'\n",
|
|
|
|
m_bytes.c_str());
|
2010-06-09 00:52:24 +08:00
|
|
|
}
|
2016-09-07 04:57:50 +08:00
|
|
|
}
|
|
|
|
|
2011-06-17 09:22:15 +08:00
|
|
|
m_bytes.erase(0, total_length);
|
|
|
|
packet.SetFilePos(0);
|
2016-09-07 04:57:50 +08:00
|
|
|
|
2015-06-16 23:50:18 +08:00
|
|
|
if (isNotifyPacket)
|
2015-05-27 22:12:34 +08:00
|
|
|
return GDBRemoteCommunication::PacketType::Notify;
|
2016-09-07 04:57:50 +08:00
|
|
|
else
|
Add a new wart, I mean feature, on to gdb-remote protocol: compression.
For some communication channels, sending large packets can be very
slow. In those cases, it may be faster to compress the contents of
the packet on the target device and decompress it on the debug host
system. For instance, communicating with a device using something
like Bluetooth may be an environment where this tradeoff is a good one.
This patch adds a new field to the response to the "qSupported" packet
(which returns a "qXfer:features:" response) -- SupportedCompressions
and DefaultCompressionMinSize. These tell you what the remote
stub can support.
lldb, if it wants to enable compression and can handle one of those
algorithms, it can send a QEnableCompression packet specifying the
algorithm and optionally the minimum packet size to use compression
on. lldb may have better knowledge about the best tradeoff for
a given communication channel.
I added support to debugserver an lldb to use the zlib APIs
(if -DHAVE_LIBZ=1 is in CFLAGS and -lz is in LDFLAGS) and the
libcompression APIs on Mac OS X 10.11 and later
(if -DHAVE_LIBCOMPRESSION=1). libz "zlib-deflate" compression.
libcompression can support deflate, lz4, lzma, and a proprietary
lzfse algorithm. libcompression has been hand-tuned for Apple
hardware so it should be preferred if available.
debugserver currently only adds the SupportedCompressions when
it is being run on an Apple watch (TARGET_OS_WATCH). Comment
that #if out from RNBRemote.cpp if you want to enable it to
see how it works. I haven't tested this on a native system
configuration but surely it will be slower to compress & decompress
the packets in a same-system debug session.
I haven't had a chance to add support for this to
GDBRemoteCommunciationServer.cpp yet.
<rdar://problem/21090180>
llvm-svn: 240066
2015-06-19 05:46:06 +08:00
|
|
|
return GDBRemoteCommunication::PacketType::Standard;
|
2010-06-09 00:52:24 +08:00
|
|
|
}
|
2016-09-07 04:57:50 +08:00
|
|
|
}
|
2011-06-17 09:22:15 +08:00
|
|
|
packet.Clear();
|
2015-05-27 22:12:34 +08:00
|
|
|
return GDBRemoteCommunication::PacketType::Invalid;
|
2010-06-09 00:52:24 +08:00
|
|
|
}
|
|
|
|
|
2017-05-12 12:51:55 +08:00
|
|
|
// Spawn a thread that opens a "listen://" connection on \a hostname:\a port
// (hostname may be null/empty to listen on all interfaces) and waits for an
// inbound connection. Fails if a listen thread is already running or if the
// thread cannot be launched.
Status GDBRemoteCommunication::StartListenThread(const char *hostname,
                                                 uint16_t port) {
  // Refuse to start a second listener while one is still alive.
  if (m_listen_thread.IsJoinable())
    return Status("listen thread already running");

  // Build the listen URL; the host portion is optional.
  char listen_url[512];
  const bool have_hostname = hostname != nullptr && hostname[0] != '\0';
  if (have_hostname)
    snprintf(listen_url, sizeof(listen_url), "listen://%s:%i", hostname, port);
  else
    snprintf(listen_url, sizeof(listen_url), "listen://%i", port);
  m_listen_url = listen_url;

  SetConnection(new ConnectionFileDescriptor());

  // Do the accept on a separate thread so the caller can keep working while
  // we wait for the inbound connection.
  llvm::Expected<HostThread> maybe_thread = ThreadLauncher::LaunchThread(
      listen_url, GDBRemoteCommunication::ListenThread, this);
  if (!maybe_thread)
    return Status(maybe_thread.takeError());
  m_listen_thread = *maybe_thread;

  return Status();
}
|
|
|
|
|
|
|
|
bool GDBRemoteCommunication::JoinListenThread() {
|
2014-09-24 02:32:09 +08:00
|
|
|
if (m_listen_thread.IsJoinable())
|
2014-09-10 04:54:56 +08:00
|
|
|
m_listen_thread.Join(nullptr);
|
2013-12-06 06:58:22 +08:00
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
|
|
|
// Thread entry point launched by StartListenThread(). Performs the blocking
// listen/accept (ConnectionFileDescriptor::Connect() on the stored
// "listen://..." URL) so the launching thread can continue without blocking.
//
// \param arg  The GDBRemoteCommunication that launched this thread.
lldb::thread_result_t
GDBRemoteCommunication::ListenThread(lldb::thread_arg_t arg) {
  GDBRemoteCommunication *comm = (GDBRemoteCommunication *)arg;
  Status error;
  ConnectionFileDescriptor *connection =
      (ConnectionFileDescriptor *)comm->GetConnection();

  if (connection) {
    // Do the listen on another thread so we can continue on...
    // If the accept fails, clear the connection so the owner can tell that
    // no client ever attached.
    if (connection->Connect(comm->m_listen_url.c_str(), &error) !=
        eConnectionStatusSuccess)
      comm->SetConnection(nullptr);
  }
  return {};
}
|
|
|
|
|
2017-05-12 12:51:55 +08:00
|
|
|
// Locate and launch a gdb-remote stub (debugserver on Apple platforms,
// "lldb-server gdbserver" elsewhere) and arrange for it to communicate back
// with this process.
//
// \param url            If non-null, passed through to the stub as its
//                       connection URL. If null, we listen on 127.0.0.1:0
//                       ourselves and tell the stub to reverse-connect.
// \param platform       Optional platform used as a fallback when locating
//                       the stub executable.
// \param launch_info    In/out launch description: receives the executable,
//                       arguments, environment and stdio/fd file actions;
//                       after a successful launch it carries the stub's PID.
// \param port           In/out. On input, the requested port (0 = any); on
//                       output, the port the stub actually listens on (when
//                       determinable). May be null.
// \param inferior_args  Optional arguments appended after "--" for the
//                       inferior the stub should launch.
// \param pass_comm_fd   If >= 0, an inherited file descriptor used as the
//                       communication channel (stub gets "--fd=N"); -1 means
//                       use the pipe/listen handshake instead.
// \return               Status of locating + launching the stub. Success
//                       does not guarantee the stub stays alive.
Status GDBRemoteCommunication::StartDebugserverProcess(
    const char *url, Platform *platform, ProcessLaunchInfo &launch_info,
    uint16_t *port, const Args *inferior_args, int pass_comm_fd) {
  Log *log(ProcessGDBRemoteLog::GetLogIfAllCategoriesSet(GDBR_LOG_PROCESS));
  LLDB_LOGF(log, "GDBRemoteCommunication::%s(url=%s, port=%" PRIu16 ")",
            __FUNCTION__, url ? url : "<empty>", port ? *port : uint16_t(0));

  Status error;
  // If we locate debugserver, keep that located version around
  static FileSpec g_debugserver_file_spec;

  char debugserver_path[PATH_MAX];
  FileSpec &debugserver_file_spec = launch_info.GetExecutableFile();

  Environment host_env = Host::GetEnvironment();

  // Always check to see if we have an environment override for the path to the
  // debugserver to use and use it if we do.
  std::string env_debugserver_path = host_env.lookup("LLDB_DEBUGSERVER_PATH");
  if (!env_debugserver_path.empty()) {
    debugserver_file_spec.SetFile(env_debugserver_path,
                                  FileSpec::Style::native);
    LLDB_LOGF(log,
              "GDBRemoteCommunication::%s() gdb-remote stub exe path set "
              "from environment variable: %s",
              __FUNCTION__, env_debugserver_path.c_str());
  } else
    debugserver_file_spec = g_debugserver_file_spec;
  bool debugserver_exists =
      FileSystem::Instance().Exists(debugserver_file_spec);
  if (!debugserver_exists) {
    // The debugserver binary is in the LLDB.framework/Resources directory.
    debugserver_file_spec = HostInfo::GetSupportExeDir();
    if (debugserver_file_spec) {
      debugserver_file_spec.AppendPathComponent(DEBUGSERVER_BASENAME);
      debugserver_exists = FileSystem::Instance().Exists(debugserver_file_spec);
      if (debugserver_exists) {
        LLDB_LOGF(log,
                  "GDBRemoteCommunication::%s() found gdb-remote stub exe '%s'",
                  __FUNCTION__, debugserver_file_spec.GetPath().c_str());

        g_debugserver_file_spec = debugserver_file_spec;
      } else {
        // Not in the support-exe dir; fall back to asking the platform.
        if (platform)
          debugserver_file_spec =
              platform->LocateExecutable(DEBUGSERVER_BASENAME);
        else
          debugserver_file_spec.Clear();
        if (debugserver_file_spec) {
          // Platform::LocateExecutable() wouldn't return a path if it doesn't
          // exist
          debugserver_exists = true;
        } else {
          LLDB_LOGF(log,
                    "GDBRemoteCommunication::%s() could not find "
                    "gdb-remote stub exe '%s'",
                    __FUNCTION__, debugserver_file_spec.GetPath().c_str());
        }
        // Don't cache the platform specific GDB server binary as it could
        // change from platform to platform
        g_debugserver_file_spec.Clear();
      }
    }
  }

  if (debugserver_exists) {
    debugserver_file_spec.GetPath(debugserver_path, sizeof(debugserver_path));

    // Build the stub command line from scratch.
    Args &debugserver_args = launch_info.GetArguments();
    debugserver_args.Clear();

    // Start args with "debugserver /file/path -r --"
    debugserver_args.AppendArgument(llvm::StringRef(debugserver_path));

#if !defined(__APPLE__)
    // First argument to lldb-server must be mode in which to run.
    debugserver_args.AppendArgument(llvm::StringRef("gdbserver"));
#endif

    // If a url is supplied then use it
    if (url)
      debugserver_args.AppendArgument(llvm::StringRef(url));

    if (pass_comm_fd >= 0) {
      StreamString fd_arg;
      fd_arg.Printf("--fd=%i", pass_comm_fd);
      debugserver_args.AppendArgument(fd_arg.GetString());
      // Send "pass_comm_fd" down to the inferior so it can use it to
      // communicate back with this process
      launch_info.AppendDuplicateFileAction(pass_comm_fd, pass_comm_fd);
    }

    // use native registers, not the GDB registers
    debugserver_args.AppendArgument(llvm::StringRef("--native-regs"));

    if (launch_info.GetLaunchInSeparateProcessGroup()) {
      debugserver_args.AppendArgument(llvm::StringRef("--setsid"));
    }

    llvm::SmallString<128> named_pipe_path;
    // socket_pipe is used by debug server to communicate back either
    // TCP port or domain socket name which it listens on.
    // The second purpose of the pipe to serve as a synchronization point -
    // once data is written to the pipe, debug server is up and running.
    Pipe socket_pipe;

    // port is null when debug server should listen on domain socket - we're
    // not interested in port value but rather waiting for debug server to
    // become available.
    if (pass_comm_fd == -1) {
      if (url) {
// Create a temporary file to get the stdout/stderr and redirect the output of
// the command into this file. We will later read this file if all goes well
// and fill the data into "command_output_ptr"
#if defined(__APPLE__)
        // Binding to port zero, we need to figure out what port it ends up
        // using using a named pipe...
        error = socket_pipe.CreateWithUniqueName("debugserver-named-pipe",
                                                 false, named_pipe_path);
        if (error.Fail()) {
          LLDB_LOGF(log,
                    "GDBRemoteCommunication::%s() "
                    "named pipe creation failed: %s",
                    __FUNCTION__, error.AsCString());
          return error;
        }
        debugserver_args.AppendArgument(llvm::StringRef("--named-pipe"));
        debugserver_args.AppendArgument(named_pipe_path);
#else
        // Binding to port zero, we need to figure out what port it ends up
        // using using an unnamed pipe...
        error = socket_pipe.CreateNew(true);
        if (error.Fail()) {
          LLDB_LOGF(log,
                    "GDBRemoteCommunication::%s() "
                    "unnamed pipe creation failed: %s",
                    __FUNCTION__, error.AsCString());
          return error;
        }
        // Pass the write end of the pipe to the stub; we keep the read end to
        // receive the port value it binds to.
        pipe_t write = socket_pipe.GetWritePipe();
        debugserver_args.AppendArgument(llvm::StringRef("--pipe"));
        debugserver_args.AppendArgument(llvm::to_string(write));
        launch_info.AppendCloseFileAction(socket_pipe.GetReadFileDescriptor());
#endif
      } else {
        // No host and port given, so lets listen on our end and make the
        // debugserver connect to us..
        error = StartListenThread("127.0.0.1", 0);
        if (error.Fail()) {
          LLDB_LOGF(log,
                    "GDBRemoteCommunication::%s() unable to start listen "
                    "thread: %s",
                    __FUNCTION__, error.AsCString());
          return error;
        }

        ConnectionFileDescriptor *connection =
            (ConnectionFileDescriptor *)GetConnection();
        // Wait for 10 seconds to resolve the bound port
        uint16_t port_ = connection->GetListeningPort(std::chrono::seconds(10));
        if (port_ > 0) {
          char port_cstr[32];
          snprintf(port_cstr, sizeof(port_cstr), "127.0.0.1:%i", port_);
          // Send the host and port down that debugserver and specify an option
          // so that it connects back to the port we are listening to in this
          // process
          debugserver_args.AppendArgument(llvm::StringRef("--reverse-connect"));
          debugserver_args.AppendArgument(llvm::StringRef(port_cstr));
          if (port)
            *port = port_;
        } else {
          error.SetErrorString("failed to bind to port 0 on 127.0.0.1");
          LLDB_LOGF(log, "GDBRemoteCommunication::%s() failed: %s",
                    __FUNCTION__, error.AsCString());
          return error;
        }
      }
    }
    // Forward any requested stub logging configuration from our environment.
    std::string env_debugserver_log_file =
        host_env.lookup("LLDB_DEBUGSERVER_LOG_FILE");
    if (!env_debugserver_log_file.empty()) {
      debugserver_args.AppendArgument(
          llvm::formatv("--log-file={0}", env_debugserver_log_file).str());
    }

#if defined(__APPLE__)
    const char *env_debugserver_log_flags =
        getenv("LLDB_DEBUGSERVER_LOG_FLAGS");
    if (env_debugserver_log_flags) {
      debugserver_args.AppendArgument(
          llvm::formatv("--log-flags={0}", env_debugserver_log_flags).str());
    }
#else
    std::string env_debugserver_log_channels =
        host_env.lookup("LLDB_SERVER_LOG_CHANNELS");
    if (!env_debugserver_log_channels.empty()) {
      debugserver_args.AppendArgument(
          llvm::formatv("--log-channels={0}", env_debugserver_log_channels)
              .str());
    }
#endif

    // Add additional args, starting with LLDB_DEBUGSERVER_EXTRA_ARG_1 until an
    // env var doesn't come back.
    uint32_t env_var_index = 1;
    bool has_env_var;
    do {
      char env_var_name[64];
      snprintf(env_var_name, sizeof(env_var_name),
               "LLDB_DEBUGSERVER_EXTRA_ARG_%" PRIu32, env_var_index++);
      std::string extra_arg = host_env.lookup(env_var_name);
      has_env_var = !extra_arg.empty();

      if (has_env_var) {
        debugserver_args.AppendArgument(llvm::StringRef(extra_arg));
        LLDB_LOGF(log,
                  "GDBRemoteCommunication::%s adding env var %s contents "
                  "to stub command line (%s)",
                  __FUNCTION__, env_var_name, extra_arg.c_str());
      }
    } while (has_env_var);

    // Everything after "--" is the inferior's own command line.
    if (inferior_args && inferior_args->GetArgumentCount() > 0) {
      debugserver_args.AppendArgument(llvm::StringRef("--"));
      debugserver_args.AppendArguments(*inferior_args);
    }

    // Copy the current environment to the gdbserver/debugserver instance
    launch_info.GetEnvironment() = host_env;

    // Close STDIN, STDOUT and STDERR.
    launch_info.AppendCloseFileAction(STDIN_FILENO);
    launch_info.AppendCloseFileAction(STDOUT_FILENO);
    launch_info.AppendCloseFileAction(STDERR_FILENO);

    // Redirect STDIN, STDOUT and STDERR to "/dev/null".
    launch_info.AppendSuppressFileAction(STDIN_FILENO, true, false);
    launch_info.AppendSuppressFileAction(STDOUT_FILENO, false, true);
    launch_info.AppendSuppressFileAction(STDERR_FILENO, false, true);

    if (log) {
      StreamString string_stream;
      Platform *const platform = nullptr;
      launch_info.Dump(string_stream, platform);
      LLDB_LOGF(log, "launch info for gdb-remote stub:\n%s",
                string_stream.GetData());
    }
    error = Host::LaunchProcess(launch_info);

    // When using the pipe handshake (pass_comm_fd == -1), read back the port
    // the stub bound to and clean the pipe up afterwards.
    if (error.Success() &&
        (launch_info.GetProcessID() != LLDB_INVALID_PROCESS_ID) &&
        pass_comm_fd == -1) {
      if (named_pipe_path.size() > 0) {
        error = socket_pipe.OpenAsReader(named_pipe_path, false);
        if (error.Fail())
          LLDB_LOGF(log,
                    "GDBRemoteCommunication::%s() "
                    "failed to open named pipe %s for reading: %s",
                    __FUNCTION__, named_pipe_path.c_str(), error.AsCString());
      }

      if (socket_pipe.CanWrite())
        socket_pipe.CloseWriteFileDescriptor();
      if (socket_pipe.CanRead()) {
        char port_cstr[PATH_MAX] = {0};
        port_cstr[0] = '\0';
        size_t num_bytes = sizeof(port_cstr);
        // Read port from pipe with 10 second timeout.
        error = socket_pipe.ReadWithTimeout(
            port_cstr, num_bytes, std::chrono::seconds{10}, num_bytes);
        if (error.Success() && (port != nullptr)) {
          assert(num_bytes > 0 && port_cstr[num_bytes - 1] == '\0');
          uint16_t child_port = StringConvert::ToUInt32(port_cstr, 0);
          if (*port == 0 || *port == child_port) {
            *port = child_port;
            LLDB_LOGF(log,
                      "GDBRemoteCommunication::%s() "
                      "debugserver listens %u port",
                      __FUNCTION__, *port);
          } else {
            // The stub bound a different port than the caller asked for; the
            // caller's requested value is left untouched.
            LLDB_LOGF(log,
                      "GDBRemoteCommunication::%s() "
                      "debugserver listening on port "
                      "%d but requested port was %d",
                      __FUNCTION__, (uint32_t)child_port, (uint32_t)(*port));
          }
        } else {
          LLDB_LOGF(log,
                    "GDBRemoteCommunication::%s() "
                    "failed to read a port value from pipe %s: %s",
                    __FUNCTION__, named_pipe_path.c_str(), error.AsCString());
        }
        socket_pipe.Close();
      }

      if (named_pipe_path.size() > 0) {
        const auto err = socket_pipe.Delete(named_pipe_path);
        if (err.Fail()) {
          LLDB_LOGF(log,
                    "GDBRemoteCommunication::%s failed to delete pipe %s: %s",
                    __FUNCTION__, named_pipe_path.c_str(), err.AsCString());
        }
      }

      // Make sure we actually connect with the debugserver...
      JoinListenThread();
    }
  } else {
    error.SetErrorStringWithFormat("unable to locate " DEBUGSERVER_BASENAME);
  }

  if (error.Fail()) {
    LLDB_LOGF(log, "GDBRemoteCommunication::%s() failed: %s", __FUNCTION__,
              error.AsCString());
  }

  return error;
}
|
2015-02-24 18:23:39 +08:00
|
|
|
|
|
|
|
// Dump the recorded packet history to the given stream.
void GDBRemoteCommunication::DumpHistory(Stream &strm) { m_history.Dump(strm); }
|
2016-09-07 04:57:50 +08:00
|
|
|
|
2019-12-11 07:04:02 +08:00
|
|
|
// Install a reproducer packet recorder on the packet history.
// NOTE(review): presumably a null recorder disables recording -- confirm
// with GDBRemoteCommunicationHistory::SetRecorder().
void GDBRemoteCommunication::SetPacketRecorder(
    repro::PacketRecorder *recorder) {
  m_history.SetRecorder(recorder);
}
|
2018-11-14 03:18:16 +08:00
|
|
|
|
|
|
|
// Connect two GDBRemoteCommunication objects to each other over a loopback
// TCP socket: `client` gets the connecting end, `server` gets the accepted
// end. Used to wire up an in-process client/server pair.
//
// \return llvm::Error::success() on success; otherwise the listen, connect,
//         or accept error.
llvm::Error
GDBRemoteCommunication::ConnectLocally(GDBRemoteCommunication &client,
                                       GDBRemoteCommunication &server) {
  const bool child_processes_inherit = false;
  const int backlog = 5;
  // Listen on an OS-chosen free port on the loopback interface.
  TCPSocket listen_socket(true, child_processes_inherit);
  if (llvm::Error error =
          listen_socket.Listen("127.0.0.1:0", backlog).ToError())
    return error;

  // Accept() blocks, so run it asynchronously while this thread performs the
  // matching connect.
  Socket *accept_socket;
  std::future<Status> accept_status = std::async(
      std::launch::async, [&] { return listen_socket.Accept(accept_socket); });

  // Build "connect://127.0.0.1:<port>" for whatever port we were assigned.
  llvm::SmallString<32> remote_addr;
  llvm::raw_svector_ostream(remote_addr)
      << "connect://127.0.0.1:" << listen_socket.GetLocalPortNumber();

  std::unique_ptr<ConnectionFileDescriptor> conn_up(
      new ConnectionFileDescriptor());
  Status status;
  if (conn_up->Connect(remote_addr, &status) != lldb::eConnectionStatusSuccess)
    return llvm::createStringError(llvm::inconvertibleErrorCode(),
                                   "Unable to connect: %s", status.AsCString());

  client.SetConnection(conn_up.release());
  // Only read accept_socket after the async Accept() has succeeded.
  if (llvm::Error error = accept_status.get().ToError())
    return error;

  server.SetConnection(new ConnectionFileDescriptor(accept_socket));
  return llvm::Error::success();
}
|
|
|
|
|
2015-02-24 18:23:39 +08:00
|
|
|
// RAII guard that raises the packet timeout for its lifetime. The timeout is
// only changed if the requested value is larger than the current one; the
// previous value is saved so the destructor can restore it.
GDBRemoteCommunication::ScopedTimeout::ScopedTimeout(
    GDBRemoteCommunication &gdb_comm, std::chrono::seconds timeout)
    : m_gdb_comm(gdb_comm), m_timeout_modified(false) {
  auto curr_timeout = gdb_comm.GetPacketTimeout();
  // Only update the timeout if the timeout is greater than the current
  // timeout. If the current timeout is larger, then just use that.
  if (curr_timeout < timeout) {
    m_timeout_modified = true;
    m_saved_timeout = m_gdb_comm.SetPacketTimeout(timeout);
  }
}
|
|
|
|
|
|
|
|
// Restore the packet timeout saved by the constructor, if it was changed.
GDBRemoteCommunication::ScopedTimeout::~ScopedTimeout() {
  // Only restore the timeout if we set it in the constructor.
  if (m_timeout_modified)
    m_gdb_comm.SetPacketTimeout(m_saved_timeout);
}
|
2015-06-16 23:50:18 +08:00
|
|
|
|
|
|
|
// This function is called via the Communications class read thread when bytes
// become available for this connection. This function will consume all
// incoming bytes and try to parse whole packets as they become available. Full
// packets are placed in a queue, so that all packet requests can simply pop
// from this queue. Async notification packets will be dispatched immediately
// to the ProcessGDBRemote Async thread via an event.
//
// \param bytes      Newly received raw bytes (only fed to CheckForPacket on
//                   the first loop iteration; see below).
// \param len        Number of bytes in `bytes`.
// \param broadcast  Unused here; part of the Communication override
//                   signature.
// \param status     Connection status; unused here.
void GDBRemoteCommunication::AppendBytesToCache(const uint8_t *bytes,
                                                size_t len, bool broadcast,
                                                lldb::ConnectionStatus status) {
  StringExtractorGDBRemote packet;

  // Loop until CheckForPacket can no longer extract a complete packet from
  // the accumulated data.
  while (true) {
    PacketType type = CheckForPacket(bytes, len, packet);

    // scrub the data so we do not pass it back to CheckForPacket on future
    // passes of the loop
    bytes = nullptr;
    len = 0;

    // we may have received no packet so lets bail out
    if (type == PacketType::Invalid)
      break;

    if (type == PacketType::Standard) {
      // scope for the mutex
      {
        // lock down the packet queue
        std::lock_guard<std::mutex> guard(m_packet_queue_mutex);
        // push a new packet into the queue
        m_packet_queue.push(packet);
        // Signal condition variable that we have a packet
        m_condition_queue_not_empty.notify_one();
      }
    }

    if (type == PacketType::Notify) {
      // put this packet into an event
      const char *pdata = packet.GetStringRef().data();

      // as the communication class, we are a broadcaster and the async thread
      // is tuned to listen to us
      BroadcastEvent(eBroadcastBitGdbReadThreadGotNotify,
                     new EventDataBytes(pdata));
    }
  }
}
|
2017-11-09 23:45:09 +08:00
|
|
|
|
|
|
|
// llvm::formatv support for PacketResult: stream the enumerator's name.
// The Style string is not used by this provider.
void llvm::format_provider<GDBRemoteCommunication::PacketResult>::format(
    const GDBRemoteCommunication::PacketResult &result, raw_ostream &Stream,
    StringRef Style) {
  using PacketResult = GDBRemoteCommunication::PacketResult;

  // Map the enumerator to its textual name, then emit it in one place.
  StringRef name;
  switch (result) {
  case PacketResult::Success:
    name = "Success";
    break;
  case PacketResult::ErrorSendFailed:
    name = "ErrorSendFailed";
    break;
  case PacketResult::ErrorSendAck:
    name = "ErrorSendAck";
    break;
  case PacketResult::ErrorReplyFailed:
    name = "ErrorReplyFailed";
    break;
  case PacketResult::ErrorReplyTimeout:
    name = "ErrorReplyTimeout";
    break;
  case PacketResult::ErrorReplyInvalid:
    name = "ErrorReplyInvalid";
    break;
  case PacketResult::ErrorReplyAck:
    name = "ErrorReplyAck";
    break;
  case PacketResult::ErrorDisconnected:
    name = "ErrorDisconnected";
    break;
  case PacketResult::ErrorNoSequenceLock:
    name = "ErrorNoSequenceLock";
    break;
  }
  Stream << name;
}
|