//===--- SourceCode.h - Manipulating source code as strings -----*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
#include "SourceCode.h"

#include "Context.h"
#include "FuzzyMatch.h"
#include "Logger.h"
#include "Protocol.h"
#include "refactor/Tweak.h"
#include "clang/AST/ASTContext.h"
#include "clang/Basic/LangOptions.h"
#include "clang/Basic/SourceLocation.h"
#include "clang/Basic/SourceManager.h"
#include "clang/Basic/TokenKinds.h"
#include "clang/Format/Format.h"
#include "clang/Lex/Lexer.h"
#include "clang/Lex/Preprocessor.h"
#include "clang/Tooling/Core/Replacement.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/StringMap.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Errc.h"
#include "llvm/Support/Error.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/LineIterator.h"
#include "llvm/Support/MemoryBuffer.h"
#include "llvm/Support/Path.h"
#include "llvm/Support/SHA1.h"
#include "llvm/Support/VirtualFileSystem.h"
#include "llvm/Support/xxhash.h"
#include <algorithm>

namespace clang {
namespace clangd {

// Here be dragons. LSP positions use columns measured in *UTF-16 code units*!
// Clangd uses UTF-8 and byte-offsets internally, so conversion is nontrivial.

// Iterates over unicode codepoints in the (UTF-8) string. For each,
// invokes CB(UTF-8 length, UTF-16 length), and breaks if it returns true.
// Returns true if CB returned true, false if we hit the end of string.
template <typename Callback>
static bool iterateCodepoints(llvm::StringRef U8, const Callback &CB) {
  // A codepoint takes two UTF-16 code units if it's astral (outside BMP).
  // Astral codepoints are encoded as 4 bytes in UTF-8, starting with 11110xxx.
  for (size_t I = 0; I < U8.size();) {
    unsigned char C = static_cast<unsigned char>(U8[I]);
    if (LLVM_LIKELY(!(C & 0x80))) { // ASCII character.
      if (CB(1, 1))
        return true;
      ++I;
      continue;
    }
    // This convenient property of UTF-8 holds for all non-ASCII characters.
    size_t UTF8Length = llvm::countLeadingOnes(C);
    // 0xxx is ASCII, handled above. 10xxx is a trailing byte, invalid here.
    // 11111xxx is not valid UTF-8 at all. Assert because it's probably our bug.
    assert((UTF8Length >= 2 && UTF8Length <= 4) &&
           "Invalid UTF-8, or transcoding bug?");
    I += UTF8Length; // Skip over all trailing bytes.
    // A codepoint takes two UTF-16 code units if it's astral (outside BMP).
    // Astral codepoints are encoded as 4 bytes in UTF-8 (11110xxx ...)
    if (CB(UTF8Length, UTF8Length == 4 ? 2 : 1))
      return true;
  }
  return false;
}
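
// For illustration, iterating over U8 = "a¢€𐍈" would invoke:
//   CB(1, 1) for 'a'          (ASCII),
//   CB(2, 1) for '¢'  U+00A2  (2 UTF-8 bytes, 1 UTF-16 code unit),
//   CB(3, 1) for '€'  U+20AC  (3 UTF-8 bytes, 1 UTF-16 code unit),
//   CB(4, 2) for '𐍈' U+10348  (4 UTF-8 bytes, a UTF-16 surrogate pair).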

// Returns the byte offset into the string that is an offset of \p Units in
// the specified encoding.
// Conceptually, this converts to the encoding, truncates to CodeUnits,
// converts back to UTF-8, and returns the length in bytes.
static size_t measureUnits(llvm::StringRef U8, int Units, OffsetEncoding Enc,
                           bool &Valid) {
  Valid = Units >= 0;
  if (Units <= 0)
    return 0;
  size_t Result = 0;
  switch (Enc) {
  case OffsetEncoding::UTF8:
    Result = Units;
    break;
  case OffsetEncoding::UTF16:
    Valid = iterateCodepoints(U8, [&](int U8Len, int U16Len) {
      Result += U8Len;
      Units -= U16Len;
      return Units <= 0;
    });
    if (Units < 0) // Offset in the middle of a surrogate pair.
      Valid = false;
    break;
  case OffsetEncoding::UTF32:
    Valid = iterateCodepoints(U8, [&](int U8Len, int U16Len) {
      Result += U8Len;
      Units--;
      return Units <= 0;
    });
    break;
  case OffsetEncoding::UnsupportedEncoding:
    llvm_unreachable("unsupported encoding");
  }
  // Don't return an out-of-range index if we overran.
  if (Result > U8.size()) {
    Valid = false;
    return U8.size();
  }
  return Result;
}
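
// For illustration, with U8 = "¢x" (U+00A2 is 2 UTF-8 bytes, 1 UTF-16 unit):
//   measureUnits(U8, 1, OffsetEncoding::UTF16, Valid) == 2, Valid == true
//   measureUnits(U8, 2, OffsetEncoding::UTF8,  Valid) == 2, Valid == true
//   measureUnits(U8, 5, OffsetEncoding::UTF16, Valid) == 3, Valid == false
// (the last request runs past the end of the string and is clamped).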

Key<OffsetEncoding> kCurrentOffsetEncoding;
static OffsetEncoding lspEncoding() {
  auto *Enc = Context::current().get(kCurrentOffsetEncoding);
  return Enc ? *Enc : OffsetEncoding::UTF16;
}

// Like most strings in clangd, the input is UTF-8 encoded.
size_t lspLength(llvm::StringRef Code) {
  size_t Count = 0;
  switch (lspEncoding()) {
  case OffsetEncoding::UTF8:
    Count = Code.size();
    break;
  case OffsetEncoding::UTF16:
    iterateCodepoints(Code, [&](int U8Len, int U16Len) {
      Count += U16Len;
      return false;
    });
    break;
  case OffsetEncoding::UTF32:
    iterateCodepoints(Code, [&](int U8Len, int U16Len) {
      ++Count;
      return false;
    });
    break;
  case OffsetEncoding::UnsupportedEncoding:
    llvm_unreachable("unsupported encoding");
  }
  return Count;
}
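
// For illustration, assuming the default UTF-16 offset encoding is in effect:
//   lspLength("ascii") == 5
//   lspLength("↓")     == 1   // U+2193: 3 UTF-8 bytes, one UTF-16 code unit
//   lspLength("😂")    == 2   // U+1F602: 4 UTF-8 bytes, a surrogate pair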

llvm::Expected<size_t> positionToOffset(llvm::StringRef Code, Position P,
                                        bool AllowColumnsBeyondLineLength) {
  if (P.line < 0)
    return llvm::make_error<llvm::StringError>(
        llvm::formatv("Line value can't be negative ({0})", P.line),
        llvm::errc::invalid_argument);
  if (P.character < 0)
    return llvm::make_error<llvm::StringError>(
        llvm::formatv("Character value can't be negative ({0})", P.character),
        llvm::errc::invalid_argument);
  size_t StartOfLine = 0;
  for (int I = 0; I != P.line; ++I) {
    size_t NextNL = Code.find('\n', StartOfLine);
    if (NextNL == llvm::StringRef::npos)
      return llvm::make_error<llvm::StringError>(
          llvm::formatv("Line value is out of range ({0})", P.line),
          llvm::errc::invalid_argument);
    StartOfLine = NextNL + 1;
  }
  StringRef Line =
      Code.substr(StartOfLine).take_until([](char C) { return C == '\n'; });

  // P.character may be in UTF-16, transcode if necessary.
  bool Valid;
  size_t ByteInLine = measureUnits(Line, P.character, lspEncoding(), Valid);
  if (!Valid && !AllowColumnsBeyondLineLength)
    return llvm::make_error<llvm::StringError>(
        llvm::formatv("{0} offset {1} is invalid for line {2}", lspEncoding(),
                      P.character, P.line),
        llvm::errc::invalid_argument);
  return StartOfLine + ByteInLine;
}
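
// For illustration, with Code = "int x;\nint ↓y;" (U+2193 is 3 UTF-8 bytes but
// a single UTF-16 code unit) and the default UTF-16 encoding:
//   line 1, character 5  -> byte offset 14, the start of 'y'
//   line 1, character 99 -> end of that line, or an error when
//                           AllowColumnsBeyondLineLength is false
//   line 9, character 0  -> an error (line value is out of range)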

Position offsetToPosition(llvm::StringRef Code, size_t Offset) {
  Offset = std::min(Code.size(), Offset);
  llvm::StringRef Before = Code.substr(0, Offset);
  int Lines = Before.count('\n');
  size_t PrevNL = Before.rfind('\n');
  size_t StartOfLine = (PrevNL == llvm::StringRef::npos) ? 0 : (PrevNL + 1);
  Position Pos;
  Pos.line = Lines;
  Pos.character = lspLength(Before.substr(StartOfLine));
  return Pos;
}
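
// For illustration, offsetToPosition("int ↓y;", 7) would yield
// {line: 0, character: 5}: the 7 bytes before the offset spell "int ↓",
// which is 5 UTF-16 code units under the default encoding.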

Position sourceLocToPosition(const SourceManager &SM, SourceLocation Loc) {
  // We use the SourceManager's line tables, but its column number is in bytes.
  FileID FID;
  unsigned Offset;
  std::tie(FID, Offset) = SM.getDecomposedSpellingLoc(Loc);
  Position P;
  P.line = static_cast<int>(SM.getLineNumber(FID, Offset)) - 1;
  bool Invalid = false;
  llvm::StringRef Code = SM.getBufferData(FID, &Invalid);
  if (!Invalid) {
    auto ColumnInBytes = SM.getColumnNumber(FID, Offset) - 1;
    auto LineSoFar = Code.substr(Offset - ColumnInBytes, ColumnInBytes);
    P.character = lspLength(LineSoFar);
  }
  return P;
}

bool isSpelledInSource(SourceLocation Loc, const SourceManager &SM) {
  if (Loc.isMacroID()) {
    std::string PrintLoc = SM.getSpellingLoc(Loc).printToString(SM);
    if (llvm::StringRef(PrintLoc).startswith("<scratch") ||
        llvm::StringRef(PrintLoc).startswith("<command line>"))
      return false;
  }
  return true;
}

SourceLocation spellingLocIfSpelled(SourceLocation Loc,
                                    const SourceManager &SM) {
  if (!isSpelledInSource(Loc, SM))
    // Use the expansion location, as the spelling location is not interesting.
    return SM.getExpansionRange(Loc).getBegin();
  return SM.getSpellingLoc(Loc);
}

llvm::Optional<Range> getTokenRange(const SourceManager &SM,
                                    const LangOptions &LangOpts,
                                    SourceLocation TokLoc) {
  if (!TokLoc.isValid())
    return llvm::None;
  SourceLocation End = Lexer::getLocForEndOfToken(TokLoc, 0, SM, LangOpts);
  if (!End.isValid())
    return llvm::None;
  return halfOpenToRange(SM, CharSourceRange::getCharRange(TokLoc, End));
}
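
// For illustration, if the buffer contains "foo bar" and TokLoc points at the
// 'b', the returned range would cover the half-open interval [4, 7) on that
// line, i.e. exactly the identifier "bar".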

namespace {

enum TokenFlavor { Identifier, Operator, Whitespace, Other };

bool isOverloadedOperator(const Token &Tok) {
  switch (Tok.getKind()) {
#define OVERLOADED_OPERATOR(Name, Spelling, Token, Unary, Binary, MemOnly)     \
  case tok::Token:
#define OVERLOADED_OPERATOR_MULTI(Name, Spelling, Unary, Binary, MemOnly)
#include "clang/Basic/OperatorKinds.def"
    return true;

  default:
    break;
  }
  return false;
}

TokenFlavor getTokenFlavor(SourceLocation Loc, const SourceManager &SM,
                           const LangOptions &LangOpts) {
  Token Tok;
  Tok.setKind(tok::NUM_TOKENS);
  if (Lexer::getRawToken(Loc, Tok, SM, LangOpts,
                         /*IgnoreWhiteSpace*/ false))
    return Other;

  // getRawToken will return false without setting Tok when the token is
  // whitespace, so if the flag is not set, we are sure this is whitespace.
  if (Tok.is(tok::TokenKind::NUM_TOKENS))
    return Whitespace;
  if (Tok.is(tok::TokenKind::raw_identifier))
    return Identifier;
  if (isOverloadedOperator(Tok))
    return Operator;
  return Other;
}

} // namespace

SourceLocation getBeginningOfIdentifier(const Position &Pos,
                                        const SourceManager &SM,
                                        const LangOptions &LangOpts) {
  FileID FID = SM.getMainFileID();
  auto Offset = positionToOffset(SM.getBufferData(FID), Pos);
  if (!Offset) {
    log("getBeginningOfIdentifier: {0}", Offset.takeError());
    return SourceLocation();
  }

  // GetBeginningOfToken(InputLoc) is almost what we want, but does the wrong
  // thing if the cursor is at the end of the token (identifier or operator).
  // The cases are:
  // 1) at the beginning of the token
  // 2) at the middle of the token
  // 3) at the end of the token
  // 4) anywhere outside the identifier or operator
  // To distinguish all cases, we lex at both GetBeginningOfToken(InputLoc-1)
  // and GetBeginningOfToken(InputLoc); for cases 1 and 4, we just return the
  // original location.
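  // For illustration, in "abs(x)" with the cursor between "abs" and "(", we
  // lex at both Offset-1 (inside the identifier "abs") and Offset (the "("
  // token); the identifier is preferred below, so the location of the 'a' is
  // returned.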
  SourceLocation InputLoc = SM.getComposedLoc(FID, *Offset);
  if (*Offset == 0) // Case 1 or 4.
    return InputLoc;
  SourceLocation Before = SM.getComposedLoc(FID, *Offset - 1);
  SourceLocation BeforeTokBeginning =
      Lexer::GetBeginningOfToken(Before, SM, LangOpts);
  TokenFlavor BeforeKind = getTokenFlavor(BeforeTokBeginning, SM, LangOpts);

  SourceLocation CurrentTokBeginning =
      Lexer::GetBeginningOfToken(InputLoc, SM, LangOpts);
  TokenFlavor CurrentKind = getTokenFlavor(CurrentTokBeginning, SM, LangOpts);

  // At the middle of the token.
  if (BeforeTokBeginning == CurrentTokBeginning) {
    // For an interesting token, we return the beginning of the token.
    if (CurrentKind == Identifier || CurrentKind == Operator)
      return CurrentTokBeginning;
    // Otherwise, we return the original location.
    return InputLoc;
  }

  // Whitespace is not interesting.
  if (BeforeKind == Whitespace)
    return CurrentTokBeginning;
  if (CurrentKind == Whitespace)
    return BeforeTokBeginning;

  // The cursor is at a token boundary, e.g. "Before^Current"; we prefer
  // identifiers to other tokens.
  if (CurrentKind == Identifier)
    return CurrentTokBeginning;
  if (BeforeKind == Identifier)
    return BeforeTokBeginning;
  // Then prefer overloaded operators to other tokens.
  if (CurrentKind == Operator)
    return CurrentTokBeginning;
  if (BeforeKind == Operator)
    return BeforeTokBeginning;

  // Non-interesting case, we just return the original location.
  return InputLoc;
}

bool isValidFileRange(const SourceManager &Mgr, SourceRange R) {
  if (!R.getBegin().isValid() || !R.getEnd().isValid())
    return false;

  FileID BeginFID;
  size_t BeginOffset = 0;
  std::tie(BeginFID, BeginOffset) = Mgr.getDecomposedLoc(R.getBegin());

  FileID EndFID;
  size_t EndOffset = 0;
  std::tie(EndFID, EndOffset) = Mgr.getDecomposedLoc(R.getEnd());

  return BeginFID.isValid() && BeginFID == EndFID && BeginOffset <= EndOffset;
}

bool halfOpenRangeContains(const SourceManager &Mgr, SourceRange R,
                           SourceLocation L) {
  assert(isValidFileRange(Mgr, R));

  FileID BeginFID;
  size_t BeginOffset = 0;
  std::tie(BeginFID, BeginOffset) = Mgr.getDecomposedLoc(R.getBegin());
  size_t EndOffset = Mgr.getFileOffset(R.getEnd());

  FileID LFid;
  size_t LOffset;
  std::tie(LFid, LOffset) = Mgr.getDecomposedLoc(L);
  return BeginFID == LFid && BeginOffset <= LOffset && LOffset < EndOffset;
}

bool halfOpenRangeTouches(const SourceManager &Mgr, SourceRange R,
                          SourceLocation L) {
  return L == R.getEnd() || halfOpenRangeContains(Mgr, R, L);
}

SourceLocation includeHashLoc(FileID IncludedFile, const SourceManager &SM) {
  assert(SM.getLocForEndOfFile(IncludedFile).isFileID());
  FileID IncludingFile;
  unsigned Offset;
  std::tie(IncludingFile, Offset) =
      SM.getDecomposedExpansionLoc(SM.getIncludeLoc(IncludedFile));
  bool Invalid = false;
  llvm::StringRef Buf = SM.getBufferData(IncludingFile, &Invalid);
  if (Invalid)
    return SourceLocation();
  // Now buf is "...\n#include <foo>\n..."
  // and Offset points here:   ^
  // Rewind to the preceding # on the line.
  assert(Offset < Buf.size());
  for (;; --Offset) {
    if (Buf[Offset] == '#')
      return SM.getComposedLoc(IncludingFile, Offset);
    if (Buf[Offset] == '\n' || Offset == 0) // no hash, what's going on?
      return SourceLocation();
  }
}
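
// For illustration, if the including file contains the line
// "  #include <foo.h>", the location returned for foo.h's FileID would be
// that of the '#', not of the header name that SM.getIncludeLoc() points at.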

static unsigned getTokenLengthAtLoc(SourceLocation Loc, const SourceManager &SM,
                                    const LangOptions &LangOpts) {
  Token TheTok;
  if (Lexer::getRawToken(Loc, TheTok, SM, LangOpts))
    return 0;
  // FIXME: Here we check whether the token at the location is a greatergreater
  // (>>) token and consider it as a single greater (>). This is to get it
  // working for templates but it isn't correct for the right shift operator. We
  // can avoid this by using half open char ranges in getFileRange() but getting
  // token ending is not well supported in macroIDs.
  if (TheTok.is(tok::greatergreater))
    return 1;
  return TheTok.getLength();
}
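
// For illustration, when Loc points at the ">>" that closes
// "std::vector<std::vector<int>>", the raw lexer sees one token of length 2;
// reporting 1 here means ranges ending at that location cover a single '>'
// rather than both, per the FIXME above.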
|
|
|
|
|
|
|
|
// Returns location of the last character of the token at a given loc
|
|
|
|
static SourceLocation getLocForTokenEnd(SourceLocation BeginLoc,
|
|
|
|
const SourceManager &SM,
|
|
|
|
const LangOptions &LangOpts) {
|
|
|
|
unsigned Len = getTokenLengthAtLoc(BeginLoc, SM, LangOpts);
|
|
|
|
return BeginLoc.getLocWithOffset(Len ? Len - 1 : 0);
|
|
|
|
}
|
|
|
|
|
|
|
|
// Returns location of the starting of the token at a given EndLoc
|
|
|
|
static SourceLocation getLocForTokenBegin(SourceLocation EndLoc,
|
|
|
|
const SourceManager &SM,
|
|
|
|
const LangOptions &LangOpts) {
|
|
|
|
return EndLoc.getLocWithOffset(
|
|
|
|
-(signed)getTokenLengthAtLoc(EndLoc, SM, LangOpts));
|
|
|
|
}
|
|
|
|
|
|
|
|
// Converts a char source range to a token range.
|
|
|
|
static SourceRange toTokenRange(CharSourceRange Range, const SourceManager &SM,
|
|
|
|
const LangOptions &LangOpts) {
|
|
|
|
if (!Range.isTokenRange())
|
|
|
|
Range.setEnd(getLocForTokenBegin(Range.getEnd(), SM, LangOpts));
|
|
|
|
return Range.getAsRange();
|
|
|
|
}
|
|
|
|
// Returns the union of two token ranges.
|
|
|
|
// To find the maximum of the Ends of the ranges, we compare the location of the
|
|
|
|
// last character of the token.
|
|
|
|
static SourceRange unionTokenRange(SourceRange R1, SourceRange R2,
|
|
|
|
const SourceManager &SM,
|
|
|
|
const LangOptions &LangOpts) {
|
2019-08-27 16:44:06 +08:00
|
|
|
SourceLocation Begin =
|
|
|
|
SM.isBeforeInTranslationUnit(R1.getBegin(), R2.getBegin())
|
|
|
|
? R1.getBegin()
|
|
|
|
: R2.getBegin();
|
|
|
|
SourceLocation End =
|
|
|
|
SM.isBeforeInTranslationUnit(getLocForTokenEnd(R1.getEnd(), SM, LangOpts),
|
|
|
|
getLocForTokenEnd(R2.getEnd(), SM, LangOpts))
|
|
|
|
? R2.getEnd()
|
|
|
|
: R1.getEnd();
|
|
|
|
return SourceRange(Begin, End);
|
2019-07-12 19:42:31 +08:00
|
|
|
}
|
|
|
|
|
2019-08-27 16:44:06 +08:00
|
|
|
// Given a range whose endpoints may be in different expansions or files,
|
|
|
|
// tries to find a range within a common file by following up the expansion and
|
|
|
|
// include location in each.
|
|
|
|
static SourceRange rangeInCommonFile(SourceRange R, const SourceManager &SM,
|
|
|
|
const LangOptions &LangOpts) {
|
2019-08-07 01:01:12 +08:00
|
|
|
// Fast path for most common cases.
|
2019-08-27 16:44:06 +08:00
|
|
|
if (SM.isWrittenInSameFile(R.getBegin(), R.getEnd()))
|
|
|
|
return R;
|
2019-08-07 01:01:12 +08:00
|
|
|
// Record the stack of expansion locations for the beginning, keyed by FileID.
|
|
|
|
llvm::DenseMap<FileID, SourceLocation> BeginExpansions;
|
2019-08-27 16:44:06 +08:00
|
|
|
for (SourceLocation Begin = R.getBegin(); Begin.isValid();
|
2019-08-07 01:01:12 +08:00
|
|
|
Begin = Begin.isFileID()
|
2019-08-27 16:44:06 +08:00
|
|
|
? includeHashLoc(SM.getFileID(Begin), SM)
|
2019-08-07 01:01:12 +08:00
|
|
|
: SM.getImmediateExpansionRange(Begin).getBegin()) {
|
|
|
|
BeginExpansions[SM.getFileID(Begin)] = Begin;
|
|
|
|
}
|
|
|
|
// Move up the stack of expansion locations for the end until we find the
|
|
|
|
// location in BeginExpansions with that has the same file id.
|
2019-08-27 16:44:06 +08:00
|
|
|
for (SourceLocation End = R.getEnd(); End.isValid();
|
|
|
|
End = End.isFileID() ? includeHashLoc(SM.getFileID(End), SM)
|
2019-08-07 01:01:12 +08:00
|
|
|
: toTokenRange(SM.getImmediateExpansionRange(End),
|
|
|
|
SM, LangOpts)
|
|
|
|
.getEnd()) {
|
|
|
|
auto It = BeginExpansions.find(SM.getFileID(End));
|
2019-08-27 16:44:06 +08:00
|
|
|
if (It != BeginExpansions.end()) {
|
|
|
|
if (SM.getFileOffset(It->second) > SM.getFileOffset(End))
|
|
|
|
return SourceLocation();
|
2019-08-07 01:01:12 +08:00
|
|
|
return {It->second, End};
|
2019-08-27 16:44:06 +08:00
|
|
|
}
|
2019-08-07 01:01:12 +08:00
|
|
|
}
|
2019-08-27 16:44:06 +08:00
|
|
|
return SourceRange();
|
2019-08-07 01:01:12 +08:00
|
|
|
}
|
2019-08-27 16:44:06 +08:00
|
|
|
|
|
|
|
// Find an expansion range (not necessarily immediate) the ends of which are in
|
|
|
|
// the same file id.
|
|
|
|
static SourceRange
|
|
|
|
getExpansionTokenRangeInSameFile(SourceLocation Loc, const SourceManager &SM,
|
|
|
|
const LangOptions &LangOpts) {
|
|
|
|
return rangeInCommonFile(
|
|
|
|
toTokenRange(SM.getImmediateExpansionRange(Loc), SM, LangOpts), SM,
|
|
|
|
LangOpts);
|
|
|
|
}

// Returns the file range for a given Location as a Token Range.
// This is quite similar to getFileLoc in SourceManager as both use
// getImmediateExpansionRange and getImmediateSpellingLoc (for macro IDs).
// However:
// - We want to maintain the full range information as we move from one file to
//   the next. getFileLoc only uses the BeginLoc of getImmediateExpansionRange.
// - We want to split '>>' tokens as the lexer parses the '>>' in nested
//   template instantiations as a '>>' instead of two '>'s.
// There is also getExpansionRange, but it simply calls
// getImmediateExpansionRange on the begin and end separately, which is wrong.
static SourceRange getTokenFileRange(SourceLocation Loc,
                                     const SourceManager &SM,
                                     const LangOptions &LangOpts) {
  SourceRange FileRange = Loc;
  while (!FileRange.getBegin().isFileID()) {
    if (SM.isMacroArgExpansion(FileRange.getBegin())) {
      FileRange = unionTokenRange(
          SM.getImmediateSpellingLoc(FileRange.getBegin()),
          SM.getImmediateSpellingLoc(FileRange.getEnd()), SM, LangOpts);
      assert(SM.isWrittenInSameFile(FileRange.getBegin(), FileRange.getEnd()));
    } else {
      SourceRange ExpansionRangeForBegin =
          getExpansionTokenRangeInSameFile(FileRange.getBegin(), SM, LangOpts);
      SourceRange ExpansionRangeForEnd =
          getExpansionTokenRangeInSameFile(FileRange.getEnd(), SM, LangOpts);
      if (ExpansionRangeForBegin.isInvalid() ||
          ExpansionRangeForEnd.isInvalid())
        return SourceRange();
      assert(SM.isWrittenInSameFile(ExpansionRangeForBegin.getBegin(),
                                    ExpansionRangeForEnd.getBegin()) &&
             "Both Expansion ranges should be in same file.");
      FileRange = unionTokenRange(ExpansionRangeForBegin, ExpansionRangeForEnd,
                                  SM, LangOpts);
    }
  }
  return FileRange;
}
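
// Illustrative sketch (not normative): given
//   #define ID(X) X
//   ID(int) x;
// calling getTokenFileRange() on the location of 'int' (a macro argument
// expansion) walks out of the expansion and yields the range spelled in the
// main file, i.e. the 'int' written inside 'ID(int)'.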

bool isInsideMainFile(SourceLocation Loc, const SourceManager &SM) {
  return Loc.isValid() && SM.isWrittenInMainFile(SM.getExpansionLoc(Loc));
}

llvm::Optional<SourceRange> toHalfOpenFileRange(const SourceManager &SM,
                                                const LangOptions &LangOpts,
                                                SourceRange R) {
  SourceRange R1 = getTokenFileRange(R.getBegin(), SM, LangOpts);
  if (!isValidFileRange(SM, R1))
    return llvm::None;

  SourceRange R2 = getTokenFileRange(R.getEnd(), SM, LangOpts);
  if (!isValidFileRange(SM, R2))
    return llvm::None;

  SourceRange Result =
      rangeInCommonFile(unionTokenRange(R1, R2, SM, LangOpts), SM, LangOpts);
  unsigned TokLen = getTokenLengthAtLoc(Result.getEnd(), SM, LangOpts);
  // Convert from closed token range to half-open (char) range.
  Result.setEnd(Result.getEnd().getLocWithOffset(TokLen));
  if (!isValidFileRange(SM, Result))
    return llvm::None;

  return Result;
}

llvm::StringRef toSourceCode(const SourceManager &SM, SourceRange R) {
  assert(isValidFileRange(SM, R));
  bool Invalid = false;
  auto *Buf = SM.getBuffer(SM.getFileID(R.getBegin()), &Invalid);
  assert(!Invalid);

  size_t BeginOffset = SM.getFileOffset(R.getBegin());
  size_t EndOffset = SM.getFileOffset(R.getEnd());
  return Buf->getBuffer().substr(BeginOffset, EndOffset - BeginOffset);
}
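
// Usage sketch (illustrative), e.g. for some Decl *D:
//   if (auto FileRange = toHalfOpenFileRange(SM, LangOpts, D->getSourceRange()))
//     llvm::StringRef Spelled = toSourceCode(SM, *FileRange);
// toHalfOpenFileRange() returns a half-open range whose end points past the
// last token, which is exactly the form toSourceCode() expects.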

llvm::Expected<SourceLocation> sourceLocationInMainFile(const SourceManager &SM,
                                                        Position P) {
  llvm::StringRef Code = SM.getBuffer(SM.getMainFileID())->getBuffer();
  auto Offset =
      positionToOffset(Code, P, /*AllowColumnsBeyondLineLength=*/false);
  if (!Offset)
    return Offset.takeError();
  return SM.getLocForStartOfFile(SM.getMainFileID()).getLocWithOffset(*Offset);
}

Range halfOpenToRange(const SourceManager &SM, CharSourceRange R) {
  // Clang is 1-based, LSP uses 0-based indexes.
  Position Begin = sourceLocToPosition(SM, R.getBegin());
  Position End = sourceLocToPosition(SM, R.getEnd());

  return {Begin, End};
}

std::pair<size_t, size_t> offsetToClangLineColumn(llvm::StringRef Code,
                                                  size_t Offset) {
  Offset = std::min(Code.size(), Offset);
  llvm::StringRef Before = Code.substr(0, Offset);
  int Lines = Before.count('\n');
  size_t PrevNL = Before.rfind('\n');
  size_t StartOfLine = (PrevNL == llvm::StringRef::npos) ? 0 : (PrevNL + 1);
  return {Lines + 1, Offset - StartOfLine + 1};
}
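
// Worked example (illustrative): for Code = "ab\ncd" and Offset = 4 (the 'd'),
// Before = "ab\nc", so Lines = 1, PrevNL = 2, StartOfLine = 3 and the result
// is {2, 2}: clang-style 1-based line 2, column 2.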

std::pair<StringRef, StringRef> splitQualifiedName(StringRef QName) {
  size_t Pos = QName.rfind("::");
  if (Pos == llvm::StringRef::npos)
    return {llvm::StringRef(), QName};
  return {QName.substr(0, Pos + 2), QName.substr(Pos + 2)};
}
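
// Usage sketch (illustrative):
//   splitQualifiedName("clang::clangd::X") == {"clang::clangd::", "X"}
//   splitQualifiedName("X") == {"", "X"}
// The scope part keeps its trailing "::" so it can be prepended verbatim.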

TextEdit replacementToEdit(llvm::StringRef Code,
                           const tooling::Replacement &R) {
  Range ReplacementRange = {
      offsetToPosition(Code, R.getOffset()),
      offsetToPosition(Code, R.getOffset() + R.getLength())};
  return {ReplacementRange, R.getReplacementText()};
}

std::vector<TextEdit> replacementsToEdits(llvm::StringRef Code,
                                          const tooling::Replacements &Repls) {
  std::vector<TextEdit> Edits;
  for (const auto &R : Repls)
    Edits.push_back(replacementToEdit(Code, R));
  return Edits;
}
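
// Usage sketch (illustrative), assuming Code holds the buffer text:
//   tooling::Replacements Repls;
//   llvm::cantFail(Repls.add(
//       tooling::Replacement("main.cpp", /*Offset=*/0, /*Length=*/3, "auto")));
//   std::vector<TextEdit> Edits = replacementsToEdits(Code, Repls);
// Each TextEdit carries a 0-based LSP Range plus the new text.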

llvm::Optional<std::string> getCanonicalPath(const FileEntry *F,
                                             const SourceManager &SourceMgr) {
  if (!F)
    return None;

  llvm::SmallString<128> FilePath = F->getName();
  if (!llvm::sys::path::is_absolute(FilePath)) {
    if (auto EC =
            SourceMgr.getFileManager().getVirtualFileSystem().makeAbsolute(
                FilePath)) {
      elog("Could not turn relative path '{0}' to absolute: {1}", FilePath,
           EC.message());
      return None;
    }
  }

  // Handle the symbolic link path case where the current working directory
  // (getCurrentWorkingDirectory) is a symlink. We always want the real file
  // path (instead of the symlink path) for the C++ symbols.
  //
  // Consider the following example:
  //
  //   src dir: /project/src/foo.h
  //   current working directory (symlink): /tmp/build -> /project/src/
  //
  // The file path of the symbol is "/project/src/foo.h" instead of
  // "/tmp/build/foo.h".
  if (auto Dir = SourceMgr.getFileManager().getDirectory(
          llvm::sys::path::parent_path(FilePath))) {
    llvm::SmallString<128> RealPath;
    llvm::StringRef DirName = SourceMgr.getFileManager().getCanonicalName(*Dir);
    llvm::sys::path::append(RealPath, DirName,
                            llvm::sys::path::filename(FilePath));
    return RealPath.str().str();
  }

  return FilePath.str().str();
}

TextEdit toTextEdit(const FixItHint &FixIt, const SourceManager &M,
                    const LangOptions &L) {
  TextEdit Result;
  Result.range =
      halfOpenToRange(M, Lexer::makeFileCharRange(FixIt.RemoveRange, M, L));
  Result.newText = FixIt.CodeToInsert;
  return Result;
}

bool isRangeConsecutive(const Range &Left, const Range &Right) {
  return Left.end.line == Right.start.line &&
         Left.end.character == Right.start.character;
}

FileDigest digest(llvm::StringRef Content) {
  uint64_t Hash{llvm::xxHash64(Content)};
  FileDigest Result;
  for (unsigned I = 0; I < Result.size(); ++I) {
    Result[I] = uint8_t(Hash);
    Hash >>= 8;
  }
  return Result;
}
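
// Layout sketch (illustrative): the 64-bit xxHash value is stored into the
// digest array lowest byte first, e.g. for Hash == 0x0123456789ABCDEF the
// digest begins {0xEF, 0xCD, 0xAB, ...}.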

llvm::Optional<FileDigest> digestFile(const SourceManager &SM, FileID FID) {
  bool Invalid = false;
  llvm::StringRef Content = SM.getBufferData(FID, &Invalid);
  if (Invalid)
    return None;
  return digest(Content);
}

format::FormatStyle getFormatStyleForFile(llvm::StringRef File,
                                          llvm::StringRef Content,
                                          llvm::vfs::FileSystem *FS) {
  auto Style = format::getStyle(format::DefaultFormatStyle, File,
                                format::DefaultFallbackStyle, Content, FS);
  if (!Style) {
    log("getStyle() failed for file {0}: {1}. Fallback is LLVM style.", File,
        Style.takeError());
    Style = format::getLLVMStyle();
  }
  return *Style;
}

llvm::Expected<tooling::Replacements>
cleanupAndFormat(StringRef Code, const tooling::Replacements &Replaces,
                 const format::FormatStyle &Style) {
  auto CleanReplaces = cleanupAroundReplacements(Code, Replaces, Style);
  if (!CleanReplaces)
    return CleanReplaces;
  return formatReplacements(Code, std::move(*CleanReplaces), Style);
}

template <typename Action>
static void lex(llvm::StringRef Code, const format::FormatStyle &Style,
                Action A) {
  // FIXME: InMemoryFileAdapter crashes unless the buffer is null terminated!
  std::string NullTerminatedCode = Code.str();
  SourceManagerForFile FileSM("dummy.cpp", NullTerminatedCode);
  auto &SM = FileSM.get();
  auto FID = SM.getMainFileID();
  Lexer Lex(FID, SM.getBuffer(FID), SM, format::getFormattingLangOpts(Style));
  Token Tok;

  while (!Lex.LexFromRawLexer(Tok))
    A(Tok);
  // LexFromRawLexer returns true after it lexes the last token, so we still
  // have one more token to report.
  A(Tok);
}
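
// Usage sketch (illustrative): raw-lexing a snippet and counting its tokens.
//   unsigned Count = 0;
//   lex("int x = 42;", format::getLLVMStyle(),
//       [&](const clang::Token &) { ++Count; });
// The callback also sees the trailing eof token, so Count includes it.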

llvm::StringMap<unsigned> collectIdentifiers(llvm::StringRef Content,
                                             const format::FormatStyle &Style) {
  llvm::StringMap<unsigned> Identifiers;
  lex(Content, Style, [&](const clang::Token &Tok) {
    switch (Tok.getKind()) {
    case tok::identifier:
      ++Identifiers[Tok.getIdentifierInfo()->getName()];
      break;
    case tok::raw_identifier:
      ++Identifiers[Tok.getRawIdentifier()];
      break;
    default:
      break;
    }
  });
  return Identifiers;
}
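
// Usage sketch (illustrative):
//   auto IDs = collectIdentifiers("int xyz; xyz = 2;", format::getLLVMStyle());
//   // IDs["xyz"] == 2, IDs["int"] == 1 (raw lexing counts keywords as
//   // identifiers too).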

namespace {
enum NamespaceEvent {
  BeginNamespace, // namespace <ns> {. Payload is resolved <ns>.
  EndNamespace,   // } // namespace <ns>. Payload is resolved *outer* namespace.
  UsingDirective  // using namespace <ns>. Payload is unresolved <ns>.
};
// Scans C++ source code for constructs that change the visible namespaces.
void parseNamespaceEvents(
    llvm::StringRef Code, const format::FormatStyle &Style,
    llvm::function_ref<void(NamespaceEvent, llvm::StringRef)> Callback) {

  // Stack of enclosing namespaces, e.g. {"clang", "clangd"}
  std::vector<std::string> Enclosing; // Contains e.g. "clang", "clangd"
  // Stack counts open braces. true if the brace opened a namespace.
  std::vector<bool> BraceStack;

  enum {
    Default,
    Namespace,          // just saw 'namespace'
    NamespaceName,      // just saw 'namespace' NSName
    Using,              // just saw 'using'
    UsingNamespace,     // just saw 'using namespace'
    UsingNamespaceName, // just saw 'using namespace' NSName
  } State = Default;
  std::string NSName;

  lex(Code, Style, [&](const clang::Token &Tok) {
    switch (Tok.getKind()) {
    case tok::raw_identifier:
      // In raw mode, this could be a keyword or a name.
      switch (State) {
      case UsingNamespace:
      case UsingNamespaceName:
        NSName.append(Tok.getRawIdentifier());
        State = UsingNamespaceName;
        break;
      case Namespace:
      case NamespaceName:
        NSName.append(Tok.getRawIdentifier());
        State = NamespaceName;
        break;
      case Using:
        State =
            (Tok.getRawIdentifier() == "namespace") ? UsingNamespace : Default;
        break;
      case Default:
        NSName.clear();
        if (Tok.getRawIdentifier() == "namespace")
          State = Namespace;
        else if (Tok.getRawIdentifier() == "using")
          State = Using;
        break;
      }
      break;
    case tok::coloncolon:
      // This can come at the beginning or in the middle of a namespace name.
      switch (State) {
      case UsingNamespace:
      case UsingNamespaceName:
        NSName.append("::");
        State = UsingNamespaceName;
        break;
      case NamespaceName:
        NSName.append("::");
        State = NamespaceName;
        break;
      case Namespace: // Not legal here.
      case Using:
      case Default:
        State = Default;
        break;
      }
      break;
    case tok::l_brace:
      // Record which { started a namespace, so we know when } ends one.
      if (State == NamespaceName) {
        // Parsed: namespace <name> {
        BraceStack.push_back(true);
        Enclosing.push_back(NSName);
        Callback(BeginNamespace, llvm::join(Enclosing, "::"));
      } else {
        // This case includes anonymous namespaces (State = Namespace).
        // For our purposes, they're not namespaces and we ignore them.
        BraceStack.push_back(false);
      }
      State = Default;
      break;
    case tok::r_brace:
      // If braces are unmatched, we're going to be confused, but don't crash.
      if (!BraceStack.empty()) {
        if (BraceStack.back()) {
          // Parsed: } // namespace
          Enclosing.pop_back();
          Callback(EndNamespace, llvm::join(Enclosing, "::"));
        }
        BraceStack.pop_back();
      }
      break;
    case tok::semi:
      if (State == UsingNamespaceName)
        // Parsed: using namespace <name> ;
        Callback(UsingDirective, llvm::StringRef(NSName));
      State = Default;
      break;
    default:
      State = Default;
      break;
    }
  });
}
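
// Behavior sketch (illustrative): for the snippet
//   namespace clang { namespace clangd { using namespace llvm; } }
// the callback sequence is roughly:
//   BeginNamespace "clang", BeginNamespace "clang::clangd",
//   UsingDirective "llvm", EndNamespace "clang", EndNamespace "".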

// Returns the prefix namespaces of NS: {"" ... NS}.
llvm::SmallVector<llvm::StringRef, 8> ancestorNamespaces(llvm::StringRef NS) {
  llvm::SmallVector<llvm::StringRef, 8> Results;
  Results.push_back(NS.take_front(0));
  NS.split(Results, "::", /*MaxSplit=*/-1, /*KeepEmpty=*/false);
  for (llvm::StringRef &R : Results)
    R = NS.take_front(R.end() - NS.begin());
  return Results;
}
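
// Worked example (illustrative): ancestorNamespaces("clang::clangd") returns
// {"", "clang", "clang::clangd"}; each split piece is widened back into the
// full prefix that ends at that piece.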

} // namespace

std::vector<std::string> visibleNamespaces(llvm::StringRef Code,
                                           const format::FormatStyle &Style) {
  std::string Current;
  // Map from namespace to (resolved) namespaces introduced via using directive.
  llvm::StringMap<llvm::StringSet<>> UsingDirectives;

  parseNamespaceEvents(Code, Style,
                       [&](NamespaceEvent Event, llvm::StringRef NS) {
                         switch (Event) {
                         case BeginNamespace:
                         case EndNamespace:
                           Current = NS;
                           break;
                         case UsingDirective:
                           if (NS.consume_front("::"))
                             UsingDirectives[Current].insert(NS);
                           else {
                             for (llvm::StringRef Enclosing :
                                  ancestorNamespaces(Current)) {
                               if (Enclosing.empty())
                                 UsingDirectives[Current].insert(NS);
                               else
                                 UsingDirectives[Current].insert(
                                     (Enclosing + "::" + NS).str());
                             }
                           }
                           break;
                         }
                       });

  std::vector<std::string> Found;
  for (llvm::StringRef Enclosing : ancestorNamespaces(Current)) {
    Found.push_back(Enclosing);
    auto It = UsingDirectives.find(Enclosing);
    if (It != UsingDirectives.end())
      for (const auto &Used : It->second)
        Found.push_back(Used.getKey());
  }

  llvm::sort(Found, [&](const std::string &LHS, const std::string &RHS) {
    if (Current == RHS)
      return false;
    if (Current == LHS)
      return true;
    return LHS < RHS;
  });
  Found.erase(std::unique(Found.begin(), Found.end()), Found.end());
  return Found;
}
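
// Behavior sketch (illustrative): for a buffer ending inside
//   namespace clang { namespace clangd { using namespace llvm;
// visibleNamespaces() puts the innermost scope first and returns roughly
//   {"clang::clangd", "", "clang", "clang::clangd::llvm", "clang::llvm", "llvm"}
// i.e. every enclosing namespace plus the using-directive target resolved
// against each of them.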

llvm::StringSet<> collectWords(llvm::StringRef Content) {
  // We assume short words are not significant.
  // We may want to consider other stopwords, e.g. language keywords.
  // (A very naive implementation showed no benefit, but lexing might do better)
  static constexpr int MinWordLength = 4;

  std::vector<CharRole> Roles(Content.size());
  calculateRoles(Content, Roles);

  llvm::StringSet<> Result;
  llvm::SmallString<256> Word;
  auto Flush = [&] {
    if (Word.size() >= MinWordLength) {
      for (char &C : Word)
        C = llvm::toLower(C);
      Result.insert(Word);
    }
    Word.clear();
  };
  for (unsigned I = 0; I < Content.size(); ++I) {
    switch (Roles[I]) {
    case Head:
      Flush();
      LLVM_FALLTHROUGH;
    case Tail:
      Word.push_back(Content[I]);
      break;
    case Unknown:
    case Separator:
      Flush();
      break;
    }
  }
  Flush();

  return Result;
}
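
// Usage sketch (illustrative):
//   llvm::StringSet<> Words = collectWords("someIdentifier uses_snake_case");
//   // Words == {"some", "identifier", "uses", "snake", "case"}
// Camel-case humps and underscore-separated segments become lowercase words;
// anything shorter than MinWordLength is dropped.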

llvm::Optional<DefinedMacro> locateMacroAt(SourceLocation Loc,
                                           Preprocessor &PP) {
  const auto &SM = PP.getSourceManager();
  const auto &LangOpts = PP.getLangOpts();
  Token Result;
  if (Lexer::getRawToken(SM.getSpellingLoc(Loc), Result, SM, LangOpts, false))
    return None;
  if (Result.is(tok::raw_identifier))
    PP.LookUpIdentifierInfo(Result);
  IdentifierInfo *IdentifierInfo = Result.getIdentifierInfo();
  if (!IdentifierInfo || !IdentifierInfo->hadMacroDefinition())
    return None;

  std::pair<FileID, unsigned int> DecLoc = SM.getDecomposedExpansionLoc(Loc);
  // Get the definition just before the searched location so that a macro
  // referenced in a '#undef MACRO' can still be found.
  SourceLocation BeforeSearchedLocation =
      SM.getMacroArgExpandedLocation(SM.getLocForStartOfFile(DecLoc.first)
                                         .getLocWithOffset(DecLoc.second - 1));
  MacroDefinition MacroDef =
      PP.getMacroDefinitionAtLoc(IdentifierInfo, BeforeSearchedLocation);
  if (auto *MI = MacroDef.getMacroInfo())
    return DefinedMacro{IdentifierInfo->getName(), MI};
  return None;
}

llvm::Expected<std::string> Edit::apply() const {
  return tooling::applyAllReplacements(InitialCode, Replacements);
}

std::vector<TextEdit> Edit::asTextEdits() const {
  return replacementsToEdits(InitialCode, Replacements);
}

bool Edit::canApplyTo(llvm::StringRef Code) const {
  // Create line iterators; since line numbers matter when applying our edit,
  // we cannot skip blank lines.
  auto LHS = llvm::MemoryBuffer::getMemBuffer(Code);
  llvm::line_iterator LHSIt(*LHS, /*SkipBlanks=*/false);

  auto RHS = llvm::MemoryBuffer::getMemBuffer(InitialCode);
  llvm::line_iterator RHSIt(*RHS, /*SkipBlanks=*/false);

  // Compare the InitialCode we prepared the edit for with the Code we received
  // line by line to make sure there are no differences.
  // FIXME: This check is too conservative now; it should be enough to only
  // check lines around the replacements contained inside the Edit.
  while (!LHSIt.is_at_eof() && !RHSIt.is_at_eof()) {
    if (*LHSIt != *RHSIt)
      return false;
    ++LHSIt;
    ++RHSIt;
  }

  // After we reach EOF for either file, make sure the other one doesn't
  // contain any additional content except empty lines; those should not
  // interfere with the edit we produced.
  while (!LHSIt.is_at_eof()) {
    if (!LHSIt->empty())
      return false;
    ++LHSIt;
  }
  while (!RHSIt.is_at_eof()) {
    if (!RHSIt->empty())
      return false;
    ++RHSIt;
  }
  return true;
}

llvm::Error reformatEdit(Edit &E, const format::FormatStyle &Style) {
  if (auto NewEdits = cleanupAndFormat(E.InitialCode, E.Replacements, Style))
    E.Replacements = std::move(*NewEdits);
  else
    return NewEdits.takeError();
  return llvm::Error::success();
}

} // namespace clangd
} // namespace clang